code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Import the SlideSet base class
import math
from ..slidesets import RemarkSlideSet
##
# A special set of slides for creating cover page and contents
class MergeCoverSet(RemarkSlideSet):
    """A slide set that merges the table-of-contents entries of other
    slide sets into a single, combined contents listing."""
    ##
    # Extract the valid parameters for this object
    @staticmethod
    def validParams():
        params = RemarkSlideSet.validParams()
        params.addRequiredParam('slide_sets', 'A vector of slideset names to combine into a single contents')
        return params
    ##
    # Constructor: caches the warehouse of all slide sets and the list of
    # set names whose contents should be merged ('slide_sets' parameter).
    def __init__(self, name, params, **kwargs):
        RemarkSlideSet.__init__(self, name, params)
        # The SlideSetWarehouse containing every slide set in the presentation
        self.__warehouse = self.getParam('_warehouse')
        # Build a list of sets to merge
        self.__merge_list = self.getParam('slide_sets')
    ##
    # Search through all the slides in the specified slide sets for table of contents content
    # Returns the merged entries re-chunked into lists of at most
    # 'contents_items_per_slide' items (one inner list per contents slide).
    # NOTE(review): an empty 'slide_sets' list means "merge every other set
    # in the warehouse" -- confirm this fallback is intentional.
    def _extractContents(self):
        # Count the number of contents entries
        contents = []
        for obj in self.__warehouse.objects:
            if obj != self and (len(self.__merge_list) == 0 or obj.name() in self.__merge_list):
                pages = obj._extractContents()
                # Each page is itself a list of entries; flatten them all
                # into a single list before re-chunking below.
                for page in pages:
                    contents += page
        n = int(self.getParam('contents_items_per_slide'))
        output = [contents[i:i+n] for i in range(0, len(contents),n)]
        return output
| jinmm1992/moose | python/PresentationBuilder/slidesets/MergeCoverSet.py | Python | lgpl-2.1 | 1,263 |
from unittest import mock
import uuid
from django.test import TestCase
from django.conf import settings
from foobar import api, enums, models
from foobar.wallet import api as wallet_api
from utils.exceptions import InvalidTransition
from shop.tests.factories import ProductFactory
from wallet.tests.factories import WalletFactory, WalletTrxFactory
from wallet import enums as wallet_enums
from .factories import AccountFactory, CardFactory, PurchaseItemFactory
from moneyed import Money
from django.contrib.auth.models import User
class FoobarAPITest(TestCase):
    """Tests for the ``foobar.api`` facade: card/account lookup, card and
    cash purchases, purchase cancellation/finalization, wallet balance
    corrections, and deposits/withdrawals."""
    def test_get_card(self):
        # Retrieve an non-existent account
        obj1 = api.get_card(1337)
        self.assertIsNone(obj1)
        # Create an account
        CardFactory.create(number=1337)
        obj2 = api.get_card(1337)
        self.assertIsNotNone(obj2)
        self.assertIsNotNone(obj2.date_used)
        # Looking the card up again must bump its date_used timestamp.
        date_used = obj2.date_used
        obj2 = api.get_card(1337)
        self.assertGreater(obj2.date_used, date_used)
    def test_get_account(self):
        # Assure None when missing account
        id = uuid.uuid4()
        obj1 = api.get_account(account_id=id)
        self.assertIsNone(obj1)
        # Create an account
        account_obj = AccountFactory.create()
        obj2 = api.get_account(account_id=account_obj.id)
        # NOTE(review): this call uses the keyword ``id=`` while the call
        # above uses ``account_id=`` -- confirm api.get_account really
        # accepts both spellings.
        obj3 = api.get_account(id=account_obj.id)
        self.assertEqual(obj2.id, obj3.id)
    def test_get_account_by_card(self):
        # Retrieve an non-existent account
        obj1 = api.get_account_by_card(card_id=1337)
        self.assertIsNone(obj1)
        # Create an account
        CardFactory.create(number=1337)
        obj2 = api.get_account_by_card(card_id=1337)
        self.assertIsNotNone(obj2)
        # Exactly one account must be associated with the card.
        account_objs = models.Account.objects.filter(id=obj2.id)
        self.assertEqual(account_objs.count(), 1)
    def test_update_account(self):
        account_obj = AccountFactory.create()
        api.update_account(account_id=account_obj.id,
                           name='1337',
                           email='1337@foo.com')
        account = api.get_account(account_id=account_obj.id)
        # Test that correct fields are updated
        self.assertEqual('1337', account.name)
        self.assertEqual('1337@foo.com', account.email)
    def test_purchase(self):
        account_obj = AccountFactory.create()
        # Fund the account's wallet with 1000 SEK; the transaction only
        # counts toward the balance once FINALIZED.
        wallet_obj = WalletFactory.create(owner_id=account_obj.id)
        trx_obj = WalletTrxFactory.create(
            wallet=wallet_obj,
            amount=Money(1000, 'SEK')
        )
        trx_obj.set_status(wallet_enums.TrxStatus.PENDING)
        trx_obj.set_status(wallet_enums.TrxStatus.FINALIZED)
        product_obj1 = ProductFactory.create(
            code='1337733113370',
            name='Billys Original',
            price=Money(13, 'SEK')
        )
        product_obj2 = ProductFactory.create(
            code='7331733113370',
            name='Kebaba',
            price=Money(30, 'SEK')
        )
        # Basket total: 3 * 13 + 1 * 30 = 69 SEK.
        products = [
            (product_obj1.id, 3),
            (product_obj2.id, 1),
        ]
        purchase_obj, _ = api.create_purchase(account_obj.id, products)
        self.assertEqual(purchase_obj.status, enums.PurchaseStatus.PENDING)
        self.assertEqual(purchase_obj.amount, Money(69, 'SEK'))
        # Stock is decremented already while the purchase is PENDING.
        product_obj1.refresh_from_db()
        product_obj2.refresh_from_db()
        self.assertEqual(product_obj1.qty, -3)
        self.assertEqual(product_obj2.qty, -1)
        # Pending: buyer's wallet debited, main wallet not yet credited.
        _, balance = wallet_api.get_balance(account_obj.id)
        self.assertEqual(balance, Money(931, 'SEK'))
        _, balance = wallet_api.get_balance(settings.FOOBAR_MAIN_WALLET)
        self.assertEqual(balance, Money(0, 'SEK'))
        # Finalizing moves the money into the main wallet.
        purchase_obj = api.finalize_purchase(purchase_obj.pk)
        self.assertEqual(purchase_obj.status, enums.PurchaseStatus.FINALIZED)
        product_obj1.refresh_from_db()
        product_obj2.refresh_from_db()
        self.assertEqual(product_obj1.qty, -3)
        self.assertEqual(product_obj2.qty, -1)
        _, balance = wallet_api.get_balance(account_obj.id)
        self.assertEqual(balance, Money(931, 'SEK'))
        _, balance = wallet_api.get_balance(settings.FOOBAR_MAIN_WALLET)
        self.assertEqual(balance, Money(69, 'SEK'))
    def test_cancel_card_purchase(self):
        account_obj = AccountFactory.create()
        wallet_obj = WalletFactory.create(owner_id=account_obj.id)
        trx_obj = WalletTrxFactory.create(
            wallet=wallet_obj,
            amount=Money(1000, 'SEK')
        )
        trx_obj.set_status(wallet_enums.TrxStatus.PENDING)
        trx_obj.set_status(wallet_enums.TrxStatus.FINALIZED)
        product_obj1 = ProductFactory.create(
            code='1337733113370',
            name='Billys Original',
            price=Money(13, 'SEK')
        )
        product_obj2 = ProductFactory.create(
            code='7331733113370',
            name='Kebaba',
            price=Money(30, 'SEK')
        )
        products = [
            (product_obj1.id, 3),
            (product_obj2.id, 1),
        ]
        purchase_obj, _ = api.create_purchase(account_obj.id, products)
        api.cancel_purchase(purchase_obj.id)
        purchase_obj, _ = api.get_purchase(purchase_obj.id)
        self.assertEqual(purchase_obj.status, enums.PurchaseStatus.CANCELED)
        # Cancelling restores stock and refunds the wallet in full.
        product_obj1.refresh_from_db()
        product_obj2.refresh_from_db()
        self.assertEqual(product_obj1.qty, 0)
        self.assertEqual(product_obj2.qty, 0)
        _, balance = wallet_api.get_balance(account_obj.id)
        self.assertEqual(balance, Money(1000, 'SEK'))
        _, balance = wallet_api.get_balance(settings.FOOBAR_MAIN_WALLET)
        self.assertEqual(balance, Money(0, 'SEK'))
    def test_cancel_cash_purchase(self):
        # A cash purchase has no account (account_id=None).
        product_obj1 = ProductFactory.create(
            code='1337733113370',
            name='Billys Original',
            price=Money(13, 'SEK')
        )
        product_obj2 = ProductFactory.create(
            code='7331733113370',
            name='Kebaba',
            price=Money(30, 'SEK')
        )
        products = [
            (product_obj1.id, 3),
            (product_obj2.id, 1),
        ]
        purchase_obj, _ = api.create_purchase(None, products)
        api.cancel_purchase(purchase_obj.id)
        purchase_obj, _ = api.get_purchase(purchase_obj.id)
        self.assertEqual(purchase_obj.status, enums.PurchaseStatus.CANCELED)
        product_obj1.refresh_from_db()
        product_obj2.refresh_from_db()
        self.assertEqual(product_obj1.qty, 0)
        self.assertEqual(product_obj2.qty, 0)
        _, balance = wallet_api.get_balance(settings.FOOBAR_CASH_WALLET)
        self.assertEqual(balance, Money(0, 'SEK'))
    def test_cash_purchase(self):
        product_obj1 = ProductFactory.create(
            code='1337733113370',
            name='Billys Original',
            price=Money(13, 'SEK')
        )
        product_obj2 = ProductFactory.create(
            code='7331733113370',
            name='Kebaba',
            price=Money(30, 'SEK')
        )
        products = [
            (product_obj1.id, 3),
            (product_obj2.id, 1),
        ]
        purchase_obj, _ = api.create_purchase(account_id=None,
                                              products=products)
        self.assertEqual(purchase_obj.status, enums.PurchaseStatus.PENDING)
        product_obj1.refresh_from_db()
        product_obj2.refresh_from_db()
        self.assertEqual(product_obj1.qty, -3)
        self.assertEqual(product_obj2.qty, -1)
        # Cash lands in the cash wallet only on finalization.
        _, balance = wallet_api.get_balance(settings.FOOBAR_CASH_WALLET)
        self.assertEqual(balance, Money(0, 'SEK'))
        purchase_obj = api.finalize_purchase(purchase_obj.pk)
        self.assertEqual(purchase_obj.status, enums.PurchaseStatus.FINALIZED)
        _, balance = wallet_api.get_balance(settings.FOOBAR_CASH_WALLET)
        self.assertEqual(balance, Money(69, 'SEK'))
    def test_get_purchase(self):
        account_obj = AccountFactory.create()
        wallet_obj = WalletFactory.create(owner_id=account_obj.id)
        trx_obj = WalletTrxFactory.create(
            wallet=wallet_obj,
            amount=Money(1000, 'SEK')
        )
        trx_obj.set_status(wallet_enums.TrxStatus.PENDING)
        trx_obj.set_status(wallet_enums.TrxStatus.FINALIZED)
        product_obj1 = ProductFactory.create(
            code='1337733113370',
            name='Billys Original',
            price=Money(13, 'SEK')
        )
        products = [
            (product_obj1.id, 3),
        ]
        purchase_obj, _ = api.create_purchase(account_obj.id, products)
        obj, _ = api.get_purchase(purchase_obj.id)
        self.assertIsNotNone(obj)
    def test_list_purchases(self):
        account_obj = AccountFactory.create()
        wallet_obj = WalletFactory.create(owner_id=account_obj.id)
        trx_obj = WalletTrxFactory.create(
            wallet=wallet_obj,
            amount=Money(1000, 'SEK')
        )
        trx_obj.set_status(wallet_enums.TrxStatus.PENDING)
        trx_obj.set_status(wallet_enums.TrxStatus.FINALIZED)
        product_obj1 = ProductFactory.create(
            code='1337733113370',
            name='Billys Original',
            price=Money(13, 'SEK')
        )
        products = [
            (product_obj1.id, 3),
        ]
        api.create_purchase(account_obj.id, products)
        objs = api.list_purchases(account_obj.id)
        self.assertEqual(len(objs), 1)
    def test_calculation_correction(self):
        wallet_obj = WalletFactory.create()
        trx_obj = WalletTrxFactory.create(
            wallet=wallet_obj,
            amount=Money(1000, 'SEK')
        )
        trx_obj.set_status(wallet_enums.TrxStatus.PENDING)
        trx_obj.set_status(wallet_enums.TrxStatus.FINALIZED)
        user_obj = User.objects.create_superuser(
            'the_baconator', 'bacon@foobar.com', '123'
        )
        # Test positive balance change
        correction_obj = api.calculate_correction(
            new_balance=Money(1200, 'SEK'),
            user=user_obj,
            owner_id=wallet_obj.owner_id
        )
        self.assertEqual(correction_obj.wallet.owner_id, wallet_obj.owner_id)
        self.assertEqual(correction_obj.trx_type.value, 0)
        self.assertEqual(correction_obj.pre_balance.amount, 1000)
        self.assertEqual(correction_obj.amount.amount, 200)
        _, balance_correction = wallet_api.get_balance(
            correction_obj.wallet.owner_id
        )
        self.assertEqual(balance_correction.amount, 1200)
        # Test negative balance change
        correction_obj = api.calculate_correction(
            new_balance=Money(1000, 'SEK'),
            user=user_obj,
            owner_id=wallet_obj.owner_id
        )
        self.assertEqual(correction_obj.wallet.owner_id, wallet_obj.owner_id)
        self.assertEqual(correction_obj.trx_type.value, 0)
        self.assertEqual(correction_obj.pre_balance.amount, 1200)
        self.assertEqual(correction_obj.amount.amount, -200)
        _, balance_correction = wallet_api.get_balance(
            correction_obj.wallet.owner_id
        )
        self.assertEqual(balance_correction.amount, 1000)
        # Test when balance is the same = no change
        correction_obj = api.calculate_correction(
            new_balance=Money(1000, 'SEK'),
            user=user_obj,
            owner_id=wallet_obj.owner_id
        )
        self.assertEqual(correction_obj.wallet.owner_id, wallet_obj.owner_id)
        self.assertEqual(correction_obj.trx_type.value, 0)
        self.assertEqual(correction_obj.pre_balance.amount, 1000)
        self.assertEqual(correction_obj.amount.amount, 0)
        _, balance_correction = wallet_api.get_balance(
            correction_obj.wallet.owner_id
        )
        self.assertEqual(balance_correction.amount, 1000)
    def test_make_deposit_or_withdrawal(self):
        wallet_obj = WalletFactory.create()
        trx_obj = WalletTrxFactory.create(
            wallet=wallet_obj,
            amount=Money(1000, 'SEK')
        )
        trx_obj.set_status(wallet_enums.TrxStatus.PENDING)
        trx_obj.set_status(wallet_enums.TrxStatus.FINALIZED)
        user_obj = User.objects.create_superuser(
            'the_baconator', 'bacon@foobar.com', '123'
        )
        # Test a deposit
        correction_obj = api.make_deposit_or_withdrawal(
            amount=Money(100, 'SEK'),
            user=user_obj,
            owner_id=wallet_obj.owner_id
        )
        self.assertEqual(correction_obj.wallet.owner_id, wallet_obj.owner_id)
        self.assertEqual(correction_obj.trx_type, enums.TrxType.DEPOSIT)
        self.assertEqual(correction_obj.pre_balance.amount, 1000)
        self.assertEqual(correction_obj.amount.amount, 100)
        _, balance = wallet_api.get_balance(wallet_obj.owner_id)
        self.assertEqual(balance.amount, 1100)
        # Test a withdraw (negative amount)
        correction_obj = api.make_deposit_or_withdrawal(
            amount=Money(-50, 'SEK'),
            user=user_obj, owner_id=wallet_obj.owner_id
        )
        self.assertEqual(correction_obj.wallet.owner_id, wallet_obj.owner_id)
        self.assertEqual(correction_obj.trx_type, enums.TrxType.WITHDRAWAL)
        self.assertEqual(correction_obj.pre_balance.amount, 1100)
        self.assertEqual(correction_obj.amount.amount, -50)
        _, balance = wallet_api.get_balance(wallet_obj.owner_id)
        self.assertEqual(balance.amount, 1050)
        # Test when user tries to deposit or withdraw 0
        correction_obj = api.make_deposit_or_withdrawal(
            amount=Money(0, 'SEK'),
            user=user_obj,
            owner_id=wallet_obj.owner_id
        )
        self.assertEqual(correction_obj.wallet.owner_id, wallet_obj.owner_id)
        self.assertEqual(correction_obj.trx_type, enums.TrxType.CORRECTION)
        self.assertEqual(correction_obj.pre_balance.amount, 1050)
        self.assertEqual(correction_obj.amount.amount, 0)
        _, balance = wallet_api.get_balance(wallet_obj.owner_id)
        self.assertEqual(balance.amount, 1050)
    def test_finalize_pending_purchase(self):
        account_obj = AccountFactory.create()
        wallet_obj = WalletFactory.create(owner_id=account_obj.pk)
        trx_obj = WalletTrxFactory.create(
            wallet=wallet_obj,
            amount=Money(1000, 'SEK')
        )
        trx_obj.set_status(wallet_enums.TrxStatus.PENDING)
        trx_obj.set_status(wallet_enums.TrxStatus.FINALIZED)
        product_obj1 = ProductFactory.create(
            code='8437438439393',
            name='Fat',
            price=Money(42, 'SEK')
        )
        # Total: 3 * 42 = 126 SEK.
        products = [(product_obj1.pk, 3)]
        pending_obj, _ = api.create_purchase(
            account_id=account_obj.pk,
            products=products
        )
        self.assertIsNotNone(pending_obj.status)
        self.assertEqual(pending_obj.status, enums.PurchaseStatus.PENDING)
        # One state recorded so far (PENDING); finalization adds a second.
        self.assertEqual(pending_obj.states.count(), 1)
        self.assertEqual(pending_obj.account.pk, account_obj.pk)
        self.assertEqual(pending_obj.amount.amount, 126)
        _, balance = wallet_api.get_balance(wallet_obj.owner_id)
        self.assertEqual(balance, Money(874, 'SEK'))
        finalized_obj = api.finalize_purchase(purchase_id=pending_obj.pk)
        self.assertIsNotNone(finalized_obj.status)
        self.assertEqual(finalized_obj.status, enums.PurchaseStatus.FINALIZED)
        self.assertEqual(finalized_obj.states.count(), 2)
        self.assertEqual(finalized_obj.account.pk, account_obj.pk)
        self.assertEqual(finalized_obj.amount.amount, 126)
        _, balance = wallet_api.get_balance(wallet_obj.owner_id)
        self.assertEqual(balance, Money(874, 'SEK'))
        _, balance = wallet_api.get_balance(settings.FOOBAR_MAIN_WALLET)
        self.assertEqual(balance, Money(126, 'SEK'))
        # The purchase item records per-unit amount and quantity.
        self.assertEqual(finalized_obj.items.count(), 1)
        item = finalized_obj.items.first()
        self.assertEqual(item.qty, 3)
        self.assertEqual(item.amount.amount, 42)
        self.assertEqual(item.product_id, product_obj1.pk)
    # Decorators apply bottom-up: cancel_purchase is the first mock argument.
    @mock.patch('foobar.api.finalize_purchase')
    @mock.patch('foobar.api.cancel_purchase')
    def test_update_purchase_status(self, mock_cancel_purchase,
                                    mock_finalize_purchase):
        # FINALIZED dispatches to finalize_purchase...
        item1 = PurchaseItemFactory()
        purchase1 = item1.purchase
        api.update_purchase_status(purchase1.id,
                                   enums.PurchaseStatus.FINALIZED)
        mock_finalize_purchase.assert_called_once_with(purchase1.id)
        # ...CANCELED dispatches to cancel_purchase...
        item2 = PurchaseItemFactory()
        purchase2 = item2.purchase
        api.update_purchase_status(purchase2.id,
                                   enums.PurchaseStatus.CANCELED)
        mock_cancel_purchase.assert_called_once_with(purchase2.id)
        # ...and moving back to PENDING is an invalid transition.
        item3 = PurchaseItemFactory()
        purchase3 = item3.purchase
        with self.assertRaises(InvalidTransition):
            api.update_purchase_status(purchase3.id,
                                       enums.PurchaseStatus.PENDING)
| uppsaladatavetare/foobar-api | src/foobar/tests/test_api.py | Python | mit | 17,101 |
# Public star-import surface of the dialects package.
# NOTE: the previous entry 'DEFAULT' named a symbol this module never
# defines (the module-level default is the private ``_DEFAULT``), so
# ``from pygcode.dialects import *`` raised AttributeError.  Export the
# accessor functions instead.
__all__ = [
    # default dialect accessors
    'get_default',
    'set_default',
    # registration decorators
    'gcode_dialect',
    'word_dialect',
    # dialects
    'linuxcnc',
    'reprap',
]
# Registration decorators
from .mapping import gcode_dialect
from .mapping import word_dialect
# Dialects
from . import linuxcnc
from . import reprap
# Name of the dialect used when a caller does not specify one explicitly.
_DEFAULT = 'linuxcnc'


def get_default():
    """
    Get the default gcode interpreter dialect.
    (see :meth:`set_default` for details)

    :return: name of the current default dialect
    :rtype: :class:`str`
    """
    return _DEFAULT


def set_default(name):
    """
    Set the default gcode interpreter dialect.
    This dialect will be used if no other is specified for particular function
    calls.
    :param name: name of dialect
    :type name: :class:`str`
    .. doctest::
        >>> from pygcode import dialect
        >>> dialect.get_default()
        'linuxcnc'
        >>> dialect.set_default('reprap')
        >>> dialect.get_default()
        'reprap'
    """
    # TODO: verify valid name
    # Rebind the module-level default.  Without the ``global`` declaration
    # the assignment below only created a function-local variable, so the
    # default silently never changed (and the doctest above was wrong).
    global _DEFAULT
    _DEFAULT = name
| fragmuffin/pygcode | src/pygcode/dialects/__init__.py | Python | gpl-3.0 | 968 |
# ----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from ..hooks import exec_statement
# NOTE: This function requires PyInstaller to be on the default "sys.path" for
# the called Python process. Running py.test changes the working dir to a temp
# dir, so PyInstaller should be installed via either "setup.py install" or
# "setup.py develop" before running py.test.
def get_pywin32_module_file_attribute(module_name):
    """
    Return the absolute path of the PyWin32 DLL backing the PyWin32 module
    with the passed name.

    Importing a PyWin32 module loads a DLL specific to that module and then
    rewrites the module's attributes -- including ``__file__`` -- with values
    describing that DLL.  The module is therefore imported in a separate,
    PyWin32-aware Python process (via ``exec_statement``) and the resulting
    ``__file__`` value is returned.

    Parameters
    ----------
    module_name : str
        Fully-qualified name of the PyWin32 module.

    Returns
    ----------
    str
        Absolute path of that module's DLL.

    See Also
    ----------
    `PyInstaller.utils.win32.winutils.import_pywin32_module()`
        For further details.
    """
    # Code executed in the child interpreter; the child's stdout (the
    # printed __file__) becomes this function's return value.
    code_template = """
    from PyInstaller.utils.win32 import winutils
    module = winutils.import_pywin32_module('%s')
    print(module.__file__)
    """
    return exec_statement(code_template % module_name)
# Explicit public API of this hook-utility module.
__all__ = ('get_pywin32_module_file_attribute', )
| ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/utils/hooks/win32.py | Python | gpl-3.0 | 1,876 |
"""
A left-corner parser.
"""
import pyactr as actr
from ete3 import Tree
environment = actr.Environment(focus_position=(320, 180))
actr.chunktype("parsing_goal",
"task stack_top stack_bottom parsed_word right_frontier")
actr.chunktype("parse_state",
"node_cat mother daughter1 daughter2 lex_head")
actr.chunktype("word", "form cat")
parser = actr.ACTRModel(environment)
dm = parser.decmem
g = parser.goal
imaginal = parser.set_goal(name="imaginal", delay=0)
dm.add(actr.chunkstring(string="""
isa word
form 'Mary'
cat 'ProperN'
"""))
dm.add(actr.chunkstring(string="""
isa word
form 'Bill'
cat 'ProperN'
"""))
dm.add(actr.chunkstring(string="""
isa word
form 'likes'
cat 'V'
"""))
g.add(actr.chunkstring(string="""
isa parsing_goal
task read_word
stack_top 'S'
right_frontier 'S'
"""))
parser.productionstring(name="press spacebar", string="""
=g>
isa parsing_goal
task read_word
stack_top ~None
?manual>
state free
==>
+manual>
isa _manual
cmd 'press_key'
key 'space'
""")
parser.productionstring(name="encode word", string="""
=g>
isa parsing_goal
task read_word
=visual>
isa _visual
value =val
==>
=g>
isa parsing_goal
task get_word_cat
parsed_word =val
~visual>
""")
parser.productionstring(name="retrieve category", string="""
=g>
isa parsing_goal
task get_word_cat
parsed_word =w
==>
+retrieval>
isa word
form =w
=g>
isa parsing_goal
task retrieving_word
""")
parser.productionstring(name="shift and project word", string="""
=g>
isa parsing_goal
task retrieving_word
stack_top =t
stack_bottom None
=retrieval>
isa word
form =w
cat =c
==>
=g>
isa parsing_goal
task parsing
stack_top =c
stack_bottom =t
+imaginal>
isa parse_state
node_cat =c
daughter1 =w
~retrieval>
""")
parser.productionstring(name="project: NP ==> ProperN", string="""
=g>
isa parsing_goal
stack_top 'ProperN'
stack_bottom ~'NP'
right_frontier =rf
parsed_word =w
==>
=g>
isa parsing_goal
stack_top 'NP'
+imaginal>
isa parse_state
node_cat 'NP'
daughter1 'ProperN'
mother =rf
lex_head =w
""")
parser.productionstring(name="project and complete: NP ==> ProperN", string="""
=g>
isa parsing_goal
stack_top 'ProperN'
stack_bottom 'NP'
right_frontier =rf
parsed_word =w
==>
=g>
isa parsing_goal
task read_word
stack_top None
stack_bottom None
+imaginal>
isa parse_state
node_cat 'NP'
daughter1 'ProperN'
mother =rf
lex_head =w
""")
parser.productionstring(name="project and complete: S ==> NP VP", string="""
=g>
isa parsing_goal
stack_top 'NP'
stack_bottom 'S'
==>
=g>
isa parsing_goal
task read_word
stack_top 'VP'
stack_bottom None
right_frontier 'VP'
+imaginal>
isa parse_state
node_cat 'S'
daughter1 'NP'
daughter2 'VP'
""")
parser.productionstring(name="project and complete: VP ==> V NP", string="""
=g>
isa parsing_goal
task parsing
stack_top 'V'
stack_bottom 'VP'
==>
=g>
isa parsing_goal
task read_word
stack_top 'NP'
stack_bottom None
+imaginal>
isa parse_state
node_cat 'VP'
daughter1 'V'
daughter2 'NP'
""")
parser.productionstring(name="finished", string="""
=g>
isa parsing_goal
task read_word
stack_top None
==>
~g>
~imaginal>
""")
if __name__ == "__main__":
    # One word per screen; each spacebar press advances to the next word.
    stimuli = [{1: {'text': 'Mary', 'position': (320, 180)}},
               {1: {'text': 'likes', 'position': (320, 180)}},
               {1: {'text': 'Bill', 'position': (320, 180)}}]
    parser_sim = parser.simulation(
        realtime=True,
        gui=False,
        environment_process=environment.environment_process,
        stimuli=stimuli,
        triggers='space')
    # 1.1 s of simulated time -- enough for the three-word sentence.
    parser_sim.run(1.1)
    # Flatten declarative memory into (chunk, activation-time) pairs,
    # ordered by the time of (re)activation.
    sortedDM = sorted(([item[0], time] for item in dm.items()\
                       for time in item[1]),\
                      key=lambda item: item[1])
    print("\nParse states in declarative memory at the end of the simulation",
          "\nordered by time of (re)activation:")
    for chunk in sortedDM:
        if chunk[0].typename == "parse_state":
            print(chunk[1], "\t", chunk[0])
    print("\nWords in declarative memory at the end of the simulation",
          "\nordered by time of (re)activation:")
    for chunk in sortedDM:
        if chunk[0].typename == "word":
            print(chunk[1], "\t", chunk[0])
    # Reconstruct the parse tree from the parse_state chunks left in
    # declarative memory.  Node labels are "<category> <activation time>".
    # NOTE: reads the module-level ``dm``; defined inside __main__ only.
    def final_tree(sortedDM):
        tree_list = []
        # parse_state chunks that actually carry structure (a daughter).
        parse_states = [chunk for chunk in sortedDM\
                        if chunk[0].typename == "parse_state" and\
                        chunk[0].daughter1 != None]
        words = set(str(chunk[0].form) for chunk in sortedDM\
                    if chunk[0].typename == "word")
        # Breadth-first expansion starting from the S node(s).
        nodes = [chunk for chunk in parse_states
                 if chunk[0].node_cat == "S"]
        while nodes:
            current_chunk = nodes.pop(0)
            current_node = str(current_chunk[0].node_cat) + " " +\
                           str(current_chunk[1])
            current_tree = Tree(name=current_node)
            if current_chunk[0].daughter2 != None:
                child_categs = [current_chunk[0].daughter1,\
                                current_chunk[0].daughter2]
            else:
                child_categs = [current_chunk[0].daughter1]
            children = []
            for cat in child_categs:
                # NPs are matched to their mother via the ``mother`` slot.
                if cat == 'NP':
                    chunkFromCat = [chunk for chunk in parse_states\
                                    if chunk[0].node_cat == cat and\
                                    chunk[0].mother ==\
                                    current_chunk[0].node_cat]
                    if chunkFromCat:
                        children += chunkFromCat
                        current_child = str(chunkFromCat[-1][0].node_cat)\
                                        + " " + str(chunkFromCat[-1][1])
                        current_tree.add_child(name=current_child)
                # ProperNs are matched via the lexical head of the mother.
                elif cat == 'ProperN':
                    chunkFromCat = [chunk for chunk in parse_states if\
                                    chunk[0].node_cat == cat and\
                                    chunk[0].daughter1 ==\
                                    current_chunk[0].lex_head]
                    if chunkFromCat:
                        children += chunkFromCat
                        current_child = str(chunkFromCat[-1][0].node_cat)\
                                        + " " + str(chunkFromCat[-1][1])
                        current_tree.add_child(name=current_child)
                # Terminals: label with the word's last activation time.
                elif cat in words:
                    last_act_time = [chunk[1][-1]
                                     for chunk in dm.items()\
                                     if chunk[0].typename == "word"\
                                     and str(chunk[0].form) == cat]
                    current_child = cat + " " + str(last_act_time[0])
                    current_tree.add_child(name=current_child)
                # Any other category: match by category name alone.
                else:
                    chunkFromCat = [chunk for chunk in parse_states\
                                    if chunk[0].node_cat == cat]
                    if chunkFromCat:
                        children += chunkFromCat
                        current_child = str(chunkFromCat[-1][0].node_cat)\
                                        + " " + str(chunkFromCat[-1][1])
                        current_tree.add_child(name=current_child)
            tree_list.append(current_tree)
            nodes += children
        # Stitch the per-node subtrees together: repeatedly replace leaves
        # by the subtree carrying the same label.
        # NOTE(review): local ``final_tree`` shadows this function's name.
        final_tree = tree_list[0]
        tree_list.remove(final_tree)
        while tree_list:
            leaves = final_tree.get_leaves()
            for leaf in leaves:
                subtree_list = [tree for tree in tree_list\
                                if tree.name == leaf.name]
                if subtree_list:
                    subtree = subtree_list[0]
                    tree_list.remove(subtree)
                    leaf.add_sister(subtree)
                    leaf.detach()
        return final_tree
    print("\nFinal tree:")
    print(final_tree(sortedDM).get_ascii(compact=False))
| jakdot/pyactr | tutorials/forbook/code/ch4_leftcorner_parser.py | Python | gpl-3.0 | 9,243 |
class Error(Exception):
    """Root of this package's exception hierarchy."""

    # Human-readable description; subclasses override or assign it.
    message = None

    def __str__(self):
        text = self.message
        return str(text)
class UplinkBuilderError(Error):
    """Building a service failed; wraps the underlying error together with
    the ``Class.definition`` name that triggered it."""

    # Template filled with (qualified definition name, wrapped error).
    message = "`%s`: %s"

    def __init__(self, class_name, definition_name, error):
        self.error = error
        qualified_name = class_name + "." + definition_name
        self.message = self.message % (qualified_name, error)
class InvalidRequestDefinition(Error):
    """Something went wrong when building the request definition."""
class AnnotationError(Error):
    """Something went wrong with an annotation."""
| prkumar/uplink | uplink/exceptions.py | Python | mit | 652 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyscreeze(PythonPackage):
    """PyScreeze can take screenshots, save them to files, and
    locate images within the screen. This is useful if you have
    a small image of, say, a button that needs to be clicked
    and want to locate it on the screen."""
    homepage = "https://github.com/asweigart/pyscreeze"
    pypi = "PyScreeze/PyScreeze-0.1.27.tar.gz"
    version('0.1.27', sha256='cba2f264fe4b6c70510061cb2ba6e1da0e3bfecfdbe8a3b2cd6305a2afda9e6b')
    # Python 2.7 or 3.2+ (matches upstream's declared support).
    depends_on('python@2.7:2,3.2:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # Imaging library used for screenshot handling.
    depends_on('pil', type=('build', 'run'))
    # NOTE(review): scrot is presumably the external screenshot utility
    # PyScreeze shells out to at runtime on Linux -- confirm upstream.
    depends_on('scrot', type='run')
| LLNL/spack | var/spack/repos/builtin/packages/py-pyscreeze/package.py | Python | lgpl-2.1 | 879 |
# This file provides the installation of the python library 'gfdl'
# Suggested installation procedure:
# $ cd $GFDL_BASE/src/extra/python
# $ pip install -e .
# This installs the package in *development mode* i.e. any changes you make to the python files
# or any additional files you add will be immediately available.
# In a new python console, from any directory, you can now access the execlim code:
# >>> from gfdl import experiment
# >>> exp = experiment.Experiment()
# ...
# Use setuptools rather than distutils: ``install_requires`` is a
# setuptools feature (plain distutils silently ignores dependency
# metadata), and distutils itself was deprecated by PEP 632 and removed
# in Python 3.12.  setuptools' ``setup`` is a drop-in replacement here.
from setuptools import setup

setup(name='GFDL',
      version='0.1',
      description='GFDL utilities for running experiments and performing data analysis',
      author='James Penn',
      url='https://github.com/ExeClim/GFDLMoistModel',
      packages=['gfdl'],
      # Runtime dependencies installed alongside the package.
      install_requires=[
        'sh',
        'jinja2',
        'f90nml',
        'numpy',
        'pandas',
        'xarray'
      ]
      )
# evolve.utils
# Helper functions
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sat May 03 00:16:21 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: utils.py [] benjamin@bengfort.com $
"""
Helper functions
"""
##########################################################################
## Imports
##########################################################################
import os
import json
import yaml
import random
from swarm.params import AllyParameters
##########################################################################
## File system helpers
##########################################################################
def relpath(module, path):
    """
    Resolve ``path`` relative to the directory containing ``module``
    (typically a ``__file__`` value), not the current working directory.
    """
    base = os.path.dirname(module)
    return os.path.normpath(os.path.join(base, path))
##########################################################################
## Parsing helpers
##########################################################################
def parse_fitness(path):
    """
    Read a fitness (JSON) file and return its integer fitness value.
    """
    with open(path, 'r') as handle:
        data = json.load(handle)
    return int(data['result']['fitness'])
def parse_genotype(path):
    """
    Extracts the genotype out of a configuration yaml file.
    """
    # safe_load parses plain-data YAML (all a genotype config needs) while
    # refusing arbitrary Python object construction; bare yaml.load with no
    # Loader is deprecated and unsafe on untrusted files.
    with open(path, 'r') as conf:
        return yaml.safe_load(conf)
def export_genotype(genotype, path=None):
    """
    Build an ally configuration from ``genotype`` and, when ``path`` is
    given, dump it to that file.  Returns the configuration object.
    """
    params = AllyParameters()
    params.configure(genotype)
    if path:
        params.dump_file(path)
    return params
##########################################################################
## Testing helpers
##########################################################################
def random_fitness(generation, confpath):
    """
    Write a random ``.fit`` file for every ``<gen>_<ind>.yaml`` config of
    the given generation in ``confpath``, so the evolver can be exercised
    without running a long simulation.
    """
    for name in os.listdir(confpath):
        base, ext = os.path.splitext(name)
        if ext != '.yaml':
            continue
        gen, ind = [int(part) for part in base.split('_')]
        if gen != generation:
            continue
        # Random but internally consistent result: the home stash equals
        # the fitness, the enemy stash takes the rest of a random total.
        score = random.randrange(0, 400)
        payload = {
            'result': {
                'fitness': score,
                'run_time': random.randrange(174, 314),
                'iterations': 10000,
                'home_stash': score,
                'enemy_stash': random.randrange(200, 800) - score,
            }
        }
        fitpath = os.path.join(confpath, base + ".fit")
        with open(fitpath, 'w') as fit:
            json.dump(payload, fit, indent=4)
| mclumd/swarm-simulator | evolve/utils.py | Python | mit | 2,768 |
from echidna.util import root_help
import rat
from ROOT import RAT
from ROOT import TChain
import math
import echidna.core.spectra as spectra
def _scint_weights(times, T):
"""**CURRENTLY DISABLED**
This method applies to the scintillator backgrounds.
It produces the list of weights relative to each time period.
The calculation of weights is based on radioactive decay formula.
Args:
times (*list* of *int*): Time periods
T (float): The Half-life of a studied background
Returns:
Weights (*list* of *float*)
"""
weights = []
for time in times:
weights.append(math.exp(-time/T))
return (weights)
def _av_weights(times, T):
"""**UNAVAILABLE**
This method applies to the backgrounds due to AV leaching.
It produces the list of weights relative to each time period.
The calculation of weights is based on radioactive decay formula.
Args:
times (*list* of *int*): Time periods
T (float): The Half-life of a studied background
Returns:
Weights (*list* of *float*)
"""
weights = []
for time in times:
weights.append(1.0)
return (weights)
def fill_reco_spectrum(filename, T, spectrumname="", spectrum=None):
    """**Weights have been disabled.**
    This function fills in the ndarray of energies, radii, times
    and weights. It takes the reconstructed energies and positions
    of the events from the root file. In order to keep the statistics,
    the time dependence is performed via adding weights to every event
    depending on the time period. Both, studied time and Half-life must
    be written in the same units.
    Args:
      filename (str): A root file to study
      T (float): The Half-life of a studied background
      spectrumname (str, optional): A name of future spectrum. Not
        required when appending a spectrum.
      spectrum (:class:`echidna.core.spectra.Spectra`, optional):
        Spectrum you wish to append. Not required when creating a
        new spectrum.
    Raises:
      ValueError: If spectrumname is not set when creating a new
        spectrum.
    Returns:
      spectrum (:class:`echidna.core.spectra.Spectra`)
    """
    print filename
    print spectrumname
    dsreader = RAT.DU.DSReader(filename)
    if spectrum is None:
        if spectrumname == "":
            raise ValueError("Name not set when creating new spectra.")
        # NOTE(review): num_decays is scaled by 10 -- presumably one fill
        # per time bin; confirm against Spectra's binning.
        spectrum = spectra.Spectra(str(spectrumname),
                                   10.*dsreader.GetEntryCount())
    else:
        spectrum._num_decays += 10.*dsreader.GetEntryCount()
        spectrumname = spectrum._name
    print spectrumname
    # NOTE(review): starts with an extra t=0 entry, unlike fill_mc_spectrum
    # which starts with an empty list -- confirm which is intended.
    times = [0]
    for time_step in range(0, spectrum._time_bins):
        time = time_step * spectrum._time_width + spectrum._time_low
        times.append(time)
    if 'AV' in spectrumname:
        print "AV WEIGHTS ARE CURRENTLY UNAVAILABLE"
        weights = _av_weights(times, T)
    else:
        weights = _scint_weights(times, T)
    # NOTE(review): the inner loop shadows ``ievent`` from the outer loop.
    for ievent in range(0, dsreader.GetEntryCount()):
        ds = dsreader.GetEntry(ievent)
        for ievent in range(0, ds.GetEVCount()):
            ev = ds.GetEV(ievent)
            # Skip events without a valid fitted vertex carrying energy.
            if not ev.DefaultFitVertexExists() or not ev.GetDefaultFitVertex().ContainsEnergy() or not ev.GetDefaultFitVertex().ValidEnergy():
                continue
            energy = ev.GetDefaultFitVertex().GetEnergy()
            position = ev.GetDefaultFitVertex().GetPosition().Mag()
            spectrum._raw_events += 1
            # Weights are currently disabled: every time bin is filled with
            # weight 1. instead of ``weight``.
            for time, weight in zip(times, weights):
                try:
                    spectrum.fill(energy, position, time, 1.)
                except ValueError:
                    # Event outside the spectrum's binning range -- skipped.
                    pass
    return spectrum
def fill_mc_spectrum(filename, T, spectrumname="", spectrum=None):
    """**Weights have been disabled.**

    Fills a :class:`echidna.core.spectra.Spectra` with the Monte-Carlo
    truth (scintillator-quenched energy deposit and true vertex radius)
    of every event in a RAT root file.  Decay-time weights derived from
    the half-life ``T`` are still computed but each fill currently uses
    a weight of 1.0 (see inline notes).

    Args:
      filename (str): A root file to study
      T (float): The Half-life of a studied background
      spectrumname (str, optional): A name of future spectrum. Not
        required when appending a spectrum.
      spectrum (:class:`echidna.core.spectra.Spectra`, optional):
        Spectrum you wish to append. Not required when creating a
        new spectrum.

    Raises:
      ValueError: If spectrumname is not set when creating a new
        spectrum.

    Returns:
      spectrum (:class:`echidna.core.spectra.Spectra`)
    """
    print filename
    print spectrumname
    dsreader = RAT.DU.DSReader(filename)
    if spectrum is None:
        if spectrumname == "":
            raise ValueError("Name not set when creating new spectra.")
        # New spectrum sized for one fill per time bin per entry
        # (factor 10 assumed to match the number of time bins --
        # TODO confirm).
        spectrum = spectra.Spectra(str(spectrumname),
                                   10.*dsreader.GetEntryCount())
    else:
        spectrum._num_decays += 10.*dsreader.GetEntryCount()
        spectrumname = spectrum._name
    print spectrumname
    # Lower edge of every time bin.  NOTE(review): the reconstructed-event
    # filler above seeds this list with [0]; here it starts empty --
    # confirm which is intended.
    times = []
    for time_step in range(0, spectrum._time_bins):
        time = time_step * spectrum._time_width + spectrum._time_low
        times.append(time)
    if 'AV' in spectrumname:
        print "AV WEIGHTS ARE CURRENTLY UNAVAILABLE"
        weights = _av_weights(times, T)
    else:
        weights = _scint_weights(times, T)
    for ievent in range(0, dsreader.GetEntryCount()):
        ds = dsreader.GetEntry(ievent)
        mc = ds.GetMC()
        if mc.GetMCParticleCount() > 0:
            energy = mc.GetScintQuenchedEnergyDeposit()
            # Radial distance of the first MC particle's vertex.
            position = mc.GetMCParticle(0).GetPosition().Mag()
            spectrum._raw_events += 1
            # Weights are disabled: every time bin is filled with 1.0
            # and the computed `weight` is unused.
            for time, weight in zip(times, weights):
                try:
                    spectrum.fill(energy, position, time, 1.)
                except ValueError:
                    # Value falls outside the spectrum's binning; skip.
                    pass
    return spectrum
def fill_reco_ntuple_spectrum(filename, T, spectrumname="", spectrum=None):
    """**Weights have been disabled.**

    Fills a :class:`echidna.core.spectra.Spectra` with the reconstructed
    energy and radial position of every successfully fitted event in an
    ntuple file.  Decay-time weights derived from the half-life ``T``
    are still computed but each fill currently uses a weight of 1.0
    (see inline notes).

    Args:
      filename (str): The ntuple to study
      T (float): The Half-life of a studied background
      spectrumname (str, optional): A name of future spectrum. Not
        required when appending a spectrum.
      spectrum (:class:`echidna.core.spectra.Spectra`, optional):
        Spectrum you wish to append. Not required when creating a
        new spectrum.

    Raises:
      ValueError: If spectrumname is not set when creating a new
        spectrum.

    Returns:
      spectrum (:class:`echidna.core.spectra.Spectra`)
    """
    print filename
    chain = TChain("output")
    chain.Add(filename)
    if spectrum is None:
        if spectrumname == "":
            raise ValueError("Name not set when creating new spectra.")
        # Sized with the same factor-10 convention as the other fillers.
        spectrum = spectra.Spectra(str(spectrumname), 10.*chain.GetEntries())
    else:
        spectrum._num_decays += 10.*chain.GetEntries()
        spectrumname = spectrum._name
    print spectrumname
    # Lower edge of every time bin.
    times = []
    for time_step in range(0, spectrum._time_bins):
        time = time_step * spectrum._time_width + spectrum._time_low
        times.append(time)
    if 'AV' in spectrumname:
        print "AV WEIGHTS ARE CURRENTLY UNAVAILABLE"
        weights = _av_weights(times, T)
    else:
        weights = _scint_weights(times, T)
    for event in chain:
        # Skip events without a successful scintillator fit.
        if event.scintFit == 0:
            continue
        energy = event.energy
        # Radial distance of the reconstructed vertex from the centre.
        position = math.fabs(math.sqrt((event.posx)**2 +
                                       (event.posy)**2 + (event.posz)**2))
        spectrum._raw_events += 1
        # Weights are disabled: every time bin is filled with 1.0 and
        # the computed `weight` is unused.
        for time, weight in zip(times, weights):
            try:
                spectrum.fill(energy, position, time, 1.)
            except ValueError:
                # Value falls outside the spectrum's binning; skip.
                pass
    return spectrum
def fill_mc_ntuple_spectrum(filename, T, spectrumname="", spectrum=None):
    """**Weights have been disabled.**

    Fills a :class:`echidna.core.spectra.Spectra` with the Monte-Carlo
    truth (quenched energy deposit and true vertex radius) of every
    event in an ntuple file.  Decay-time weights derived from the
    half-life ``T`` are still computed but each fill currently uses a
    weight of 1.0 (see inline notes).

    Args:
      filename (str): The ntuple to study
      T (float): The Half-life of a studied background
      spectrumname (str, optional): A name of future spectrum. Not
        required when appending a spectrum.
      spectrum (:class:`echidna.core.spectra.Spectra`, optional):
        Spectrum you wish to append. Not required when creating a
        new spectrum.

    Raises:
      ValueError: If spectrumname is not set when creating a new
        spectrum.

    Returns:
      spectrum (:class:`echidna.core.spectra.Spectra`)
    """
    print filename
    chain = TChain("output")
    chain.Add(filename)
    if spectrum is None:
        if spectrumname == "":
            raise ValueError("Name not set when creating new spectra.")
        # Sized with the same factor-10 convention as the other fillers.
        spectrum = spectra.Spectra(str(spectrumname), 10.*chain.GetEntries())
    else:
        spectrum._num_decays += 10.*chain.GetEntries()
        spectrumname = spectrum._name
    print spectrumname
    # Lower edge of every time bin.
    times = []
    for time_step in range(0, spectrum._time_bins):
        time = time_step * spectrum._time_width + spectrum._time_low
        times.append(time)
    if 'AV' in spectrumname:
        print "AV WEIGHTS ARE CURRENTLY UNAVAILABLE"
        weights = _av_weights(times, T)
    else:
        weights = _scint_weights(times, T)
    for event in chain:
        energy = event.mcEdepQuenched
        # Radial distance of the true vertex from the detector centre.
        # NOTE(review): math.fabs() is redundant here -- math.sqrt()
        # never returns a negative value.
        position = math.fabs(math.sqrt((event.mcPosx)**2 +
                                       (event.mcPosy)**2 + (event.mcPosz)**2))
        spectrum._raw_events += 1
        # Weights are disabled: every time bin is filled with 1.0 and
        # the computed `weight` is unused.
        for time, weight in zip(times, weights):
            try:
                spectrum.fill(energy, position, time, 1.)
            except ValueError:
                # Value falls outside the spectrum's binning; skip.
                pass
    return spectrum
| EdLeming/echidna | echidna/core/fill_spectrum.py | Python | mit | 10,569 |
#!/usr/bin/env python
import os
import random
import matplotlib
import matplotlib.pyplot as plt
def random_walk(start, steps):
    """Simulate a symmetric one-dimensional random walk.

    Extracted from main() so the walk parameters are no longer
    hard-coded and the simulation can be tested without plotting.

    Args:
        start: initial position of the walker.
        steps: number of unit steps to take.

    Returns:
        A list of ``steps + 1`` positions beginning with ``start``;
        each subsequent entry differs from its predecessor by exactly
        +1 or -1.
    """
    value = start
    values = [value]
    # range() (rather than the Python-2-only xrange()) keeps the loop
    # working on both Python 2 and 3.
    for _ in range(steps):
        # Fair coin flip: step up or down with equal probability.
        value += random.choice((-1, 1))
        values.append(value)
    return values


def main():
    """Plot a 10000-step random walk starting at position 100."""
    values = random_walk(100, 10000)
    plt.plot(range(len(values)), values)
    plt.show()


if __name__ == "__main__":
    main()
| tobegit3hub/ml_implementation | others/random_walk/random_walk.py | Python | mit | 400 |
import os, tempfile
from seamless.highlevel import Context, Cell
ctx = Context()
ctx.transform = lambda a,b: a + b
ctx.transform.a = 2
ctx.transform.b = 3
ctx.translate()
ctx.transform.example.a = 0
ctx.transform.example.b = 0
ctx.result = ctx.transform
ctx.result.celltype = "plain"
ctx.compute()
print(ctx.result.value)
print("")
print("ERROR 1:")
print("")
ctx.transform.language = "cpp"
ctx.code = ctx.transform.code.pull()
ctx.code = """
#include <iostream>
using namespace std;
extern "C" int transform(int a, int b, double *result) {
cout << "transform " << a << " " << b << endl;
return 1;
}"""
ctx.translate()
ctx.transform.result.example = 0.0 #example, just to fill the schema
ctx.transform.link_options = ["-lstdc++"]
#ctx.transform.main_module.link_options also works
ctx.compute()
print(ctx.transform.exception)
print("")
print("ERROR 2:")
print("")
ctx.code = """
#include <iostream>
using namespace std;
extern "C" int transform(int a, int b, double *result) {
cout << "NOT PRINTED ";
exit(1);
}"""
ctx.translate()
ctx.transform.result.example = 0.0 #example, just to fill the schema
ctx.compute()
print(ctx.transform.exception)
| sjdv1982/seamless | tests/highlevel/transformer-compiled-error.py | Python | mit | 1,166 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the deprecated federated-storage columns from ResourceFile."""

    dependencies = [
        ('hs_core', '0034_manual_migrate_file_paths'),
    ]

    # Both removals target the same model, so build the operations from a
    # single tuple of deprecated field names.
    operations = [
        migrations.RemoveField(model_name='resourcefile', name=deprecated)
        for deprecated in (
            'fed_resource_file_name_or_path',
            'fed_resource_file_size',
        )
    ]
| hydroshare/hydroshare | hs_core/migrations/0035_remove_deprecated_fields.py | Python | bsd-3-clause | 475 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smoke test for reading records from GCS to TensorFlow."""
import random
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.python.lib.io import file_io
# Command-line flags: which GCS bucket to exercise and how many
# tf.train.Example records to generate for the round-trip test.
flags = tf.compat.v1.app.flags
flags.DEFINE_string("gcs_bucket_url", "",
                    "The URL to the GCS bucket in which the temporary "
                    "tfrecord file is to be written and read, e.g., "
                    "gs://my-gcs-bucket/test-directory")
flags.DEFINE_integer("num_examples", 10, "Number of examples to generate")
FLAGS = flags.FLAGS
def create_examples(num_examples, input_mean):
  """Create ExampleProto's containing data.

  Args:
    num_examples: number of Example protos to generate.
    input_mean: mean added to the random "inputs" feature; "target" is
      the input with the mean subtracted back out.

  Returns:
    A list of `num_examples` tf.train.Example protos with "id" (bytes),
    "target" and "inputs" (float) features.
  """
  ids = np.arange(num_examples).reshape([num_examples, 1])
  inputs = np.random.randn(num_examples, 1) + input_mean
  target = inputs - input_mean
  examples = []
  for row in range(num_examples):
    ex = example_pb2.Example()
    # Serialize the numeric id as its decimal text.  The previous code
    # called bytes(ids[row, 0]), which under Python 3 constructs a
    # zero-filled buffer of that many bytes instead of encoding the id.
    ex.features.feature["id"].bytes_list.value.append(
        str(ids[row, 0]).encode("utf-8"))
    ex.features.feature["target"].float_list.value.append(target[row, 0])
    ex.features.feature["inputs"].float_list.value.append(inputs[row, 0])
    examples.append(ex)
  return examples
def create_dir_test():
  """Verifies file_io directory handling methods.

  Exercises create, recursive create, list, rename and recursive delete
  against the configured GCS bucket, timing each call.
  NOTE(review): bare `assert` statements are stripped under `python -O`;
  acceptable for a smoke test but they would silently stop checking.
  """
  # Test directory creation.
  starttime_ms = int(round(time.time() * 1000))
  dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime_ms)
  print("Creating dir %s" % dir_name)
  file_io.create_dir(dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Created directory in: %d milliseconds" % elapsed_ms)
  # Check that the directory exists.
  dir_exists = file_io.is_directory(dir_name)
  assert dir_exists
  print("%s directory exists: %s" % (dir_name, dir_exists))

  # Test recursive directory creation.
  starttime_ms = int(round(time.time() * 1000))
  recursive_dir_name = "%s/%s/%s" % (dir_name,
                                     "nested_dir1",
                                     "nested_dir2")
  print("Creating recursive dir %s" % recursive_dir_name)
  file_io.recursive_create_dir(recursive_dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Created directory recursively in: %d milliseconds" % elapsed_ms)
  # Check that the directory exists.
  recursive_dir_exists = file_io.is_directory(recursive_dir_name)
  assert recursive_dir_exists
  print("%s directory exists: %s" % (recursive_dir_name, recursive_dir_exists))

  # Create some contents in the just created directory and list the contents.
  num_files = 10
  files_to_create = ["file_%d.txt" % n for n in range(num_files)]
  for file_num in files_to_create:
    file_name = "%s/%s" % (dir_name, file_num)
    print("Creating file %s." % file_name)
    file_io.write_string_to_file(file_name, "test file.")

  print("Listing directory %s." % dir_name)
  starttime_ms = int(round(time.time() * 1000))
  directory_contents = file_io.list_directory(dir_name)
  print(directory_contents)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Listed directory %s in %s milliseconds" % (dir_name, elapsed_ms))
  # Listing must show the created files plus the nested directory entry.
  assert set(directory_contents) == set(files_to_create + ["nested_dir1/"])

  # Test directory renaming.
  dir_to_rename = "%s/old_dir" % dir_name
  new_dir_name = "%s/new_dir" % dir_name
  file_io.create_dir(dir_to_rename)
  assert file_io.is_directory(dir_to_rename)
  assert not file_io.is_directory(new_dir_name)
  starttime_ms = int(round(time.time() * 1000))
  print("Will try renaming directory %s to %s" % (dir_to_rename, new_dir_name))
  file_io.rename(dir_to_rename, new_dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Renamed directory %s to %s in %s milliseconds" % (
      dir_to_rename, new_dir_name, elapsed_ms))
  assert not file_io.is_directory(dir_to_rename)
  assert file_io.is_directory(new_dir_name)

  # Test Delete directory recursively.
  print("Deleting directory recursively %s." % dir_name)
  starttime_ms = int(round(time.time() * 1000))
  file_io.delete_recursively(dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  dir_exists = file_io.is_directory(dir_name)
  assert not dir_exists
  print("Deleted directory recursively %s in %s milliseconds" % (
      dir_name, elapsed_ms))
dir_name, elapsed_ms))
def create_object_test():
  """Verifies file_io's object manipulation methods .

  Creates files matching two glob patterns, checks that pattern matching
  returns exactly the right set for each, then tests file rename and
  recursive cleanup.
  """
  starttime_ms = int(round(time.time() * 1000))
  dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime_ms)
  print("Creating dir %s." % dir_name)
  file_io.create_dir(dir_name)

  num_files = 5
  # Create files of 2 different patterns in this directory.
  files_pattern_1 = ["%s/test_file_%d.txt" % (dir_name, n)
                     for n in range(num_files)]
  files_pattern_2 = ["%s/testfile%d.txt" % (dir_name, n)
                     for n in range(num_files)]

  starttime_ms = int(round(time.time() * 1000))
  files_to_create = files_pattern_1 + files_pattern_2
  for file_name in files_to_create:
    print("Creating file %s." % file_name)
    file_io.write_string_to_file(file_name, "test file creation.")
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Created %d files in %s milliseconds" % (
      len(files_to_create), elapsed_ms))

  # Listing files of pattern1.
  list_files_pattern = "%s/test_file*.txt" % dir_name
  print("Getting files matching pattern %s." % list_files_pattern)
  starttime_ms = int(round(time.time() * 1000))
  files_list = file_io.get_matching_files(list_files_pattern)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Listed files in %s milliseconds" % elapsed_ms)
  print(files_list)
  assert set(files_list) == set(files_pattern_1)

  # Listing files of pattern2.
  list_files_pattern = "%s/testfile*.txt" % dir_name
  print("Getting files matching pattern %s." % list_files_pattern)
  starttime_ms = int(round(time.time() * 1000))
  files_list = file_io.get_matching_files(list_files_pattern)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Listed files in %s milliseconds" % elapsed_ms)
  print(files_list)
  assert set(files_list) == set(files_pattern_2)

  # Test renaming file.
  file_to_rename = "%s/oldname.txt" % dir_name
  file_new_name = "%s/newname.txt" % dir_name
  file_io.write_string_to_file(file_to_rename, "test file.")
  assert file_io.file_exists(file_to_rename)
  assert not file_io.file_exists(file_new_name)
  print("Will try renaming file %s to %s" % (file_to_rename, file_new_name))
  starttime_ms = int(round(time.time() * 1000))
  file_io.rename(file_to_rename, file_new_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("File %s renamed to %s in %s milliseconds" % (
      file_to_rename, file_new_name, elapsed_ms))
  assert not file_io.file_exists(file_to_rename)
  assert file_io.file_exists(file_new_name)

  # Delete directory.
  print("Deleting directory %s." % dir_name)
  file_io.delete_recursively(dir_name)
def main(argv):
  """Round-trips tfrecords through GCS, then runs the directory/object
  tests.

  Writes `FLAGS.num_examples` Example protos to a randomly named
  tfrecord in the bucket, reads them back twice (via
  tf_record_iterator and via a TFRecordReader op in a v1 session) and
  exits non-zero on any mismatch.
  """
  del argv  # Unused.

  # Sanity check on the GCS bucket URL.
  if not FLAGS.gcs_bucket_url or not FLAGS.gcs_bucket_url.startswith("gs://"):
    print("ERROR: Invalid GCS bucket URL: \"%s\"" % FLAGS.gcs_bucket_url)
    sys.exit(1)

  # Generate random tfrecord path name.
  input_path = FLAGS.gcs_bucket_url + "/"
  input_path += "".join(random.choice("0123456789ABCDEF") for i in range(8))
  input_path += ".tfrecord"
  print("Using input path: %s" % input_path)

  # Verify that writing to the records file in GCS works.
  print("\n=== Testing writing and reading of GCS record file... ===")
  example_data = create_examples(FLAGS.num_examples, 5)
  with tf.io.TFRecordWriter(input_path) as hf:
    for e in example_data:
      hf.write(e.SerializeToString())

  print("Data written to: %s" % input_path)

  # Verify that reading from the tfrecord file works and that
  # tf_record_iterator works.
  record_iter = tf.compat.v1.python_io.tf_record_iterator(input_path)
  read_count = 0
  for _ in record_iter:
    read_count += 1
  print("Read %d records using tf_record_iterator" % read_count)

  if read_count != FLAGS.num_examples:
    print("FAIL: The number of records read from tf_record_iterator (%d) "
          "differs from the expected number (%d)" % (read_count,
                                                     FLAGS.num_examples))
    sys.exit(1)

  # Verify that running the read op in a session works.
  print("\n=== Testing TFRecordReader.read op in a session... ===")
  with tf.Graph().as_default():
    filename_queue = tf.compat.v1.train.string_input_producer([input_path],
                                                              num_epochs=1)
    reader = tf.compat.v1.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    with tf.compat.v1.Session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      sess.run(tf.compat.v1.local_variables_initializer())
      tf.compat.v1.train.start_queue_runners()
      index = 0
      for _ in range(FLAGS.num_examples):
        print("Read record: %d" % index)
        sess.run(serialized_example)
        index += 1
      # Reading one more record should trigger an exception.
      try:
        sess.run(serialized_example)
        print("FAIL: Failed to catch the expected OutOfRangeError while "
              "reading one more record than is available")
        sys.exit(1)
      except tf.errors.OutOfRangeError:
        print("Successfully caught the expected OutOfRangeError while "
              "reading one more record than is available")

  create_dir_test()
  create_object_test()


if __name__ == "__main__":
  tf.compat.v1.app.run(main)
| tensorflow/tensorflow | tensorflow/tools/gcs_test/python/gcs_smoke.py | Python | apache-2.0 | 10,353 |
from ciscoconfparse import CiscoConfParse
# Parse the saved IPsec configuration and list every "crypto map CRYPTO"
# parent entry whose children include a "pfs group2" line.
parsed_cfg = CiscoConfParse("cisco_ipsec.txt")
crypto_map_entries = parsed_cfg.find_objects_w_child(
    parentspec=r"^crypto map CRYPTO", childspec=r"pfs group2")
print(crypto_map_entries)
| Paricitoi/python_4_eng | python_week1/week1_ex9.py | Python | gpl-3.0 | 222 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from proc_maps import ProcMaps
# Directory containing this script; used to locate the sibling
# reduce_debugline.py helper that is invoked through a shell pipeline.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
REDUCE_DEBUGLINE_PATH = os.path.join(BASE_PATH, 'reduce_debugline.py')
LOGGER = logging.getLogger('prepare_symbol_info')
def _dump_command_result(command, output_dir_path, basename, suffix):
  """Runs |command| through a shell and captures its stdout into a file.

  stdout is written to a fresh '<basename>.XXXXXX<suffix>' file and stderr
  to a sibling '<suffix>.err' file, both under |output_dir_path|.  The
  stderr dump is echoed to the debug log and always removed.  The stdout
  dump is kept only if the command succeeded and produced output.

  Args:
    command: Shell command line to execute.
    output_dir_path: Directory in which the dump files are created.
    basename: Prefix for the generated file names.
    suffix: Suffix for the stdout dump file name.

  Returns:
    The path of the stdout dump file, or None on failure or empty output.
  """
  fd_out, path_out = tempfile.mkstemp(
      suffix=suffix, prefix=basename + '.', dir=output_dir_path)
  fd_err, path_err = tempfile.mkstemp(
      suffix=suffix + '.err', prefix=basename + '.', dir=output_dir_path)
  failed = False
  try:
    # NOTE(review): |command| is interpolated from binary paths elsewhere;
    # paths containing shell metacharacters would be unsafe with shell=True.
    subprocess.check_call(
        command, stdout=fd_out, stderr=fd_err, shell=True)
  except (OSError, subprocess.CalledProcessError):
    failed = True
  finally:
    os.close(fd_err)
    os.close(fd_out)

  logger = logging.getLogger('prepare_symbol_info')
  if os.path.exists(path_err):
    if logger.getEffectiveLevel() <= logging.DEBUG:
      with open(path_err, 'r') as f:
        for line in f:
          logger.debug(line.rstrip())
    os.remove(path_err)

  if not os.path.exists(path_out):
    return None
  if os.path.getsize(path_out) == 0 or failed:
    os.remove(path_out)
    return None
  return path_out
def prepare_symbol_info(maps_path,
                        output_dir_path=None,
                        alternative_dirs=None,
                        use_tempdir=False,
                        use_source_file_name=False):
  """Prepares (collects) symbol information files for find_runtime_symbols.

  1) If |output_dir_path| is specified, it tries collecting symbol information
  files in the given directory |output_dir_path|.
  1-a) If |output_dir_path| doesn't exist, create the directory and use it.
  1-b) If |output_dir_path| is an empty directory, use it.
  1-c) If |output_dir_path| is a directory which has 'files.json', assumes that
  files are already collected and just ignores it.
  1-d) Otherwise, depends on |use_tempdir|.

  2) If |output_dir_path| is not specified, it tries to create a new directory
  depending on 'maps_path'.

  If it cannot create a new directory, creates a temporary directory depending
  on |use_tempdir|.  If |use_tempdir| is False, returns None.

  Args:
      maps_path: A path to a file which contains '/proc/<pid>/maps'.
      alternative_dirs: A mapping from a directory '/path/on/target' where the
          target process runs to a directory '/path/on/host' where the script
          reads the binary.  Considered to be used for Android binaries.
      output_dir_path: A path to a directory where files are prepared.
      use_tempdir: If True, it creates a temporary directory when it cannot
          create a new directory.
      use_source_file_name: If True, it adds reduced result of 'readelf -wL'
          to find source file names.

  Returns:
      A pair of a path to the prepared directory and a boolean representing
      if it created a temporary directory or not.
  """
  alternative_dirs = alternative_dirs or {}
  # Derive a default output directory name: '<foo>.pre' from '<foo>.maps',
  # then '<pid>.pre' from '/proc/<pid>/maps', then '<basename>.pre'.
  if not output_dir_path:
    matched = re.match('^(.*)\.maps$', os.path.basename(maps_path))
    if matched:
      output_dir_path = matched.group(1) + '.pre'
  if not output_dir_path:
    matched = re.match('^/proc/(.*)/maps$', os.path.realpath(maps_path))
    if matched:
      output_dir_path = matched.group(1) + '.pre'
  if not output_dir_path:
    output_dir_path = os.path.basename(maps_path) + '.pre'
  # TODO(dmikurube): Find another candidate for output_dir_path.

  used_tempdir = False
  LOGGER.info('Data for profiling will be collected in "%s".' % output_dir_path)
  if os.path.exists(output_dir_path):
    if os.path.isdir(output_dir_path) and not os.listdir(output_dir_path):
      # Case 1-b: empty existing directory is fine.
      LOGGER.warn('Using an empty existing directory "%s".' % output_dir_path)
    else:
      LOGGER.warn('A file or a directory exists at "%s".' % output_dir_path)
      if os.path.exists(os.path.join(output_dir_path, 'files.json')):
        # Case 1-c: already collected; reuse as-is.
        LOGGER.warn('Using the existing directory "%s".' % output_dir_path)
        return output_dir_path, used_tempdir
      else:
        # Case 1-d: occupied by something else; fall back to a tempdir.
        if use_tempdir:
          output_dir_path = tempfile.mkdtemp()
          used_tempdir = True
          LOGGER.warn('Using a temporary directory "%s".' % output_dir_path)
        else:
          LOGGER.warn('The directory "%s" is not available.' % output_dir_path)
          return None, used_tempdir
  else:
    # Case 1-a: create the directory, falling back to a tempdir on failure.
    LOGGER.info('Creating a new directory "%s".' % output_dir_path)
    try:
      os.mkdir(output_dir_path)
    except OSError:
      LOGGER.warn('A directory "%s" cannot be created.' % output_dir_path)
      if use_tempdir:
        output_dir_path = tempfile.mkdtemp()
        used_tempdir = True
        LOGGER.warn('Using a temporary directory "%s".' % output_dir_path)
      else:
        LOGGER.warn('The directory "%s" is not available.' % output_dir_path)
        return None, used_tempdir

  shutil.copyfile(maps_path, os.path.join(output_dir_path, 'maps'))

  with open(maps_path, mode='r') as f:
    maps = ProcMaps.load(f)

  LOGGER.debug('Listing up symbols.')
  files = {}
  for entry in maps.iter(ProcMaps.executable):
    LOGGER.debug('  %016x-%016x +%06x %s' % (
        entry.begin, entry.end, entry.offset, entry.name))
    # Map the on-target path to the on-host path when requested
    # (e.g. for Android binaries).
    binary_path = entry.name
    for target_path, host_path in alternative_dirs.iteritems():
      if entry.name.startswith(target_path):
        binary_path = entry.name.replace(target_path, host_path, 1)
    # Dump demangled symbols; skip the mapping if nm produced nothing.
    nm_filename = _dump_command_result(
        'nm -n --format bsd %s | c++filt' % binary_path,
        output_dir_path, os.path.basename(binary_path), '.nm')
    if not nm_filename:
      continue
    readelf_e_filename = _dump_command_result(
        'readelf -eW %s' % binary_path,
        output_dir_path, os.path.basename(binary_path), '.readelf-e')
    if not readelf_e_filename:
      continue
    readelf_debug_decodedline_file = None
    if use_source_file_name:
      # Optional: reduced 'readelf -wL' output for source-file lookup.
      readelf_debug_decodedline_file = _dump_command_result(
          'readelf -wL %s | %s' % (binary_path, REDUCE_DEBUGLINE_PATH),
          output_dir_path, os.path.basename(binary_path), '.readelf-wL')

    files[entry.name] = {}
    files[entry.name]['nm'] = {
        'file': os.path.basename(nm_filename),
        'format': 'bsd',
        'mangled': False}
    files[entry.name]['readelf-e'] = {
        'file': os.path.basename(readelf_e_filename)}
    if readelf_debug_decodedline_file:
      files[entry.name]['readelf-debug-decodedline-file'] = {
          'file': os.path.basename(readelf_debug_decodedline_file)}

  # Index of the collected dumps; its presence also marks the directory
  # as "already prepared" for future runs (case 1-c above).
  with open(os.path.join(output_dir_path, 'files.json'), 'w') as f:
    json.dump(files, f, indent=2, sort_keys=True)

  LOGGER.info('Collected symbol information at "%s".' % output_dir_path)
  return output_dir_path, used_tempdir
def main():
  """Command-line entry point.

  Usage: prepare_symbol_info.py /path/to/maps [/path/to/output_data_dir/]

  Returns:
    A process exit code: 0 on success, non-zero on bad usage, an
    unsupported platform or a collection failure.
  """
  if not sys.platform.startswith('linux'):
    # Fixed grammar ("work" -> "works") and the missing trailing newline
    # in the error message.
    sys.stderr.write('This script works only on Linux.\n')
    return 1

  LOGGER.setLevel(logging.DEBUG)
  handler = logging.StreamHandler()
  handler.setLevel(logging.INFO)
  formatter = logging.Formatter('%(message)s')
  handler.setFormatter(formatter)
  LOGGER.addHandler(handler)

  # TODO(dmikurube): Specify |alternative_dirs| from command line.
  if len(sys.argv) < 2:
    sys.stderr.write("""Usage:
%s /path/to/maps [/path/to/output_data_dir/]
""" % sys.argv[0])
    return 1
  elif len(sys.argv) == 2:
    result, _ = prepare_symbol_info(sys.argv[1])
  else:
    result, _ = prepare_symbol_info(sys.argv[1], sys.argv[2])

  # prepare_symbol_info() returns the output path (truthy) on success.
  return not result


if __name__ == '__main__':
  sys.exit(main())
| pozdnyakov/chromium-crosswalk | tools/find_runtime_symbols/prepare_symbol_info.py | Python | bsd-3-clause | 7,786 |
import playlist as pl
class AudioPlaylist(pl.Playlist):
    """A Playlist specialised for music content.

    Forwards all constructor arguments to the base class unchanged and
    fixes the final content-category argument to 'music'.
    """
    def __init__(self, alias, path, name, type, itemType):
        # The trailing 'music' literal selects the audio content category.
        pl.Playlist.__init__(self, alias, path, name, type, itemType, 'music')
| kamisama91/service.skin.smartplaylist.widgets | audioPlaylist.py | Python | gpl-2.0 | 208 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
import inspect
import os
import mock
from neutron.agent.linux import iptables_manager
from neutron.tests import base
from neutron.tests import tools
# %(bn)s in the expected dumps below is the wrapping binary name that
# IptablesManager prefixes onto every chain it owns.
IPTABLES_ARG = {'bn': iptables_manager.binary_name}

# Expected baseline 'nat' table as fed to iptables-restore.
NAT_DUMP = ('# Generated by iptables_manager\n'
            '*nat\n'
            ':neutron-postrouting-bottom - [0:0]\n'
            ':%(bn)s-OUTPUT - [0:0]\n'
            ':%(bn)s-snat - [0:0]\n'
            ':%(bn)s-PREROUTING - [0:0]\n'
            ':%(bn)s-float-snat - [0:0]\n'
            ':%(bn)s-POSTROUTING - [0:0]\n'
            '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
            '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
            '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
            '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
            '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
            '[0:0] -A %(bn)s-snat -j '
            '%(bn)s-float-snat\n'
            'COMMIT\n'
            '# Completed by iptables_manager\n' % IPTABLES_ARG)

# Expected baseline 'filter' table as fed to iptables-restore.
FILTER_DUMP = ('# Generated by iptables_manager\n'
               '*filter\n'
               ':neutron-filter-top - [0:0]\n'
               ':%(bn)s-FORWARD - [0:0]\n'
               ':%(bn)s-INPUT - [0:0]\n'
               ':%(bn)s-local - [0:0]\n'
               ':%(bn)s-OUTPUT - [0:0]\n'
               '[0:0] -A FORWARD -j neutron-filter-top\n'
               '[0:0] -A OUTPUT -j neutron-filter-top\n'
               '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
               '[0:0] -A INPUT -j %(bn)s-INPUT\n'
               '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
               '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
               'COMMIT\n'
               '# Completed by iptables_manager\n' % IPTABLES_ARG)
class IptablesManagerStateFulTestCase(base.BaseTestCase):
    def setUp(self):
        """Create an IptablesManager whose execute() is mocked out so no
        real iptables commands run during the tests."""
        super(IptablesManagerStateFulTestCase, self).setUp()
        self.root_helper = 'sudo'
        self.iptables = (iptables_manager.
                         IptablesManager(root_helper=self.root_helper))
        self.execute = mock.patch.object(self.iptables, "execute").start()
    def test_binary_name(self):
        # binary_name is derived from the top-level script's basename,
        # truncated to 16 characters.
        self.assertEqual(iptables_manager.binary_name,
                         os.path.basename(inspect.stack()[-1][1])[:16])
    def test_get_chain_name(self):
        """get_chain_name() truncates to the iptables 28-char limit, or to
        11 chars when the binary-name prefix will be prepended."""
        name = '0123456789' * 5
        # 28 chars is the maximum length of iptables chain name.
        self.assertEqual(iptables_manager.get_chain_name(name, wrap=False),
                         name[:28])
        # 11 chars is the maximum length of chain name of iptable_manager
        # if binary_name is prepended.
        self.assertEqual(iptables_manager.get_chain_name(name, wrap=True),
                         name[:11])
    def test_add_and_remove_chain_custom_binary_name(self):
        """Adding then emptying a chain under a custom binary name emits
        the expected iptables-restore inputs (name truncated to 16)."""
        bn = ("abcdef" * 5)
        self.iptables = (iptables_manager.
                         IptablesManager(root_helper=self.root_helper,
                                         binary_name=bn))
        self.execute = mock.patch.object(self.iptables, "execute").start()
        iptables_args = {'bn': bn[:16]}
        filter_dump = ('# Generated by iptables_manager\n'
                       '*filter\n'
                       ':neutron-filter-top - [0:0]\n'
                       ':%(bn)s-FORWARD - [0:0]\n'
                       ':%(bn)s-INPUT - [0:0]\n'
                       ':%(bn)s-local - [0:0]\n'
                       ':%(bn)s-filter - [0:0]\n'
                       ':%(bn)s-OUTPUT - [0:0]\n'
                       '[0:0] -A FORWARD -j neutron-filter-top\n'
                       '[0:0] -A OUTPUT -j neutron-filter-top\n'
                       '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
                       '[0:0] -A INPUT -j %(bn)s-INPUT\n'
                       '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                       '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
                       'COMMIT\n'
                       '# Completed by iptables_manager\n' % iptables_args)
        filter_dump_mod = ('# Generated by iptables_manager\n'
                           '*filter\n'
                           ':neutron-filter-top - [0:0]\n'
                           ':%(bn)s-FORWARD - [0:0]\n'
                           ':%(bn)s-INPUT - [0:0]\n'
                           ':%(bn)s-local - [0:0]\n'
                           ':%(bn)s-filter - [0:0]\n'
                           ':%(bn)s-OUTPUT - [0:0]\n'
                           '[0:0] -A FORWARD -j neutron-filter-top\n'
                           '[0:0] -A OUTPUT -j neutron-filter-top\n'
                           '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
                           '[0:0] -A INPUT -j %(bn)s-INPUT\n'
                           '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                           '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
                           'COMMIT\n'
                           '# Completed by iptables_manager\n'
                           % iptables_args)
        nat_dump = ('# Generated by iptables_manager\n'
                    '*nat\n'
                    ':neutron-postrouting-bottom - [0:0]\n'
                    ':%(bn)s-OUTPUT - [0:0]\n'
                    ':%(bn)s-snat - [0:0]\n'
                    ':%(bn)s-PREROUTING - [0:0]\n'
                    ':%(bn)s-float-snat - [0:0]\n'
                    ':%(bn)s-POSTROUTING - [0:0]\n'
                    '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
                    '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                    '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
                    '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
                    '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
                    '[0:0] -A %(bn)s-snat -j '
                    '%(bn)s-float-snat\n'
                    'COMMIT\n'
                    '# Completed by iptables_manager\n' % iptables_args)
        # Each pair is (expected execute() call, canned return value):
        # a save/restore round-trip per apply().
        expected_calls_and_values = [
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=nat_dump + filter_dump_mod,
                       root_helper=self.root_helper),
             None),
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=nat_dump + filter_dump,
                       root_helper=self.root_helper),
             None),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        self.iptables.ipv4['filter'].add_chain('filter')
        self.iptables.apply()

        self.iptables.ipv4['filter'].empty_chain('filter')
        self.iptables.apply()

        tools.verify_mock_calls(self.execute, expected_calls_and_values)
    def test_empty_chain_custom_binary_name(self):
        """Adding a chain with a rule and then removing the chain under a
        custom (pre-truncated) binary name restores the baseline dump."""
        bn = ("abcdef" * 5)[:16]
        self.iptables = (iptables_manager.
                         IptablesManager(root_helper=self.root_helper,
                                         binary_name=bn))
        self.execute = mock.patch.object(self.iptables, "execute").start()
        iptables_args = {'bn': bn}
        filter_dump = ('# Generated by iptables_manager\n'
                       '*filter\n'
                       ':neutron-filter-top - [0:0]\n'
                       ':%(bn)s-FORWARD - [0:0]\n'
                       ':%(bn)s-INPUT - [0:0]\n'
                       ':%(bn)s-local - [0:0]\n'
                       ':%(bn)s-OUTPUT - [0:0]\n'
                       '[0:0] -A FORWARD -j neutron-filter-top\n'
                       '[0:0] -A OUTPUT -j neutron-filter-top\n'
                       '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
                       '[0:0] -A INPUT -j %(bn)s-INPUT\n'
                       '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                       '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
                       'COMMIT\n'
                       '# Completed by iptables_manager\n' % iptables_args)
        filter_dump_mod = ('# Generated by iptables_manager\n'
                           '*filter\n'
                           ':neutron-filter-top - [0:0]\n'
                           ':%(bn)s-FORWARD - [0:0]\n'
                           ':%(bn)s-INPUT - [0:0]\n'
                           ':%(bn)s-local - [0:0]\n'
                           ':%(bn)s-filter - [0:0]\n'
                           ':%(bn)s-OUTPUT - [0:0]\n'
                           '[0:0] -A FORWARD -j neutron-filter-top\n'
                           '[0:0] -A OUTPUT -j neutron-filter-top\n'
                           '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
                           '[0:0] -A INPUT -j %(bn)s-INPUT\n'
                           '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                           '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
                           '[0:0] -A %(bn)s-filter -s 0/0 -d 192.168.0.2\n'
                           'COMMIT\n'
                           '# Completed by iptables_manager\n'
                           % iptables_args)
        nat_dump = ('# Generated by iptables_manager\n'
                    '*nat\n'
                    ':neutron-postrouting-bottom - [0:0]\n'
                    ':%(bn)s-OUTPUT - [0:0]\n'
                    ':%(bn)s-snat - [0:0]\n'
                    ':%(bn)s-PREROUTING - [0:0]\n'
                    ':%(bn)s-float-snat - [0:0]\n'
                    ':%(bn)s-POSTROUTING - [0:0]\n'
                    '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
                    '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                    '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
                    '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
                    '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
                    '[0:0] -A %(bn)s-snat -j '
                    '%(bn)s-float-snat\n'
                    'COMMIT\n'
                    '# Completed by iptables_manager\n' % iptables_args)
        # Each pair is (expected execute() call, canned return value).
        expected_calls_and_values = [
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=nat_dump + filter_dump_mod,
                       root_helper=self.root_helper),
             None),
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=nat_dump + filter_dump,
                       root_helper=self.root_helper),
             None),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        self.iptables.ipv4['filter'].add_chain('filter')
        self.iptables.ipv4['filter'].add_rule('filter',
                                              '-s 0/0 -d 192.168.0.2')
        self.iptables.apply()

        self.iptables.ipv4['filter'].remove_chain('filter')
        self.iptables.apply()

        tools.verify_mock_calls(self.execute, expected_calls_and_values)
    def test_add_and_remove_chain(self):
        """Adding a chain then removing it must round-trip.

        The first apply() restores a dump containing the new (empty) chain;
        the second apply(), after removal, restores the pristine FILTER_DUMP.
        """
        filter_dump_mod = ('# Generated by iptables_manager\n'
                           '*filter\n'
                           ':neutron-filter-top - [0:0]\n'
                           ':%(bn)s-FORWARD - [0:0]\n'
                           ':%(bn)s-INPUT - [0:0]\n'
                           ':%(bn)s-local - [0:0]\n'
                           ':%(bn)s-filter - [0:0]\n'
                           ':%(bn)s-OUTPUT - [0:0]\n'
                           '[0:0] -A FORWARD -j neutron-filter-top\n'
                           '[0:0] -A OUTPUT -j neutron-filter-top\n'
                           '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
                           '[0:0] -A INPUT -j %(bn)s-INPUT\n'
                           '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                           '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
                           'COMMIT\n'
                           '# Completed by iptables_manager\n'
                           % IPTABLES_ARG)
        # One iptables-save/iptables-restore pair is expected per apply().
        expected_calls_and_values = [
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=NAT_DUMP + filter_dump_mod,
                       root_helper=self.root_helper),
             None),
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=NAT_DUMP + FILTER_DUMP,
                       root_helper=self.root_helper),
             None),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.iptables.ipv4['filter'].add_chain('filter')
        self.iptables.apply()
        self.iptables.ipv4['filter'].remove_chain('filter')
        self.iptables.apply()
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
    def test_add_filter_rule(self):
        """Rules added to a custom chain and to INPUT must show up in the
        restored dump, and the pristine dump must be restored after both
        rules and the chain are removed.
        """
        filter_dump_mod = ('# Generated by iptables_manager\n'
                           '*filter\n'
                           ':neutron-filter-top - [0:0]\n'
                           ':%(bn)s-FORWARD - [0:0]\n'
                           ':%(bn)s-INPUT - [0:0]\n'
                           ':%(bn)s-local - [0:0]\n'
                           ':%(bn)s-filter - [0:0]\n'
                           ':%(bn)s-OUTPUT - [0:0]\n'
                           '[0:0] -A FORWARD -j neutron-filter-top\n'
                           '[0:0] -A OUTPUT -j neutron-filter-top\n'
                           '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
                           '[0:0] -A INPUT -j %(bn)s-INPUT\n'
                           '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                           '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
                           '[0:0] -A %(bn)s-filter -j DROP\n'
                           '[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j '
                           '%(bn)s-filter\n'
                           'COMMIT\n'
                           '# Completed by iptables_manager\n'
                           % IPTABLES_ARG)
        expected_calls_and_values = [
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=NAT_DUMP + filter_dump_mod,
                       root_helper=self.root_helper),
             None),
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=NAT_DUMP + FILTER_DUMP,
                       root_helper=self.root_helper
                       ),
             None),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.iptables.ipv4['filter'].add_chain('filter')
        self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
        self.iptables.ipv4['filter'].add_rule('INPUT',
                                              '-s 0/0 -d 192.168.0.2 -j'
                                              ' %(bn)s-filter' % IPTABLES_ARG)
        self.iptables.apply()
        self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
        self.iptables.ipv4['filter'].remove_rule('INPUT',
                                                 '-s 0/0 -d 192.168.0.2 -j'
                                                 ' %(bn)s-filter'
                                                 % IPTABLES_ARG)
        self.iptables.ipv4['filter'].remove_chain('filter')
        self.iptables.apply()
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
    def test_rule_with_wrap_target(self):
        """A '$name' jump target longer than iptables allows must be wrapped
        to its shortened chain name (via get_chain_name) in the dump.
        """
        # 50-char name: longer than the iptables chain-name limit.
        name = '0123456789' * 5
        wrap = "%s-%s" % (iptables_manager.binary_name,
                          iptables_manager.get_chain_name(name))
        iptables_args = {'bn': iptables_manager.binary_name,
                         'wrap': wrap}
        filter_dump_mod = ('# Generated by iptables_manager\n'
                           '*filter\n'
                           ':neutron-filter-top - [0:0]\n'
                           ':%(bn)s-FORWARD - [0:0]\n'
                           ':%(bn)s-INPUT - [0:0]\n'
                           ':%(bn)s-local - [0:0]\n'
                           ':%(wrap)s - [0:0]\n'
                           ':%(bn)s-OUTPUT - [0:0]\n'
                           '[0:0] -A FORWARD -j neutron-filter-top\n'
                           '[0:0] -A OUTPUT -j neutron-filter-top\n'
                           '[0:0] -A neutron-filter-top -j %(bn)s-local\n'
                           '[0:0] -A INPUT -j %(bn)s-INPUT\n'
                           '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                           '[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
                           '[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j '
                           '%(wrap)s\n'
                           'COMMIT\n'
                           '# Completed by iptables_manager\n'
                           % iptables_args)
        expected_calls_and_values = [
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=NAT_DUMP + filter_dump_mod,
                       root_helper=self.root_helper),
             None),
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=NAT_DUMP + FILTER_DUMP,
                       root_helper=self.root_helper),
             None),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.iptables.ipv4['filter'].add_chain(name)
        self.iptables.ipv4['filter'].add_rule('INPUT',
                                              '-s 0/0 -d 192.168.0.2 -j'
                                              ' $%s' % name)
        self.iptables.apply()
        self.iptables.ipv4['filter'].remove_rule('INPUT',
                                                 '-s 0/0 -d 192.168.0.2 -j'
                                                 ' $%s' % name)
        self.iptables.ipv4['filter'].remove_chain(name)
        self.iptables.apply()
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
    def test_add_nat_rule(self):
        """NAT chain and rules round-trip: the modified nat dump is restored
        after adding them, the pristine nat dump after removing them.
        """
        nat_dump = ('# Generated by iptables_manager\n'
                    '*nat\n'
                    ':neutron-postrouting-bottom - [0:0]\n'
                    ':%(bn)s-float-snat - [0:0]\n'
                    ':%(bn)s-POSTROUTING - [0:0]\n'
                    ':%(bn)s-PREROUTING - [0:0]\n'
                    ':%(bn)s-OUTPUT - [0:0]\n'
                    ':%(bn)s-snat - [0:0]\n'
                    '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
                    '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                    '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
                    '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
                    '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
                    '[0:0] -A %(bn)s-snat -j %(bn)s-float-snat\n'
                    'COMMIT\n'
                    '# Completed by iptables_manager\n'
                    % IPTABLES_ARG)
        nat_dump_mod = ('# Generated by iptables_manager\n'
                        '*nat\n'
                        ':neutron-postrouting-bottom - [0:0]\n'
                        ':%(bn)s-float-snat - [0:0]\n'
                        ':%(bn)s-POSTROUTING - [0:0]\n'
                        ':%(bn)s-PREROUTING - [0:0]\n'
                        ':%(bn)s-nat - [0:0]\n'
                        ':%(bn)s-OUTPUT - [0:0]\n'
                        ':%(bn)s-snat - [0:0]\n'
                        '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
                        '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
                        '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
                        '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
                        '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
                        '[0:0] -A %(bn)s-snat -j %(bn)s-float-snat\n'
                        '[0:0] -A %(bn)s-PREROUTING -d 192.168.0.3 -j '
                        '%(bn)s-nat\n'
                        '[0:0] -A %(bn)s-nat -p tcp --dport 8080 -j '
                        'REDIRECT --to-port 80\n'
                        'COMMIT\n'
                        '# Completed by iptables_manager\n'
                        % IPTABLES_ARG)
        expected_calls_and_values = [
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=nat_dump_mod + FILTER_DUMP,
                       root_helper=self.root_helper),
             None),
            (mock.call(['iptables-save', '-c'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['iptables-restore', '-c'],
                       process_input=nat_dump + FILTER_DUMP,
                       root_helper=self.root_helper),
             None),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.iptables.ipv4['nat'].add_chain('nat')
        self.iptables.ipv4['nat'].add_rule('PREROUTING',
                                           '-d 192.168.0.3 -j '
                                           '%(bn)s-nat' % IPTABLES_ARG)
        self.iptables.ipv4['nat'].add_rule('nat',
                                           '-p tcp --dport 8080' +
                                           ' -j REDIRECT --to-port 80')
        self.iptables.apply()
        self.iptables.ipv4['nat'].remove_rule('nat',
                                              '-p tcp --dport 8080 -j'
                                              ' REDIRECT --to-port 80')
        self.iptables.ipv4['nat'].remove_rule('PREROUTING',
                                              '-d 192.168.0.3 -j '
                                              '%(bn)s-nat' % IPTABLES_ARG)
        self.iptables.ipv4['nat'].remove_chain('nat')
        self.iptables.apply()
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_rule_to_a_nonexistent_chain(self):
self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule,
'nonexistent', '-j DROP')
def test_remove_nonexistent_chain(self):
with mock.patch.object(iptables_manager, "LOG") as log:
self.iptables.ipv4['filter'].remove_chain('nonexistent')
log.warn.assert_called_once_with(
'Attempted to remove chain %s which does not exist',
'nonexistent')
def test_remove_nonexistent_rule(self):
with mock.patch.object(iptables_manager, "LOG") as log:
self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
log.warn.assert_called_once_with(
'Tried to remove rule that was not there: '
'%(chain)r %(rule)r %(wrap)r %(top)r',
{'wrap': True, 'top': False, 'rule': '-j DROP',
'chain': 'nonexistent'})
def test_get_traffic_counters_chain_notexists(self):
with mock.patch.object(iptables_manager, "LOG") as log:
acc = self.iptables.get_traffic_counters('chain1')
self.assertIsNone(acc)
self.assertEqual(0, self.execute.call_count)
log.warn.assert_called_once_with(
'Attempted to get traffic counters of chain %s which '
'does not exist', 'chain1')
    def test_get_traffic_counters(self):
        """Counters are summed over every table (IPv4 filter + nat, IPv6
        filter) and every rule row: here 4 rows of 400 pkts / 65901 bytes.
        """
        iptables_dump = (
            'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
            ' pkts bytes target prot opt in out source'
            ' destination \n'
            ' 400 65901 chain1 all -- * * 0.0.0.0/0'
            ' 0.0.0.0/0 \n'
            ' 400 65901 chain2 all -- * * 0.0.0.0/0'
            ' 0.0.0.0/0 \n')
        expected_calls_and_values = [
            (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
                        '-n', '-v', '-x'],
                       root_helper=self.root_helper),
             iptables_dump),
            (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
                        '-v', '-x'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
                        '-n', '-v', '-x'],
                       root_helper=self.root_helper),
             iptables_dump),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        acc = self.iptables.get_traffic_counters('OUTPUT')
        self.assertEqual(acc['pkts'], 1600)
        self.assertEqual(acc['bytes'], 263604)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
    def test_get_traffic_counters_with_zero(self):
        """Same as test_get_traffic_counters, but zero=True must append the
        '-Z' flag so counters are reset while being read.
        """
        iptables_dump = (
            'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
            ' pkts bytes target prot opt in out source'
            ' destination \n'
            ' 400 65901 chain1 all -- * * 0.0.0.0/0'
            ' 0.0.0.0/0 \n'
            ' 400 65901 chain2 all -- * * 0.0.0.0/0'
            ' 0.0.0.0/0 \n')
        expected_calls_and_values = [
            (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
                        '-n', '-v', '-x', '-Z'],
                       root_helper=self.root_helper),
             iptables_dump),
            (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
                        '-v', '-x', '-Z'],
                       root_helper=self.root_helper),
             ''),
            (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
                        '-n', '-v', '-x', '-Z'],
                       root_helper=self.root_helper),
             iptables_dump),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        acc = self.iptables.get_traffic_counters('OUTPUT', zero=True)
        self.assertEqual(acc['pkts'], 1600)
        self.assertEqual(acc['bytes'], 263604)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
class IptablesManagerStateLessTestCase(base.BaseTestCase):
    """Tests for an IptablesManager created with state_less=True."""
    def setUp(self):
        super(IptablesManagerStateLessTestCase, self).setUp()
        self.iptables = (iptables_manager.IptablesManager(state_less=True))
    def test_nat_not_found(self):
        """A stateless manager must not set up the IPv4 'nat' table."""
        self.assertNotIn('nat', self.iptables.ipv4)
| vijayendrabvs/hap | neutron/tests/unit/test_iptables_manager.py | Python | apache-2.0 | 28,031 |
#
# Copyright 2014 Infoxchange Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Satellite processes started by Forklift itself to provide services.
"""
import os
from threading import Thread
from forklift.base import wait_for_parent
def start_satellite(target, args=(), kwargs=None, stop=None):
    """
    Start a process configured to run the target but kill it after the parent
    exits.
    """
    if os.fork() != 0:
        # Parent process: the satellite runs on its own from here.
        return
    # Child process: detach into its own process group so signals sent by
    # the shell aren't propagated to the satellite.
    os.setpgrp()
    _satellite(target, args, kwargs if kwargs is not None else {}, stop)
def _satellite(target, args, kwargs, stop):
    """
    Run the target, killing it after the parent exits.
    """
    # The payload runs as a daemon thread, so it cannot outlive this process.
    worker = Thread(target=target, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    # Block until the parent process goes away, then run the stop hook.
    wait_for_parent()
    exit_status = stop() if stop is not None else None
    # This is in a child process, so exit without additional cleanup
    os._exit(exit_status if exit_status is not None else 0)  # pylint:disable=protected-access
| infoxchange/docker-forklift | forklift/services/satellite.py | Python | apache-2.0 | 1,716 |
#!/usr/bin/env python2
#
# Copyright 2013 Tim O'Shea
#
# This file is part of PyBOMBS
#
# PyBOMBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# PyBOMBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyBOMBS; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import pickle,os,pprint
topdir = os.path.split(os.path.realpath(__file__))[0] + "/../";
import exceptions;
class inventory:
    """Persistent package inventory, pickled to ``inv_file`` under topdir.

    ``contents`` maps package name -> dict of properties; an installed
    package always carries at least a "state" key.  The pickle is re-read
    before and re-written after every mutation, so concurrent pybombs
    invocations observe each other's changes.
    """
    def __init__(self, filename='inventory.dat'):
        self.inv_file = filename
        self.contents = {}
        self.loadc()
    def has(self, pkg):
        """Return True if *pkg* has an inventory entry."""
        # 'in' replaces the deprecated dict.has_key().
        return pkg in self.contents
    def loadc(self):
        """(Re)load contents from disk, starting empty if unreadable."""
        os.chdir(topdir)
        try:
            # Context manager guarantees the handle is closed even when
            # unpickling fails part-way through.
            with open(self.inv_file, 'rb') as f:
                self.contents = pickle.load(f)
        except Exception:
            # Missing or corrupt inventory file: deliberate best-effort
            # fallback to an empty inventory (but no longer swallows
            # KeyboardInterrupt/SystemExit like the old bare except did).
            print("no existing inventory found, creating an empty one...")
            self.contents = {}
    def savec(self):
        """Write contents back to disk."""
        os.chdir(topdir)
        with open(self.inv_file, 'wb') as output:
            pickle.dump(self.contents, output)
    def state(self, pkg):
        """Return the recorded state of *pkg*, or None if unknown."""
        self.loadc()
        try:
            return self.contents[pkg]["state"]
        except (KeyError, TypeError):
            return None
    def set_state(self, pkg, state):
        """Record *state* for *pkg*; a falsy state deletes the entry."""
        self.loadc()
        if state:
            self.contents.setdefault(pkg, {})["state"] = state
        else:
            # pop() with a default replaces the old try/del/except-pass.
            self.contents.pop(pkg, None)
        self.savec()
    def set_prop(self, pkg, prop, val):
        """Set property *prop* of *pkg*; raises NameError for unknown pkg."""
        self.loadc()
        if not self.has(pkg):
            raise NameError("package has no inv entry" + str((pkg, prop)))
        self.contents[pkg][prop] = val
        self.savec()
    def get_prop(self, pkg, prop):
        """Get property *prop* of *pkg*; raises NameError when missing."""
        self.loadc()
        if not self.has(pkg):
            raise NameError("package has no inv entry" + str((pkg, prop)))
        if prop not in self.contents[pkg]:
            raise NameError("package in inv does not have prop" + str((pkg, prop)))
        return self.contents[pkg][prop]
    def try_get_prop(self, pkg, prop):
        """Like get_prop() but returns None (and prints) on any failure."""
        try:
            return self.get_prop(pkg, prop)
        except Exception:
            print("fail")
            return None
    def clear_props(self, pkg):
        """Drop every property of *pkg* except the mandatory "state"."""
        self.loadc()
        self.contents[pkg] = {"state": self.contents[pkg]["state"]}
        self.savec()
    def show(self):
        """Pretty-print the whole inventory to stdout."""
        pprint.pprint(self.contents)
| carlesfernandez/pybombs | mod_pybombs/inventory.py | Python | gpl-3.0 | 3,106 |
from __future__ import division
import os
import math
import cmath
from ejpi import operation
from ejpi import plugin_utils
_NAME = "Trigonometry"
_ICON = "trig.png"
_MAP = {
"name": _NAME,
"keys": {
(0, 0): {
"CENTER": {"action": "7", "type": "text", "text": "7", },
"SOUTH": {"action": "[sinh]", "type": "text", "text": "sinh", },
"SOUTH_EAST": {"action": "[cosh]", "type": "text", "text": "cosh", },
"EAST": {"action": "[tanh]", "type": "text", "text": "tanh", },
"showAllSlices": False,
},
(0, 1): {
"CENTER": {"action": "8", "type": "text", "text": "8", },
"showAllSlices": False,
},
(0, 2): {
"CENTER": {"action": "9", "type": "text", "text": "9", },
"SOUTH": {"action": "[asinh]", "type": "text", "text": "asinh", },
"SOUTH_WEST": {"action": "[acosh]", "type": "text", "text": "acosh", },
"WEST": {"action": "[atanh]", "type": "text", "text": "atanh", },
"showAllSlices": True,
},
(1, 0): {
"CENTER": {"action": "4", "type": "text", "text": "4", },
"showAllSlices": True,
},
(1, 1): {
"CENTER": {"action": "5", "type": "text", "text": "5", },
"NORTH": {"action": "[exp]", "type": "text", "text": "e ** x", },
"SOUTH": {"action": "[log]", "type": "text", "text": "ln", },
"WEST": {"action": "e", "type": "text", "text": "e", },
"EAST": {"action": "j", "type": "text", "text": "j", },
"showAllSlices": True,
},
(1, 2): {
"CENTER": {"action": "6", "type": "text", "text": "6", },
"WEST": {"action": "pi", "type": "text", "text": "pi", },
"NORTH": {"action": "[rad]", "type": "text", "text": "-> rad", },
"SOUTH": {"action": "[deg]", "type": "text", "text": "-> deg", },
"showAllSlices": True,
},
(2, 0): {
"CENTER": {"action": "1", "type": "text", "text": "1", },
"NORTH": {"action": ".", "type": "text", "text": ".", },
"EAST": {"action": "0", "type": "text", "text": "0", },
"showAllSlices": True,
},
(2, 1): {
"CENTER": {"action": "2", "type": "text", "text": "2", },
"WEST": {"action": "[sin]", "type": "text", "text": "sin", },
"NORTH": {"action": "[cos]", "type": "text", "text": "cos", },
"EAST": {"action": "[tan]", "type": "text", "text": "tan", },
"showAllSlices": True,
},
(2, 2): {
"CENTER": {"action": "3", "type": "text", "text": "3", },
"NORTH": {"action": "[asin]", "type": "text", "text": "asin", },
"NORTH_WEST": {"action": "[acos]", "type": "text", "text": "acos", },
"WEST": {"action": "[atan]", "type": "text", "text": "atan", },
"showAllSlices": False,
},
},
}
_ICON_PATH = [os.path.join(os.path.dirname(__file__), "images")]
PLUGIN = plugin_utils.PieKeyboardPluginFactory(_NAME, _ICON, _MAP, _ICON_PATH)
pi = operation.Constant("pi", operation.Value(math.pi, operation.render_float_eng))
e = operation.Constant("e", operation.Value(math.e, operation.render_float_eng))
def float_or_complex(float_func, complex_func):
    """Build a dispatcher that picks the complex implementation whenever any
    positional argument (after the leading ``self`` slot) is complex, and the
    float implementation otherwise.  The returned callable borrows the
    complex function's name and docstring.
    """
    def dispatch(self, *args, **kwd):
        needs_complex = any(isinstance(value, complex) for value in args)
        chosen = complex_func if needs_complex else float_func
        return chosen(*args, **kwd)
    dispatch.__name__ = complex_func.__name__
    dispatch.__doc__ = complex_func.__doc__
    return dispatch
exp = operation.generate_function(float_or_complex(math.exp, cmath.exp), "exp", operation.Function.REP_FUNCTION, 1)
log = operation.generate_function(float_or_complex(math.log, cmath.log), "log", operation.Function.REP_FUNCTION, 1)
PLUGIN.register_operation("exp", exp)
PLUGIN.register_operation("log", log)
cos = operation.generate_function(float_or_complex(math.cos, cmath.cos), "cos", operation.Function.REP_FUNCTION, 1)
acos = operation.generate_function(float_or_complex(math.acos, cmath.acos), "acos", operation.Function.REP_FUNCTION, 1)
sin = operation.generate_function(float_or_complex(math.sin, cmath.sin), "sin", operation.Function.REP_FUNCTION, 1)
asin = operation.generate_function(float_or_complex(math.asin, cmath.asin), "asin", operation.Function.REP_FUNCTION, 1)
tan = operation.generate_function(float_or_complex(math.tan, cmath.tan), "tan", operation.Function.REP_FUNCTION, 1)
atan = operation.generate_function(float_or_complex(math.atan, cmath.atan), "atan", operation.Function.REP_FUNCTION, 1)
PLUGIN.register_operation("cos", cos)
PLUGIN.register_operation("acos", acos)
PLUGIN.register_operation("sin", sin)
PLUGIN.register_operation("asin", asin)
PLUGIN.register_operation("tan", tan)
PLUGIN.register_operation("atan", atan)
cosh = operation.generate_function(float_or_complex(math.cosh, cmath.cosh), "cosh", operation.Function.REP_FUNCTION, 1)
acosh = operation.generate_function(cmath.acosh, "acosh", operation.Function.REP_FUNCTION, 1)
sinh = operation.generate_function(float_or_complex(math.sinh, cmath.sinh), "sinh", operation.Function.REP_FUNCTION, 1)
asinh = operation.generate_function(cmath.asinh, "asinh", operation.Function.REP_FUNCTION, 1)
tanh = operation.generate_function(float_or_complex(math.tanh, cmath.tanh), "tanh", operation.Function.REP_FUNCTION, 1)
atanh = operation.generate_function(cmath.atanh, "atanh", operation.Function.REP_FUNCTION, 1)
PLUGIN.register_operation("cosh", cosh)
PLUGIN.register_operation("acosh", acosh)
PLUGIN.register_operation("sinh", sinh)
PLUGIN.register_operation("asinh", asinh)
PLUGIN.register_operation("tanh", tanh)
PLUGIN.register_operation("atanh", atanh)
deg = operation.generate_function(math.degrees, "deg", operation.Function.REP_FUNCTION, 1)
rad = operation.generate_function(math.radians, "rad", operation.Function.REP_FUNCTION, 1)
PLUGIN.register_operation("deg", deg)
PLUGIN.register_operation("rad", rad)
# In 2.6
#phase = operation.generate_function(cmath.phase, "phase", operation.Function.REP_FUNCTION, 1)
#polar = operation.generate_function(cmath.polar, "polar", operation.Function.REP_FUNCTION, 1)
#rect = operation.generate_function(cmath.rect, "rect", operation.Function.REP_FUNCTION, 1)
| epage/ejpi | ejpi/plugins/trig.py | Python | lgpl-2.1 | 5,907 |
#!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
All matplotlib date plotting is done by converting date instances into
days since the 0001-01-01 UTC. The conversion, tick locating and
formatting is done behind the scenes so this is most transparent to
you. The dates module provides several converter functions date2num
and num2date
"""
import datetime
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
# load a numpy record array from yahoo csv data with fields date,
# open, close, volume, adj_close from the mpl-data/example directory.
# The record array stores python datetime.date as an object array in
# the date column
datafile = cbook.get_sample_data('goog.npy')
r = np.load(datafile).view(np.recarray)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(r.date, r.adj_close)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
datemin = datetime.date(r.date.min().year, 1, 1)
datemax = datetime.date(r.date.max().year+1, 1, 1)
ax.set_xlim(datemin, datemax)
# format the coords message box
def price(x): return '$%1.2f'%x
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = price
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
plt.show()
| ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/api/date_demo.py | Python | gpl-2.0 | 1,735 |
"""
Helper methods for `philu_overrides` app
"""
import json
from datetime import datetime
import pytz
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from common.lib.mandrill_client.client import MandrillClient
from lms.djangoapps.courseware.courses import get_course_by_id
from lms.djangoapps.onboarding.constants import ORG_PARTNERSHIP_END_DATE_PLACEHOLDER
from lms.djangoapps.onboarding.models import GranteeOptIn
from lms.djangoapps.philu_overrides.constants import ACTIVATION_ALERT_TYPE, ACTIVATION_ERROR_MSG_FORMAT
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.lib.request_utils import safe_get_host
from student.models import Registration
from util.json_request import JsonResponse
utc = pytz.UTC
def get_course_details(course_id):
    """Load the course descriptor for ``course_id`` and return its populated
    CourseDetails."""
    descriptor = get_course_by_id(course_id)
    return CourseDetails.populate(descriptor)
def send_account_activation_email(request, registration, user):
    """
    Send account activation email to user

    Arguments:
        request (HttpRequest): used for scheme and host of the link
        registration (Registration): supplies the activation key
        user (User): recipient of the email

    Returns:
        None
    """
    scheme = 'https' if request.is_secure() else 'http'
    activation_link = '{protocol}://{site}/activate/{key}'.format(
        protocol=scheme,
        site=safe_get_host(request),
        key=registration.activation_key,
    )
    MandrillClient().send_mail(
        MandrillClient.USER_ACCOUNT_ACTIVATION_TEMPLATE,
        user.email,
        {
            'first_name': user.first_name,
            'activation_link': activation_link,
        },
    )
def reactivation_email_for_user_custom(request, user):
    """
    Re-send the account activation email for an inactive user.

    Returns a JSON error payload when ``user`` has no Registration row;
    otherwise the email is sent as a side effect and nothing is returned.
    """
    try:
        reg = Registration.objects.get(user=user)
        send_account_activation_email(request, reg, user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme
def has_access_custom(course):
    """Enrollment is open only while "now" (UTC) falls strictly between the
    course's enrollment start and end dates; both dates must be set."""
    now = datetime.utcnow().replace(tzinfo=utc)
    start, end = course.enrollment_start, course.enrollment_end
    # Short-circuit chain preserves the original falsy return values when a
    # boundary date is missing.
    return start and end and start < now < end
def get_course_next_classes(request, course):
    """
    Method to get all upcoming reruns of a course, each annotated with the
    per-user enrollment context needed to render its register button.

    Returns:
        list of dicts (one per upcoming rerun) with enrollment flags,
        the course object and, for staff, the first chapter link.
    """
    # imports to avoid circular dependencies
    from lms.djangoapps.courseware.access import _can_enroll_courselike
    from lms.djangoapps.courseware.views.views import registered_for_course
    from student.models import CourseEnrollment
    from opaque_keys.edx.locations import SlashSeparatedCourseKey
    courses = get_all_reruns_of_a_course(course)
    course_next_classes = []
    for _course in courses:
        course_key = SlashSeparatedCourseKey.from_deprecated_string(_course.id.__str__())
        course = get_course_by_id(course_key)
        # Carry the open date computed by get_all_reruns_of_a_course onto the
        # freshly loaded descriptor.
        course.course_open_date = _course.course_open_date
        registered = registered_for_course(course, request.user)
        # Used to provide context to message to student if enrollment not allowed
        can_enroll = _can_enroll_courselike(request.user, course)
        invitation_only = course.invitation_only
        is_course_full = CourseEnrollment.objects.is_course_full(course)
        # Register button should be disabled if one of the following is true:
        # - Student is already registered for course
        # - Course is already full
        # - Student cannot enroll in course
        active_reg_button = not (registered or is_course_full or not can_enroll)
        course_first_chapter_link = ""
        if request.user.is_authenticated() and request.user.is_staff:
            # imported get_course_first_chapter_link here because importing above was throwing circular exception
            from openedx.core.djangoapps.timed_notification.core import get_course_first_chapter_link
            course_first_chapter_link = get_course_first_chapter_link(_course)
        course_next_classes.append({
            'user': request.user,
            'registered': registered,
            'is_course_full': is_course_full,
            'can_enroll': can_enroll.has_access,
            'invitation_only': invitation_only,
            'course': course,
            'active_reg_button': active_reg_button,
            'course_first_chapter_link': course_first_chapter_link
        })
    return course_next_classes
def get_all_reruns_of_a_course(course):
    """
    Return the not-yet-started reruns of ``course`` (the course itself
    included in the lookup), ordered by start date and annotated with
    ``course_open_date``.

    :param course:
    :return reruns:
    """
    from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
    from course_action_state.models import CourseRerunState
    from openedx.features.course_card.helpers import get_course_open_date
    courses = []
    current_time = datetime.utcnow().replace(tzinfo=utc)
    # Keys of all successful reruns of this course, plus the course itself.
    course_rerun_states = [crs.course_key for crs in CourseRerunState.objects.filter(
        source_course_key=course.id, action="rerun", state="succeeded")] + [course.id]
    course_rerun_objects = CourseOverview.objects.select_related('image_set').filter(
        id__in=course_rerun_states).order_by('start')
    for course_run in course_rerun_objects:
        course_open_date = get_course_open_date(course_run)
        # Only runs that have not started yet are returned.
        if course_run.start > current_time:
            course_run.course_open_date = course_open_date
            courses.append(course_run)
    return courses
def get_user_current_enrolled_class(request, course):
    """
    Method to get an ongoing user enrolled course. A course that meets the following criteria
    => start date <= today
    => end date > today
    => user is enrolled

    Returns:
        tuple: (current_class, is_enrolled, courseware_target_url) where the
        target url is '' unless the user is enrolled in the current class.
    """
    from opaque_keys.edx.locations import SlashSeparatedCourseKey
    from lms.djangoapps.philu_overrides.courseware.views.views import get_course_related_keys
    from student.models import CourseEnrollment
    from course_action_state.models import CourseRerunState
    all_course_reruns = [crs.course_key for crs in CourseRerunState.objects.filter(
        source_course_key=course.id, action="rerun", state="succeeded")] + [course.id]
    current_time = datetime.utcnow().replace(tzinfo=utc)
    current_class = get_course_current_class(all_course_reruns, current_time)
    current_enrolled_class = False
    if current_class:
        current_enrolled_class = CourseEnrollment.is_enrolled(request.user, current_class.id)
    current_enrolled_class_target = ''
    if current_enrolled_class:
        # Reload the full course descriptor, preserving the open date that
        # get_course_current_class attached to the overview.
        course_open_date = current_class.course_open_date
        course_key = SlashSeparatedCourseKey.from_deprecated_string(current_class.id.__str__())
        current_class = get_course_by_id(course_key)
        current_class.course_open_date = course_open_date
        first_chapter_url, first_section = get_course_related_keys(request, current_class)
        current_enrolled_class_target = reverse('courseware_section',
                                                args=[current_class.id.to_deprecated_string(),
                                                      first_chapter_url, first_section])
    return current_class, current_enrolled_class, current_enrolled_class_target
def get_course_current_class(all_course_reruns, current_time):
    """
    Return the currently running class among ``all_course_reruns``.

    A class is current when start <= current_time <= end; when several
    qualify, the one with the latest start date wins.

    Arguments:
        all_course_reruns (list): course keys to consider
        current_time (datetime): reference time (timezone aware)

    Returns:
        CourseOverview annotated with ``course_open_date``, or None
    """
    from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
    from openedx.features.course_card.helpers import get_course_open_date
    course = CourseOverview.objects.select_related('image_set').filter(
        id__in=all_course_reruns, start__lte=current_time, end__gte=current_time).order_by('-start').first()
    if course is None:
        return None
    # The queryset already guarantees course.start <= current_time, so the
    # redundant re-check that used to guard this assignment is dropped.
    course.course_open_date = get_course_open_date(course)
    return course
def is_user_enrolled_in_any_class(course_current_class, course_next_classes):
    """Return True when the user has an ongoing class or is registered for
    any upcoming rerun (dicts produced by get_course_next_classes)."""
    # any() short-circuits instead of materializing a throwaway list.
    return bool(course_current_class) or any(
        next_class['registered'] for next_class in course_next_classes
    )
# Query string parameters that can be passed to the "finish_auth" view to manage
# things like auto-enrollment.
POST_AUTH_PARAMS = ('course_id', 'enrollment_action', 'course_mode', 'email_opt_in', 'purchase_workflow')
def get_next_url_for_login_page_override(request):
    """
    NOTE*: We override this method to tackle alquity redirection scenarios
    Determine the URL to redirect to following login/registration/third_party_auth

    The user is currently on a login or registration page.
    If 'course_id' is set, or other POST_AUTH_PARAMS, we will need to send the user to the
    /account/finish_auth/ view following login, which will take care of auto-enrollment in
    the specified course.

    Otherwise, we go to the ?next= query param or to the dashboard if nothing else is
    specified.
    """
    import urllib
    from django.core.urlresolvers import NoReverseMatch
    from django.utils import http
    from lms.djangoapps.onboarding.helpers import get_alquity_community_url
    import logging
    log = logging.getLogger(__name__)
    redirect_to = request.GET.get('next', None)
    # sanity checks for alquity specific users: 'next=alquity' is a sentinel
    # value, not a URL, and is resolved here depending on the current page.
    if redirect_to == 'alquity' and request.path == '/register':
        if request.user.is_authenticated():
            return get_alquity_community_url()
        return reverse('dashboard')
    if redirect_to == 'alquity' and request.path == '/login':
        return get_alquity_community_url()
    # if we get a redirect parameter, make sure it's safe. If it's not, drop the
    # parameter.
    if redirect_to and not http.is_safe_url(redirect_to):
        log.error(
            u'Unsafe redirect parameter detected: %(redirect_to)r',
            {"redirect_to": redirect_to}
        )
        redirect_to = None
    course_id = request.GET.get('course_id', None)
    if not redirect_to:
        try:
            if course_id:
                redirect_to = reverse('info', args=[course_id])
            else:
                redirect_to = reverse('dashboard')
        except NoReverseMatch:
            redirect_to = reverse('home')
    if any(param in request.GET for param in POST_AUTH_PARAMS):
        # Before we redirect to next/dashboard, we need to handle auto-enrollment:
        params = [(param, request.GET[param]) for param in POST_AUTH_PARAMS if param in request.GET]
        params.append(('next', redirect_to))  # After auto-enrollment, user will be sent to payment page or to this URL
        redirect_to = '{}?{}'.format(reverse('finish_auth'), urllib.urlencode(params))
        # Note: if we are resuming a third party auth pipeline, then the next URL will already
        # be saved in the session as part of the pipeline state. That URL will take priority
        # over this one.
    return redirect_to
def save_user_partner_network_consent(user, _data):
    """Record partner-network opt-ins for *user* from a JSON consent payload.

    ``_data`` is a JSON-encoded list of dicts carrying ``code`` (partner
    code) and ``consent`` ("true"/"false").  For every entry whose partner
    still has an open partnership with the user's organization (end date
    equal to the placeholder), a ``GranteeOptIn`` row is created.  A falsy
    payload is a no-op.
    """
    if not _data:
        return
    organization = user.extended_profile.organization
    for entry in json.loads(_data):
        # Only partnerships that have not ended yet are eligible.
        partner_link = organization.organization_partners.filter(
            partner=entry['code'],
            end_date=ORG_PARTNERSHIP_END_DATE_PLACEHOLDER,
        ).first()
        if partner_link:
            GranteeOptIn.objects.create(
                agreed=entry['consent'] == 'true',
                organization_partner=partner_link,
                user=user,
            )
def get_activation_alert_error_msg_dict(user_id):
    """Build the alert payload shown when account activation fails.

    Returns a dict with the alert ``type`` and a formatted ``alert`` message
    embedding the resend-activation endpoint and *user_id*.
    """
    message = ACTIVATION_ERROR_MSG_FORMAT.format(
        api_endpoint=reverse('resend_activation_email'),
        user_id=user_id,
    )
    return {'type': ACTIVATION_ALERT_TYPE, 'alert': message}
| philanthropy-u/edx-platform | lms/djangoapps/philu_overrides/helpers.py | Python | agpl-3.0 | 12,087 |
#!/usr/bin/env python3
import argparse
import io
import zipfile
from collections import defaultdict
from datetime import date
# oboInOwl annotation properties emitted in the output ontology, mapped from
# their local names to the rdfs:label each declaration should carry.
oboInOwl = {
    "SynonymTypeProperty": "synonym_type_property",
    "hasAlternativeId": "has_alternative_id",
    "hasBroadSynonym": "has_broad_synonym",
    "hasDbXref": "database_cross_reference",
    "hasExactSynonym": "has_exact_synonym",
    "hasOBOFormatVersion": "has_obo_format_version",
    "hasOBONamespace": "has_obo_namespace",
    "hasRelatedSynonym": "has_related_synonym",
    "hasScope": "has_scope",
    "hasSynonymType": "has_synonym_type",
}

# Shorthand CURIEs for the three OBO synonym predicates.
exact_synonym = "oboInOwl:hasExactSynonym"
related_synonym = "oboInOwl:hasRelatedSynonym"
broad_synonym = "oboInOwl:hasBroadSynonym"

# NCBI name classes (column 4 of names.dmp) mapped to the synonym predicate
# used when emitting that synonym; name classes not listed here are skipped
# by convert_synonyms().
predicates = {
    "acronym": broad_synonym,
    "anamorph": related_synonym,
    "blast name": related_synonym,
    "common name": exact_synonym,
    "equivalent name": exact_synonym,
    "genbank acronym": broad_synonym,
    "genbank anamorph": related_synonym,
    "genbank common name": exact_synonym,
    "genbank synonym": related_synonym,
    "in-part": related_synonym,
    "misnomer": related_synonym,
    "misspelling": related_synonym,
    "synonym": related_synonym,
    "scientific name": exact_synonym,
    "teleomorph": related_synonym,
}

# Taxonomic ranks recognized by the converter; each one is declared as an
# owl:Class under "taxonomic rank" at the end of the output file.
ranks = [
    "class",
    "cohort",
    "family",
    "forma",
    "genus",
    "infraclass",
    "infraorder",
    "kingdom",
    "order",
    "parvorder",
    "phylum",
    "section",
    "series",
    "species group",
    "species subgroup",
    "species",
    "subclass",
    "subcohort",
    "subfamily",
    "subgenus",
    "subkingdom",
    "suborder",
    "subphylum",
    "subsection",
    "subspecies",
    "subtribe",
    "superclass",
    "superfamily",
    "superkingdom",
    "superorder",
    "superphylum",
    "tribe",
    "varietas",
]

# Column names of nodes.dmp, in file order (see the NCBI taxdump readme).
nodes_fields = [
    "tax_id",  # node id in GenBank taxonomy database
    "parent_tax_id",  # parent node id in GenBank taxonomy database
    "rank",  # rank of this node (superkingdom, kingdom, ...)
    "embl_code",  # locus-name prefix; not unique
    "division_id",  # see division.dmp file
    "inherited_div_flag",  # (1 or 0) 1 if node inherits division from parent
    "genetic_code_id",  # see gencode.dmp file
    "inherited_GC_flag",  # (1 or 0) 1 if node inherits genetic code from parent
    "mitochondrial_genetic_code_id",  # see gencode.dmp file
    "inherited_MGC_flag",  # (1 or 0) 1 if node inherits mitochondrial gencode from parent
    "GenBank_hidden_flag",  # (1 or 0) 1 if name is suppressed in GenBank entry lineage
    "hidden_subtree_root_flag",  # (1 or 0) 1 if this subtree has no sequence data yet
    "comments",  # free-text comments and citations
]
def escape_literal(text):
    """Backslash-escape double quotes so *text* is safe inside a Turtle literal."""
    return '\\"'.join(text.split('"'))
def label_to_id(text):
    """Turn a label into an identifier: spaces and hyphens become underscores."""
    return text.translate(str.maketrans(" -", "__"))
def convert_synonyms(tax_id, synonyms):
    """Given a tax_id and a list of ``(synonym, unique, name_class)`` triples,
    return a list of Turtle fragments asserting the synonym triples plus an
    OWL axiom annotation recording each synonym's type.

    Triples whose name class has no mapped predicate are silently skipped.
    """
    fragments = []
    for raw_synonym, _unique, name_class in synonyms:
        predicate = predicates.get(name_class)
        if predicate is None:
            continue
        literal = escape_literal(raw_synonym)
        synonym_type = label_to_id(name_class)
        fragments.append(
            f"""
NCBITaxon:{tax_id} {predicate} "{literal}"^^xsd:string .
[ a owl:Axiom
; owl:annotatedSource NCBITaxon:{tax_id}
; owl:annotatedProperty {predicate}
; owl:annotatedTarget "{literal}"^^xsd:string
; oboInOwl:hasSynonymType ncbitaxon:{synonym_type}
] ."""
        )
    return fragments
def convert_node(node, label, merged, synonyms, citations):
    """Given a node dictionary, a label string, and lists for merged, synonyms, and citations,
    return a Turtle string representing this tax_id."""
    tax_id = node["tax_id"]
    output = [f"NCBITaxon:{tax_id} a owl:Class"]
    label = escape_literal(label)
    output.append(f'; rdfs:label "{label}"^^xsd:string')
    # The root node lists itself as its own parent; skip the subclass axiom
    # in that case (and when the parent field is empty).
    parent_tax_id = node["parent_tax_id"]
    if parent_tax_id and parent_tax_id != "" and parent_tax_id != tax_id:
        output.append(f"; rdfs:subClassOf NCBITaxon:{parent_tax_id}")
    rank = node["rank"]
    if rank and rank != "" and rank != "no rank":
        # Unknown ranks are still emitted, just warned about.
        if rank not in ranks:
            print(f"WARN Unrecognized rank '{rank}'")
        rank = label_to_id(rank)
        # WARN: This is a special case for backward compatibility
        if rank in ["species_group", "species_subgroup"]:
            output.append(
                f"; ncbitaxon:has_rank <http://purl.obolibrary.org/obo/NCBITaxon#_{rank}>"
            )
        else:
            output.append(f"; ncbitaxon:has_rank NCBITaxon:{rank}")
    gc_id = node["genetic_code_id"]
    if gc_id:
        output.append(f'; oboInOwl:hasDbXref "GC_ID:{gc_id}"^^xsd:string')
    # Old (merged-away) tax_ids become alternative ids of this class.
    for merge in merged:
        output.append(f'; oboInOwl:hasAlternativeId "NCBITaxon:{merge}"^^xsd:string')
    for pubmed_id in citations:
        output.append(f'; oboInOwl:hasDbXref "PMID:{pubmed_id}"^^xsd:string')
    output.append('; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string')
    output.append(".")
    output += convert_synonyms(tax_id, synonyms)
    return "\n".join(output)
def split_line(line):
    """Split one raw .dmp line on its column delimiter and strip each field."""
    raw_fields = line.split(" |")
    return [field.strip() for field in raw_fields]
def convert(taxdmp_path, output_path, taxa=None):
    """Given the paths to the taxdmp.zip file and an output Turtle file,
    and an optional set of tax_id strings to extract,
    read from the taxdmp.zip file, collect annotations,
    convert nodes to Turtle strings,
    and write to the output file."""
    # Accumulators: all keyed by tax_id except scientific_names, which is
    # keyed by name so that duplicate scientific names can be detected.
    scientific_names = defaultdict(list)
    labels = {}
    synonyms = defaultdict(list)
    merged = defaultdict(list)
    citations = defaultdict(list)
    with open(output_path, "w") as output:
        isodate = date.today().isoformat()
        # Ontology header: prefix declarations, the ontology resource
        # (version IRI stamped with today's date), and two hand-declared
        # annotation properties.
        output.write(
            f"""@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix obo: <http://purl.obolibrary.org/obo/> .
@prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#> .
@prefix ncbitaxon: <http://purl.obolibrary.org/obo/ncbitaxon#> .
@prefix NCBITaxon: <http://purl.obolibrary.org/obo/NCBITaxon_> .
@prefix : <http://purl.obolibrary.org/obo/ncbitaxon.owl#> .
<http://purl.obolibrary.org/obo/ncbitaxon.owl> a owl:Ontology
; owl:versionIRI <http://purl.obolibrary.org/obo/ncbitaxon/{isodate}/ncbitaxon.owl>
; rdfs:comment "Built by https://github.com/obophenotype/ncbitaxon"^^xsd:string
.
obo:IAO_0000115 a owl:AnnotationProperty
; rdfs:label "definition"^^xsd:string
.
ncbitaxon:has_rank a owl:AnnotationProperty
; obo:IAO_0000115 "A metadata relation between a class and its taxonomic rank (eg species, family)"^^xsd:string
; rdfs:label "has_rank"^^xsd:string
; rdfs:comment "This is an abstract class for use with the NCBI taxonomy to name the depth of the node within the tree. The link between the node term and the rank is only visible if you are using an obo 1.3 aware browser/editor; otherwise this can be ignored"^^xsd:string
; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string
.
"""
        )
        # One AnnotationProperty declaration per oboInOwl property used.
        for predicate, label in oboInOwl.items():
            output.write(
                f"""
oboInOwl:{predicate} a owl:AnnotationProperty
; rdfs:label "{label}"^^xsd:string
.
"""
            )
        # One synonym-type property per NCBI name class.
        for label, parent in predicates.items():
            predicate = label_to_id(label)
            parent = parent.replace("oboInOwl", "oio")
            output.write(
                f"""
ncbitaxon:{predicate} a owl:AnnotationProperty
; rdfs:label "{label}"^^xsd:string
; oboInOwl:hasScope "{parent}"^^xsd:string
; rdfs:subPropertyOf oboInOwl:SynonymTypeProperty
.
"""
            )
        with zipfile.ZipFile(taxdmp_path) as taxdmp:
            # names.dmp: collect labels ("scientific name" rows) and all
            # other name classes as synonyms.
            with taxdmp.open("names.dmp") as dmp:
                for line in io.TextIOWrapper(dmp):
                    tax_id, name, unique, name_class, _ = split_line(line)
                    if name_class == "scientific name":
                        labels[tax_id] = name
                        scientific_names[name].append([tax_id, unique])
                    else:
                        synonyms[tax_id].append([name, unique, name_class])
            # use unique name only if there's a conflict
            for name, values in scientific_names.items():
                tax_ids = [x[0] for x in values]
                if len(tax_ids) > 1:
                    uniques = [x[1] for x in values]
                    if len(tax_ids) != len(set(uniques)):
                        print("WARN: Duplicate unique names", tax_ids, uniques)
                    for tax_id, unique in values:
                        labels[tax_id] = unique
                        # The ambiguous plain name is kept as a synonym.
                        synonyms[tax_id].append([name, unique, "scientific name"])
            # merged.dmp: old tax_ids become alternative ids of the new one.
            with taxdmp.open("merged.dmp") as dmp:
                for line in io.TextIOWrapper(dmp):
                    old_tax_id, new_tax_id, _ = split_line(line)
                    merged[new_tax_id].append(old_tax_id)
            with taxdmp.open("citations.dmp") as dmp:
                for line in io.TextIOWrapper(dmp):
                    (
                        cit_id,
                        cit_key,
                        pubmed_id,
                        medline_id,
                        url,
                        text,
                        tax_id_list,
                        _,
                    ) = split_line(line)
                    # WARN: the pubmed_id is always "0", we treat medline_id as pubmed_id
                    if medline_id == "0":
                        continue
                    for tax_id in tax_id_list.split():
                        if taxa and tax_id not in taxa:
                            continue
                        citations[tax_id].append(medline_id)
            # nodes.dmp: one output class per node, filtered by `taxa` when
            # a subset was requested.
            with taxdmp.open("nodes.dmp") as dmp:
                for line in io.TextIOWrapper(dmp):
                    node = {}
                    fields = split_line(line)
                    for i in range(0, min(len(fields), len(nodes_fields))):
                        node[nodes_fields[i]] = fields[i]
                    tax_id = node["tax_id"]
                    if taxa and tax_id not in taxa:
                        continue
                    # NOTE(review): labels[tax_id] raises KeyError if a node
                    # has no names.dmp entry -- presumably never happens in
                    # real taxdump data; confirm.
                    result = convert_node(
                        node,
                        labels[tax_id],
                        merged[tax_id],
                        synonyms[tax_id],
                        citations[tax_id],
                    )
                    output.write(result)
            # TODO: delnodes
        # Trailer: the abstract "taxonomic rank" class and one class per rank.
        output.write(
            """
<http://purl.obolibrary.org/obo/NCBITaxon#_taxonomic_rank> a owl:Class
; rdfs:label "taxonomic rank"^^xsd:string
; rdfs:comment "This is an abstract class for use with the NCBI taxonomy to name the depth of the node within the tree. The link between the node term and the rank is only visible if you are using an obo 1.3 aware browser/editor; otherwise this can be ignored."^^xsd:string
; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string
.
"""
        )
        for label in ranks:
            rank = label_to_id(label)
            # Same backward-compatibility special case as in convert_node.
            if rank in ["species_group", "species_subgroup"]:
                iri = f"<http://purl.obolibrary.org/obo/NCBITaxon#_{rank}>"
            else:
                iri = f"NCBITaxon:{rank}"
            output.write(
                f"""
{iri} a owl:Class
; rdfs:label "{label}"^^xsd:string
; rdfs:subClassOf <http://purl.obolibrary.org/obo/NCBITaxon#_taxonomic_rank>
; oboInOwl:hasOBONamespace "ncbi_taxonomy"^^xsd:string
.
"""
            )
def main():
    """Command-line entry point: parse arguments and run the conversion."""
    parser = argparse.ArgumentParser(
        description="Convert NCBI Taxonomy taxdmp.zip to Turtle format"
    )
    parser.add_argument("taxdmp", type=str, help="The taxdmp.zip file to read")
    # Optional middle positional: argparse fills it only when three
    # positionals are given.
    parser.add_argument("taxa", type=str, nargs="?", help="A list of taxa to build")
    # TODO: upper, lower
    parser.add_argument("turtle", type=str, help="The output Turtle file to write")
    args = parser.parse_args()
    taxa = None
    if args.taxa:
        taxa = set()
        # Each line's first whitespace-delimited token is taken as a tax_id.
        with open(args.taxa) as taxalist:
            for line in taxalist:
                taxa.add(line.split()[0])
    convert(args.taxdmp, args.turtle, taxa)
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| obophenotype/ncbitaxon | src/ncbitaxon.py | Python | bsd-3-clause | 12,475 |
import tensorflow as tf
from networks.network import Network
from fcn.config import cfg
zero_out_module = tf.load_op_library('lib/triplet_flow_loss/triplet_flow_loss.so')
class custom_network(Network):
    """Siamese two-tower feature-extraction network (TF1 graph style).

    Two weight-sharing VGG-style towers embed a left and a right image at
    four scales (1x, 2x, 4x, 8x).  A triplet flow loss is applied per scale
    using ground-truth optical flow, occlusion masks and per-pixel object
    labels, and the four losses are averaged into ``final_triplet_loss``.
    Input batches are staged through a FIFO queue.
    """

    def __init__(self):
        # Static configuration pulled from the global config object.
        self.inputs = cfg.INPUT
        # self.input_format = input_format
        self.num_output_dimensions = 2 # formerly num_classes
        self.num_units = cfg.TRAIN.NUM_UNITS
        self.scale = 1 / cfg.TRAIN.SCALES_BASE[0]
        self.vertex_reg = cfg.TRAIN.VERTEX_REG
        # Feed placeholders; data is enqueued from these and dequeued
        # inside the graph (see self.q below).
        self.data_left = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.data_right = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.gt_flow = tf.placeholder(tf.float32, shape=[None, None, None, self.num_output_dimensions])
        self.occluded = tf.placeholder(tf.int32, shape=[None, None, None, 1])
        self.labels_left = tf.placeholder(tf.int32, shape=[None, None, None, None])
        self.labels_right = tf.placeholder(tf.int32, shape=[None, None, None, None])
        self.keep_prob = tf.placeholder(tf.float32)
        self.queue_size = 20
        # define a queue
        self.q = tf.FIFOQueue(self.queue_size, [tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.float32])
        self.enqueue_op = self.q.enqueue([self.data_left, self.data_right, self.gt_flow, self.occluded, self.labels_left, self.labels_right, self.keep_prob])
        data_left, data_right, gt_flow, occluded, left_labels, right_labels, self.keep_prob_queue = self.q.dequeue()
        # The layer registry the .feed()/.conv() chain API operates on.
        self.layers = dict({'data_left': data_left, 'data_right': data_right, 'gt_flow': gt_flow, 'occluded': occluded,
                            'left_labels': left_labels, "right_labels": right_labels})
        self.close_queue_op = self.q.close(cancel_pending_enqueues=True)
        self.queue_size_op = self.q.size('queue_size')
        self.trainable = cfg.TRAIN.TRAINABLE
        # Skip-link gates: each multiplier is 1.0 (enabled) or 0.0
        # (disabled) according to the network config.
        if cfg.NET_CONF.CONV1_SKIP_LINK:
            self.skip_1_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_1_mult = tf.constant(0.0, tf.float32)
        if cfg.NET_CONF.CONV2_SKIP_LINK:
            self.skip_2_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_2_mult = tf.constant(0.0, tf.float32)
        if cfg.NET_CONF.CONV3_SKIP_LINK:
            self.skip_4_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_4_mult = tf.constant(0.0, tf.float32)
        self.setup()

    def setup(self):
        """Build the graph: ground-truth pyramids, both towers, per-scale losses."""
        trainable = self.trainable
        # The right tower shares (re-uses) the left tower's variables.
        reuse = True
        with tf.device("/cpu:0"):
            # scaled versions of ground truth
            # Each avg_pool halves the resolution; flow vectors are divided
            # by 2 at every level so they stay in the scaled pixel units.
            (self.feed('gt_flow')
                 .avg_pool(2, 2, 2, 2, name='flow_pool1')
                 .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_2x')
                 .avg_pool(2, 2, 2, 2, name='flow_pool2')
                 .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_4x')
                 .avg_pool(2, 2, 2, 2, name='flow_pool3')
                 .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_8x')
                 .avg_pool(2, 2, 2, 2, name='flow_pool4')
                 .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_16x'))
            # Occlusion masks and label maps are average-pooled then rounded
            # back to integers at each scale.
            (self.feed('occluded').cast(tf.float32)
                 .avg_pool(2, 2, 2, 2, name='occluded_2x_avg')
                 .avg_pool(2, 2, 2, 2, name='occluded_4x_avg')
                 .avg_pool(2, 2, 2, 2, name='occluded_8x_avg')
                 .avg_pool(2, 2, 2, 2, name='occluded_16x_avg'))
            self.feed('occluded_2x_avg').round().cast(tf.int32, name="occluded_2x")
            self.feed('occluded_4x_avg').round().cast(tf.int32, name="occluded_4x")
            self.feed('occluded_8x_avg').round().cast(tf.int32, name="occluded_8x")
            self.feed('occluded_16x_avg').round().cast(tf.int32, name="occluded_16x")
            (self.feed('left_labels').cast(tf.float32)
                 .avg_pool(2, 2, 2, 2, name='left_labels_2x_avg')
                 .avg_pool(2, 2, 2, 2, name='left_labels_4x_avg')
                 .avg_pool(2, 2, 2, 2, name='left_labels_8x_avg')
                 .avg_pool(2, 2, 2, 2, name='left_labels_16x_avg'))
            self.feed('left_labels_2x_avg').round().cast(tf.int32, name="left_labels_2x")
            self.feed('left_labels_4x_avg').round().cast(tf.int32, name="left_labels_4x")
            self.feed('left_labels_8x_avg').round().cast(tf.int32, name="left_labels_8x")
            self.feed('left_labels_16x_avg').round().cast(tf.int32, name="left_labels_16x")
            (self.feed('right_labels').cast(tf.float32)
                 .avg_pool(2, 2, 2, 2, name='right_labels_2x_avg')
                 .avg_pool(2, 2, 2, 2, name='right_labels_4x_avg')
                 .avg_pool(2, 2, 2, 2, name='right_labels_8x_avg')
                 .avg_pool(2, 2, 2, 2, name='right_labels_16x_avg'))
            self.feed('right_labels_2x_avg').round().cast(tf.int32, name="right_labels_2x")
            self.feed('right_labels_4x_avg').round().cast(tf.int32, name="right_labels_4x")
            self.feed('right_labels_8x_avg').round().cast(tf.int32, name="right_labels_8x")
            self.feed('right_labels_16x_avg').round().cast(tf.int32, name="right_labels_16x")
        # left tower
        # VGG16-style conv1..conv4 backbone; taps (add_immediate of 0) name
        # the intermediate feature maps for the skip branches below.
        (self.feed('data_left')
             .add_immediate(tf.constant(0.0, tf.float32), name='data_left_tap')
             .conv(3, 3, 64, 1, 1, name='conv1_1', c_i=3, trainable=trainable)
             .conv(3, 3, 64, 1, 1, name='conv1_2', c_i=64, trainable=trainable)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv1_l')
             .max_pool(2, 2, 2, 2, name='pool1')
             .conv(3, 3, 128, 1, 1, name='conv2_1', c_i=64, trainable=trainable)
             .conv(3, 3, 128, 1, 1, name='conv2_2', c_i=128, trainable=trainable)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv2_l')
             .max_pool(2, 2, 2, 2, name='pool2')
             .conv(3, 3, 256, 1, 1, name='conv3_1', c_i=128, trainable=trainable)
             .conv(3, 3, 256, 1, 1, name='conv3_2', c_i=256, trainable=trainable)
             .conv(3, 3, 256, 1, 1, name='conv3_3', c_i=256, trainable=trainable)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv3_l')
             .max_pool(2, 2, 2, 2, name='pool3')
             .conv(3, 3, 512, 1, 1, name='conv4_1', c_i=256, trainable=trainable)
             .conv(3, 3, 512, 1, 1, name='conv4_2', c_i=512, trainable=trainable)
             .conv(3, 3, 512, 1, 1, name='conv4_3', c_i=512, trainable=trainable)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv4_3_l'))
        # 8x scaling input
        (self.feed('conv4_3_l')
             .conv(1, 1, 256, 1, 1, name='8x_skip_cov_1', c_i=512, elu=True)
             .conv(3, 3, 512, 1, 1, name='8x_skip_cov_2', c_i=256, elu=True)
             .conv(1, 1, 128, 1, 1, name='8x_skip_cov_3', c_i=512, elu=True)
             .conv(3, 3, 64, 1, 1, name='8x_skip_cov_4', c_i=128, elu=True)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_8x_l'))
        # 4x scaling input
        (self.feed('conv3_l')
             .conv(3, 3, 96, 1, 1, name='4x_skip_conv_1', elu=True, c_i=256)
             # .conv(1, 1, 96, 1, 1, name='4x_skip_conv_2', elu=True, c_i=96)
             # .conv(3, 3, 64, 1, 1, name='4x_skip_conv_3', elu=True, c_i=96)
             .conv(1, 1, 96, 1, 1, name='4x_skip_conv_4', elu=True, c_i=96)
             .conv(3, 3, 32, 1, 1, name='4x_skip_conv_5', elu=True, c_i=96)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_4x_l'))
        # 2x scaling input
        (self.feed('conv2_l')
             .conv(3, 3, 96, 1, 1, name='2x_skip_conv_1', elu=True, c_i=128)
             .conv(1, 1, 64, 1, 1, name='2x_skip_conv_2', elu=True, c_i=96)
             .conv(3, 3, 16, 1, 1, name='2x_skip_conv_3', c_i=64, elu=True)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_2x_l'))
        # 1x scaling input
        (self.feed('conv1_l')
             .conv(3, 3, 32, 1, 1, name='1x_skip_conv_1', elu=True, c_i=64)
             .conv(3, 3, 8, 1, 1, name='1x_skip_conv_2', c_i=32, elu=True)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_1x_l'))
        # right tower
        # Identical architecture; reuse=True makes every conv share the left
        # tower's weights (true Siamese network).
        (self.feed('data_right')
             .add_immediate(tf.constant(0.0, tf.float32), name='data_right_tap')
             .conv(3, 3, 64, 1, 1, name='conv1_1', c_i=3, trainable=trainable, reuse=reuse)
             .conv(3, 3, 64, 1, 1, name='conv1_2', c_i=64, trainable=trainable, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv1_r')
             .max_pool(2, 2, 2, 2, name='pool1')
             .conv(3, 3, 128, 1, 1, name='conv2_1', c_i=64, trainable=trainable, reuse=reuse)
             .conv(3, 3, 128, 1, 1, name='conv2_2', c_i=128, trainable=trainable, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv2_r')
             .max_pool(2, 2, 2, 2, name='pool2')
             .conv(3, 3, 256, 1, 1, name='conv3_1', c_i=128, trainable=trainable, reuse=reuse)
             .conv(3, 3, 256, 1, 1, name='conv3_2', c_i=256, trainable=trainable, reuse=reuse)
             .conv(3, 3, 256, 1, 1, name='conv3_3', c_i=256, trainable=trainable, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv3_r')
             .max_pool(2, 2, 2, 2, name='pool3')
             .conv(3, 3, 512, 1, 1, name='conv4_1', c_i=256, trainable=trainable, reuse=reuse)
             .conv(3, 3, 512, 1, 1, name='conv4_2', c_i=512, trainable=trainable, reuse=reuse)
             .conv(3, 3, 512, 1, 1, name='conv4_3', c_i=512, trainable=trainable, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='conv4_3_r'))
        # 8x scaling input
        (self.feed('conv4_3_r')
             .conv(1, 1, 256, 1, 1, name='8x_skip_cov_1', c_i=512, elu=True, reuse=reuse)
             .conv(3, 3, 512, 1, 1, name='8x_skip_cov_2', c_i=256, elu=True, reuse=reuse)
             .conv(1, 1, 128, 1, 1, name='8x_skip_cov_3', c_i=512, elu=True, reuse=reuse)
             .conv(3, 3, 64, 1, 1, name='8x_skip_cov_4', c_i=128, elu=True, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_8x_r'))
        # 4x scaling input
        (self.feed('conv3_r')
             .conv(3, 3, 96, 1, 1, name='4x_skip_conv_1', c_i=256, elu=True, reuse=reuse)
             # .conv(1, 1, 96, 1, 1, name='4x_skip_conv_2', c_i=96, elu=True, reuse=reuse)
             # .conv(3, 3, 64, 1, 1, name='4x_skip_conv_3', c_i=96, elu=True, reuse=reuse)
             .conv(1, 1, 96, 1, 1, name='4x_skip_conv_4', c_i=96, elu=True, reuse=reuse)
             .conv(3, 3, 32, 1, 1, name='4x_skip_conv_5', c_i=96, elu=True, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_4x_r'))
        # 2x scaling input
        (self.feed('conv2_r')
             .conv(3, 3, 96, 1, 1, name='2x_skip_conv_1', c_i=128, elu=True, reuse=reuse)
             .conv(1, 1, 64, 1, 1, name='2x_skip_conv_2', c_i=96, elu=True, reuse=reuse)
             .conv(3, 3, 16, 1, 1, name='2x_skip_conv_3', c_i=64, elu=True, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_2x_r'))
        # 1x scaling input
        (self.feed('conv1_r')
             .conv(3, 3, 32, 1, 1, name='1x_skip_conv_1', c_i=64, elu=True, reuse=reuse)
             .conv(3, 3, 8, 1, 1, name='1x_skip_conv_2', c_i=32, elu=True, reuse=reuse)
             .add_immediate(tf.constant(0.0, tf.float32), name='features_1x_r'))
        with tf.device("/cpu:0"):
            # triplet loss
            # One triplet flow loss per scale, pairing left/right features
            # with the matching ground-truth flow, occlusion and label maps.
            (self.feed(['features_1x_l', 'features_1x_r', 'gt_flow', 'occluded', 'left_labels', 'right_labels'])
                 .triplet_flow_loss(margin=1.0, negative_radius=2, positive_radius=0, name="triplet_loss_1x"))
            (self.feed(['features_2x_l', 'features_2x_r', 'gt_flow_2x', 'occluded_2x', 'left_labels_2x', 'right_labels_2x'])
                 .triplet_flow_loss(margin=1.0, negative_radius=3, positive_radius=0, name="triplet_loss_2x"))
            (self.feed(['features_4x_l', 'features_4x_r', 'gt_flow_4x', 'occluded_4x', 'left_labels_4x', 'right_labels_4x'])
                 .triplet_flow_loss(margin=1.0, negative_radius=3, positive_radius=0, name="triplet_loss_4x"))
            (self.feed(['features_8x_l', 'features_8x_r', 'gt_flow_8x', 'occluded_8x', 'left_labels_8x', 'right_labels_8x'])
                 .triplet_flow_loss(margin=1.0, negative_radius=3, positive_radius=0, name="triplet_loss_8x"))
            # Average the four per-scale losses into the training objective.
            final_output = (self.get_output('triplet_loss_8x')[0] + self.get_output('triplet_loss_2x')[0] +
                            self.get_output('triplet_loss_4x')[0] + self.get_output('triplet_loss_1x')[0]) / 4.0
            self.layers["final_triplet_loss"] = [final_output]
        # (self.feed(['features_8x_l', 'features4x_l', 'features_2x_l', 'features_1x_l'])
        #  .concat(axis=3, name="final_features_l_out"))
        #
        # (self.feed(['features_8x_r', 'features4x_r', 'features_2x_r', 'features_1x_r'])
        #  .concat(axis=3, name="final_features_r_out"))
        pass
| daweim0/Just-some-image-features | lib/networks/net_labeled_concat_features_shallower.py | Python | mit | 12,807 |
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
import datetime
import operator
import urllib
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone, translation, six
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from baseapp.models import School,Block,District
import pytz
from django.core import serializers
from emis_account import signals
from emis_account.compat import AUTH_USER_MODEL, get_user_model
from emis_account.conf import settings
from emis_account.fields import TimeZoneField
from emis_account.hooks import hookset
from emis_account.managers import EmailAddressManager, EmailConfirmationManager
from emis_account.signals import signup_code_sent, signup_code_used
from django.core.validators import MaxValueValidator, MinValueValidator #to validate min and max value
class Account(models.Model):
    """Profile-style companion record for a ``User``.

    Stores the user's mobile number, role (``user_category``) and the id of
    the administrative entity the account belongs to (``associated_with``),
    which :meth:`get_user_info` resolves to a concrete model instance.
    """

    user = models.OneToOneField(AUTH_USER_MODEL, related_name="account", verbose_name=_("user"))
    # Ten-digit Indian mobile numbers only (first digit 6-9).
    mobile_number = models.BigIntegerField(validators=[MinValueValidator(6000000000), MaxValueValidator(9999999999)])
    user_category = models.ForeignKey('User_Category')
    associated_with = models.CharField(max_length=20)
    created_date = models.DateTimeField(auto_now_add=True, editable=False)
    modified_date = models.DateTimeField(auto_now=True)

    # Category names grouped by the table their associated_with id lives in.
    BLOCK_CATEGORIES = ('block', 'edu_district', 'cbse', 'icse', 'othr')
    DISTRICT_CATEGORIES = (
        'district', 'dee_district', 'dse_district', 'dms_district',
        'cbse_district', 'icse_district', 'othr_district',
    )
    # State-level categories have no backing row; they resolve to "State".
    STATE_CATEGORIES = (
        'state', 'dee_state', 'dse_state', 'dms_state',
        'cbse_state', 'icse_state', 'othr_state',
    )

    @classmethod
    def for_request(cls, request):
        """Return the Account of the request's user, or an AnonymousAccount
        when the user is unauthenticated or has no Account row."""
        if request.user.is_authenticated():
            try:
                account = Account._default_manager.get(user=request.user)
            except Account.DoesNotExist:
                account = AnonymousAccount(request)
        else:
            account = AnonymousAccount(request)
        return account

    @classmethod
    def create(cls, request=None, **kwargs):
        """Build an (unsaved) Account; optionally register the user's email.

        ``create_email`` (default True) and ``confirm_email`` control the
        EmailAddress bookkeeping; all other kwargs go to the constructor.
        """
        create_email = kwargs.pop("create_email", True)
        confirm_email = kwargs.pop("confirm_email", None)
        account = cls(**kwargs)
        if create_email and account.user.email:
            email_kwargs = {"primary": True}
            if confirm_email is not None:
                email_kwargs["confirm"] = confirm_email
            EmailAddress.objects.add_email(account.user, account.user.email, **email_kwargs)
        return account

    def get_user_info(self):
        """Resolve ``associated_with`` to the entity this account is tied to.

        Returns a ``School``, ``Block`` or ``District`` instance, or the
        string ``"State"`` for state-level categories.  Raises
        ``DoesNotExist`` when the id has no matching row, and ``ValueError``
        for an unknown category.
        """
        category = str(self.user_category)
        if category == 'school':
            return School.objects.get(id=self.associated_with)
        if category in self.BLOCK_CATEGORIES:
            return Block.objects.get(id=self.associated_with)
        if category in self.DISTRICT_CATEGORIES:
            return District.objects.get(id=self.associated_with)
        if category in self.STATE_CATEGORIES:
            return "State"
        # Previously an unknown category fell through to an
        # UnboundLocalError at the return; raise something explicit instead
        # (still normalized to ValidationError by __unicode__).
        raise ValueError("Unknown user category: %s" % category)

    def __unicode__(self):
        # Validate that associated_with resolves under the chosen category;
        # any lookup failure is normalized to a ValidationError.
        try:
            if not self.get_user_info():
                raise ValidationError('Associated_with ID does not match with the user category')
        except Exception:
            raise ValidationError('Associated_with ID does not match with the user category')
        return u'%s' % (self.user)
#
# The call to get_user_model in global scope could lead to a circular import
# when the app cache is not fully initialized in some cases. It is rare, but
# it has happened. If you are debugging this problem and determine this line
# of code as being problematic, contact the developers right away.
#
@receiver(post_save, sender=get_user_model())
def user_post_save(sender, **kwargs):
    """
    Create an Account for every newly created User.

    Creation is skipped when disabled globally (ACCOUNT_CREATE_ON_SAVE)
    or per-instance via a ``_disable_account_creation`` attribute on the
    user.  We only act on creation to avoid an existence check on every
    call to User.save.
    """
    if not kwargs["created"]:
        return
    user = kwargs["instance"]
    skip_creation = getattr(
        user, "_disable_account_creation", not settings.ACCOUNT_CREATE_ON_SAVE
    )
    if not skip_creation:
        Account.create(user=user)
class AnonymousAccount(object):
    """Stand-in account object for unauthenticated visitors.

    Mirrors the attributes code reads off a real Account (``user``,
    ``timezone``, ``language``) using site-wide defaults; the language is
    negotiated from the request when one is available.
    """

    def __init__(self, request=None):
        self.user = AnonymousUser()
        self.timezone = settings.TIME_ZONE
        self.language = (
            settings.LANGUAGE_CODE
            if request is None
            else translation.get_language_from_request(request, check_path=True)
        )

    def __unicode__(self):
        return "AnonymousAccount"
class SignupCode(models.Model):
    """Invitation/signup code, optionally tied to an email address, with an
    expiry timestamp and a bounded number of uses (``max_uses=0`` means
    unlimited)."""

    class AlreadyExists(Exception):
        """Raised by :meth:`create` when a matching code/email already exists."""
        pass

    class InvalidCode(Exception):
        """Raised by :meth:`check` for unknown, exhausted or expired codes."""
        pass

    code = models.CharField(max_length=64, unique=True)
    max_uses = models.PositiveIntegerField(default=0)
    expiry = models.DateTimeField(null=True, blank=True)
    inviter = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True)
    email = models.EmailField(blank=True)
    notes = models.TextField(blank=True)
    sent = models.DateTimeField(null=True, blank=True)
    created = models.DateTimeField(default=timezone.now, editable=False)
    use_count = models.PositiveIntegerField(editable=False, default=0)

    def __unicode__(self):
        if self.email:
            return "{0} [{1}]".format(self.email, self.code)
        else:
            return self.code

    @classmethod
    def exists(cls, code=None, email=None):
        """Return True if any signup code matches this code OR this email.

        At least one of *code*/*email* must be truthy.
        """
        checks = []
        if code:
            checks.append(Q(code=code))
        if email:
            # BUG FIX: this previously appended Q(email=code), so duplicate
            # emails were never detected.
            checks.append(Q(email=email))
        return cls._default_manager.filter(six.moves.reduce(operator.or_, checks)).exists()

    @classmethod
    def create(cls, **kwargs):
        """Build an (unsaved) SignupCode.

        Keyword arguments: ``email``, ``code`` (generated when omitted),
        ``expiry`` (hours from now, default 24), ``max_uses``, ``inviter``,
        ``notes``, and ``check_exists`` (default True) which raises
        :class:`AlreadyExists` on a duplicate code/email.
        """
        email, code = kwargs.get("email"), kwargs.get("code")
        if kwargs.get("check_exists", True) and cls.exists(code=code, email=email):
            raise cls.AlreadyExists()
        expiry = timezone.now() + datetime.timedelta(hours=kwargs.get("expiry", 24))
        if not code:
            code = hookset.generate_signup_code_token(email)
        params = {
            "code": code,
            "max_uses": kwargs.get("max_uses", 0),
            "expiry": expiry,
            "inviter": kwargs.get("inviter"),
            "notes": kwargs.get("notes", "")
        }
        if email:
            params["email"] = email
        return cls(**params)

    @classmethod
    def check(cls, code):
        """Return the SignupCode for *code*, raising InvalidCode when the
        code is unknown, used up, or expired."""
        try:
            signup_code = cls._default_manager.get(code=code)
        except cls.DoesNotExist:
            raise cls.InvalidCode()
        else:
            if signup_code.max_uses and signup_code.max_uses <= signup_code.use_count:
                raise cls.InvalidCode()
            else:
                if signup_code.expiry and timezone.now() > signup_code.expiry:
                    raise cls.InvalidCode()
                else:
                    return signup_code

    def calculate_use_count(self):
        """Recompute ``use_count`` from the recorded SignupCodeResult rows."""
        self.use_count = self.signupcoderesult_set.count()
        self.save()

    def use(self, user):
        """
        Add a SignupCode result attached to the given user.
        """
        result = SignupCodeResult()
        result.signup_code = self
        result.user = user
        result.save()
        signup_code_used.send(sender=result.__class__, signup_code_result=result)

    def send(self, **kwargs):
        """Email the signup link for this code and record the send time."""
        protocol = getattr(settings, "DEFAULT_HTPROTOCOL", "http")
        current_site = kwargs["site"] if "site" in kwargs else Site.objects.get_current()
        signup_url = "{0}://{1}{2}?{3}".format(
            protocol,
            current_site.domain,
            reverse("account_signup"),
            urllib.urlencode({"code": self.code})
        )
        ctx = {
            "signup_code": self,
            "current_site": current_site,
            "signup_url": signup_url,
        }
        hookset.send_invitation_email([self.email], ctx)
        self.sent = timezone.now()
        self.save()
        signup_code_sent.send(sender=SignupCode, signup_code=self)
class SignupCodeResult(models.Model):
    """One recorded use of a :class:`SignupCode` by a specific user."""

    signup_code = models.ForeignKey(SignupCode)
    user = models.ForeignKey(AUTH_USER_MODEL)
    timestamp = models.DateTimeField(default=timezone.now)

    def save(self, **kwargs):
        """Persist the result, then refresh the parent code's use counter."""
        super(SignupCodeResult, self).save(**kwargs)
        self.signup_code.calculate_use_count()
class EmailAddress(models.Model):
    """An email address owned by a user, with verified/primary flags."""

    user = models.ForeignKey(AUTH_USER_MODEL)
    email = models.EmailField(unique=settings.ACCOUNT_EMAIL_UNIQUE)
    verified = models.BooleanField(default=False)
    primary = models.BooleanField(default=False)

    objects = EmailAddressManager()

    class Meta:
        verbose_name = _("email address")
        verbose_name_plural = _("email addresses")
        if not settings.ACCOUNT_EMAIL_UNIQUE:
            # Without global uniqueness, still keep one row per
            # (user, email) pair.
            unique_together = [("user", "email")]

    def __unicode__(self):
        return "{0} ({1})".format(self.email, self.user)

    def set_as_primary(self, conditional=False):
        """Make this address the user's primary one.

        With ``conditional=True`` the switch is a no-op (returns False) if
        another primary already exists.  On success the address is also
        mirrored onto ``user.email``; returns True.
        """
        old_primary = EmailAddress.objects.get_primary(self.user)
        if old_primary:
            if conditional:
                return False
            old_primary.primary = False
            old_primary.save()
        self.primary = True
        self.save()
        self.user.email = self.email
        self.user.save()
        return True

    def send_confirmation(self, **kwargs):
        """Create a confirmation token for this address, email it, and
        return the EmailConfirmation instance."""
        confirmation = EmailConfirmation.create(self)
        confirmation.send(**kwargs)
        return confirmation

    def change(self, new_email, confirm=True):
        """
        Given a new email address, change self and re-confirm.
        """
        # NOTE(review): transaction.commit_on_success was removed in Django
        # 1.8 in favour of transaction.atomic -- confirm the target Django
        # version before upgrading this file.
        with transaction.commit_on_success():
            self.user.email = new_email
            self.user.save()
            self.email = new_email
            self.verified = False
            self.save()
            if confirm:
                self.send_confirmation()
class EmailConfirmation(models.Model):
    """A time-limited, keyed token used to verify an EmailAddress."""

    email_address = models.ForeignKey(EmailAddress)
    # BUGFIX: pass the callable, not its result. ``default=timezone.now()``
    # was evaluated once at import time, stamping every confirmation with the
    # moment the process started instead of its actual creation time.
    created = models.DateTimeField(default=timezone.now)
    sent = models.DateTimeField(null=True)
    key = models.CharField(max_length=64, unique=True)

    objects = EmailConfirmationManager()

    class Meta:
        verbose_name = _("email confirmation")
        verbose_name_plural = _("email confirmations")

    def __unicode__(self):
        return "confirmation for {0}".format(self.email_address)

    @classmethod
    def create(cls, email_address):
        """Build a confirmation for *email_address* with a freshly generated key."""
        key = hookset.generate_email_confirmation_token(email_address.email)
        return cls._default_manager.create(email_address=email_address, key=key)

    def key_expired(self):
        """Return True when the key is older than the configured expiry window."""
        expiration_date = self.sent + datetime.timedelta(days=settings.ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS)
        return expiration_date <= timezone.now()
    key_expired.boolean = True

    def confirm(self):
        """Mark the address verified (if the key is still valid) and return it.

        Returns None implicitly when the key expired or the address was
        already verified.
        """
        if not self.key_expired() and not self.email_address.verified:
            email_address = self.email_address
            email_address.verified = True
            # Promote to primary only if the user has no primary address yet.
            email_address.set_as_primary(conditional=True)
            email_address.save()
            signals.email_confirmed.send(sender=self.__class__, email_address=email_address)
            return email_address

    def send(self, **kwargs):
        """Email the activation link, stamp ``sent`` and emit the sent signal."""
        current_site = kwargs["site"] if "site" in kwargs else Site.objects.get_current()
        protocol = getattr(settings, "DEFAULT_HTPROTOCOL", "http")
        activate_url = "{0}://{1}{2}".format(
            protocol,
            current_site.domain,
            reverse("account_confirm_email", args=[self.key])
        )
        ctx = {
            "email_address": self.email_address,
            "user": self.email_address.user,
            "activate_url": activate_url,
            "current_site": current_site,
            "key": self.key,
        }
        hookset.send_confirmation_email([self.email_address.email], ctx)
        self.sent = timezone.now()
        self.save()
        signals.email_confirmation_sent.send(sender=self.__class__, confirmation=self)
class AccountDeletion(models.Model):
    """Tracks a user's deletion request and when their data was expunged."""

    # SET_NULL keeps this audit row alive after the user row is removed.
    user = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL)
    # Copied from the user so the record outlives the user row.
    email = models.EmailField()
    date_requested = models.DateTimeField(default=timezone.now)
    date_expunged = models.DateTimeField(null=True, blank=True)

    @classmethod
    def expunge(cls, hours_ago=None):
        """Run the expunge callback for requests older than *hours_ago* hours.

        Defaults to ``settings.ACCOUNT_DELETION_EXPUNGE_HOURS``. Returns the
        number of accounts expunged.
        """
        if hours_ago is None:
            hours_ago = settings.ACCOUNT_DELETION_EXPUNGE_HOURS
        before = timezone.now() - datetime.timedelta(hours=hours_ago)
        count = 0
        # Only rows still attached to a user need expunging.
        for account_deletion in cls.objects.filter(date_requested__lt=before, user__isnull=False):
            settings.ACCOUNT_DELETION_EXPUNGE_CALLBACK(account_deletion)
            account_deletion.date_expunged = timezone.now()
            account_deletion.save()
            count += 1
        return count

    @classmethod
    def mark(cls, user):
        """Record (idempotently) that *user* asked for deletion; fire the mark callback."""
        account_deletion, created = cls.objects.get_or_create(user=user)
        account_deletion.email = user.email
        account_deletion.save()
        settings.ACCOUNT_DELETION_MARK_CALLBACK(account_deletion)
        return account_deletion
"""
Model for user category
"""
class User_Category(models.Model):
user_category = models.CharField(max_length=15)
def __unicode__(self):
return u'%s' % (self.user_category)
| tnemis/staging-server | emis_account/models.py | Python | mit | 15,738 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the ``CMSRedirect`` table."""

    def forwards(self, orm):
        """Create ``cms_redirects_cmsredirect`` and its unique constraint."""
        # Adding model 'CMSRedirect'
        db.create_table('cms_redirects_cmsredirect', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'], null=True, blank=True)),
            ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
            ('old_path', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
            ('new_path', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
        ))
        db.send_create_signal('cms_redirects', ['CMSRedirect'])

        # Adding unique constraint on 'CMSRedirect', fields ['site', 'old_path']
        db.create_unique('cms_redirects_cmsredirect', ['site_id', 'old_path'])

    def backwards(self, orm):
        """Undo :meth:`forwards` — constraint must go before the table."""
        # Removing unique constraint on 'CMSRedirect', fields ['site', 'old_path']
        db.delete_unique('cms_redirects_cmsredirect', ['site_id', 'old_path'])

        # Deleting model 'CMSRedirect'
        db.delete_table('cms_redirects_cmsredirect')

    # Frozen ORM snapshot used by South at migration time; auto-generated,
    # do not edit by hand.
    models = {
        'cms.page': {
            'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cms_redirects.cmsredirect': {
            'Meta': {'ordering': "('old_path',)", 'unique_together': "(('site', 'old_path'),)", 'object_name': 'CMSRedirect'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'old_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['cms_redirects']
| aptivate/djangocms-redirects | cms_redirects/migrations/0001_initial.py | Python | bsd-3-clause | 5,967 |
from __future__ import unicode_literals
import six
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from chamber.models import SmartModel
from chamber.utils import remove_accent
from ats_sms_operator.config import ATS_STATES, settings
from ats_sms_operator.utils.compatibility import unescape
@python_2_unicode_compatible
class AbstractInputATSSMSmessage(SmartModel):
    """Inbound SMS delivered to us by the ATS operator gateway."""

    received_at = models.DateTimeField(verbose_name=_('received at'), null=False, blank=False)
    # ATS-assigned unique message id; ``unique=True`` de-duplicates redeliveries.
    uniq = models.PositiveIntegerField(verbose_name=_('uniq'), null=False, blank=False, unique=True)
    sender = models.CharField(verbose_name=_('sender'), null=False, blank=False, max_length=20)
    recipient = models.CharField(verbose_name=_('recipient'), null=False, blank=False, max_length=20)
    # Operator routing identifiers as supplied by ATS.
    okey = models.CharField(verbose_name=_('okey'), null=False, blank=False, max_length=255)
    opid = models.CharField(verbose_name=_('opid'), null=False, blank=False, max_length=255)
    opmid = models.CharField(verbose_name=_('opmid'), null=False, blank=True, max_length=255)
    content = models.TextField(verbose_name=_('content'), null=False, blank=True)

    def __str__(self):
        return self.sender

    class Meta:
        abstract = True
        verbose_name = _('input ATS message')
        verbose_name_plural = _('input ATS messages')
        # Newest first; ``created_at`` is provided by SmartModel.
        ordering = ('-created_at',)
@python_2_unicode_compatible
class AbstractOutputATSSMSmessage(SmartModel):
    """Outbound SMS queued for (or already pushed through) the ATS gateway."""

    STATE = ATS_STATES

    sent_at = models.DateTimeField(verbose_name=_('sent at'), null=True, blank=True)
    sender = models.CharField(verbose_name=_('sender'), null=False, blank=False, max_length=20)
    recipient = models.CharField(verbose_name=_('recipient'), null=False, blank=False, max_length=20)
    opmid = models.CharField(verbose_name=_('opmid'), null=False, blank=True, max_length=255, default='')
    # Whether ATS should send us a delivery report for this message.
    dlr = models.BooleanField(verbose_name=_('require delivery notification?'), null=False, blank=False, default=True)
    validity = models.PositiveIntegerField(verbose_name=_('validity in minutes'), null=False, blank=False, default=60)
    kw = models.CharField(verbose_name=_('project keyword'), null=False, blank=False, max_length=255)
    lower_priority = models.BooleanField(verbose_name=_('lower priority'), null=False, blank=False, default=True)
    billing = models.BooleanField(verbose_name=_('billing'), null=False, blank=False, default=False)
    content = models.TextField(verbose_name=_('content'), null=False, blank=False, max_length=700)
    state = models.IntegerField(verbose_name=_('state'), null=False, blank=False, choices=STATE.choices,
                                default=STATE.LOCAL_TO_SEND)
    template_slug = models.SlugField(max_length=100, null=True, blank=True, verbose_name=_('slug'))

    def clean_content(self):
        # Optionally undo HTML escaping, then strip accents when the operator
        # is configured not to deliver accented characters.
        if settings.OPERATOR_UNESCAPE_HTML:
            self.content = unescape(self.content)
        if not settings.USE_ACCENT:
            self.content = six.text_type(remove_accent(six.text_type(self.content)))

    def clean_sender(self):
        # Sender numbers must carry no whitespace.
        self.sender = ''.join(self.sender.split())

    def _pre_save(self, change, *args, **kwargs):
        """Fill sender/keyword from settings when not provided explicitly."""
        super(AbstractOutputATSSMSmessage, self)._pre_save(change, *args, **kwargs)
        self.sender = self.sender or settings.OUTPUT_SENDER_NUMBER
        self.kw = self.kw or settings.PROJECT_KEYWORD

    def serialize_ats(self):
        """Render this message as the ``<sms>`` XML element the ATS API expects."""
        return (
            """<sms type="text" uniq="{prefix}{uniq}" sender="{sender}" recipient="{recipient}" opmid="{opmid}"
                   dlr="{dlr}" validity="{validity}" kw="{kw}"{extra}>
                <body order="0" billing="{billing}">{content}</body>
            </sms>""".format(
                prefix=settings.UNIQ_PREFIX, uniq=self.pk, sender=self.sender, recipient=self.recipient,
                opmid=self.opmid, dlr=int(self.dlr), validity=self.validity, kw=self.kw, billing=int(self.billing),
                content=self.ascii_content, extra=(
                    ' textid="{textid}"'.format(textid=settings.TEXTID) if settings.TEXTID else ''
                )
            )
        )

    @property
    def ascii_content(self):
        # Accent-free version of the body — what actually goes over the wire.
        return remove_accent(self.content)

    @property
    def failed(self):
        """True when the message ended in an unsuccessful state.

        NOTE(review): the ``*_OK`` registration states are counted as
        failures for ordinary messages here — confirm this is intended.
        """
        # States >= 100 are operator-side errors.
        return self.state >= 100 or self.state in (self.STATE.REGISTRATION_OK, self.STATE.REREGISTRATION_OK,
                                                   self.STATE.UNSPECIFIED_ERROR, self.STATE.LOCAL_UNKNOWN_ATS_STATE)

    def __str__(self):
        return self.recipient

    class Meta:
        abstract = True
        verbose_name = _('output ATS message')
        verbose_name_plural = _('output ATS messages')
        ordering = ('-created_at',)

    class UIMeta:
        add_button_verbose_name = _('send new SMS')
@python_2_unicode_compatible
class AbstractSMSTemplate(SmartModel):
    """Reusable SMS body, identified by its slug (the primary key)."""

    slug = models.SlugField(max_length=100, null=False, blank=False, primary_key=True, verbose_name=_('slug'))
    body = models.TextField(null=True, blank=False, verbose_name=_('message body'))

    def __str__(self):
        return self.slug

    class Meta:
        abstract = True
        verbose_name = _('SMS template')
        verbose_name_plural = _('SMS templates')
| druids/django-ats-sms-operator | ats_sms_operator/models.py | Python | lgpl-3.0 | 5,291 |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp block storage 7-mode library
"""
from lxml import etree
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fake_api as netapp_api)
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as na_utils
class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
    """Test case for NetApp's 7-Mode iSCSI library."""

    def setUp(self):
        super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()

        # Inject fake netapp_lib module classes.
        netapp_api.mock_netapp_lib([block_7mode])

        kwargs = {'configuration': self.get_config_7mode()}
        self.library = block_7mode.NetAppBlockStorage7modeLibrary(
            'driver', 'protocol', **kwargs)
        # Every test talks to a mocked ZAPI client, never a real filer.
        self.library.zapi_client = mock.Mock()
        self.zapi_client = self.library.zapi_client
        self.library.vfiler = mock.Mock()

    def tearDown(self):
        super(NetAppBlockStorage7modeLibraryTestCase, self).tearDown()

    def get_config_7mode(self):
        """Build a minimal 7-mode iSCSI driver configuration for the tests."""
        config = na_fakes.create_configuration_7mode()
        config.netapp_storage_protocol = 'iscsi'
        config.netapp_login = 'admin'
        config.netapp_password = 'pass'
        config.netapp_server_hostname = '127.0.0.1'
        config.netapp_transport_type = 'http'
        config.netapp_server_port = '80'
        return config

    @mock.patch.object(client_base.Client, 'get_ontapi_version',
                       mock.MagicMock(return_value=(1, 20)))
    @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
                       '_get_root_volume_name')
    @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
                       '_do_partner_setup')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
    def test_do_setup(self, super_do_setup, mock_do_partner_setup,
                      mock_get_root_volume_name):
        mock_get_root_volume_name.return_value = 'vol0'
        context = mock.Mock()

        self.library.do_setup(context)

        super_do_setup.assert_called_once_with(context)
        mock_do_partner_setup.assert_called_once_with()
        mock_get_root_volume_name.assert_called_once_with()

    @mock.patch.object(client_base.Client, 'get_ontapi_version',
                       mock.MagicMock(return_value=(1, 20)))
    def test_do_partner_setup(self):
        # A configured partner backend name must yield a partner ZAPI client.
        self.library.configuration.netapp_partner_backend_name = 'partner'

        self.library._do_partner_setup()

        self.assertIsNotNone(self.library.partner_zapi_client)

    @mock.patch.object(client_base.Client, 'get_ontapi_version',
                       mock.MagicMock(return_value=(1, 20)))
    def test_do_partner_setup_no_partner(self):
        self.library._do_partner_setup()

        self.assertFalse(hasattr(self.library, 'partner_zapi_client'))

    @mock.patch.object(
        block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
    def test_check_for_setup_error(self, super_check_for_setup_error):
        # ONTAPI 1.9 is the minimum supported version.
        self.zapi_client.get_ontapi_version.return_value = (1, 9)

        self.library.check_for_setup_error()

        super_check_for_setup_error.assert_called_once_with()

    def test_check_for_setup_error_too_old(self):
        self.zapi_client.get_ontapi_version.return_value = (1, 8)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.check_for_setup_error)

    def test_find_mapped_lun_igroup(self):
        # Canned lun-map-list-info ZAPI response with a matching igroup.
        response = netapp_api.NaElement(etree.XML("""
<results status="passed">
    <initiator-groups>
      <initiator-group-info>
        <initiator-group-name>%(initiator-group-name)s</initiator-group-name>
        <initiator-group-type>%(initiator-group-type)s</initiator-group-type>
        <initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
        </initiator-group-uuid>
        <initiator-group-os-type>linux</initiator-group-os-type>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-throttle-borrow>false
        </initiator-group-throttle-borrow>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-report-scsi-name-enabled>true
        </initiator-group-report-scsi-name-enabled>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiators>
          <initiator-info>
            <initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
          </initiator-info>
          <initiator-info>
            <initiator-name>21:00:00:24:ff:40:6c:c2</initiator-name>
            <initiator-alias-info>
              <initiator-alias>Centos</initiator-alias>
            </initiator-alias-info>
          </initiator-info>
        </initiators>
        <lun-id>2</lun-id>
      </initiator-group-info>
    </initiator-groups>
  </results>""" % fake.IGROUP1))
        initiators = fake.FC_FORMATTED_INITIATORS
        self.zapi_client.get_lun_map.return_value = response

        (igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
                                                                initiators)

        self.assertEqual(fake.IGROUP1_NAME, igroup)
        self.assertEqual('2', lun_id)

    def test_find_mapped_lun_igroup_initiator_mismatch(self):
        # The igroup lists only one of the two requested initiators, so no
        # mapping should be reported.
        response = netapp_api.NaElement(etree.XML("""
<results status="passed">
    <initiator-groups>
      <initiator-group-info>
        <initiator-group-name>openstack-igroup1</initiator-group-name>
        <initiator-group-type>fcp</initiator-group-type>
        <initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
        </initiator-group-uuid>
        <initiator-group-os-type>linux</initiator-group-os-type>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-throttle-borrow>false
        </initiator-group-throttle-borrow>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-report-scsi-name-enabled>true
        </initiator-group-report-scsi-name-enabled>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiators>
          <initiator-info>
            <initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
          </initiator-info>
        </initiators>
        <lun-id>2</lun-id>
      </initiator-group-info>
    </initiator-groups>
  </results>"""))
        initiators = fake.FC_FORMATTED_INITIATORS
        self.zapi_client.get_lun_map.return_value = response

        (igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
                                                                initiators)

        self.assertIsNone(igroup)
        self.assertIsNone(lun_id)

    def test_find_mapped_lun_igroup_no_igroups(self):
        response = netapp_api.NaElement(etree.XML("""
  <results status="passed">
    <initiator-groups />
  </results>"""))
        initiators = fake.FC_FORMATTED_INITIATORS
        self.zapi_client.get_lun_map.return_value = response

        (igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
                                                                initiators)

        self.assertIsNone(igroup)
        self.assertIsNone(lun_id)

    def test_find_mapped_lun_igroup_raises(self):
        # ZAPI errors must propagate, not be swallowed.
        self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError
        initiators = fake.FC_FORMATTED_INITIATORS
        self.assertRaises(netapp_api.NaApiError,
                          self.library._find_mapped_lun_igroup,
                          'path',
                          initiators)

    def test_has_luns_mapped_to_initiators_local_map(self):
        # Local hit short-circuits: the partner client is never consulted.
        initiator_list = fake.FC_FORMATTED_INITIATORS
        self.zapi_client.has_luns_mapped_to_initiators.return_value = True
        self.library.partner_zapi_client = mock.Mock()

        result = self.library._has_luns_mapped_to_initiators(initiator_list)

        self.assertTrue(result)
        self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
            initiator_list)
        self.assertEqual(0, self.library.partner_zapi_client.
                         has_luns_mapped_to_initiators.call_count)

    def test_has_luns_mapped_to_initiators_partner_map(self):
        # Local miss falls through to the partner node, which reports a map.
        initiator_list = fake.FC_FORMATTED_INITIATORS
        self.zapi_client.has_luns_mapped_to_initiators.return_value = False
        self.library.partner_zapi_client = mock.Mock()
        self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
            return_value = True

        result = self.library._has_luns_mapped_to_initiators(initiator_list)

        self.assertTrue(result)
        self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
            initiator_list)
        self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
            assert_called_with(initiator_list)

    def test_has_luns_mapped_to_initiators_no_maps(self):
        initiator_list = fake.FC_FORMATTED_INITIATORS
        self.zapi_client.has_luns_mapped_to_initiators.return_value = False
        self.library.partner_zapi_client = mock.Mock()
        self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
            return_value = False

        result = self.library._has_luns_mapped_to_initiators(initiator_list)

        self.assertFalse(result)
        self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
            initiator_list)
        self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
            assert_called_with(initiator_list)

    def test_has_luns_mapped_to_initiators_no_partner(self):
        # include_partner=False must ignore the partner even when it would
        # have reported a mapping.
        initiator_list = fake.FC_FORMATTED_INITIATORS
        self.zapi_client.has_luns_mapped_to_initiators.return_value = False
        self.library.partner_zapi_client = mock.Mock()
        self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
            return_value = True

        result = self.library._has_luns_mapped_to_initiators(
            initiator_list, include_partner=False)

        self.assertFalse(result)
        self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
            initiator_list)
        self.assertEqual(0, self.library.partner_zapi_client.
                         has_luns_mapped_to_initiators.call_count)

    def test_clone_lun_zero_block_count(self):
        """Test for when clone lun is not passed a block count."""
        self.library._get_lun_attr = mock.Mock(return_value={
            'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
        self.library.zapi_client = mock.Mock()
        self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
        self.library._add_lun_to_table = mock.Mock()

        self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false')

        self.library.zapi_client.clone_lun.assert_called_once_with(
            '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
            'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0)

    def test_clone_lun_no_space_reservation(self):
        """Test for when space_reservation is not passed."""
        self.library._get_lun_attr = mock.Mock(return_value={
            'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
        self.library.lun_space_reservation = 'false'
        self.library.zapi_client = mock.Mock()
        self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
        self.library._add_lun_to_table = mock.Mock()

        self.library._clone_lun('fakeLUN', 'newFakeLUN')

        self.library.zapi_client.clone_lun.assert_called_once_with(
            '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
            'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0)

    def test_clone_lun_qos_supplied(self):
        """Test for qos supplied in clone lun invocation."""
        # 7-mode does not support QoS policy groups, so this must raise.
        self.assertRaises(exception.VolumeDriverException,
                          self.library._clone_lun,
                          'fakeLUN',
                          'newFakeLUN',
                          qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)

    def test_get_fc_target_wwpns(self):
        # Local and partner WWPNs are merged by default.
        ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
                  fake.FC_FORMATTED_TARGET_WWPNS[1]]
        ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
                  fake.FC_FORMATTED_TARGET_WWPNS[3]]
        self.zapi_client.get_fc_target_wwpns.return_value = ports1
        self.library.partner_zapi_client = mock.Mock()
        self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
            ports2

        result = self.library._get_fc_target_wwpns()

        self.assertSetEqual(set(fake.FC_FORMATTED_TARGET_WWPNS), set(result))

    def test_get_fc_target_wwpns_no_partner(self):
        ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
                  fake.FC_FORMATTED_TARGET_WWPNS[1]]
        ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
                  fake.FC_FORMATTED_TARGET_WWPNS[3]]
        self.zapi_client.get_fc_target_wwpns.return_value = ports1
        self.library.partner_zapi_client = mock.Mock()
        self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
            ports2

        result = self.library._get_fc_target_wwpns(include_partner=False)

        self.assertSetEqual(set(ports1), set(result))

    @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
                       '_refresh_volume_info', mock.Mock())
    @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
                       '_get_pool_stats', mock.Mock())
    def test_vol_stats_calls_provide_ems(self):
        # Refreshing volume stats must emit an EMS autosupport message.
        self.library.zapi_client.provide_ems = mock.Mock()

        self.library.get_volume_stats(refresh=True)

        self.assertEqual(1, self.library.zapi_client.provide_ems.call_count)

    def test_create_lun(self):
        # Creating a LUN flags the volume list for voluntary refresh.
        self.library.vol_refresh_voluntary = False

        self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID,
                                 fake.LUN_SIZE, fake.LUN_METADATA)

        self.library.zapi_client.create_lun.assert_called_once_with(
            fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
            None)
        self.assertTrue(self.library.vol_refresh_voluntary)

    def test_create_lun_with_qos_policy_group(self):
        # QoS policy groups are unsupported on 7-mode.
        self.assertRaises(exception.VolumeDriverException,
                          self.library._create_lun, fake.VOLUME_ID,
                          fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
                          qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)

    def test_check_volume_type_for_lun_legacy_qos_not_supported(self):
        mock_get_volume_type = self.mock_object(na_utils,
                                                'get_volume_type_from_volume')

        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.library._check_volume_type_for_lun,
                          na_fakes.VOLUME, {}, {}, na_fakes.LEGACY_EXTRA_SPECS)

        self.assertEqual(0, mock_get_volume_type.call_count)

    def test_check_volume_type_for_lun_no_volume_type(self):
        mock_get_volume_type = self.mock_object(na_utils,
                                                'get_volume_type_from_volume')
        mock_get_volume_type.return_value = None
        mock_get_backend_spec = self.mock_object(
            na_utils, 'get_backend_qos_spec_from_volume_type')

        self.library._check_volume_type_for_lun(na_fakes.VOLUME, {}, {}, None)

        self.assertEqual(0, mock_get_backend_spec.call_count)

    def test_check_volume_type_for_lun_qos_spec_not_supported(self):
        mock_get_volume_type = self.mock_object(na_utils,
                                                'get_volume_type_from_volume')
        mock_get_volume_type.return_value = na_fakes.VOLUME_TYPE
        mock_get_backend_spec = self.mock_object(
            na_utils, 'get_backend_qos_spec_from_volume_type')
        mock_get_backend_spec.return_value = na_fakes.QOS_SPEC

        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.library._check_volume_type_for_lun,
                          na_fakes.VOLUME, {}, {}, na_fakes.EXTRA_SPECS)

    def test_get_preferred_target_from_list(self):
        result = self.library._get_preferred_target_from_list(
            fake.ISCSI_TARGET_DETAILS_LIST)

        self.assertEqual(fake.ISCSI_TARGET_DETAILS_LIST[0], result)

    def test_mark_qos_policy_group_for_deletion(self):
        # No-op on 7-mode: QoS policy groups do not exist there.
        result = self.library._mark_qos_policy_group_for_deletion(
            fake.QOS_POLICY_GROUP_INFO)

        self.assertEqual(None, result)

    def test_setup_qos_for_volume(self):
        # No-op on 7-mode as well.
        result = self.library._setup_qos_for_volume(fake.VOLUME,
                                                    fake.EXTRA_SPECS)

        self.assertEqual(None, result)

    def test_manage_existing_lun_same_name(self):
        # When the managed LUN already has the requested name, no rename
        # (move_lun) should happen.
        mock_lun = block_base.NetAppLun('handle', 'name', '1',
                                        {'Path': '/vol/vol1/name'})
        self.library._get_existing_vol_with_manage_ref = mock.Mock(
            return_value=mock_lun)
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(na_utils, 'log_extra_spec_warnings')
        self.library._check_volume_type_for_lun = mock.Mock()
        self.library._add_lun_to_table = mock.Mock()
        self.zapi_client.move_lun = mock.Mock()

        self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})

        self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
            {'ref': 'ref'})
        self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
        self.assertEqual(1, self.library._add_lun_to_table.call_count)
        self.assertEqual(0, self.zapi_client.move_lun.call_count)

    def test_manage_existing_lun_new_path(self):
        # A differing target name triggers a move_lun to the new path.
        mock_lun = block_base.NetAppLun(
            'handle', 'name', '1', {'Path': '/vol/vol1/name'})
        self.library._get_existing_vol_with_manage_ref = mock.Mock(
            return_value=mock_lun)
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(na_utils, 'log_extra_spec_warnings')
        self.library._check_volume_type_for_lun = mock.Mock()
        self.library._add_lun_to_table = mock.Mock()
        self.zapi_client.move_lun = mock.Mock()

        self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})

        self.assertEqual(
            2, self.library._get_existing_vol_with_manage_ref.call_count)
        self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
        self.assertEqual(1, self.library._add_lun_to_table.call_count)
        self.zapi_client.move_lun.assert_called_once_with(
            '/vol/vol1/name', '/vol/vol1/volume')
| nexusriot/cinder | cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py | Python | apache-2.0 | 20,108 |
from server import database
from server.constants import TargetType
from server.exceptions import ClientError, ArgumentError
from . import mod_only
__all__ = [
'ooc_cmd_disemvowel',
'ooc_cmd_undisemvowel',
'ooc_cmd_shake',
'ooc_cmd_unshake'
]
@mod_only()
def ooc_cmd_disemvowel(client, arg):
    """
    Remove all vowels from a user's IC chat.
    Usage: /disemvowel <id>
    """
    if len(arg) == 0:
        raise ArgumentError('You must specify a target.')
    try:
        targets = client.server.client_manager.get_targets(
            client, TargetType.ID, int(arg), False)
    except Exception:
        # Non-numeric id or lookup failure. Was a bare ``except:``, which
        # also swallowed SystemExit/KeyboardInterrupt.
        raise ArgumentError('You must specify a target. Use /disemvowel <id>.')
    if targets:
        for c in targets:
            database.log_room('disemvowel', client, client.area, target=c)
            c.disemvowel = True
        client.send_ooc(f'Disemvowelled {len(targets)} existing client(s).')
    else:
        client.send_ooc('No targets found.')
@mod_only()
def ooc_cmd_undisemvowel(client, arg):
    """
    Give back the freedom of vowels to a user.
    Usage: /undisemvowel <id>
    """
    if len(arg) == 0:
        raise ArgumentError('You must specify a target.')
    try:
        targets = client.server.client_manager.get_targets(
            client, TargetType.ID, int(arg), False)
    except Exception:
        # Non-numeric id or lookup failure. Was a bare ``except:``, which
        # also swallowed SystemExit/KeyboardInterrupt.
        raise ArgumentError(
            'You must specify a target. Use /undisemvowel <id>.')
    if targets:
        for c in targets:
            database.log_room('undisemvowel', client, client.area, target=c)
            c.disemvowel = False
        client.send_ooc(f'Undisemvowelled {len(targets)} existing client(s).')
    else:
        client.send_ooc('No targets found.')
@mod_only()
def ooc_cmd_shake(client, arg):
    """
    Scramble the words in a user's IC chat.
    Usage: /shake <id>
    """
    if len(arg) == 0:
        raise ArgumentError('You must specify a target.')
    try:
        targets = client.server.client_manager.get_targets(
            client, TargetType.ID, int(arg), False)
    except Exception:
        # Non-numeric id or lookup failure. Was a bare ``except:``, which
        # also swallowed SystemExit/KeyboardInterrupt.
        raise ArgumentError('You must specify a target. Use /shake <id>.')
    if targets:
        for c in targets:
            database.log_room('shake', client, client.area, target=c)
            c.shaken = True
        client.send_ooc(f'Shook {len(targets)} existing client(s).')
    else:
        client.send_ooc('No targets found.')
@mod_only()
def ooc_cmd_unshake(client, arg):
    """
    Give back the freedom of coherent grammar to a user.
    Usage: /unshake <id>
    """
    if len(arg) == 0:
        raise ArgumentError('You must specify a target.')
    try:
        targets = client.server.client_manager.get_targets(
            client, TargetType.ID, int(arg), False)
    except Exception:
        # Non-numeric id or lookup failure. Was a bare ``except:``, which
        # also swallowed SystemExit/KeyboardInterrupt.
        raise ArgumentError('You must specify a target. Use /unshake <id>.')
    if targets:
        for c in targets:
            database.log_room('unshake', client, client.area, target=c)
            c.shaken = False
        client.send_ooc(f'Unshook {len(targets)} existing client(s).')
    else:
        client.send_ooc('No targets found.')
| Attorney-Online-Engineering-Task-Force/tsuserver3 | server/commands/fun.py | Python | agpl-3.0 | 3,101 |
#!/usr/bin/env python3
from multiprocessing import Process
import socket
import struct
# SSDP multicast group and port used for UPnP/DIAL discovery (M-SEARCH).
MCAST_GRP = '239.255.255.250'
MCAST_PORT = 1900
# Address and port this host advertises for the DIAL REST service.
# NOTE(review): gethostbyname(getfqdn()) can resolve to a loopback address on
# some hosts -- confirm it yields a LAN-reachable IP in deployment.
HOST_IP = socket.gethostbyname(socket.getfqdn())
HOST_PORT = 52235
# Search target (ST) string identifying DIAL-capable devices.
msearch_string = 'urn:dial-multiscreen-org:service:dial:1'
# Response unicast back to an M-SEARCH sender; LOCATION points at our
# device description document (dd.xml).
msearch_response = """HTTP/1.1 200 OK
LOCATION: http://{ip}:{port}/dd.xml
CACHE-CONTROL: max-age=1800
EXT:
BOOTID.UPNP.ORG: 1
SERVER: OS/version UPnP/1.1 product/version
ST: {msearch_string}""".format(**{'ip': HOST_IP, 'port': HOST_PORT, 'msearch_string': msearch_string})
def discovery():
    """Join the SSDP multicast group and answer DIAL M-SEARCH requests.

    Runs forever; each datagram whose ST header matches our search target
    gets the canned msearch_response unicast back to the sender.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('', MCAST_PORT))
    membership = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
    while True:
        datagram, sender = sock.recvfrom(1024)
        text = datagram.decode('utf-8')
        for header in text.splitlines():
            parts = header.split(':', 1)
            # Only an 'ST' header with our exact search target gets a reply.
            if parts[0] == 'ST' and parts[1].strip() == msearch_string:
                sock.sendto(msearch_response.encode('utf-8'), sender)
def rest():
    """Accept TCP connections for the DIAL REST endpoint and dump requests.

    Currently this only reads and prints the incoming request; it never
    sends a response, so clients will hang until their own timeout fires.
    """
    rest_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without 'address already in use' errors.
    rest_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    rest_socket.bind((HOST_IP, HOST_PORT))
    rest_socket.listen(1)
    while True:
        connection, client_address = rest_socket.accept()
        try:
            msg = connection.recv(1024)
            msg = msg.decode('utf-8')
            print(msg)
        finally:
            # Bug fix: the original leaked every accepted connection; close
            # it so file descriptors are not exhausted over time.
            connection.close()
# Run the SSDP discovery responder and the REST listener concurrently,
# each in its own process.
if __name__ == '__main__':
    m = Process(target=discovery)
    m.start()
    r = Process(target=rest)
    r.start()
| davidpuzey/dial-youtube | dial.py | Python | gpl-2.0 | 1,758 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
translate.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterString,
QgsProcessingParameterEnum,
QgsProcessingParameterCrs,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class translate(GdalAlgorithm):
    """QGIS Processing wrapper around the ``gdal_translate`` utility
    (raster format conversion with optional CRS/nodata/type overrides)."""
    # Parameter and output identifiers used by the Processing framework.
    INPUT = 'INPUT'
    TARGET_CRS = 'TARGET_CRS'
    NODATA = 'NODATA'
    COPY_SUBDATASETS = 'COPY_SUBDATASETS'
    OPTIONS = 'OPTIONS'
    DATA_TYPE = 'DATA_TYPE'
    OUTPUT = 'OUTPUT'
    # Index 0 means "keep the input data type"; the rest are GDAL type names.
    TYPES = ['Use input layer data type', 'Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
    def __init__(self):
        super().__init__()
    def initAlgorithm(self, config=None):
        """Declare the algorithm's inputs, advanced options and output."""
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterCrs(self.TARGET_CRS,
                                                    self.tr('Override the projection for the output file'),
                                                    defaultValue=None,
                                                    optional=True))
        self.addParameter(QgsProcessingParameterNumber(self.NODATA,
                                                       self.tr('Assign a specified nodata value to output bands'),
                                                       type=QgsProcessingParameterNumber.Double,
                                                       defaultValue=None,
                                                       optional=True))
        self.addParameter(QgsProcessingParameterBoolean(self.COPY_SUBDATASETS,
                                                        self.tr('Copy all subdatasets of this file to individual output files'),
                                                        defaultValue=False))
        # Creation options and data type are "advanced" parameters: hidden
        # behind the advanced section of the dialog.
        options_param = QgsProcessingParameterString(self.OPTIONS,
                                                     self.tr('Additional creation options'),
                                                     defaultValue='',
                                                     optional=True)
        options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        options_param.setMetadata({
            'widget_wrapper': {
                'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
        self.addParameter(options_param)
        dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
                                                    self.tr('Output data type'),
                                                    self.TYPES,
                                                    allowMultiple=False,
                                                    defaultValue=0)
        dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(dataType_param)
        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
                                                                  self.tr('Converted')))
    def name(self):
        # Internal (untranslated) algorithm id.
        return 'translate'
    def displayName(self):
        return self.tr('Translate (convert format)')
    def group(self):
        return self.tr('Raster conversion')
    def groupId(self):
        return 'rasterconversion'
    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'translate.png'))
    def commandName(self):
        # Name of the external GDAL executable invoked.
        return 'gdal_translate'
    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Build the gdal_translate command line for the given parameters."""
        inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if inLayer is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        self.setOutputValue(self.OUTPUT, out)
        # NODATA is optional: only read it when the caller actually set it.
        if self.NODATA in parameters and parameters[self.NODATA] is not None:
            nodata = self.parameterAsDouble(parameters, self.NODATA, context)
        else:
            nodata = None
        arguments = []
        crs = self.parameterAsCrs(parameters, self.TARGET_CRS, context)
        if crs.isValid():
            arguments.append('-a_srs')
            arguments.append(GdalUtils.gdal_crs_string(crs))
        if nodata is not None:
            arguments.append('-a_nodata')
            arguments.append(nodata)
        if self.parameterAsBoolean(parameters, self.COPY_SUBDATASETS, context):
            arguments.append('-sds')
        data_type = self.parameterAsEnum(parameters, self.DATA_TYPE, context)
        # Enum index 0 ("use input data type") is falsy, so no -ot is emitted.
        if data_type:
            # NOTE(review): '-ot' and the type name are emitted as a single
            # argv element; confirm GdalUtils.escapeAndJoin leaves it unquoted.
            arguments.append('-ot ' + self.TYPES[data_type])
        # Choose the GDAL driver from the requested output file extension.
        arguments.append('-of')
        arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
        options = self.parameterAsString(parameters, self.OPTIONS, context)
        if options:
            arguments.extend(GdalUtils.parseCreationOptions(options))
        arguments.append(inLayer.source())
        arguments.append(out)
        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
| m-kuhn/QGIS | python/plugins/processing/algs/gdal/translate.py | Python | gpl-2.0 | 6,812 |
"""distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
import os
import sys
from types import *
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import *
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
    """Print all possible values for the 'formats' option (used by
    the "--help-formats" command-line option).
    """
    from distutils.fancy_getopt import FancyGetopt
    from distutils.archive_util import ARCHIVE_FORMATS
    # Build one (option, short-option, description) row per archive format,
    # sorted by option name.
    entries = sorted(
        ("formats=" + name, None, spec[2])
        for name, spec in ARCHIVE_FORMATS.items())
    FancyGetopt(entries).print_help(
        "List of available source distribution formats:")
class sdist(Command):

    description = "create a source distribution (tarball, zip file, etc.)"

    def checking_metadata(self):
        """Callable used for the check sub-command.

        Placed here so user_options can view it"""
        return self.metadata_check

    user_options = [
        ('template=', 't',
         "name of manifest template file [default: MANIFEST.in]"),
        ('manifest=', 'm',
         "name of manifest file [default: MANIFEST]"),
        ('use-defaults', None,
         "include the default file set in the manifest "
         "[default; disable with --no-defaults]"),
        ('no-defaults', None,
         "don't include the default file set"),
        ('prune', None,
         "specifically exclude files/directories that should not be "
         "distributed (build tree, RCS/CVS dirs, etc.) "
         "[default; disable with --no-prune]"),
        ('no-prune', None,
         "don't automatically exclude anything"),
        ('manifest-only', 'o',
         "just regenerate the manifest and then stop "
         "(implies --force-manifest)"),
        ('force-manifest', 'f',
         "forcibly regenerate the manifest and carry on as usual. "
         "Deprecated: now the manifest is always regenerated."),
        ('formats=', None,
         "formats for source distribution (comma-separated list)"),
        ('keep-temp', 'k',
         "keep the distribution tree around after creating " +
         "archive file(s)"),
        ('dist-dir=', 'd',
         "directory to put the source distribution archive(s) in "
         "[default: dist]"),
        ('metadata-check', None,
         "Ensure that all required elements of meta-data "
         "are supplied. Warn if any missing. [default]"),
        ('owner=', 'u',
         "Owner name used when creating a tar file [default: current user]"),
        ('group=', 'g',
         "Group name used when creating a tar file [default: current group]"),
        ]

    boolean_options = ['use-defaults', 'prune',
                       'manifest-only', 'force-manifest',
                       'keep-temp', 'metadata-check']

    help_options = [
        ('help-formats', None,
         "list available distribution formats", show_formats),
        ]

    negative_opt = {'no-defaults': 'use-defaults',
                    'no-prune': 'prune' }

    sub_commands = [('check', checking_metadata)]

    def initialize_options(self):
        # 'template' and 'manifest' are, respectively, the names of
        # the manifest template and manifest file.
        self.template = None
        self.manifest = None
        # 'use_defaults': if true, we will include the default file set
        # in the manifest
        self.use_defaults = 1
        self.prune = 1
        self.manifest_only = 0
        self.force_manifest = 0
        self.formats = ['gztar']
        self.keep_temp = 0
        self.dist_dir = None
        self.archive_files = None
        self.metadata_check = 1
        self.owner = None
        self.group = None

    def finalize_options(self):
        if self.manifest is None:
            self.manifest = "MANIFEST"
        if self.template is None:
            self.template = "MANIFEST.in"
        self.ensure_string_list('formats')
        bad_format = archive_util.check_archive_formats(self.formats)
        if bad_format:
            raise DistutilsOptionError(
                "unknown archive format '%s'" % bad_format)
        if self.dist_dir is None:
            self.dist_dir = "dist"

    def run(self):
        # 'filelist' contains the list of files that will make up the
        # manifest
        self.filelist = FileList()
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)
        # Do whatever it takes to get the list of files to process
        # (process the manifest template, read an existing manifest,
        # whatever).  File list is accumulated in 'self.filelist'.
        self.get_file_list()
        # If user just wanted us to regenerate the manifest, stop now.
        if self.manifest_only:
            return
        # Otherwise, go ahead and create the source distribution tarball,
        # or zipfile, or whatever.
        self.make_distribution()

    def check_metadata(self):
        """Deprecated API."""
        warn("distutils.command.sdist.check_metadata is deprecated, \
              use the check command instead", PendingDeprecationWarning)
        check = self.distribution.get_command_obj('check')
        check.ensure_finalized()
        check.run()

    def get_file_list(self):
        """Figure out the list of files to include in the source
        distribution, and put it in 'self.filelist'.  This might involve
        reading the manifest template (and writing the manifest), or just
        reading the manifest, or just using the default file set -- it all
        depends on the user's options.
        """
        # new behavior when using a template:
        # the file list is recalculated every time because
        # even if MANIFEST.in or setup.py are not changed
        # the user might have added some files in the tree that
        # need to be included.
        #
        # This makes --force the default and only behavior with templates.
        template_exists = os.path.isfile(self.template)
        if not template_exists and self._manifest_is_not_generated():
            self.read_manifest()
            self.filelist.sort()
            self.filelist.remove_duplicates()
            return
        if not template_exists:
            self.warn(("manifest template '%s' does not exist " +
                       "(using default file list)") %
                      self.template)
        self.filelist.findall()
        if self.use_defaults:
            self.add_defaults()
        if template_exists:
            self.read_template()
        if self.prune:
            self.prune_file_list()
        self.filelist.sort()
        self.filelist.remove_duplicates()
        self.write_manifest()

    def add_defaults(self):
        """Add all the default files to self.filelist:
          - README or README.txt
          - setup.py
          - test/test*.py
          - all pure Python modules mentioned in setup script
          - all files pointed by package_data (build_py)
          - all files defined in data_files.
          - all files defined as scripts.
          - all C sources listed as part of extensions or C libraries
            in the setup script (doesn't catch C headers!)
        Warns if (README or README.txt) or setup.py are missing; everything
        else is optional.
        """
        standards = [('README', 'README.txt'), self.distribution.script_name]
        for fn in standards:
            if isinstance(fn, tuple):
                # Alternative names: any one of them satisfies the standard.
                alts = fn
                got_it = False
                for fn in alts:
                    if os.path.exists(fn):
                        got_it = True
                        self.filelist.append(fn)
                        break
                if not got_it:
                    self.warn("standard file not found: should have one of " +
                              ', '.join(alts))
            else:
                if os.path.exists(fn):
                    self.filelist.append(fn)
                else:
                    self.warn("standard file '%s' not found" % fn)

        optional = ['test/test*.py', 'setup.cfg']
        for pattern in optional:
            files = filter(os.path.isfile, glob(pattern))
            self.filelist.extend(files)

        # build_py is used to get:
        #  - python modules
        #  - files defined in package_data
        build_py = self.get_finalized_command('build_py')

        # getting python files
        if self.distribution.has_pure_modules():
            self.filelist.extend(build_py.get_source_files())

        # getting package_data files
        # (computed in build_py.data_files by build_py.finalize_options)
        for pkg, src_dir, build_dir, filenames in build_py.data_files:
            for filename in filenames:
                self.filelist.append(os.path.join(src_dir, filename))

        # getting distribution.data_files
        if self.distribution.has_data_files():
            for item in self.distribution.data_files:
                if isinstance(item, str): # plain file
                    item = convert_path(item)
                    if os.path.isfile(item):
                        self.filelist.append(item)
                else:    # a (dirname, filenames) tuple
                    dirname, filenames = item
                    for f in filenames:
                        f = convert_path(f)
                        if os.path.isfile(f):
                            self.filelist.append(f)

        if self.distribution.has_ext_modules():
            build_ext = self.get_finalized_command('build_ext')
            self.filelist.extend(build_ext.get_source_files())

        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.filelist.extend(build_clib.get_source_files())

        if self.distribution.has_scripts():
            build_scripts = self.get_finalized_command('build_scripts')
            self.filelist.extend(build_scripts.get_source_files())

    def read_template(self):
        """Read and parse manifest template file named by self.template.

        (usually "MANIFEST.in") The parsing and processing is done by
        'self.filelist', which updates itself accordingly.
        """
        log.info("reading manifest template '%s'", self.template)
        template = TextFile(self.template, strip_comments=1, skip_blanks=1,
                            join_lines=1, lstrip_ws=1, rstrip_ws=1,
                            collapse_join=1)
        # TextFile is not a context manager, so close it with try/finally.
        try:
            while True:
                line = template.readline()
                if line is None:            # end of file
                    break
                try:
                    self.filelist.process_template_line(line)
                # the call above can raise a DistutilsTemplateError for
                # malformed lines, or a ValueError from the lower-level
                # convert_path function
                except (DistutilsTemplateError, ValueError) as msg:
                    self.warn("%s, line %d: %s" % (template.filename,
                                                   template.current_line,
                                                   msg))
        finally:
            template.close()

    def prune_file_list(self):
        """Prune off branches that might slip into the file list as created
        by 'read_template()', but really don't belong there:
          * the build tree (typically "build")
          * the release tree itself (only an issue if we ran "sdist"
            previously with --keep-temp, or it aborted)
          * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
        """
        build = self.get_finalized_command('build')
        base_dir = self.distribution.get_fullname()

        self.filelist.exclude_pattern(None, prefix=build.build_base)
        self.filelist.exclude_pattern(None, prefix=base_dir)

        if sys.platform == 'win32':
            seps = r'/|\\'
        else:
            seps = '/'

        vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
                    '_darcs']
        vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
        self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)

    def write_manifest(self):
        """Write the file list in 'self.filelist' (presumably as filled in
        by 'add_defaults()' and 'read_template()') to the manifest file
        named by 'self.manifest'.
        """
        if self._manifest_is_not_generated():
            log.info("not writing to manually maintained "
                     "manifest file '%s'" % self.manifest)
            return

        content = self.filelist.files[:]
        content.insert(0, '# file GENERATED by distutils, do NOT edit')
        self.execute(file_util.write_file, (self.manifest, content),
                     "writing manifest file '%s'" % self.manifest)

    def _manifest_is_not_generated(self):
        # check for special comment used in 3.1.3 and higher
        if not os.path.isfile(self.manifest):
            return False

        # 'with' guarantees the handle is closed even if readline() raises
        # (the original used an explicit try/finally).
        with open(self.manifest) as fp:
            first_line = fp.readline()
        return first_line != '# file GENERATED by distutils, do NOT edit\n'

    def read_manifest(self):
        """Read the manifest file (named by 'self.manifest') and use it to
        fill in 'self.filelist', the list of files to include in the source
        distribution.
        """
        log.info("reading manifest file '%s'", self.manifest)
        # Bug fix: the original left the file handle open if an exception
        # occurred while iterating; 'with' always closes it.
        with open(self.manifest) as manifest:
            for line in manifest:
                # ignore comments and blank lines
                line = line.strip()
                if line.startswith('#') or not line:
                    continue
                self.filelist.append(line)

    def make_release_tree(self, base_dir, files):
        """Create the directory tree that will become the source
        distribution archive.  All directories implied by the filenames in
        'files' are created under 'base_dir', and then we hard link or copy
        (if hard linking is unavailable) those files into place.
        Essentially, this duplicates the developer's source tree, but in a
        directory named after the distribution, containing only the files
        to be distributed.
        """
        # Create all the directories under 'base_dir' necessary to
        # put 'files' there; the 'mkpath()' is just so we don't die
        # if the manifest happens to be empty.
        self.mkpath(base_dir)
        dir_util.create_tree(base_dir, files, dry_run=self.dry_run)

        # And walk over the list of files, either making a hard link (if
        # os.link exists) to each one that doesn't already exist in its
        # corresponding location under 'base_dir', or copying each file
        # that's out-of-date in 'base_dir'.  (Usually, all files will be
        # out-of-date, because by default we blow away 'base_dir' when
        # we're done making the distribution archives.)

        if hasattr(os, 'link'):        # can make hard links on this system
            link = 'hard'
            msg = "making hard links in %s..." % base_dir
        else:                           # nope, have to copy
            link = None
            msg = "copying files to %s..." % base_dir

        if not files:
            log.warn("no files to distribute -- empty manifest?")
        else:
            log.info(msg)
        for file in files:
            if not os.path.isfile(file):
                log.warn("'%s' not a regular file -- skipping", file)
            else:
                dest = os.path.join(base_dir, file)
                self.copy_file(file, dest, link=link)

        self.distribution.metadata.write_pkg_info(base_dir)

    def make_distribution(self):
        """Create the source distribution(s).  First, we create the release
        tree with 'make_release_tree()'; then, we create all required
        archive files (according to 'self.formats') from the release tree.
        Finally, we clean up by blowing away the release tree (unless
        'self.keep_temp' is true).  The list of archive files created is
        stored so it can be retrieved later by 'get_archive_files()'.
        """
        # Don't warn about missing meta-data here -- should be (and is!)
        # done elsewhere.
        base_dir = self.distribution.get_fullname()
        base_name = os.path.join(self.dist_dir, base_dir)

        self.make_release_tree(base_dir, self.filelist.files)
        archive_files = []              # remember names of files we create
        # tar archive must be created last to avoid overwrite and remove
        if 'tar' in self.formats:
            self.formats.append(self.formats.pop(self.formats.index('tar')))

        for fmt in self.formats:
            file = self.make_archive(base_name, fmt, base_dir=base_dir,
                                     owner=self.owner, group=self.group)
            archive_files.append(file)
            self.distribution.dist_files.append(('sdist', '', file))

        self.archive_files = archive_files

        if not self.keep_temp:
            dir_util.remove_tree(base_dir, dry_run=self.dry_run)

    def get_archive_files(self):
        """Return the list of archive files created when the command
        was run, or None if the command hasn't run yet.
        """
        return self.archive_files
| yotchang4s/cafebabepy | src/main/python/distutils/command/sdist.py | Python | bsd-3-clause | 17,826 |
from django.conf.urls import url
from provider import views
# CRUD routes for providers, plus read-only views of base providers.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^new/$', views.new, name='new'),
    url(r'^(?P<provider_id>\d+)/$', views.view, name='view'),
    url(r'^(?P<provider_id>\d+)/edit/$', views.edit, name='edit'),
    url(r'^(?P<provider_id>\d+)/delete/$', views.delete, name='delete'),
    # Base Provider
    url(r'^base/$', views.base_index, name='base_index'),
    url(r'^base/(?P<base_provider_id>\d+)/$',
        views.base_view, name='base_view'),
]
| zyphrus/fetch-django | provider/urls.py | Python | mit | 537 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.scala.zinc_analysis import ZincAnalysis
from pants.backend.jvm.tasks.jvm_compile.scala.zinc_analysis_parser import ZincAnalysisParser
from pants.backend.jvm.tasks.jvm_compile.scala.zinc_utils import ZincUtils
class ScalaCompile(JvmCompile):
  """JvmCompile task that compiles Scala sources via zinc."""

  _language = 'scala'
  _file_suffix = '.scala'
  _config_section = 'scala-compile'

  @classmethod
  def get_args_default(cls, bootstrap_option_values):
    # Default scalac args (passed through zinc with the -S prefix).
    return ('-S-encoding', '-SUTF-8', '-S-g:vars')

  @classmethod
  def get_warning_args_default(cls):
    return ('-S-deprecation', '-S-unchecked')

  @classmethod
  def get_no_warning_args_default(cls):
    return ('-S-nowarn',)

  @classmethod
  def register_options(cls, register):
    super(ScalaCompile, cls).register_options(register)
    # Note: Used in ZincUtils.
    # TODO: Revisit this. It's unintuitive for ZincUtils to reach back into the task for options.
    register('--plugins', action='append', help='Use these scalac plugins.')

  def __init__(self, *args, **kwargs):
    super(ScalaCompile, self).__init__(*args, **kwargs)
    # Set up the zinc utils.
    self._zinc_utils = ZincUtils(
        context=self.context,
        nailgun_task=self,
        jvm_options=self._jvm_options,
        color=not self.get_options().no_colors,
        log_level=self.get_options().level)

  @property
  def config_section(self):
    return self._config_section

  def create_analysis_tools(self):
    return AnalysisTools(self.context, ZincAnalysisParser(self._classes_dir), ZincAnalysis)

  def extra_compile_time_classpath_elements(self):
    # Classpath entries necessary for our compiler plugins.
    return self._zinc_utils.plugin_jars()

  def platform_version_info(self):
    """Invalidate caches if the toolchain changes."""
    invalidation_key = self._zinc_utils.platform_version_info()
    # Pick the jvm target version out of the scalac args (last one wins).
    target_version = ''
    for candidate in self._args:
      stripped = candidate.strip()
      if stripped.startswith("-S-target:"):
        target_version = stripped
    invalidation_key.append(target_version)
    return invalidation_key

  def extra_products(self, target):
    products = []
    if target.is_scalac_plugin and target.classname:
      root, plugin_info_file = ZincUtils.write_plugin_info(self._resources_dir, target)
      products.append((root, [plugin_info_file]))
    return products

  def compile(self, args, classpath, sources, classes_output_dir, analysis_file):
    # We have to treat our output dir as an upstream element, so zinc can find
    # valid analysis for previous partitions. We use the global valid analysis
    # for the upstream.
    if os.path.exists(self._analysis_file):
      upstream = {classes_output_dir: self._analysis_file}
    else:
      upstream = {}
    return self._zinc_utils.compile(args, classpath + [self._classes_dir], sources,
                                    classes_output_dir, analysis_file, upstream)
| square/pants | src/python/pants/backend/jvm/tasks/jvm_compile/scala/scala_compile.py | Python | apache-2.0 | 3,486 |
"""
Illustrates "point label" functionality.
"""
import matplotlib.pyplot as plt
from mpldatacursor import datacursor
labels = ['a', 'b', 'c', 'd', 'e', 'f']
x = [0, 0.05, 1, 2, 3, 4]
# All points on this figure will point labels.
fig, ax = plt.subplots()
ax.plot(x, x, 'ro')
ax.margins(0.1)
datacursor(axes=ax, point_labels=labels)
# Only the blue points will have point labels on this figure.
fig, ax = plt.subplots()
line, = ax.plot(x, range(6), 'bo')
ax.plot(range(5), 'go')
ax.margins(0.1)
datacursor(axes=ax, point_labels={line:labels})
plt.show()
| joferkington/mpldatacursor | examples/labeled_points_example.py | Python | mit | 559 |
from haystack import indexes
from common.models import Person, CustomPage, NewsArticle, Job
class PersonIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for Person records."""
    # Primary document field; content is rendered from the search template.
    text = indexes.CharField(document=True, use_template=True)
    first_name = indexes.CharField(model_attr='first_name')
    last_name = indexes.CharField(model_attr='last_name')
    def get_model(self):
        """Return the model class this index covers."""
        return Person
class CustomPageIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for CustomPage records."""
    # Primary document field; content is rendered from the search template.
    text = indexes.CharField(document=True, use_template=True)
    content = indexes.CharField(model_attr='content')
    def get_model(self):
        """Return the model class this index covers."""
        return CustomPage
class NewsArticleIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for NewsArticle records."""
    # Primary document field; content is rendered from the search template.
    text = indexes.CharField(document=True, use_template=True)
    title = indexes.CharField(model_attr='title')
    intro = indexes.CharField(model_attr='intro')
    body = indexes.CharField(model_attr='body')
    def get_model(self):
        """Return the model class this index covers."""
        return NewsArticle
class JobIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for Job records (template-rendered text only)."""
    text = indexes.CharField(document=True, use_template=True)
    def get_model(self):
        """Return the model class this index covers."""
        return Job
| baylee-d/cos.io | common/search_indexes.py | Python | apache-2.0 | 1,112 |
from tfs import *
from pylab import *
from numpy import *
import glob, os
import nibabel as nib
# Keep pyplot windows responsive while the script keeps running.
matplotlib.interactive(True)
session = tf.InteractiveSession()
# Directory holding the NIfTI volumes and their *_cc truth masks.
dataPath = './corpusCallosum/'
# Helpers for padding images so their dimensions divide evenly by 2**depth.
def computePad(dims, depth):
    """Return (top, bottom, left, right) padding that grows the last two
    entries of ``dims`` up to the nearest multiple of 2**depth.  The padding
    is split as evenly as possible, with any odd pixel going to the
    bottom/right side."""
    multiple = 2 ** depth
    target_y = int(numpy.ceil(dims[-2] / float(multiple))) * multiple
    target_x = int(numpy.ceil(dims[-1] / float(multiple))) * multiple
    pad_y = target_y - dims[-2]
    pad_x = target_x - dims[-1]
    return (pad_y // 2, pad_y - pad_y // 2, pad_x // 2, pad_x - pad_x // 2)
def padImage(img, depth):
    """Pads (or crops) an image so it is evenly divisible by 2**depth."""
    top, bottom, left, right = computePad(img.shape, depth)
    # Pad only the last two axes; all leading axes keep their size.
    pad_widths = [(0, 0)] * len(img.shape)
    pad_widths[-2] = (top, bottom)
    pad_widths[-1] = (left, right)
    return numpy.pad(img, pad_widths, 'constant')
# Class to serve up segmented images
class CCData(object):
    """Serves (image, truth) 2-D slice pairs loaded from NIfTI file pairs."""
    def __init__(self,paths,padding=None):
        # paths: list of (image_path, truth_path) tuples.
        # padding: if set, pad slices so dims divide evenly by 2**padding.
        self.paths = paths
        self.padding = padding
    def getSlices(self,paths):
        """Load one volume pair and return the single 2-D slice containing
        the segmentation, optionally padded."""
        image,truth = paths
        image = nib.load(image).get_data(); truth = nib.load(truth).get_data()
        # Per axis: the distinct slice indices where the truth mask is nonzero.
        slicesWithValues = [unique(s) for s in where(truth>0)]
        # The axis with the fewest distinct indices is taken as the slice axis.
        sliceAxis = argmin([len(s) for s in slicesWithValues])
        slicesWithValues = slicesWithValues[sliceAxis]
        slc = repeat(-1,3); slc[sliceAxis] = slicesWithValues[0]
        # NOTE(review): image[slc][0] fancy-indexes along axis 0, so this
        # extracts the labelled slice only when sliceAxis == 0 -- confirm the
        # data is always segmented along the first axis.
        if not self.padding is None:
            image, truth = [padImage(im,self.padding) for im in (image[slc][0],truth[slc][0])]
        else:
            image, truth = (image[slc][0],truth[slc][0])
        return (image,truth)
    def next_batch(self,miniBatch=None):
        """Return (images, truths) for a random mini batch, or for the whole
        set when miniBatch is None or equals the number of pairs."""
        if miniBatch is None or miniBatch==len(self.paths):
            batch = arange(0,len(self.paths))
        else:
            # Sampling with replacement.
            batch = random.choice(arange(0,len(self.paths)),miniBatch)
        images = [self.getSlices(self.paths[i]) for i in batch]
        return list(zip(*images))
class Container(object):
    """Splits the volumes found in dataPath into train and test CCData sets."""

    def __init__(self, dataPath, reserve=2, **args):
        self.dataPath = dataPath
        volumes = glob.glob(os.path.join(dataPath, '?????.nii.gz'))
        # Pair each image with its matching *_cc truth mask.
        pairs = [(v, v.replace('.nii.gz', '_cc.nii.gz')) for v in volumes]
        self.train = CCData(pairs[0:-reserve], **args)
        self.test = CCData(pairs[reserve:], **args)
# Build the data container and grab a sample batch.
data = Container(dataPath,reserve=2)
batch = data.train.next_batch(2)
trainingIterations = 1000
# Placeholders: x is a batch of 2-D slices, y_ the matching binary masks.
x = tf.placeholder('float',shape=[None,None,None],name='input')
y_ = tf.placeholder('float', shape=[None,None,None],name='truth')
# One-hot encode the mask into 2 classes (background / corpus callosum).
y_OneHot = tf.one_hot(indices=tf.cast(y_,tf.int32),depth=2,name='truthOneHot')
# Add a trailing channel axis so conv2d accepts the input.
xInput = tf.expand_dims(x,axis=3,name='xInput')
#Standard conv net from Session 3 using new TensorFlow layers
net = LD1 = tf.layers.conv2d(
	inputs=xInput,
	filters=2,
	kernel_size=[5,5],
	strides = 1,
	padding = 'same',
	activation=tf.nn.relu,
	name='convD1'
)
# Per-pixel class scores; softmax over the channel (class) axis.
logits = LD1
y = tf.nn.softmax(logits,-1)
loss = tf.losses.softmax_cross_entropy(onehot_labels=y_OneHot, logits=logits)
trainDict = {}
testDict = {}
logName = None #logName = 'logs/Conv'
# Training and evaluation
trainStep = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Accuracy
correctPrediction = tf.equal(tf.argmax(y,axis=-1), tf.argmax(y_OneHot,axis=-1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction,'float'))
# Jaccard (intersection over union of the predicted and true masks)
output = tf.cast(tf.argmax(y,axis=-1), dtype=tf.float32)
truth = tf.cast(tf.argmax(y_OneHot,axis=-1), dtype=tf.float32)
intersection = tf.reduce_sum(tf.reduce_sum(tf.multiply(output, truth), axis=-1),axis=-1)
union = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.add(output, truth)>= 1, dtype=tf.float32), axis=-1),axis=-1)
jaccard = tf.reduce_mean(intersection / union)
# Run the training loop (helper from the tfs package).
train(session=session,trainingData=data.train,testingData=data.test,truth=y_,input=x,cost=loss,trainingStep=trainStep,accuracy=accuracy,iterations=trainingIterations,miniBatch=2,trainDict=trainDict,testDict=testDict,logName=logName)
# Make a figure
# Get a couple of examples
batch = data.test.next_batch(2)
ex = array(batch[0])
segmentation = y.eval({x:ex})
# Display each example: grayscale slice with the predicted mask contour.
figure('Example 1'); clf()
imshow(batch[0][0].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][0].transpose(),alpha=0.5,color='g');
contour(segmentation[0,:,:,1].transpose(),alpha=0.5,color='b')
figure('Example 2'); clf()
imshow(batch[0][1].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][1].transpose(),alpha=0.5,color='g');
contour(segmentation[1,:,:,1].transpose(),alpha=0.5,color='b')
plotOutput(LD1,{x:ex[0:1]},figOffset='Layer 1 Output')
| robb-brown/IntroToDeepLearning | 6_MRISegmentation/ccseg.py | Python | mit | 4,404 |
# -*-coding: utf-8 -*-
from zope.interface import implementer
from ..interfaces import (
IResourceType,
)
from ..resources import (
ResourceTypeBase,
)
@implementer(IResourceType)
class PersonsCategoriesResource(ResourceTypeBase):
    """Resource type for the persons-categories section."""
    # Traversal/route name of this resource type.
    __name__ = 'persons_categories'
    @property
    def allowed_assign(self):
        # Presumably marks this resource type as assignable -- confirm the
        # flag's meaning against ResourceTypeBase.
        return True
| mazvv/travelcrm | travelcrm/resources/persons_categories.py | Python | gpl-3.0 | 344 |
from nestedg.data import unknown
from nestedg.model import Model
from .body import Body, Blood
from .clothing import ClothingSet
class Person(Model):
    """Generated person composed of a body, a psyche and a clothing set."""
    @classmethod
    def children_classes(cls):
        """Yield the model classes generated as this person's children."""
        yield Body
        yield unknown.Psyche
        yield ClothingSet
    @classmethod
    def _generate_name(cls):
        # Placeholder token; presumably substituted by the name generator.
        return '*PERSON*'
    @property
    def clothing(self):
        """The person's ClothingSet child (assumes exactly one exists)."""
        return next(self.filter_children(ClothingSet))
    # The properties below are convenience delegates to the clothing set's
    # individual garments.
    @property
    def hat(self):
        return self.clothing.hat
    @property
    def glasses(self):
        return self.clothing.glasses
    @property
    def pants(self):
        return self.clothing.pants
    @property
    def shirt(self):
        return self.clothing.shirt
    @property
    def coat(self):
        return self.clothing.coat
    @property
    def socks(self):
        return self.clothing.socks
    @property
    def shoes(self):
        return self.clothing.shoes
    @property
    def underwear(self):
        return self.clothing.underwear
class Man(Person):
    """A male person; differs from Person only in the name token."""
    @classmethod
    def _generate_name(cls):
        return '*MAN*'
class Woman(Person):
    """A female person; differs from Person only in the name token."""
    @classmethod
    def _generate_name(cls):
        return '*WOMAN*'
class Corpse(Person):
    """A dead person: no psyche, plus probabilistic blood and worms."""
    @classmethod
    def children_classes(cls):
        # Unlike Person, a corpse has no Psyche child; blood and two
        # independent worm chances are added probabilistically.
        yield Body
        yield ClothingSet
        yield Blood.probable(35)
        yield unknown.Worm.probable(20)
        yield unknown.Worm.probable(10)
    @classmethod
    def _generate_name(cls):
        # NOTE(review): the '|' inside this name token looks accidental --
        # confirm it is intended before relying on the exact string.
        return '*PERSON*| (dead)'
| d2emon/generator-pack | src/factories/nestedg/data/people/__init__.py | Python | gpl-3.0 | 1,510 |
"""Z-Wave Constants."""
ATTR_NODE_ID = "node_id"
ATTR_VALUE_ID = "value_id"
ATTR_OBJECT_ID = "object_id"
ATTR_NAME = "name"
ATTR_SCENE_ID = "scene_id"
ATTR_BASIC_LEVEL = "basic_level"
NETWORK_READY_WAIT_SECS = 30
SERVICE_ADD_NODE = "add_node"
SERVICE_ADD_NODE_SECURE = "add_node_secure"
SERVICE_REMOVE_NODE = "remove_node"
SERVICE_CANCEL_COMMAND = "cancel_command"
SERVICE_HEAL_NETWORK = "heal_network"
SERVICE_SOFT_RESET = "soft_reset"
SERVICE_TEST_NETWORK = "test_network"
SERVICE_STOP_NETWORK = "stop_network"
SERVICE_START_NETWORK = "start_network"
SERVICE_RENAME_NODE = "rename_node"
EVENT_SCENE_ACTIVATED = "zwave.scene_activated"
EVENT_NODE_EVENT = "zwave.node_event"
EVENT_NETWORK_READY = "zwave.network_ready"
EVENT_NETWORK_COMPLETE = "zwave.network_complete"
EVENT_NETWORK_START = "zwave.network_start"
EVENT_NETWORK_STOP = "zwave.network_stop"
COMMAND_CLASS_ALARM = 113
COMMAND_CLASS_ANTITHEFT = 93
COMMAND_CLASS_APPLICATION_CAPABILITY = 87
COMMAND_CLASS_APPLICATION_STATUS = 34
COMMAND_CLASS_ASSOCIATION = 133
COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION = 155
COMMAND_CLASS_ASSOCIATION_GRP_INFO = 89
COMMAND_CLASS_BARRIER_OPERATOR = 102
COMMAND_CLASS_BASIC = 32
COMMAND_CLASS_BASIC_TARIFF_INFO = 54
COMMAND_CLASS_BASIC_WINDOW_COVERING = 80
COMMAND_CLASS_BATTERY = 128
COMMAND_CLASS_CENTRAL_SCENE = 91
COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE = 70
COMMAND_CLASS_CLOCK = 129
COMMAND_CLASS_CONFIGURATION = 112
COMMAND_CLASS_CONTROLLER_REPLICATION = 33
COMMAND_CLASS_CRC_16_ENCAP = 86
COMMAND_CLASS_DCP_CONFIG = 58
COMMAND_CLASS_DCP_MONITOR = 59
COMMAND_CLASS_DEVICE_RESET_LOCALLY = 90
COMMAND_CLASS_DOOR_LOCK = 98
COMMAND_CLASS_DOOR_LOCK_LOGGING = 76
COMMAND_CLASS_ENERGY_PRODUCTION = 144
COMMAND_CLASS_ENTRY_CONTROL = 111
COMMAND_CLASS_FIRMWARE_UPDATE_MD = 122
COMMAND_CLASS_GEOGRAPHIC_LOCATION = 140
COMMAND_CLASS_GROUPING_NAME = 123
COMMAND_CLASS_HAIL = 130
COMMAND_CLASS_HRV_CONTROL = 57
COMMAND_CLASS_HRV_STATUS = 55
COMMAND_CLASS_HUMIDITY_CONTROL_MODE = 109
COMMAND_CLASS_HUMIDITY_CONTROL_OPERATING_STATE = 110
COMMAND_CLASS_HUMIDITY_CONTROL_SETPOINT = 100
COMMAND_CLASS_INDICATOR = 135
COMMAND_CLASS_IP_ASSOCIATION = 92
COMMAND_CLASS_IP_CONFIGURATION = 14
COMMAND_CLASS_IRRIGATION = 107
COMMAND_CLASS_LANGUAGE = 137
COMMAND_CLASS_LOCK = 118
COMMAND_CLASS_MAILBOX = 105
COMMAND_CLASS_MANUFACTURER_PROPRIETARY = 145
COMMAND_CLASS_MANUFACTURER_SPECIFIC = 114
COMMAND_CLASS_MARK = 239
COMMAND_CLASS_METER = 50
COMMAND_CLASS_METER_PULSE = 53
COMMAND_CLASS_METER_TBL_CONFIG = 60
COMMAND_CLASS_METER_TBL_MONITOR = 61
COMMAND_CLASS_METER_TBL_PUSH = 62
COMMAND_CLASS_MTP_WINDOW_COVERING = 81
COMMAND_CLASS_MULTI_CHANNEL = 96
COMMAND_CLASS_MULTI_CHANNEL_ASSOCIATION = 142
COMMAND_CLASS_MULTI_COMMAND = 143
COMMAND_CLASS_NETWORK_MANAGEMENT_BASIC = 77
COMMAND_CLASS_NETWORK_MANAGEMENT_INCLUSION = 52
COMMAND_CLASS_NETWORK_MANAGEMENT_PRIMARY = 84
COMMAND_CLASS_NETWORK_MANAGEMENT_PROXY = 82
COMMAND_CLASS_NO_OPERATION = 0
COMMAND_CLASS_NODE_NAMING = 119
COMMAND_CLASS_NON_INTEROPERABLE = 240
COMMAND_CLASS_NOTIFICATION = 113
COMMAND_CLASS_POWERLEVEL = 115
COMMAND_CLASS_PREPAYMENT = 63
COMMAND_CLASS_PREPAYMENT_ENCAPSULATION = 65
COMMAND_CLASS_PROPRIETARY = 136
COMMAND_CLASS_PROTECTION = 117
COMMAND_CLASS_RATE_TBL_CONFIG = 72
COMMAND_CLASS_RATE_TBL_MONITOR = 73
COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE = 124
COMMAND_CLASS_REMOTE_ASSOCIATION = 125
COMMAND_CLASS_SCENE_ACTIVATION = 43
COMMAND_CLASS_SCENE_ACTUATOR_CONF = 44
COMMAND_CLASS_SCENE_CONTROLLER_CONF = 45
COMMAND_CLASS_SCHEDULE = 83
COMMAND_CLASS_SCHEDULE_ENTRY_LOCK = 78
COMMAND_CLASS_SCREEN_ATTRIBUTES = 147
COMMAND_CLASS_SCREEN_MD = 146
COMMAND_CLASS_SECURITY = 152
COMMAND_CLASS_SECURITY_SCHEME0_MARK = 61696
COMMAND_CLASS_SENSOR_ALARM = 156
COMMAND_CLASS_SENSOR_BINARY = 48
COMMAND_CLASS_SENSOR_CONFIGURATION = 158
COMMAND_CLASS_SENSOR_MULTILEVEL = 49
COMMAND_CLASS_SILENCE_ALARM = 157
COMMAND_CLASS_SIMPLE_AV_CONTROL = 148
COMMAND_CLASS_SUPERVISION = 108
COMMAND_CLASS_SWITCH_ALL = 39
COMMAND_CLASS_SWITCH_BINARY = 37
COMMAND_CLASS_SWITCH_COLOR = 51
COMMAND_CLASS_SWITCH_MULTILEVEL = 38
COMMAND_CLASS_SWITCH_TOGGLE_BINARY = 40
COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL = 41
COMMAND_CLASS_TARIFF_TBL_CONFIG = 74
COMMAND_CLASS_TARIFF_TBL_MONITOR = 75
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 68
COMMAND_CLASS_THERMOSTAT_FAN_STATE = 69
COMMAND_CLASS_THERMOSTAT_MODE = 64
COMMAND_CLASS_THERMOSTAT_OPERATING_STATE = 66
COMMAND_CLASS_THERMOSTAT_SETBACK = 71
COMMAND_CLASS_THERMOSTAT_SETPOINT = 67
COMMAND_CLASS_TIME = 138
COMMAND_CLASS_TIME_PARAMETERS = 139
COMMAND_CLASS_TRANSPORT_SERVICE = 85
COMMAND_CLASS_USER_CODE = 99
COMMAND_CLASS_VERSION = 134
COMMAND_CLASS_WAKE_UP = 132
COMMAND_CLASS_ZIP = 35
COMMAND_CLASS_ZIP_NAMING = 104
COMMAND_CLASS_ZIP_ND = 88
COMMAND_CLASS_ZIP_6LOWPAN = 79
COMMAND_CLASS_ZIP_GATEWAY = 95
COMMAND_CLASS_ZIP_PORTAL = 97
COMMAND_CLASS_ZWAVEPLUS_INFO = 94
COMMAND_CLASS_WHATEVER = None # Match ALL
COMMAND_CLASS_WINDOW_COVERING = 106
GENERIC_TYPE_WHATEVER = None # Match ALL
SPECIFIC_TYPE_WHATEVER = None # Match ALL
SPECIFIC_TYPE_NOT_USED = 0 # Available in all Generic types
GENERIC_TYPE_AV_CONTROL_POINT = 3
SPECIFIC_TYPE_DOORBELL = 18
SPECIFIC_TYPE_SATELLITE_RECIEVER = 4
SPECIFIC_TYPE_SATELLITE_RECIEVER_V2 = 17
GENERIC_TYPE_DISPLAY = 4
SPECIFIC_TYPE_SIMPLE_DISPLAY = 1
GENERIC_TYPE_ENTRY_CONTROL = 64
SPECIFIC_TYPE_DOOR_LOCK = 1
SPECIFIC_TYPE_ADVANCED_DOOR_LOCK = 2
SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK = 3
SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK_DEADBOLT = 4
SPECIFIC_TYPE_SECURE_DOOR = 5
SPECIFIC_TYPE_SECURE_GATE = 6
SPECIFIC_TYPE_SECURE_BARRIER_ADDON = 7
SPECIFIC_TYPE_SECURE_BARRIER_OPEN_ONLY = 8
SPECIFIC_TYPE_SECURE_BARRIER_CLOSE_ONLY = 9
SPECIFIC_TYPE_SECURE_LOCKBOX = 10
SPECIFIC_TYPE_SECURE_KEYPAD = 11
GENERIC_TYPE_GENERIC_CONTROLLER = 1
SPECIFIC_TYPE_PORTABLE_CONTROLLER = 1
SPECIFIC_TYPE_PORTABLE_SCENE_CONTROLLER = 2
SPECIFIC_TYPE_PORTABLE_INSTALLER_TOOL = 3
SPECIFIC_TYPE_REMOTE_CONTROL_AV = 4
SPECIFIC_TYPE_REMOTE_CONTROL_SIMPLE = 6
GENERIC_TYPE_METER = 49
SPECIFIC_TYPE_SIMPLE_METER = 1
SPECIFIC_TYPE_ADV_ENERGY_CONTROL = 2
SPECIFIC_TYPE_WHOLE_HOME_METER_SIMPLE = 3
GENERIC_TYPE_METER_PULSE = 48
GENERIC_TYPE_NON_INTEROPERABLE = 255
GENERIC_TYPE_REPEATER_SLAVE = 15
SPECIFIC_TYPE_REPEATER_SLAVE = 1
SPECIFIC_TYPE_VIRTUAL_NODE = 2
GENERIC_TYPE_SECURITY_PANEL = 23
SPECIFIC_TYPE_ZONED_SECURITY_PANEL = 1
GENERIC_TYPE_SEMI_INTEROPERABLE = 80
SPECIFIC_TYPE_ENERGY_PRODUCTION = 1
GENERIC_TYPE_SENSOR_ALARM = 161
SPECIFIC_TYPE_ADV_ZENSOR_NET_ALARM_SENSOR = 5
SPECIFIC_TYPE_ADV_ZENSOR_NET_SMOKE_SENSOR = 10
SPECIFIC_TYPE_BASIC_ROUTING_ALARM_SENSOR = 1
SPECIFIC_TYPE_BASIC_ROUTING_SMOKE_SENSOR = 6
SPECIFIC_TYPE_BASIC_ZENSOR_NET_ALARM_SENSOR = 3
SPECIFIC_TYPE_BASIC_ZENSOR_NET_SMOKE_SENSOR = 8
SPECIFIC_TYPE_ROUTING_ALARM_SENSOR = 2
SPECIFIC_TYPE_ROUTING_SMOKE_SENSOR = 7
SPECIFIC_TYPE_ZENSOR_NET_ALARM_SENSOR = 4
SPECIFIC_TYPE_ZENSOR_NET_SMOKE_SENSOR = 9
SPECIFIC_TYPE_ALARM_SENSOR = 11
GENERIC_TYPE_SENSOR_BINARY = 32
SPECIFIC_TYPE_ROUTING_SENSOR_BINARY = 1
GENERIC_TYPE_SENSOR_MULTILEVEL = 33
SPECIFIC_TYPE_ROUTING_SENSOR_MULTILEVEL = 1
SPECIFIC_TYPE_CHIMNEY_FAN = 2
GENERIC_TYPE_STATIC_CONTROLLER = 2
SPECIFIC_TYPE_PC_CONTROLLER = 1
SPECIFIC_TYPE_SCENE_CONTROLLER = 2
SPECIFIC_TYPE_STATIC_INSTALLER_TOOL = 3
SPECIFIC_TYPE_SET_TOP_BOX = 4
SPECIFIC_TYPE_SUB_SYSTEM_CONTROLLER = 5
SPECIFIC_TYPE_TV = 6
SPECIFIC_TYPE_GATEWAY = 7
GENERIC_TYPE_SWITCH_BINARY = 16
SPECIFIC_TYPE_POWER_SWITCH_BINARY = 1
SPECIFIC_TYPE_SCENE_SWITCH_BINARY = 3
SPECIFIC_TYPE_POWER_STRIP = 4
SPECIFIC_TYPE_SIREN = 5
SPECIFIC_TYPE_VALVE_OPEN_CLOSE = 6
SPECIFIC_TYPE_COLOR_TUNABLE_BINARY = 2
SPECIFIC_TYPE_IRRIGATION_CONTROLLER = 7
GENERIC_TYPE_SWITCH_MULTILEVEL = 17
SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL = 5
SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL = 6
SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL = 7
SPECIFIC_TYPE_MOTOR_MULTIPOSITION = 3
SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL = 1
SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL = 4
SPECIFIC_TYPE_FAN_SWITCH = 8
SPECIFIC_TYPE_COLOR_TUNABLE_MULTILEVEL = 2
GENERIC_TYPE_SWITCH_REMOTE = 18
SPECIFIC_TYPE_REMOTE_BINARY = 1
SPECIFIC_TYPE_REMOTE_MULTILEVEL = 2
SPECIFIC_TYPE_REMOTE_TOGGLE_BINARY = 3
SPECIFIC_TYPE_REMOTE_TOGGLE_MULTILEVEL = 4
GENERIC_TYPE_SWITCH_TOGGLE = 19
SPECIFIC_TYPE_SWITCH_TOGGLE_BINARY = 1
SPECIFIC_TYPE_SWITCH_TOGGLE_MULTILEVEL = 2
GENERIC_TYPE_THERMOSTAT = 8
SPECIFIC_TYPE_SETBACK_SCHEDULE_THERMOSTAT = 3
SPECIFIC_TYPE_SETBACK_THERMOSTAT = 5
SPECIFIC_TYPE_SETPOINT_THERMOSTAT = 4
SPECIFIC_TYPE_THERMOSTAT_GENERAL = 2
SPECIFIC_TYPE_THERMOSTAT_GENERAL_V2 = 6
SPECIFIC_TYPE_THERMOSTAT_HEATING = 1
GENERIC_TYPE_VENTILATION = 22
SPECIFIC_TYPE_RESIDENTIAL_HRV = 1
GENERIC_TYPE_WINDOWS_COVERING = 9
SPECIFIC_TYPE_SIMPLE_WINDOW_COVERING = 1
GENERIC_TYPE_ZIP_NODE = 21
SPECIFIC_TYPE_ZIP_ADV_NODE = 2
SPECIFIC_TYPE_ZIP_TUN_NODE = 1
GENERIC_TYPE_WALL_CONTROLLER = 24
SPECIFIC_TYPE_BASIC_WALL_CONTROLLER = 1
GENERIC_TYPE_NETWORK_EXTENDER = 5
SPECIFIC_TYPE_SECURE_EXTENDER = 1
GENERIC_TYPE_APPLIANCE = 6
SPECIFIC_TYPE_GENERAL_APPLIANCE = 1
SPECIFIC_TYPE_KITCHEN_APPLIANCE = 2
SPECIFIC_TYPE_LAUNDRY_APPLIANCE = 3
GENERIC_TYPE_SENSOR_NOTIFICATION = 7
SPECIFIC_TYPE_NOTIFICATION_SENSOR = 1
GENRE_WHATEVER = None
GENRE_USER = "User"
TYPE_WHATEVER = None
TYPE_BYTE = "Byte"
TYPE_BOOL = "Bool"
TYPE_DECIMAL = "Decimal"
| leoc/home-assistant | homeassistant/components/zwave/const.py | Python | mit | 9,260 |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests in this module will be skipped unless:
- ovsdb-client is installed
- ovsdb-client can be invoked password-less via the configured root helper
- sudo testing is enabled (see neutron.tests.functional.base for details)
"""
import eventlet
from oslo_config import cfg
from neutron.agent.linux import ovsdb_monitor
from neutron.agent.linux import utils
from neutron.tests import base as tests_base
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base as linux_base
from neutron.tests.functional import base as functional_base
class BaseMonitorTest(linux_base.BaseOVSLinuxTestCase):
    """Common setup for tests that drive ``ovsdb-client`` monitors."""
    def setUp(self):
        super(BaseMonitorTest, self).setUp()
        rootwrap_not_configured = (cfg.CONF.AGENT.root_helper ==
                                   functional_base.SUDO_CMD)
        if rootwrap_not_configured:
            # The monitor tests require a nested invocation that has
            # to be emulated by double sudo if rootwrap is not
            # configured.
            self.config(group='AGENT',
                        root_helper=" ".join([functional_base.SUDO_CMD] * 2))
        self._check_test_requirements()
        # ovsdb-client monitor needs to have a bridge to make any output
        self.useFixture(net_helpers.OVSBridgeFixture())
    def _check_test_requirements(self):
        # Skip the test class unless ovsdb-client can be invoked
        # password-less via the configured root helper.
        self.check_command(['ovsdb-client', 'list-dbs'],
                           'Exit code: 1',
                           'password-less sudo not granted for ovsdb-client',
                           run_as_root=True)
class TestOvsdbMonitor(BaseMonitorTest):
    """Functional tests for the generic OvsdbMonitor process wrapper."""
    def setUp(self):
        super(TestOvsdbMonitor, self).setUp()
        self.monitor = ovsdb_monitor.OvsdbMonitor('Bridge')
        self.addCleanup(self.monitor.stop)
        self.monitor.start()
    def collect_initial_output(self):
        """Poll the monitor until it emits output; return its header row."""
        while True:
            output = list(self.monitor.iter_stdout())
            if output:
                # Output[0] is header row with spaces for column separation.
                # The column widths can vary depending on the data in the
                # columns, so compress multiple spaces to one for testing.
                return ' '.join(output[0].split())
            eventlet.sleep(0.01)
    def test_killed_monitor_respawns(self):
        """Killing the child process must trigger an immediate respawn."""
        self.monitor.respawn_interval = 0
        old_pid = self.monitor._process.pid
        output1 = self.collect_initial_output()
        pid = utils.get_root_helper_child_pid(old_pid, run_as_root=True)
        self.monitor._kill_process(pid)
        self.monitor._reset_queues()
        # Wait until the monitor has been restarted under a new pid.
        while (self.monitor._process.pid == old_pid):
            eventlet.sleep(0.01)
        output2 = self.collect_initial_output()
        # Initial output should appear twice
        self.assertEqual(output1, output2)
class TestSimpleInterfaceMonitor(BaseMonitorTest):
    """Functional tests for the interface-change monitor."""
    def setUp(self):
        super(TestSimpleInterfaceMonitor, self).setUp()
        self.monitor = ovsdb_monitor.SimpleInterfaceMonitor()
        self.addCleanup(self.monitor.stop)
        # In case a global test timeout isn't set or disabled, use a
        # value that will ensure the monitor has time to start.
        timeout = max(tests_base.get_test_timeout(), 60)
        self.monitor.start(block=True, timeout=timeout)
    def test_has_updates(self):
        """has_updates is True initially and after a port is added."""
        utils.wait_until_true(lambda: self.monitor.data_received is True)
        self.assertTrue(self.monitor.has_updates,
                        'Initial call should always be true')
        self.assertFalse(self.monitor.has_updates,
                         'has_updates without port addition should be False')
        self.useFixture(net_helpers.OVSPortFixture())
        # has_updates after port addition should become True
        while not self.monitor.has_updates:
            eventlet.sleep(0.01)
| JioCloud/neutron | neutron/tests/functional/agent/linux/test_ovsdb_monitor.py | Python | apache-2.0 | 4,453 |
from swampy.TurtleWorld import *
import math
#def polygon( t, length, n ):
#max_cir_angle = 360
#angle = max_cir_angle / n
#for i in range( n ):
#fd( t, length )
#lt( t, angle )
def polyline(t, length, n, angle):
    """Draw n straight segments with turtle t.

    Each segment is ``length`` units long and the turtle turns left by
    ``angle`` degrees after drawing it.
    """
    for _ in range(n):
        fd(t, length)
        lt(t, angle)
def polygon(t, length, n):
    """Draw a regular polygon with turtle t.

    t: Turtle, length: side length, n: number of sides.

    The exterior angle is computed with float division so the polygon
    closes correctly for every n; the original ``360 / n`` truncated
    under Python 2 integer division (e.g. n=7 gave 51 instead of
    360/7 degrees, leaving the polygon open).
    """
    angle = 360.0 / n
    polyline(t, length, n, angle)
def circle(t, r):
    """Approximate a circle of radius r by drawing a many-sided polygon."""
    circumference = 2 * math.pi * r
    # Roughly one side per 3 units of arc keeps the segments short.
    sides = int(circumference / 3) + 1
    side_length = circumference / sides
    polygon(t, side_length, sides)
def arc(t, r, angle):
    """Draw an arc of radius r spanning ``angle`` degrees with turtle t."""
    arc_length = 2 * math.pi * r * angle / 360
    # Roughly one segment per 3 units of arc keeps the curve smooth.
    segments = int(arc_length / 3) + 1
    step_length = arc_length / segments
    step_angle = float(angle) / segments
    polyline(t, step_length, segments, step_angle)
# Set up the turtle world, then draw a pentagon followed by a circle.
world = TurtleWorld()
bob = Turtle()
bob.delay = 0.01
print bob
polygon( bob, length = 80, n = 5 )
circle( bob, r = 80 )
#arc( bob, r = 80, angle = 90 )
wait_for_user()
| hacpai/reading-lists | Think Python/mypolygon.py | Python | bsd-2-clause | 1,219 |
from ml import pipeline
if __name__ == '__main__':
    # Initialise the shared pipeline module for the "housing" project,
    # run it, and print the JSON output of the feature-selection step.
    pipeline.init(pipeline, 'housing', "general")
    pipeline.Run()
    result = pipeline.Output('data_featureselection->2', to_json=True)
    print(result)
    #Y = pipeline.Predict('dog.jpg', True)
    #print(Y)
| deepakkumar1984/sia-cog | test/testpipeline.py | Python | mit | 272 |
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models import signals
from django.dispatch import receiver
from django.contrib.auth.models import User
from time import time
from django.utils import timezone
from django.utils.safestring import mark_safe
def upload_file_name(instance, filename):
    """Build the upload path for a ticket attachment.

    Files are stored under ``ticket/<ticket id>/`` with the current
    timestamp prefixed to the original filename to avoid collisions.
    """
    stamp = str(time()).replace('.', '_')
    return 'ticket/%s/%s_%s' % (instance.ticket.id, stamp, filename)
class Ticket(models.Model):
    """A support ticket.

    A ticket flagged as a "general problem" (``classification``) may have
    dependent tickets referencing it through ``depend_from``; their status
    is kept in sync by the post-save signal handler below.
    """
    title = models.CharField(verbose_name=u'*Заголовок', max_length=30)
    body = models.TextField(verbose_name=u'*Описание проблемы')
    open_date = models.DateTimeField(verbose_name=u'Дата открытия', auto_now_add=True)
    close_date = models.DateTimeField(verbose_name=u'Дата закрытия', null=True, blank=True)
    author = models.ForeignKey(User, verbose_name=u'Сообщил')
    author_email = models.EmailField(verbose_name='*e-mail')
    responsible_person = models.ForeignKey(User, related_name='responsible_person', null=True, blank=True, verbose_name=u'Ответственный')
    # Lifecycle states: open / closed / under observation.
    STATUS_CHOICE = (
        ('o', 'Открыт'),
        ('c', 'Закрыт'),
        ('m', 'Под наблюдением'),
    )
    # (Original note, translated: by default no ticket has a
    # responsible_person_id set.)
    '''По умолчанию все тикеты не имеют responsible_person_id'''
    status = models.CharField(verbose_name=u'Статус', max_length = 1,
                              choices = STATUS_CHOICE,
                              default ='o')
    classification = models.BooleanField('Общая проблема', default=False)
    # id of the "general" ticket this one depends on, if any.
    depend_from = models.PositiveIntegerField(null=True, blank=True)
    def __unicode__(self):
        return self.title
    def is_ticket_open(self):
        # True while the ticket is in the open ('o') state.
        return self.status == 'o'
    def does_close_date(self):
        # Human-readable close date for the admin list display;
        # returns a placeholder string while the ticket is still open.
        if self.close_date == None:
            return u'Тикет еще на закрыт'
        else:
            return self.close_date
    is_ticket_open.short_description = u'Тикет открыт?'
    is_ticket_open.boolean = True
    does_close_date.short_description = u"Дата закрытия"
class TicketComment(models.Model):
    """A user comment attached to a ticket."""
    author = models.ForeignKey(User)
    ticket = models.ForeignKey(Ticket)
    body = models.TextField()
    pub_date = models.DateTimeField(auto_now_add=True)
class TicketFile(models.Model):
    """A file attachment for a ticket; stored via upload_file_name()."""
    ticket = models.ForeignKey(Ticket)
    upload_file = models.FileField(upload_to=upload_file_name, blank=True, verbose_name=u'Приложить файл')
'''
При закрытие тикета изменить дату закрытия на текущее время
'''
@receiver(signals.pre_save, sender=Ticket)
def modify_close_date(sender, instance, **kwargs):
    # When a ticket is being saved in the closed state, stamp it with the
    # current time (original note, translated: "on closing a ticket, set
    # the close date to the current time").
    # NOTE(review): this also refreshes close_date on every re-save of an
    # already-closed ticket whose close_date lies in the past -- confirm
    # that is intended.
    if (instance.close_date == None or instance.close_date < timezone.now()) and instance.status == 'c':
        instance.close_date = timezone.now()
# signals.pre_save.connect(modify_close_date, sender=Ticket)'''
'''
Если изменяем статус общего тикета, то должнен измениться и статус прикрепленных тикетов.
Если у прикрепленного тикета нет responsible_person, то это поле меняется на того, кто изменил состояние тикета.
'''
@receiver(signals.post_save, sender=Ticket)
def chose_dependent_ticket_status(sender, instance, **kwargs):
    # Original note, translated: when the status of a "general" ticket
    # changes, propagate the status to all dependent tickets; dependents
    # without a responsible person inherit this ticket's one.
    if instance.classification:
        dependent_tickets = Ticket.objects.filter(depend_from=instance.id)
        for tt in dependent_tickets:
            if tt.responsible_person_id == None:
                tt.responsible_person_id = instance.responsible_person_id
            tt.status = instance.status
            tt.save()
'''
Если файл не добавлен, убрать пустую строку из БД
'''
@receiver(signals.post_save, sender=TicketFile)
def delete_blank_file_field(sender, instance, **kwargs):
    # Original note, translated: if no file was actually attached,
    # remove the empty row from the database.
    if (instance.upload_file == '' or instance.upload_file == None):
        instance.delete()
| danasp/Ticketraker_light | ticketracker_light/models.py | Python | gpl-2.0 | 4,135 |
import dredd_hooks as hooks
import imp
import os
import json
import uuid
import pprint
#if you want to import another module for use in this workflow
utils = imp.load_source("utils",os.path.join(os.getcwd(),'utils.py'))
###############################################################################
###############################################################################
# Provenance Activities
###############################################################################
###############################################################################
@hooks.before("Provenance Activities > Activities collection > Create activity")
def pass_this_a15_1(transaction):
pass
@hooks.after("Provenance Activities > Activities collection > Create activity")
def get_prov_activity(transaction):
global prov_id
requestBody = json.loads(transaction[u'real'][u'body'])
prov_id = requestBody[u'id']
@hooks.before("Provenance Activities > Activities collection > List activities")
def justPass15_1(transaction):
utils.pass_this_endpoint(transaction)
@hooks.before("Provenance Activities > Activities instance > View activity")
def changeid015_1(transaction):
url = transaction['fullPath']
transaction['fullPath'] = str(url).replace('a1ff02a4-b7e9-999d-87x1-66f4c881jka1',prov_id)
@hooks.before("Provenance Activities > Activities instance > Update activity")
def changeid_and_des15_1(transaction):
requestBody = json.loads(transaction[u'request'][u'body'])
requestBody['ended_on'] = ''
transaction[u'request'][u'body'] = json.dumps(requestBody)
url = transaction['fullPath']
transaction['fullPath'] = str(url).replace('a1ff02a4-b7e9-999d-87x1-66f4c881jka1',prov_id)
@hooks.before("Provenance Activities > Activities instance > Delete activity")
def changeid15_1(transaction):
url = transaction['fullPath']
transaction['fullPath'] = str(url).replace('a1ff02a4-b7e9-999d-87x1-66f4c881jka1',prov_id)
| benneely/duke-data-service-dredd | dredd/dredd_scripts/15_provenance_activities.py | Python | gpl-3.0 | 1,964 |
import argparse
import cPickle
import theano.tensor as T
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
import numpy as np
import copy
import theano
from pynet.model import AutoEncoder
import pynet.layer as layers
floatX = theano.config.floatX
# Command-line interface: source (GPU) pickle and destination (CPU) pickle.
parser = argparse.ArgumentParser(description='''Convert gpu pickle pynet model to cpu pickle pynet model''')
parser.add_argument('--gpu_model', metavar='Path', required=True, help='the path to the gpu model pickle file')
parser.add_argument('--cpu_model', metavar='Path', required=True, help='''path to save the cpu model pickle file''')
args = parser.parse_args()
print ('loading gpu autoencoder..')
fin = open(args.gpu_model)
gpu_model = cPickle.load(fin)
# Rebuild an equivalent autoencoder backed by host (CPU) shared variables.
ae = AutoEncoder(input_dim=gpu_model.input_dim)
for layer in gpu_model.encode_layers:
    # Copy weights and biases out of the CUDA shared variables into
    # plain theano shared variables on the host.
    layerW = T._shared(np.array(layer.W.get_value(), floatX),
                       name=layer.W.name, borrow=False)
    layerb = T._shared(np.array(layer.b.get_value(), floatX),
                       name=layer.b.name, borrow=False)
    # Instantiate the same layer class by name with the copied params.
    encode_layer = getattr(layers, layer.__class__.__name__)(dim=layer.dim, name=layer.name,
                                                             W=layerW, b=layerb)
    ae.add_encode_layer(encode_layer)
    print 'encode layer', encode_layer.name, encode_layer.dim
print 'encode layers', ae.encode_layers
# Decode layers tie their weights to the transposed weights of the
# matching encode layer; only the bias is copied from the GPU model.
for ae_layer, gpu_layer in zip(reversed(ae.encode_layers), gpu_model.decode_layers):
    gpu_decode_layer_b = T._shared(np.array(gpu_layer.b.get_value(), floatX),
                                   name=gpu_layer.b.name, borrow=False)
    decode_layer = getattr(layers, gpu_layer.__class__.__name__)(name=gpu_layer.name, dim=gpu_layer.dim,
                                                                 W=ae_layer.W.T, b=gpu_decode_layer_b)
    ae.add_decode_layer(decode_layer)
    print 'decode layer', decode_layer.name, decode_layer.dim
print 'decode layers', ae.decode_layers
print 'layers', ae.layers
# Persist the rebuilt CPU model.
fout = open(args.cpu_model, 'wb')
cPickle.dump(ae, fout)
print ('Done!')
fin.close()
fout.close()
| hycis/Pynet | scripts/gpu_ae_to_cpu_ae.py | Python | apache-2.0 | 2,055 |
import re
import logging
import csv
import os
from StringIO import StringIO
from projects.exceptions import ProjectImportError
from vcs_support.backends.github import GithubContributionBackend
from vcs_support.base import BaseVCS, VCSVersion
log = logging.getLogger(__name__)
class Backend(BaseVCS):
    """Git VCS backend: clone/update repositories and enumerate versions."""
    supports_tags = True
    supports_branches = True
    contribution_backends = [GithubContributionBackend]
    fallback_branch = 'master'  # default branch
    def __init__(self, *args, **kwargs):
        super(Backend, self).__init__(*args, **kwargs)
        # Optional OAuth token used to build an authenticated clone URL.
        self.token = kwargs.get('token', None)
        self.repo_url = self._get_clone_url()
    def _get_clone_url(self):
        """Return an https clone URL, embedding the token if one is set."""
        if '://' in self.repo_url:
            hacked_url = self.repo_url.split('://')[1]
            hacked_url = re.sub('.git$', '', hacked_url)
            clone_url = 'https://%s' % hacked_url
            if self.token:
                clone_url = 'https://%s@%s' % (self.token, hacked_url)
            return clone_url
        # Don't edit URL because all hosts aren't the same
        #else:
        #clone_url = 'git://%s' % (hacked_url)
        return self.repo_url
    def set_remote_url(self, url):
        """Point the existing checkout's 'origin' remote at *url*."""
        return self.run('git', 'remote', 'set-url', 'origin', url)
    def update(self):
        # Use checkout() to update repo
        self.checkout()
    def repo_exists(self):
        """True if the working dir already contains a usable git repo."""
        code, out, err = self.run('git', 'status')
        return code == 0
    def fetch(self):
        """Fetch tags and branches, pruning deleted refs; raise on failure."""
        code, out, err = self.run('git', 'fetch', '--tags', '--prune')
        if code != 0:
            raise ProjectImportError(
                "Failed to get code from '%s' (git fetch): %s\n\nStderr:\n\n%s\n\n" % (
                    self.repo_url, code, err)
            )
    def checkout_revision(self, revision=None):
        """Force-checkout *revision* (default branch if None); log failures."""
        if not revision:
            branch = self.default_branch or self.fallback_branch
            revision = 'origin/%s' % branch
        code, out, err = self.run('git', 'checkout',
                                  '--force', '--quiet', revision)
        if code != 0:
            log.warning("Failed to checkout revision '%s': %s" % (
                revision, code))
        return [code, out, err]
    def clone(self):
        """Clone the repository (with submodules) into the working dir."""
        code, out, err = self.run('git', 'clone', '--recursive', '--quiet',
                                  self.repo_url, '.')
        if code != 0:
            raise ProjectImportError(
                "Failed to get code from '%s' (git clone): %s" % (
                    self.repo_url, code)
            )
    @property
    def tags(self):
        """List of VCSVersion objects, one per git tag."""
        retcode, stdout, err = self.run('git', 'show-ref', '--tags')
        # error (or no tags found)
        if retcode != 0:
            return []
        return self.parse_tags(stdout)
    def parse_tags(self, data):
        """
        Parses output of show-ref --tags, eg:
            3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0
            bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1
            c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2
            a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2
            c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1
            edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2

        Into VCSTag objects with the tag name as verbose_name and the commit
        hash as identifier.
        """
        # parse the lines into a list of tuples (commit-hash, tag ref name)
        raw_tags = csv.reader(StringIO(data), delimiter=' ')
        vcs_tags = []
        for row in raw_tags:
            row = filter(lambda f: f != '', row)
            if row == []:
                continue
            commit_hash, name = row
            # 'refs/tags/2.0.0' -> '2.0.0'
            clean_name = name.split('/')[-1]
            vcs_tags.append(VCSVersion(self, commit_hash, clean_name))
        return vcs_tags
    @property
    def branches(self):
        """List of VCSVersion objects, one per remote branch."""
        # Only show remote branches
        retcode, stdout, err = self.run('git', 'branch', '-r')
        # error (or no tags found)
        if retcode != 0:
            return []
        return self.parse_branches(stdout)
    def parse_branches(self, data):
        """
        Parse output of git branch -r, eg:
              origin/2.0.X
              origin/HEAD -> origin/master
              origin/develop
              origin/master
              origin/release/2.0.0
              origin/release/2.1.0
        """
        clean_branches = []
        raw_branches = csv.reader(StringIO(data), delimiter=' ')
        for branch in raw_branches:
            branch = filter(lambda f: f != '' and f != '*', branch)
            # Handle empty branches
            if len(branch):
                branch = branch[0]
                if branch.startswith('origin/'):
                    cut_len = len('origin/')
                    # Slugify: strip the remote prefix, '/' -> '-'.
                    slug = branch[cut_len:].replace('/', '-')
                    if slug in ['HEAD']:
                        continue
                    clean_branches.append(VCSVersion(self, branch, slug))
                else:
                    # Believe this is dead code.
                    slug = branch.replace('/', '-')
                    clean_branches.append(VCSVersion(self, branch, slug))
        return clean_branches
    @property
    def commit(self):
        """Hash of the currently checked-out commit."""
        retcode, stdout, err = self.run('git', 'rev-parse', 'HEAD')
        return stdout.strip()
    def checkout(self, identifier=None):
        """Clone or update the repo, then check out *identifier*."""
        self.check_working_dir()
        # Clone or update repository
        if self.repo_exists():
            self.set_remote_url(self.repo_url)
            self.fetch()
        else:
            self.make_clean_working_dir()
            self.clone()
        # Find proper identifier
        if not identifier:
            identifier = self.default_branch or self.fallback_branch
        identifier = self.find_ref(identifier)
        #Checkout the correct identifier for this branch.
        code, out, err = self.checkout_revision(identifier)
        if code != 0:
            return code, out, err
        # Clean any remains of previous checkouts
        self.run('git', 'clean', '-d', '-f', '-f')
        # Update submodules
        self.run('git', 'submodule', 'sync')
        self.run('git', 'submodule', 'update',
                 '--init', '--recursive', '--force')
        return code, out, err
    def find_ref(self, ref):
        """Resolve *ref* to a remote-qualified ref when one exists."""
        # Check if ref starts with 'origin/'
        if ref.startswith('origin/'):
            return ref
        # Check if ref is a branch of the origin remote
        if self.ref_exists('remotes/origin/' + ref):
            return 'origin/' + ref
        return ref
    def ref_exists(self, ref):
        """True if git knows *ref* in the current checkout."""
        code, out, err = self.run('git', 'show-ref', ref)
        return code == 0
    @property
    def env(self):
        """Subprocess environment with GIT_DIR pinned to this checkout."""
        env = super(Backend, self).env
        env['GIT_DIR'] = os.path.join(self.working_dir, '.git')
        return env
| dirn/readthedocs.org | readthedocs/vcs_support/backends/git.py | Python | mit | 6,947 |
#!/usr/bin/python
#Indiegogo & Kickstarter Tracker with Unicorn Hat
#Put your IGG url below
iggURL = "https://www.indiegogo.com/projects/ryanteck-budget-robotics-kit-for-raspberry-pi/x/6492676"
#Or put your KS URL below
ksURL = "https://www.kickstarter.com/projects/pimoroni/flotilla-for-raspberry-pi-making-for-everyone"
#Set the following to 0 for IGG, 1 for KS
site = 1
import unicornhat as UH
import time
import urllib
lastGoal = 0
sleepTime = 0.04
def getGoal():
    """Scrape the configured campaign page and return funding progress
    as a fraction (1.0 == 100% funded).

    Uses the module-level ``site`` flag: 0 = Indiegogo, 1 = Kickstarter.
    NOTE(review): this relies on brittle substring parsing of the HTML --
    it will break whenever either site changes its markup.
    """
    if (site == 0):
        #Lets get URL first
        html = urllib.urlopen(iggURL).read()
        if("InDemand" in html):
            #adds support for campaigns that finish with indemand enabled.
            print "In Demand"
            goal = html.split("% funded")[0]
            goal = goal.split("InDemand</div>\n")[1]
            goal = int(goal.strip())
            return float(goal/100.0)
        else:
            print "igg normal"
            # Percentage shown on the page, e.g. '1,234' -> 1234.
            goal = html.split('i-percent">')[1].split('%</div')[0]
            goal = goal.replace(',', '')
            #goal = htmlTest
            goal = int(goal)
            goal = float(goal/100.0)
            return goal
    elif (site == 1):
        print "Kickstarter"
        #Lets get URL first
        html = urllib.urlopen(ksURL).read()
        # Kickstarter exposes the raised fraction directly in a data attr.
        goal = html.split('data-percent-raised=')[1].split("data-pledged")[0].strip().replace('"','')
        #goal = htmlTest
        goal = float(goal)
        return goal
# Map a linear pixel index (0-63) to its [x, y] coordinate on the 8x8
# Unicorn HAT grid, row-major: index = y * 8 + x. Replaces the original
# hand-written 64-entry table with an equivalent comprehension.
pixelsToXY = [[x, y] for y in range(8) for x in range(8)]
# Raise brightness slightly. TOO BRIGHT CAN DAMAGE EYES.
UH.brightness(0.3)
# RGB colour tuples for the progress bands.
red = 255,000,000
yellow = 255,200,50
green = 000,255,000
orange = 255,153,000
blue = 000,000,255
purple = 255,000,255
# Main loop: poll the campaign progress, animate a coloured progress bar
# across the 64 pixels (each full pass of the grid is another 100%,
# capped at 500%), flash on change, then sleep before polling again.
while True:
    print "getting goal"
    goal = getGoal()
    # 64 pixels per 100%; cap at 5 full grid passes (500%).
    pixelsOn= int(64.00*(goal))
    if (pixelsOn > 320):
        pixelsOn = 320
    print pixelsOn
    pixel = 0
    while pixel < pixelsOn:
        # First grid pass (0-100%): four 16-pixel colour bands.
        if(pixel <16):
            color = red
            pixelXY = pixelsToXY[pixel]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
        if(15<pixel and pixel<32):
            color = orange
            pixelXY = pixelsToXY[pixel]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
        if(31<pixel and pixel<48):
            color = yellow
            pixelXY = pixelsToXY[pixel]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
        if(47<pixel and pixel<64):
            color = green
            pixelXY = pixelsToXY[pixel]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
        # Later passes (100%+): repaint the whole grid in a new colour,
        # stepping faster and brighter each pass.
        if(63<pixel and pixel<128):
            color = blue
            pixel2 = pixel - 64
            pixelXY = pixelsToXY[pixel2]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
            sleepTime = 0.03
        if(127<pixel and pixel<192):
            UH.brightness(0.4)
            color = purple
            pixel2 = pixel - 128
            pixelXY = pixelsToXY[pixel2]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
            sleepTime = 0.02
        if(191<pixel and pixel<256):
            UH.brightness(0.5)
            color = blue
            pixel2 = pixel - 192
            pixelXY = pixelsToXY[pixel2]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
            sleepTime = 0.02
        if(255<pixel and pixel<320):
            UH.brightness(0.6)
            color = orange
            pixel2 = pixel - 256
            pixelXY = pixelsToXY[pixel2]
            UH.set_pixel(pixelXY[0],pixelXY[1],color[0],color[1],color[2])
            sleepTime = 0.02
        pixel = pixel+1
        time.sleep(sleepTime)
        UH.show()
    # Flash the display when the funding level changed since last poll.
    if(goal != lastGoal):
        repeat = 0
        while repeat < 5:
            UH.brightness(1)
            UH.show()
            time.sleep(0.1)
            UH.brightness(0.1)
            UH.show()
            time.sleep(0.1)
            repeat = repeat +1
        UH.brightness(0.3)
        UH.show()
        lastGoal = goal
    time.sleep(10)
    UH.clear()
| ryanteck/UnicornTracker | unicornTracker.py | Python | gpl-3.0 | 4,094 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
from scipy.special import expi
import matplotlib.pyplot as plt
# Load gold-file temperature profiles for three stabilization schemes
# (a = full upwinding, b = no upwinding / fully saturated, c = Kuzmin-Turek)
# at two output times (0010 -> t = 0.1 s, 0060 -> t = 0.6 s).
a01 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_csv_T_0010.csv', delimiter = ',', names = True, dtype = float)
b01 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_fully_saturated_T_0010.csv', delimiter = ',', names = True, dtype = float)
c01 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_KT_T_0010.csv', delimiter = ',', names = True, dtype = float)
a06 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_csv_T_0060.csv', delimiter = ',', names = True, dtype = float)
b06 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_fully_saturated_T_0060.csv', delimiter = ',', names = True, dtype = float)
c06 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_KT_T_0060.csv', delimiter = ',', names = True, dtype = float)

plt.figure()
fig, axes = plt.subplots(1, 2, figsize = (15, 4))

# Left panel: temperature profiles at t = 0.1 s for each stabilization scheme.
axes[0].plot(b01['x'], b01['temp'], label = 'No upwinding')
axes[0].plot(a01['x'], a01['temp'], label = 'Full upwinding')
axes[0].plot(c01['x'], c01['temp'], label = 'KT stabilization')
axes[0].set_xlabel('x (m)')
axes[0].set_ylabel('Temperature (K)')
axes[0].grid()
axes[0].legend()
axes[0].set_title("Temperature at 0.1s")

# Right panel: temperature profiles at t = 0.6 s.
axes[1].plot(b06['x'], b06['temp'], label = 'No upwinding')
axes[1].plot(a06['x'], a06['temp'], label = 'Full upwinding')
axes[1].plot(c06['x'], c06['temp'], label = 'KT stabilization')
axes[1].set_xlabel('x (m)')
axes[1].set_ylabel('Temperature (K)')
axes[1].legend()
axes[1].grid()
axes[1].set_title("Temperature at 0.6s")

plt.tight_layout()
plt.savefig("heat_advection.png")
sys.exit(0)
| harterj/moose | modules/porous_flow/doc/content/modules/porous_flow/tests/heat_advection/heat_advection.py | Python | lgpl-2.1 | 2,227 |
import json
import requests
from django.utils import timezone
from pytz import timezone as tz
from apps.contribution.models import Repository, RepositoryLanguage
from apps.mommy import schedule
from apps.mommy.registry import Task
git_domain = "https://api.github.com"
class UpdateRepositories(Task):
    """Scheduled task that mirrors dotkom's GitHub repositories locally.

    Fetches the organisation's repositories (and per-repository language
    statistics) from the GitHub API and keeps the ``Repository`` and
    ``RepositoryLanguage`` models in sync.  Repositories with no activity
    for more than ``INACTIVITY_CUTOFF_DAYS`` days are never created and
    are purged on every run.
    """

    # Repositories inactive for longer than this (~2 years) are dropped.
    INACTIVITY_CUTOFF_DAYS = 730

    @staticmethod
    def run():
        """Fetch fresh data from GitHub and synchronise the local models."""
        fresh = UpdateRepositories.get_git_repositories()
        # GitHub's 'updated_at' timestamps end in 'Z': they are UTC, so
        # localize them as UTC (the previous Europe/Oslo localization
        # stored values shifted by the local offset).
        utc = tz('UTC')

        for repo in fresh:
            fresh_repo = Repository(
                id=int(repo['id']),
                name=repo['name'],
                description=repo['description'],
                updated_at=utc.localize(
                    timezone.datetime.strptime(repo['updated_at'],
                                               "%Y-%m-%dT%H:%M:%SZ")),
                url=repo['url'],
                public_url=repo['html_url'],
                issues=repo['open_issues_count'],
            )

            if Repository.objects.filter(id=fresh_repo.id).exists():
                # Known repository: refresh its metadata in place.
                stored_repo = Repository.objects.get(id=fresh_repo.id)
                repo_languages = UpdateRepositories.get_repository_languages(stored_repo.url)
                UpdateRepositories.update_repository(stored_repo, fresh_repo, repo_languages)
            else:
                # New repository: create it (subject to the activity cutoff).
                repo_languages = UpdateRepositories.get_repository_languages(fresh_repo.url)
                UpdateRepositories.new_repository(fresh_repo, repo_languages)

        # Purge repositories that have passed the inactivity limit.
        cutoff = timezone.now() - timezone.timedelta(
            days=UpdateRepositories.INACTIVITY_CUTOFF_DAYS)
        for repo in Repository.objects.all():
            if repo.updated_at < cutoff:
                repo.delete()

    @staticmethod
    def update_repository(stored_repo, fresh_repo, repo_languages):
        """Copy metadata from *fresh_repo* onto the stored row and upsert
        its language statistics."""
        stored_repo.name = fresh_repo.name
        stored_repo.description = fresh_repo.description
        stored_repo.updated_at = fresh_repo.updated_at
        stored_repo.url = fresh_repo.url
        stored_repo.public_url = fresh_repo.public_url
        stored_repo.issues = fresh_repo.issues
        stored_repo.save()

        # Upsert each language row; int() is now applied consistently on
        # both the update and the create path (it was create-only before).
        for language, size in repo_languages.items():
            RepositoryLanguage.objects.update_or_create(
                type=language,
                repository=stored_repo,
                defaults={'size': int(size)},
            )

    @staticmethod
    def new_repository(new_repo, new_languages):
        """Persist the (unsaved) *new_repo* and its languages.

        Repositories whose last activity is older than the cutoff are
        silently ignored.
        """
        cutoff = timezone.now() - timezone.timedelta(
            days=UpdateRepositories.INACTIVITY_CUTOFF_DAYS)
        if new_repo.updated_at <= cutoff:
            return
        # run() already built a complete instance; the previous version
        # redundantly reconstructed an identical object field by field.
        new_repo.save()

        for language, size in new_languages.items():
            RepositoryLanguage.objects.create(
                type=language,
                size=int(size),
                repository=new_repo,
            )

    @staticmethod
    def get_git_repositories():
        """Return the organisation's repositories as decoded JSON."""
        url = git_domain + "/users/dotkom/repos?per_page=60"
        return requests.get(url).json()

    @staticmethod
    def get_repository_languages(url):
        """Return the language -> byte-count mapping for one repository."""
        return requests.get(url + "/languages").json()
schedule.register(UpdateRepositories, day_of_week="mon-sun", hour=6, minute=0)
| dotKom/onlineweb4 | apps/contribution/mommy.py | Python | mit | 4,206 |
# -*- coding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2016 Rooms For (Hong Kong) Limited T/A OSCG
# <https://www.odoo-asia.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Odoo/OpenERP addon manifest: module metadata, dependencies and data files.
{
    "name": "Stock Project Related",
    "summary": "relate project to stock picking via sales order",
    "version": "8.0.1.0.0",
    "category": "Warehouse",
    "website": "https://www.odoo-asia.com/",
    "author": "Rooms For (Hong Kong) Limited T/A OSCG",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    # Modules that must be installed before this one.
    "depends": [
        "account_analytic_analysis","stock",
    ],
    # XML files loaded on install/update.
    "data": [
        "views/stock_view.xml",
    ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| rfhk/ykt-training | stock_project_search_old/__openerp__.py | Python | agpl-3.0 | 1,380 |
# -*- coding: utf-8 -*-
from . import res_partner
| incaser/incaser-odoo-addons | partner_vat_split/models/__init__.py | Python | agpl-3.0 | 50 |
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
    """Tests for the shared ``hmm._BaseHMM`` machinery (forward/backward,
    Viterbi, posteriors) using a stub with injected frame log-likelihoods."""

    def setUp(self):
        self.prng = np.random.RandomState(9)

    class StubHMM(hmm._BaseHMM):
        """Minimal _BaseHMM subclass returning canned per-frame scores."""

        def _compute_log_likelihood(self, X):
            # The observations are ignored; tests assign ``framelogprob``.
            return self.framelogprob

        def _generate_sample_from_state(self):
            pass

        def _init(self):
            pass

    def setup_example_hmm(self):
        """Build the 2-state worked example with known reference results."""
        # Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
        h = self.StubHMM(2)
        h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
        h.startprob_ = [0.5, 0.5]
        framelogprob = np.log([[0.9, 0.2],
                               [0.9, 0.2],
                               [0.1, 0.8],
                               [0.9, 0.2],
                               [0.9, 0.2]])
        # Add dummy observations to stub.
        h.framelogprob = framelogprob
        return h, framelogprob

    def test_init(self):
        """Keyword parameters should round-trip through the constructor."""
        h, framelogprob = self.setup_example_hmm()
        for params in [('transmat_',), ('startprob_', 'transmat_')]:
            # Drop the trailing underscore to form the constructor kwargs.
            d = dict((x[:-1], getattr(h, x)) for x in params)
            h2 = self.StubHMM(h.n_components, **d)
            self.assertEqual(h.n_components, h2.n_components)
            for p in params:
                assert_array_almost_equal(getattr(h, p), getattr(h2, p))

    def test_do_forward_pass(self):
        """Forward lattice and log-prob must match the worked example."""
        h, framelogprob = self.setup_example_hmm()

        logprob, fwdlattice = h._do_forward_pass(framelogprob)

        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)

        reffwdlattice = np.array([[0.4500, 0.1000],
                                  [0.3105, 0.0410],
                                  [0.0230, 0.0975],
                                  [0.0408, 0.0150],
                                  [0.0298, 0.0046]])
        assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)

    def test_do_backward_pass(self):
        """Backward lattice must match the worked example."""
        h, framelogprob = self.setup_example_hmm()

        bwdlattice = h._do_backward_pass(framelogprob)

        refbwdlattice = np.array([[0.0661, 0.0455],
                                  [0.0906, 0.1503],
                                  [0.4593, 0.2437],
                                  [0.6900, 0.4100],
                                  [1.0000, 1.0000]])
        assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)

    def test_do_viterbi_pass(self):
        """Viterbi decoding must recover the reference state path."""
        h, framelogprob = self.setup_example_hmm()

        logprob, state_sequence = h._do_viterbi_pass(framelogprob)

        refstate_sequence = [0, 0, 1, 0, 0]
        assert_array_equal(state_sequence, refstate_sequence)

        reflogprob = -4.4590
        self.assertAlmostEqual(logprob, reflogprob, places=4)

    def test_eval(self):
        """eval() must return the reference log-prob and posteriors."""
        h, framelogprob = self.setup_example_hmm()
        nobs = len(framelogprob)

        logprob, posteriors = h.eval([])

        # Posteriors must be a proper distribution at every frame.
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))

        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)

        refposteriors = np.array([[0.8673, 0.1327],
                                  [0.8204, 0.1796],
                                  [0.3075, 0.6925],
                                  [0.8204, 0.1796],
                                  [0.8673, 0.1327]])
        assert_array_almost_equal(posteriors, refposteriors, decimal=4)

    def test_hmm_eval_consistent_with_gmm(self):
        """With uniform start/transition probabilities the HMM posteriors
        reduce to per-frame (GMM-style) posteriors."""
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)

        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob

        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        logprob, hmmposteriors = h.eval([])

        assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))

        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        assert_array_almost_equal(hmmposteriors, gmmposteriors)

    def test_hmm_decode_consistent_with_gmm(self):
        """Same reduction as above, but for the Viterbi state sequence."""
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)

        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob

        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        viterbi_ll, state_sequence = h.decode([])

        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        gmmstate_sequence = gmmposteriors.argmax(axis=1)
        assert_array_equal(state_sequence, gmmstate_sequence)

    def test_base_hmm_attributes(self):
        """Property setters must validate shape and normalisation."""
        n_components = 20
        startprob = self.prng.rand(n_components)
        startprob = startprob / startprob.sum()
        transmat = self.prng.rand(n_components, n_components)
        transmat /= np.tile(transmat.sum(axis=1)
                            [:, np.newaxis], (1, n_components))

        h = self.StubHMM(n_components)

        self.assertEquals(h.n_components, n_components)

        h.startprob_ = startprob
        assert_array_almost_equal(h.startprob_, startprob)
        # Unnormalised, empty or wrongly-shaped arrays must be rejected.
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((n_components - 2, 2)))

        h.transmat_ = transmat
        assert_array_almost_equal(h.transmat_, transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
    """Run one warm-up EM step on *obs*, then *n_iter* single-step fits.

    After the warm-up fit, parameter re-initialisation is disabled
    (``init_params = ''``) so every subsequent one-iteration fit continues
    from the previous state.  Returns the list of total log-likelihoods,
    one per post-warm-up iteration.  Extra keyword arguments are accepted
    but ignored.
    """
    # Warm-up: a single EM iteration that also initialises parameters.
    hmm.n_iter = 1
    hmm.fit(obs)

    scores = []
    for _ in xrange(n_iter):
        hmm.n_iter = 1
        hmm.init_params = ''  # keep the parameters from the previous step
        hmm.fit(obs)
        scores.append(sum(hmm.score(seq) for seq in obs))
    return scores
class GaussianHMMBaseTester(object):
    """Mixin exercising ``hmm.GaussianHMM``; concrete subclasses only set
    ``covariance_type``.  Not a TestCase itself, so it is not collected."""

    def setUp(self):
        """Create random but reproducible model parameters for every
        covariance parameterisation."""
        self.prng = prng = np.random.RandomState(10)
        self.n_components = n_components = 3
        self.n_features = n_features = 3
        self.startprob = prng.rand(n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = prng.rand(n_components, n_components)
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, n_components))
        self.means = prng.randint(-20, 20, (n_components, n_features))
        # One covariance array per supported parameterisation.
        self.covars = {
            'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
                                           np.ones((1, n_features)))) ** 2,
            'tied': (make_spd_matrix(n_features, random_state=0)
                     + np.eye(n_features)),
            'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
            'full': np.array([make_spd_matrix(n_features, random_state=0)
                              + np.eye(n_features)
                              for x in range(n_components)]),
        }
        # The same covariances expanded to one full matrix per state.
        self.expanded_covars = {
            'spherical': [np.eye(n_features) * cov
                          for cov in self.covars['spherical']],
            'diag': [np.diag(cov) for cov in self.covars['diag']],
            'tied': [self.covars['tied']] * n_components,
            'full': self.covars['full'],
        }

    def test_bad_covariance_type(self):
        """An unknown covariance_type string must raise ValueError."""
        hmm.GaussianHMM(20, self.covariance_type)
        self.assertRaises(ValueError, hmm.GaussianHMM, 20,
                          'badcovariance_type')

    def _test_attributes(self):
        # XXX: This test is bugged and creates weird errors -- skipped
        # (the leading underscore keeps the runner from collecting it).
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)

        self.assertEquals(h.n_components, self.n_components)
        self.assertEquals(h.covariance_type, self.covariance_type)

        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_features)))

        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))

        h.means_ = self.means
        self.assertEquals(h.n_features, self.n_features)
        self.assertRaises(ValueError, h.__setattr__, 'means_', [])
        self.assertRaises(ValueError, h.__setattr__, 'means_',
                          np.zeros((self.n_components - 2, self.n_features)))

        h.covars_ = self.covars[self.covariance_type]
        assert_array_almost_equal(h.covars_,
                                  self.expanded_covars[self.covariance_type])
        #self.assertRaises(ValueError, h.__setattr__, 'covars', [])
        #self.assertRaises(ValueError, h.__setattr__, 'covars',
        # np.zeros((self.n_components - 2, self.n_features)))

    def test_eval_and_decode(self):
        """Posteriors and Viterbi must recover the generating component."""
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.means_ = self.means
        h.covars_ = self.covars[self.covariance_type]

        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * h.means_

        gaussidx = np.repeat(range(self.n_components), 5)
        nobs = len(gaussidx)
        obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]

        ll, posteriors = h.eval(obs)

        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))

        viterbi_ll, stateseq = h.decode(obs)
        assert_array_equal(stateseq, gaussidx)

    def test_sample(self, n=1000):
        """sample() must return n draws with the right dimensionality."""
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * self.means
        h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        h.startprob_ = self.startprob

        samples = h.sample(n)[0]
        self.assertEquals(samples.shape, (n, self.n_features))

    def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
        """EM training should keep the training log-likelihood from
        decreasing by more than the admissible tolerance."""
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.transmat_ = hmm.normalize(self.transmat
                                    + np.diag(self.prng.rand(self.n_components)), 1)
        h.means_ = 20 * self.means
        h.covars_ = self.covars[self.covariance_type]

        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]

        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs)

        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]

        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print ('Test train: %s (%s)\n %s\n %s'
                   % (self.covariance_type, params, trainll, np.diff(trainll)))
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > -0.8,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f, for model %s. The likelihoods are %s."
            % (delta_min, -0.8, self.covariance_type, trainll))

    def test_fit_works_on_sequences_of_different_length(self):
        """fit() must accept observation sequences of varying length."""
        obs = [self.prng.rand(3, self.n_features),
               self.prng.rand(4, self.n_features),
               self.prng.rand(5, self.n_features)]

        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(obs)

    def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
        """MAP training with priors should behave like test_fit."""
        startprob_prior = 10 * self.startprob + 2.0
        transmat_prior = 10 * self.transmat + 2.0
        means_prior = self.means
        means_weight = 2.0
        covars_weight = 2.0
        if self.covariance_type in ('full', 'tied'):
            covars_weight += self.n_features
        covars_prior = self.covars[self.covariance_type]

        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.startprob_prior = startprob_prior
        h.transmat_ = hmm.normalize(self.transmat
                                    + np.diag(self.prng.rand(self.n_components)), 1)
        h.transmat_prior = transmat_prior
        h.means_ = 20 * self.means
        h.means_prior = means_prior
        h.means_weight = means_weight
        h.covars_ = self.covars[self.covariance_type]
        h.covars_prior = covars_prior
        h.covars_weight = covars_weight

        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]

        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs[:1])

        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params)[1:]

        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print ('Test MAP train: %s (%s)\n %s\n %s'
                   % (self.covariance_type, params, trainll, np.diff(trainll)))
        # XXX: Why such a large tolerance?
        self.assertTrue(np.all(np.diff(trainll) > -0.5))
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
    """Gaussian HMM test battery with spherical covariances."""
    covariance_type = 'spherical'

    def test_fit_startprob_and_transmat(self):
        # Fit only the start probabilities and transition matrix.
        self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
    """Gaussian HMM test battery with diagonal covariances."""
    covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
    """Gaussian HMM test battery with a tied covariance matrix."""
    covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
    """Gaussian HMM test battery with full covariance matrices."""
    covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
    """Using examples from Wikipedia

    - http://en.wikipedia.org/wiki/Hidden_Markov_model
    - http://en.wikipedia.org/wiki/Viterbi_algorithm
    """

    def setUp(self):
        # Classic Rainy/Sunny model emitting walk/shop/clean observations.
        self.prng = np.random.RandomState(9)
        self.n_components = 2   # ('Rainy', 'Sunny')
        self.n_symbols = 3  # ('walk', 'shop', 'clean')
        self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
        self.startprob = [0.6, 0.4]
        self.transmat = [[0.7, 0.3], [0.4, 0.6]]

        self.h = hmm.MultinomialHMM(self.n_components,
                                    startprob=self.startprob,
                                    transmat=self.transmat)
        self.h.emissionprob_ = self.emissionprob

    def test_wikipedia_viterbi_example(self):
        # From http://en.wikipedia.org/wiki/Viterbi_algorithm:
        # "This reveals that the observations ['walk', 'shop', 'clean']
        # were most likely generated by states ['Sunny', 'Rainy',
        # 'Rainy'], with probability 0.01344."
        observations = [0, 1, 2]
        logprob, state_sequence = self.h.decode(observations)
        self.assertAlmostEqual(np.exp(logprob), 0.01344)
        assert_array_equal(state_sequence, [1, 0, 0])

    def test_attributes(self):
        """Setters must validate normalisation and shapes."""
        h = hmm.MultinomialHMM(self.n_components)

        self.assertEquals(h.n_components, self.n_components)

        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_symbols)))

        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))

        h.emissionprob_ = self.emissionprob
        assert_array_almost_equal(h.emissionprob_, self.emissionprob)
        self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
                          np.zeros((self.n_components - 2, self.n_symbols)))
        self.assertEquals(h.n_symbols, self.n_symbols)

    def test_eval(self):
        """Posteriors must be a proper distribution at each frame."""
        idx = np.repeat(range(self.n_components), 10)
        nobs = len(idx)
        obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]

        ll, posteriors = self.h.eval(obs)

        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))

    def test_sample(self, n=1000):
        """Sampling must yield n symbols covering the whole alphabet."""
        samples = self.h.sample(n)[0]
        self.assertEquals(len(samples), n)
        self.assertEquals(len(np.unique(samples)), self.n_symbols)

    def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
        """EM should not decrease the training log-likelihood (much)."""
        h = self.h

        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]

        # Mess up the parameters and see if we can re-learn them.
        h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
        h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
                                                   self.n_components), axis=1)
        h.emissionprob_ = hmm.normalize(
            self.prng.rand(self.n_components, self.n_symbols), axis=1)

        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]

        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print 'Test train: (%s)\n %s\n %s' % (params, trainll,
                                                  np.diff(trainll))
        self.assertTrue(np.all(np.diff(trainll) > - 1.e-3))

    def test_fit_emissionprob(self):
        # Fit only the emission probabilities.
        self.test_fit('e')
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
    """Build a GMM with *n_mix* random components of the given covariance
    type.  *prng* may be a seed or a RandomState instance.

    All four covariance parameterisations are drawn below before one is
    selected, which keeps the RNG stream identical regardless of the
    requested type.
    """
    prng = check_random_state(prng)
    gmm = mixture.GMM(n_mix, covariance_type=covariance_type)
    gmm.means_ = prng.randint(-20, 20, (n_mix, n_features))
    min_cv = 0.1
    # Draw every parameterisation, then pick the requested one.
    covars_by_type = {
        'spherical': (min_cv + min_cv * np.dot(prng.rand(n_mix, 1),
                                               np.ones((1, n_features)))) ** 2,
        'tied': (make_spd_matrix(n_features, random_state=prng)
                 + min_cv * np.eye(n_features)),
        'diag': (min_cv + min_cv * prng.rand(n_mix, n_features)) ** 2,
        'full': np.array(
            [make_spd_matrix(n_features, random_state=prng)
             + min_cv * np.eye(n_features) for x in xrange(n_mix)]),
    }
    gmm.covars_ = covars_by_type[covariance_type]
    gmm.weights_ = hmm.normalize(prng.rand(n_mix))
    return gmm
class GMMHMMBaseTester(object):
    """Mixin exercising ``hmm.GMMHMM``; subclasses set ``covariance_type``.

    NOTE(review): setUp unconditionally assigns ``self.covariance_type =
    'diag'``, which shadows the class attribute set by the tied/full
    subclasses -- those subclasses therefore appear to re-test 'diag'.
    Confirm whether this is intentional.
    """

    def setUp(self):
        self.prng = np.random.RandomState(9)
        self.n_components = 3
        self.n_mix = 2
        self.n_features = 2
        self.covariance_type = 'diag'
        self.startprob = self.prng.rand(self.n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = self.prng.rand(self.n_components, self.n_components)
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, self.n_components))

        # One random GMM emission model per hidden state.
        self.gmms = []
        for state in xrange(self.n_components):
            self.gmms.append(create_random_gmm(
                self.n_mix, self.n_features, self.covariance_type,
                prng=self.prng))

    def test_attributes(self):
        """Setters must validate normalisation and shapes."""
        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        self.assertEquals(h.n_components, self.n_components)

        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_features)))

        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))

    def test_eval_and_decode(self):
        """Posteriors and Viterbi must recover the generating state."""
        h = hmm.GMMHMM(self.n_components, gmms=self.gmms)
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        for g in h.gmms:
            g.means_ *= 20

        refstateseq = np.repeat(range(self.n_components), 5)
        nobs = len(refstateseq)
        obs = [h.gmms[x].sample(1).flatten() for x in refstateseq]

        ll, posteriors = h.eval(obs)

        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))

        viterbi_ll, stateseq = h.decode(obs)
        assert_array_equal(stateseq, refstateseq)

    def test_sample(self, n=1000):
        """Sampling must produce n feature vectors."""
        h = hmm.GMMHMM(self.n_components, self.covariance_type,
                       startprob=self.startprob, transmat=self.transmat,
                       gmms=self.gmms)
        samples = h.sample(n)[0]
        self.assertEquals(samples.shape, (n, self.n_features))

    def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
        """EM training sanity check (currently skipped as unstable)."""
        h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
        h.startprob_ = self.startprob
        h.transmat_ = hmm.normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.gmms = self.gmms

        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10,
                              random_state=self.prng)[0] for x in xrange(10)]

        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs)
        h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
                                                   self.n_components), axis=1)
        h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))

        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params)[1:]
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print 'Test train: (%s)\n %s\n %s' % (params, trainll,
                                                  np.diff(trainll))

        # XXX: this test appears to check that training log likelihood should
        # never be decreasing (up to a tolerance of 0.5, why?) but this is not
        # the case when the seed changes.
        raise SkipTest("Unstable test: trainll is not always increasing "
                       "depending on seed")

        # Unreachable while the SkipTest above remains in place.
        self.assertTrue(np.all(np.diff(trainll) > -0.5))

    def test_fit_works_on_sequences_of_different_length(self):
        """fit() must accept observation sequences of varying length."""
        obs = [self.prng.rand(3, self.n_features),
               self.prng.rand(4, self.n_features),
               self.prng.rand(5, self.n_features)]

        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
    """GMM-HMM test battery with diagonal covariances."""
    covariance_type = 'diag'

    def test_fit_startprob_and_transmat(self):
        # Fit only the start probabilities and transition matrix.
        self.test_fit('st')

    def test_fit_means(self):
        # Fit only the component means.
        self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
    """Intended to run the GMM-HMM battery with tied covariances; note
    that GMMHMMBaseTester.setUp reassigns covariance_type to 'diag'."""
    covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
    """Intended to run the GMM-HMM battery with full covariances; note
    that GMMHMMBaseTester.setUp reassigns covariance_type to 'diag'."""
    covariance_type = 'full'
def test_normalize_1D():
    """hmm.normalize must rescale a 1-D array to unit sum along axis 0."""
    data = rng.rand(2) + 1.0
    normed = hmm.normalize(data, 0)
    assert np.allclose(normed.sum(0), 1.0)
def test_normalize_3D():
    """hmm.normalize must give unit sums along each axis of a 3-D array."""
    data = rng.rand(2, 2, 2) + 1.0
    for axis in (0, 1, 2):
        normed = hmm.normalize(data, axis)
        assert np.allclose(normed.sum(axis), 1.0)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/tests/test_hmm.py | Python | agpl-3.0 | 26,383 |
# -*- encoding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext as _
from students.tipos import *
from finance.models import CustomUser, Cuota, UnicoPago, Pago, ServicioSuscripto, Persona
from efinance.models import Empleado
from django.core.urlresolvers import reverse
from datetime import datetime, date
class Alumno(Persona):
    """Nursery-school student: enrolment data, family links, medical
    record and billing helpers (debts, payments, subscribed services)."""

    sala = models.ManyToManyField('Sala', through='AlumnoSala', blank=True)
    responsables = models.ManyToManyField('finance.Responsable', related_name='alumnos', blank=True, null=True)
    autorizados = models.ManyToManyField('finance.Autorizado', blank=True, null=True)
    # Siblings (self-referential many-to-many).
    hermanos = models.ManyToManyField('Alumno', blank=True, null=True)
    fecha_ingreso = models.DateField(default=date.today)
    fecha_egreso = models.DateField(blank=True, null=True)
    # Medical information.
    padece_enfermedad = models.BooleanField(default=False)
    tipo_enfermedad = models.CharField(max_length=40, blank=True) #TODO: choices
    controla_esfinteres = models.BooleanField(default=False)
    edad_controla_esfinteres = models.IntegerField(blank=True, null=True)
    usa_mamadera = models.BooleanField(default=False)
    es_alergico = models.BooleanField(default=False)
    toma_medicacion = models.BooleanField(default=False)
    en_tratamiento_medico = models.BooleanField(default=False)
    detalle_tratamiento = models.CharField(max_length=40, blank=True)
    tiene_convulsiones = models.BooleanField(default=False, help_text='en caso de temperatura')
    tiene_antitetanica = models.BooleanField(default=False)
    observaciones_medicas = models.TextField(blank=True, help_text='ingrese aqui informacion adicional de ser necesario')
    traslado_emergencia = models.CharField(max_length=40, blank=True) #TODO: choices
    telefono_emergencia = models.IntegerField(help_text='Ingresar sólo los números sin puntos', blank=True, null=True)
    # Other / developmental information.
    expresion_verbal = models.BooleanField(default=False)
    vocabulario = models.CharField(max_length=40, blank=True, help_text='como es su vocabulario y lenguaje')

    class Meta:
        ordering = ['fecha_ingreso',] # first enrolled first (FCFS)

    # FILTER OR GET ???  (original author note)
    # FIX: Alumno.get_deuda returns a list while Empleado.get_deuda returns an int
    def get_deuda(self, extra=False):
        """Return [unpaid-fees total, unpaid one-off-payments total]; with
        *extra*=True the two matching querysets are appended as well.

        NOTE(review): the bare ``except`` returns the int ``0`` instead of
        a list, so callers get an inconsistent type on failure -- confirm
        and narrow the exception type.
        """
        lista = []
        try: # FIX: use get_or_create -- https://docs.djangoproject.com/en/1.3/ref/models/querysets/#get-or-create
            c = Cuota.objects.filter(alumno=self.id, paga=False)
            deuda_cuotas = sum([cuota.deuda for cuota in c ])
            u = UnicoPago.objects.filter(alumno=self.id, paga=False)
            deuda_unicos = sum([p_unico.deuda for p_unico in u ])
            lista.extend([deuda_cuotas, deuda_unicos])
            if extra:
                lista.extend([c,u])
            return lista
        except: return 0

    def get_informes(self, limit=5):
        """Return up to *limit* reports written about this student."""
        return Informe.objects.filter(alumno=self.id)[:limit]

    def last_payments(self, limit=5):
        """Return up to *limit* most recent payments, or None on error."""
        try: # FIX: use get_or_create -- https://docs.djangoproject.com/en/1.3/ref/models/querysets/#get-or-create
            p = Pago.objects.filter(alumno=self.id)[:limit]
            return p
        except: return None

    def suscript_services(self):
        """Return the services this student is subscribed to."""
        return ServicioSuscripto.objects.filter(alumno=self.id)

    def get_absolute_url(self):
        # Redirect target used by create/update views.
        return reverse('alumnos-jardin')
class Sala(models.Model):
    """A classroom group: room, shift and section for one school year."""

    sala = models.IntegerField(max_length=2, choices=SALA_CHOICES)
    turno = models.IntegerField(max_length=1, choices=TURNO_CHOICES)
    seccion = models.IntegerField(max_length=1, choices=SECCION_CHOICES)
    anio_lectivo = models.IntegerField(max_length=4)
    fecha_inicio = models.DateField(default=date.today)
    capacidad = models.IntegerField(max_length=2) # maximum capacity of the room

    def __unicode__(self):
        return u'Sala: %s, Turno: %s' % (self.get_sala_display(), self.get_turno_display())

    def get_alumnos(self):
        """Return all students linked to this room (via AlumnoSala)."""
        return self.alumno_set.all()

    def get_absolute_url(self):
        # Redirect target used by create/update views.
        return reverse('salas-jardin')
class AlumnoSala(models.Model):
    """Through-model linking a student to a room, with enrolment state."""

    alumno = models.ForeignKey(Alumno)
    sala = models.ForeignKey(Sala)
    estado = models.IntegerField(max_length=2, choices=ESTADO_ALUMNO_SALA_CHOICES)
    comentarios = models.TextField(blank=True)

    #class Meta:
    # ordering = ['alumno.fecha_ingreso',] # first enrolled first
    def __unicode__(self):
        return '%s: %s, %d' % (self.alumno.apellido, self.estado, self.sala.anio_lectivo)
class Maestra(Empleado):
    """A teacher: an employee (Empleado) who can be assigned to many rooms."""
    salas = models.ManyToManyField(Sala)
    def get_absolute_url(self):
        """Resolve the URL of the teacher list view."""
        #return "/efinance/detalle_gasto/%d/" % self.id
        return reverse('maestras-jardin')
class Informe(models.Model):
    """A report written by a teacher (Maestra) about a student (Alumno)."""
    titulo = models.CharField(max_length=200)
    texto = models.TextField()
    alumno = models.ForeignKey(Alumno)
    maestra = models.ForeignKey(Maestra)
    fecha = models.DateField(default=date.today)
    class Meta:
        ordering = ['-fecha',]  # newest reports first
    def __unicode__(self):
        return u'Informe de la maestra: %s para el alumno %s' % (self.maestra.get_full_name(), self.alumno.get_full_name())
| mfalcon/edujango | students/models.py | Python | apache-2.0 | 5,144 |
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises, assert_equals, assert_almost_equals
from ....models.classes import MatrixPreferenceDataModel, \
MatrixBooleanPrefDataModel
from ..item_strategies import ItemsNeighborhoodStrategy, AllPossibleItemsStrategy
from ..neighborhood_strategies import AllNeighborsStrategy, NearestNeighborsStrategy
from ....similarities.basic_similarities import ItemSimilarity, UserSimilarity
from ..classes import ItemBasedRecommender, UserBasedRecommender
from ....models.utils import ItemNotFoundError, UserNotFoundError
from ....metrics.pairwise import euclidean_distances, jaccard_coefficient, pearson_correlation
# Shared fixture: user name -> {movie title: rating}. 'Maria Gabriela'
# deliberately has an empty profile, to exercise no-preference code paths.
movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
 'The Night Listener': 3.0},
 'Luciana Nunes': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 3.5},
 'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
 'Superman Returns': 3.5, 'The Night Listener': 4.0},
 'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
 'The Night Listener': 4.5, 'Superman Returns': 4.0,
 'You, Me and Dupree': 2.5},
 'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 2.0},
 'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
 'Penny Frewman': {'Snakes on a Plane': 4.5, 'You, Me and Dupree': 1.0, 'Superman Returns': 4.0},
 'Maria Gabriela': {}}
# Numeric-ratings model and boolean (rated / not rated) model over the same data.
matrix_model = MatrixPreferenceDataModel(movies)
boolean_matrix_model = MatrixBooleanPrefDataModel(movies)
def test_create_ItemBasedRecommender():
    """Constructor wires model, similarity and item strategy; capper defaults on."""
    strategy = AllPossibleItemsStrategy()
    item_sim = ItemSimilarity(matrix_model, euclidean_distances)
    recommender = ItemBasedRecommender(matrix_model, item_sim, strategy)
    assert_equals(recommender.model, matrix_model)
    assert_equals(recommender.similarity, item_sim)
    assert_equals(recommender.items_selection_strategy, strategy)
    assert_equals(recommender.capper, True)
def test_create_UserBasedRecommender():
    """Constructor wires model, similarity and neighborhood strategy; capper defaults on."""
    strategy = AllNeighborsStrategy()
    user_sim = UserSimilarity(matrix_model, euclidean_distances)
    recommender = UserBasedRecommender(matrix_model, user_sim, strategy)
    assert_equals(recommender.model, matrix_model)
    assert_equals(recommender.similarity, user_sim)
    assert_equals(recommender.neighborhood_strategy, strategy)
    assert_equals(recommender.capper, True)
def test_all_other_items_ItemBasedRecommender():
    """all_other_items returns the items the given user has not rated yet."""
    items_strategy = AllPossibleItemsStrategy()
    similarity = ItemSimilarity(matrix_model, euclidean_distances)
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    # Lorena Abreu misses exactly one item; Marcel Caraciolo rated everything;
    # Maria Gabriela rated nothing, so every item comes back.
    assert_array_equal(np.array(['Lady in the Water']), recsys.all_other_items('Lorena Abreu'))
    assert_array_equal(np.array([], dtype='|S'), recsys.all_other_items('Marcel Caraciolo'))
    assert_array_equal(np.array(['Just My Luck', 'Lady in the Water', 'Snakes on a Plane',
            'Superman Returns', 'The Night Listener', 'You, Me and Dupree']), recsys.all_other_items('Maria Gabriela'))
    # Same expectations hold for the boolean (rated / not rated) model.
    similarity = ItemSimilarity(boolean_matrix_model, jaccard_coefficient)
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['Lady in the Water']), recsys.all_other_items('Lorena Abreu'))
    assert_array_equal(np.array([], dtype='|S'), recsys.all_other_items('Marcel Caraciolo'))
    assert_array_equal(np.array(['Just My Luck', 'Lady in the Water', 'Snakes on a Plane',
            'Superman Returns', 'The Night Listener', 'You, Me and Dupree']), recsys.all_other_items('Maria Gabriela'))
def test_all_other_items_UserBasedRecommender():
    """all_other_items (user-based) with default and keyword-tuned neighborhoods."""
    nhood_strategy = AllNeighborsStrategy()
    similarity = UserSimilarity(boolean_matrix_model, jaccard_coefficient)
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Lady in the Water']), recsys.all_other_items('Lorena Abreu'))
    assert_array_equal(np.array([], dtype='|S'), recsys.all_other_items('Marcel Caraciolo'))
    assert_array_equal(np.array(['Just My Luck', 'Lady in the Water', 'Snakes on a Plane',
        'Superman Returns', 'The Night Listener', 'You, Me and Dupree']), recsys.all_other_items('Maria Gabriela'))
    # Passing distance/nhood_size/minimal_similarity keywords must not change
    # the result for the boolean model.
    similarity = UserSimilarity(boolean_matrix_model, jaccard_coefficient)
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Lady in the Water']),
        recsys.all_other_items(user_id='Lorena Abreu', distance=pearson_correlation, nhood_size=2, minimal_similarity=0.1))
    assert_array_equal(np.array([], dtype='|S'),
        recsys.all_other_items(user_id='Marcel Caraciolo', distance=pearson_correlation, nhood_size=2, minimal_similarity=0.1))
    assert_array_equal(np.array(['Just My Luck', 'Lady in the Water', 'Snakes on a Plane',
        'Superman Returns', 'The Night Listener', 'You, Me and Dupree']),
        recsys.all_other_items(user_id='Maria Gabriela', distance=pearson_correlation, nhood_size=2, minimal_similarity=0.1))
    similarity = UserSimilarity(matrix_model, euclidean_distances)
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Lady in the Water']), recsys.all_other_items('Lorena Abreu'))
    assert_array_equal(np.array([], dtype='|S'), recsys.all_other_items('Marcel Caraciolo'))
    assert_array_equal(np.array(['Just My Luck', 'Lady in the Water', 'Snakes on a Plane',
        'Superman Returns', 'The Night Listener', 'You, Me and Dupree']), recsys.all_other_items('Maria Gabriela'))
    # With a NearestNeighborsStrategy a user with no preferences has no
    # neighborhood, hence no candidate items at all.
    nhood_strategy = NearestNeighborsStrategy()
    similarity = UserSimilarity(matrix_model, pearson_correlation)
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Lady in the Water']),
        recsys.all_other_items(user_id='Lorena Abreu', distance=pearson_correlation, nhood_size=2, minimal_similarity=0.1))
    assert_array_equal(np.array([], dtype='|S'),
        recsys.all_other_items(user_id='Marcel Caraciolo', distance=pearson_correlation, nhood_size=3))
    assert_array_equal(np.array([]),
        recsys.all_other_items(user_id='Maria Gabriela', distance=euclidean_distances, nhood_size=2))
def test_estimate_preference_ItemBasedRecommender():
    """estimate_preference: existing ratings pass through; missing ones are
    predicted (capped and uncapped); users without data yield NaN."""
    items_strategy = ItemsNeighborhoodStrategy()
    similarity = ItemSimilarity(matrix_model, euclidean_distances)
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_almost_equals(3.5, recsys.estimate_preference('Marcel Caraciolo', 'Superman Returns'))
    assert_almost_equals(3.14717875510, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    #With capper = False
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy, False)
    assert_almost_equals(3.14717875510, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    #Non-Preferences
    assert_array_equal(np.nan, recsys.estimate_preference('Maria Gabriela', 'You, Me and Dupree'))
    # Boolean model: predictions collapse to 1.0 (rated) where estimable.
    items_strategy = ItemsNeighborhoodStrategy()
    similarity = ItemSimilarity(boolean_matrix_model, jaccard_coefficient)
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_almost_equals(1.0, recsys.estimate_preference('Marcel Caraciolo', 'Superman Returns'))
    assert_almost_equals(1.0, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    #With capper = False
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy, False)
    assert_almost_equals(1.0, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    #Non-Preferences
    assert_array_equal(np.NaN, recsys.estimate_preference('Maria Gabriela', 'You, Me and Dupree'))
def test_estimate_preference_UserBasedRecommender():
    """estimate_preference (user-based): pass-through for known ratings,
    predictions under different distances/neighborhood sizes, NaN otherwise."""
    nhood_strategy = NearestNeighborsStrategy()
    similarity = UserSimilarity(matrix_model, euclidean_distances)
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_almost_equals(3.5, recsys.estimate_preference('Marcel Caraciolo', 'Superman Returns'))
    assert_almost_equals(2.4533792305691886, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    # Overriding distance / nhood_size / minimal_similarity via keywords
    # changes the prediction accordingly.
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_almost_equals(3.5, recsys.estimate_preference('Marcel Caraciolo', 'Superman Returns'))
    assert_almost_equals(2.8960083169728952,
            recsys.estimate_preference(user_id='Leopoldo Pires', item_id='You, Me and Dupree',
                distance=pearson_correlation, nhood_size=4, minimal_similarity=-1.0))
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_almost_equals(2.0653946891716108,
            recsys.estimate_preference(user_id='Leopoldo Pires', item_id='You, Me and Dupree',
                 nhood_size=4))
    #With capper = False
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy, False)
    assert_almost_equals(2.0653946891716108, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    assert_almost_equals(2.8960083169728952,
            recsys.estimate_preference(user_id='Leopoldo Pires', item_id='You, Me and Dupree',
                distance=pearson_correlation, nhood_size=4, minimal_similarity=-1.0))
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy, False)
    assert_almost_equals(2.0653946891716108,
            recsys.estimate_preference(user_id='Leopoldo Pires', item_id='You, Me and Dupree',
                 nhood_size=4))
    #Non-Preferences
    assert_array_equal(np.nan, recsys.estimate_preference('Maria Gabriela', 'You, Me and Dupree'))
    # Boolean model: estimable predictions collapse to 1.0.
    nhood_strategy = NearestNeighborsStrategy()
    similarity = UserSimilarity(boolean_matrix_model, jaccard_coefficient)
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_almost_equals(1.0, recsys.estimate_preference('Marcel Caraciolo', 'Superman Returns'))
    assert_almost_equals(1.0, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    assert_almost_equals(1.0,
            recsys.estimate_preference(user_id='Leopoldo Pires', item_id='You, Me and Dupree',
                distance=jaccard_coefficient, nhood_size=3))
    #With capper = False
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy, False)
    assert_almost_equals(1.0, recsys.estimate_preference('Leopoldo Pires', 'You, Me and Dupree'))
    #Non-Preferences
    assert_array_equal(np.NaN, recsys.estimate_preference('Maria Gabriela', 'You, Me and Dupree'))
def test_most_similar_items_ItemBasedRecommender():
    """most_similar_items: top-N ordering, full list, unknown item raises,
    over-sized N is clipped, N=0 gives an empty array."""
    items_strategy = ItemsNeighborhoodStrategy()
    similarity = ItemSimilarity(matrix_model, euclidean_distances)
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    #semi items
    assert_array_equal(np.array(['Snakes on a Plane', \
       'The Night Listener', 'Lady in the Water', 'Just My Luck']), \
            recsys.most_similar_items('Superman Returns', 4))
    #all items
    assert_array_equal(np.array(['Lady in the Water', 'You, Me and Dupree', \
       'The Night Listener', 'Snakes on a Plane', 'Superman Returns']), \
        recsys.most_similar_items('Just My Luck'))
    #Non-existing
    assert_raises(ItemNotFoundError, recsys.most_similar_items, 'Back to the Future')
    #Exceed the limit
    assert_array_equal(np.array(['Lady in the Water', 'You, Me and Dupree', 'The Night Listener', \
       'Snakes on a Plane', 'Superman Returns']), \
        recsys.most_similar_items('Just My Luck', 20))
    #Empty
    assert_array_equal(np.array([]), \
        recsys.most_similar_items('Just My Luck', 0))
    # Same scenarios against the boolean model (ordering differs because the
    # jaccard coefficient ignores rating magnitudes).
    items_strategy = ItemsNeighborhoodStrategy()
    similarity = ItemSimilarity(boolean_matrix_model, jaccard_coefficient)
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    #semi items
    assert_array_equal(np.array(['Snakes on a Plane', 'The Night Listener', \
        'You, Me and Dupree', 'Lady in the Water']), \
            recsys.most_similar_items('Superman Returns', 4))
    #all items
    assert_array_equal(np.array(['The Night Listener', 'You, Me and Dupree', \
       'Snakes on a Plane', 'Superman Returns', 'Lady in the Water']), \
        recsys.most_similar_items('Just My Luck'))
    #Non-existing
    assert_raises(ItemNotFoundError, recsys.most_similar_items, 'Back to the Future')
    #Exceed the limit
    assert_array_equal(np.array(['The Night Listener', 'You, Me and Dupree', 'Snakes on a Plane',
       'Superman Returns', 'Lady in the Water']), \
        recsys.most_similar_items('Just My Luck', 20))
    #Empty
    assert_array_equal(np.array([]), \
        recsys.most_similar_items('Just My Luck', 0))
def test_most_similar_users_UserBasedRecommender():
    """most_similar_users: top-N ordering, full list, user with no data gives
    an empty array, over-sized N is clipped, N=0 gives an empty array."""
    nhood_strategy = NearestNeighborsStrategy()
    similarity = UserSimilarity(matrix_model, euclidean_distances)
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    #semi items
    assert_array_equal(np.array(['Leopoldo Pires', 'Steve Gates', 'Lorena Abreu',
                                 'Penny Frewman']), \
            recsys.most_similar_users('Marcel Caraciolo', 4))
    #all items
    assert_array_equal(np.array(['Lorena Abreu', 'Marcel Caraciolo', 'Penny Frewman', \
        'Steve Gates', 'Luciana Nunes', 'Sheldom']), \
        recsys.most_similar_users('Leopoldo Pires'))
    #Non-existing
    assert_array_equal(np.array([]), \
        recsys.most_similar_users('Maria Gabriela'))
    #Exceed the limit
    assert_array_equal(np.array(['Lorena Abreu', 'Marcel Caraciolo', 'Penny Frewman', \
       'Steve Gates', 'Luciana Nunes', 'Sheldom']), \
        recsys.most_similar_users('Leopoldo Pires', 20))
    #Empty
    assert_array_equal(np.array([]), \
        recsys.most_similar_users('Sheldom', 0))
    # Same scenarios against the boolean model (different neighbor ordering).
    nhood_strategy = NearestNeighborsStrategy()
    similarity = UserSimilarity(boolean_matrix_model, jaccard_coefficient)
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    #semi items
    assert_array_equal(np.array(['Luciana Nunes', 'Steve Gates', \
        'Lorena Abreu', 'Sheldom']), \
            recsys.most_similar_users('Marcel Caraciolo', 4))
    #all items
    assert_array_equal(np.array(['Sheldom', 'Luciana Nunes', 'Marcel Caraciolo',
                            'Steve Gates', 'Lorena Abreu', 'Penny Frewman']), \
        recsys.most_similar_users('Leopoldo Pires'))
    #Non-existing
    assert_array_equal(np.array([]), \
        recsys.most_similar_users('Maria Gabriela'))
    #Exceed the limit
    assert_array_equal(np.array(['Sheldom', 'Luciana Nunes', 'Marcel Caraciolo',
                    'Steve Gates', 'Lorena Abreu', 'Penny Frewman']), \
        recsys.most_similar_users('Leopoldo Pires', 20))
    #Empty
    assert_array_equal(np.array([]), \
        recsys.most_similar_users('Sheldom', 0))
def test_recommend_ItemBasedRecommender():
    """recommend (item-based): empty for users with nothing to recommend,
    ranked lists, top-N clipping, and (item, score) pairs with with_preference."""
    items_strategy = ItemsNeighborhoodStrategy()
    similarity = ItemSimilarity(matrix_model, euclidean_distances)
    #Empty Recommendation
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Marcel Caraciolo'))
    #Semi Recommendation
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['Just My Luck', 'You, Me and Dupree']), \
        recsys.recommend('Leopoldo Pires'))
    #Semi Recommendation
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['Just My Luck']), \
        recsys.recommend('Leopoldo Pires', 1))
    #Empty Recommendation
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #Test with params update
    recsys.recommend(user_id='Maria Gabriela', similarity=similarity)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #with_preference
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy, True, True)
    assert_equals('Just My Luck', recsys.recommend('Leopoldo Pires')[0][0])
    assert_equals('You, Me and Dupree', recsys.recommend('Leopoldo Pires')[1][0])
    assert_almost_equals(3.20597, recsys.recommend('Leopoldo Pires')[0][1], 2)
    assert_almost_equals(3.147178755, recsys.recommend('Leopoldo Pires')[1][1], 2)
    # Same scenarios against the boolean model (scores collapse to 1.0).
    similarity = ItemSimilarity(boolean_matrix_model, jaccard_coefficient)
    #Empty Recommendation
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Marcel Caraciolo'))
    #Semi Recommendation
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['You, Me and Dupree', 'Just My Luck']), \
        recsys.recommend('Leopoldo Pires'))
    #Semi Recommendation
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['You, Me and Dupree']), \
        recsys.recommend('Leopoldo Pires', 1))
    #Empty Recommendation
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #Test with params update
    recsys.recommend(user_id='Maria Gabriela', similarity=similarity)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #with_preference
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy, True, True)
    assert_equals('You, Me and Dupree', recsys.recommend('Leopoldo Pires')[0][0])
    assert_equals('Just My Luck', recsys.recommend('Leopoldo Pires')[1][0])
    assert_almost_equals(1.0, recsys.recommend('Leopoldo Pires')[0][1], 2)
    assert_almost_equals(1.0, recsys.recommend('Leopoldo Pires')[1][1], 2)
def test_recommend_UserBasedRecommender():
    """recommend (user-based): empty for users with nothing to recommend,
    ranked lists, top-N clipping, and (item, score) pairs with with_preference."""
    nhood_strategy = NearestNeighborsStrategy()
    similarity = UserSimilarity(matrix_model, euclidean_distances)
    #Empty Recommendation
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Marcel Caraciolo'))
    #Semi Recommendation
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Just My Luck', 'You, Me and Dupree']), \
        recsys.recommend('Leopoldo Pires'))
    #Semi Recommendation
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Just My Luck']), \
        recsys.recommend('Leopoldo Pires', 1))
    #Empty Recommendation
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #Test with params update
    recsys.recommend(user_id='Maria Gabriela', similarity=similarity)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #with_preference
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy, True, True)
    assert_equals('Just My Luck', recsys.recommend('Leopoldo Pires')[0][0])
    assert_equals('You, Me and Dupree', recsys.recommend('Leopoldo Pires')[1][0])
    assert_almost_equals(2.456743361464, recsys.recommend('Leopoldo Pires')[0][1], 2)
    assert_almost_equals(2.453379, recsys.recommend('Leopoldo Pires')[1][1], 2)
    # Same scenarios against the boolean model (scores collapse to 1.0).
    similarity = UserSimilarity(boolean_matrix_model, jaccard_coefficient)
    #Empty Recommendation
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Marcel Caraciolo'))
    #Semi Recommendation
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['You, Me and Dupree', 'Just My Luck']), \
        recsys.recommend('Leopoldo Pires'))
    #Semi Recommendation
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['You, Me and Dupree']), \
        recsys.recommend('Leopoldo Pires', 1))
    #Empty Recommendation
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #Test with params update
    recsys.recommend(user_id='Maria Gabriela', similarity=similarity)
    assert_array_equal(np.array([]), recsys.recommend('Maria Gabriela'))
    #with_preference
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy, True, True)
    assert_equals('You, Me and Dupree', recsys.recommend('Leopoldo Pires')[0][0])
    assert_equals('Just My Luck', recsys.recommend('Leopoldo Pires')[1][0])
    assert_almost_equals(1.0, recsys.recommend('Leopoldo Pires')[0][1], 2)
    assert_almost_equals(1.0, recsys.recommend('Leopoldo Pires')[1][1], 2)
def test_recommend_because_ItemBasedRecommender():
    """recommended_because (item-based): the user's rated items that explain a
    recommendation, with full/clipped/over-sized N and (item, rating) pairs."""
    items_strategy = ItemsNeighborhoodStrategy()
    similarity = ItemSimilarity(matrix_model, euclidean_distances)
    #Full Recommendation Because
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['The Night Listener', 'Superman Returns', \
                'Snakes on a Plane', 'Lady in the Water']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck'))
    #over-items
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['The Night Listener', 'Superman Returns', \
                'Snakes on a Plane', 'Lady in the Water']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 20))
    #Semi
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['The Night Listener', 'Superman Returns']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 2))
    #Non-Existing
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy)
    assert_array_equal(np.array([]), \
                recsys.recommended_because('Maria Gabriela', 'Just My Luck', 2))
    #with_preference
    recsys = ItemBasedRecommender(matrix_model, similarity, items_strategy, True, True)
    assert_array_equal(np.array([('The Night Listener', 4.0), \
                                 ('Superman Returns', 3.5)]), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 2))
    #boolean_matrix_model
    similarity = ItemSimilarity(boolean_matrix_model, jaccard_coefficient)
    #Full Recommendation Because
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['The Night Listener', 'Superman Returns', \
                'Snakes on a Plane', 'Lady in the Water']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck'))
    #over-items
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['The Night Listener', 'Superman Returns', \
                'Snakes on a Plane', 'Lady in the Water']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 20))
    #Semi
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array(['The Night Listener', 'Superman Returns']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 2))
    #Non-Existing
    recsys = ItemBasedRecommender(boolean_matrix_model, similarity, items_strategy)
    assert_array_equal(np.array([]), \
                recsys.recommended_because('Maria Gabriela', 'Just My Luck', 2))
def test_recommend_because_UserBasedRecommender():
    """recommended_because (user-based): the neighbors that explain a
    recommendation, with full/clipped/over-sized N and (user, rating) pairs."""
    nhood_strategy = NearestNeighborsStrategy()
    similarity = UserSimilarity(matrix_model, euclidean_distances)
    #Full Recommendation Because
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Lorena Abreu', 'Marcel Caraciolo', \
          'Steve Gates', 'Luciana Nunes']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck'))
    #over-items
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Lorena Abreu', 'Marcel Caraciolo', \
          'Steve Gates', 'Luciana Nunes']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 20))
    #Semi
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Lorena Abreu', 'Marcel Caraciolo']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 2))
    #Non-Existing
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array([]), \
                recsys.recommended_because('Maria Gabriela', 'Just My Luck', 2))
    #with_preference
    recsys = UserBasedRecommender(matrix_model, similarity, nhood_strategy, True, True)
    assert_array_equal(np.array([('Lorena Abreu', 3.0), \
                                  ('Marcel Caraciolo', 3.0)]), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 2))
    #boolean_matrix_model
    similarity = UserSimilarity(boolean_matrix_model, jaccard_coefficient)
    #Full Recommendation Because
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Steve Gates', 'Marcel Caraciolo', 'Luciana Nunes', \
           'Lorena Abreu']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck'))
    #over-items
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Steve Gates', 'Marcel Caraciolo', 'Luciana Nunes', \
            'Lorena Abreu']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 20))
    #Semi
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array(['Steve Gates', 'Marcel Caraciolo']), \
                recsys.recommended_because('Leopoldo Pires', 'Just My Luck', 2))
    #Non-Existing
    recsys = UserBasedRecommender(boolean_matrix_model, similarity, nhood_strategy)
    assert_array_equal(np.array([]), \
                recsys.recommended_because('Maria Gabriela', 'Just My Luck', 2))
| imouren/crab | scikits/crab/recommenders/knn/tests/test_classes.py | Python | bsd-3-clause | 26,670 |
# -*- coding: utf-8 -*-
""" Sahana Eden Disaster Victim Registration Model
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DVRModel",)
from gluon import *
from gluon.storage import Storage
from gluon.tools import callback
from ..s3 import *
from s3layouts import S3PopupLink
# =============================================================================
class S3DVRModel(S3Model):
    """
        Allow an individual or household to register to receive
        compensation and/or distributions of relief items
    """
    # Table names this model is responsible for (resolved lazily by S3Model).
    names = ("dvr_need",
             "dvr_case",
             "dvr_case_need",
             )
    def model(self):
        """Define the dvr_case, dvr_need and dvr_case_need tables."""
        T = current.T
        db = current.db
        UNKNOWN_OPT = current.messages.UNKNOWN_OPT
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        configure = self.configure
        # ---------------------------------------------------------------------
        # Case
        #
        #dvr_damage_opts = {
        #    1: T("Very High"),
        #    2: T("High"),
        #    3: T("Medium"),
        #    4: T("Low"),
        #}
        # Workflow status of a case.
        dvr_status_opts = {
            1: T("Open"),
            2: T("Pending"),
            3: T("Closed"),
        }
        tablename = "dvr_case"
        define_table(tablename,
                     # @ToDo: Option to autogenerate these, like Waybills, et al
                     Field("reference",
                           label = T("Case Number"),
                           ),
                     self.org_organisation_id(),
                     self.pr_person_id(
                        # @ToDo: Modify this to update location_id if the selected
                        #        person has a Home Address already
                        comment = None,
                        represent = self.pr_PersonRepresent(show_link=True),
                        requires = IS_ADD_PERSON_WIDGET2(),
                        widget = S3AddPersonWidget2(controller="pr"),
                        ),
                     #Field("damage", "integer",
                     #      label= T("Damage Assessment"),
                     #      represent = lambda opt: \
                     #        dvr_damage_opts.get(opt, UNKNOWN_OPT),
                     #      requires = IS_EMPTY_OR(IS_IN_SET(dvr_damage_opts)),
                     #      ),
                     #Field("insurance", "boolean",
                     #      label = T("Insurance"),
                     #      represent = s3_yes_no_represent,
                     #      ),
                     Field("status", "integer",
                           default = 1,
                           label = T("Status"),
                           represent = S3Represent(options=dvr_status_opts),
                           requires = IS_EMPTY_OR(IS_IN_SET(dvr_status_opts)),
                           ),
                     s3_comments(),
                     *s3_meta_fields())
        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("Create Case"),
            title_display = T("Case Details"),
            title_list = T("Cases"),
            title_update = T("Edit Case"),
            label_list_button = T("List Cases"),
            label_delete_button = T("Delete Case"),
            msg_record_created = T("Case added"),
            msg_record_modified = T("Case updated"),
            msg_record_deleted = T("Case deleted"),
            msg_list_empty = T("No Cases found")
        )
        # Foreign-key field for other tables to reference a case.
        represent = S3Represent(lookup=tablename, fields=("reference",))
        case_id = S3ReusableField("case_id", "reference %s" % tablename,
                                  label = T("Case"),
                                  ondelete = "RESTRICT",
                                  represent = represent,
                                  requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "dvr_case.id",
                                                          represent)),
                                  )
        # Components: needs via the dvr_case_need link table, plus the
        # person's current (type 1) and permanent (type 2) home addresses.
        self.add_components(tablename,
                            dvr_need = {"link": "dvr_case_need",
                                        "joinby": "case_id",
                                        "key": "need_id",
                                        },
                            pr_address = ({"name": "current_address",
                                           "link": "pr_person",
                                           "joinby": "id",
                                           "key": "pe_id",
                                           "fkey": "pe_id",
                                           "pkey": "person_id",
                                           "filterby": "type",
                                           "filterfor": ("1",),
                                           },
                                          {"name": "permanent_address",
                                           "link": "pr_person",
                                           "joinby": "id",
                                           "key": "pe_id",
                                           "fkey": "pe_id",
                                           "pkey": "person_id",
                                           "filterby": "type",
                                           "filterfor": ("2",),
                                           },
                                          ),
                            )
        crud_form = S3SQLCustomForm("reference",
                                    "organisation_id",
                                    "status",
                                    "person_id",
                                    S3SQLInlineComponent("current_address",
                                                         label = T("Current Address"),
                                                         fields = [("", "location_id"),
                                                                   ],
                                                         default = {"type": 1}, # Current Home Address
                                                         link = False,
                                                         multiple = False,
                                                         ),
                                    S3SQLInlineComponent("permanent_address",
                                                         comment = T("If Displaced"),
                                                         label = T("Normal Address"),
                                                         fields = [("", "location_id"),
                                                                   ],
                                                         default = {"type": 2}, # Permanent Home Address
                                                         link = False,
                                                         multiple = False,
                                                         ),
                                    S3SQLInlineLink("need",
                                                    field = "need_id",
                                                    ),
                                    "comments",
                                    )
        # Report (pivot table) axes: organisation, need, and the relevant
        # administrative levels of the current address.
        axes = ["organisation_id",
                "case_need.need_id",
                ]
        levels = current.gis.get_relevant_hierarchy_levels()
        for level in levels:
            axes.append("current_address.location_id$%s" % level)
        highest_lx = "current_address.location_id$%s" % levels[0]
        facts = [(T("Number of Cases"), "count(id)"),
                 ]
        report_options = {"rows": axes,
                          "cols": axes,
                          "fact": facts,
                          "defaults": {"rows": "case_need.need_id",
                                       "cols": highest_lx,
                                       "fact": facts[0],
                                       "totals": True,
                                       },
                          }
        configure(tablename,
                  crud_form = crud_form,
                  report_options = report_options,
                  )
        # ---------------------------------------------------------------------
        # Needs
        #
        tablename = "dvr_need"
        define_table(tablename,
                     Field("name",
                           label = T("Name"),
                           ),
                     s3_comments(),
                     *s3_meta_fields())
        # CRUD Strings
        ADD_NEED = T("Create Need")
        crud_strings[tablename] = Storage(
            label_create = ADD_NEED,
            title_display = T("Need Details"),
            title_list = T("Needs"),
            title_update = T("Edit Need"),
            label_list_button = T("List Needs"),
            label_delete_button = T("Delete Need"),
            msg_record_created = T("Need added"),
            msg_record_modified = T("Need updated"),
            msg_record_deleted = T("Need deleted"),
            msg_list_empty = T("No Needs found")
        )
        # Foreign-key field for other tables to reference a need, with a
        # popup link to create new needs inline.
        represent = S3Represent(lookup=tablename, translate=True)
        need_id = S3ReusableField("need_id", "reference %s" % tablename,
                                  label = T("Need"),
                                  ondelete = "RESTRICT",
                                  represent = represent,
                                  requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "dvr_need.id",
                                                          represent)),
                                  comment=S3PopupLink(c = "dvr",
                                                      f = "need",
                                                      label = ADD_NEED,
                                                      ),
                                  )
        configure(tablename,
                  deduplicate = S3Duplicate(),
                  )
        # ---------------------------------------------------------------------
        # Cases <> Needs
        #
        tablename = "dvr_case_need"
        define_table(tablename,
                     case_id(empty = False,
                             ondelete = "CASCADE",
                             ),
                     need_id(empty = False,
                             ondelete = "CASCADE",
                             ),
                     #s3_comments(),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        # NOTE(review): nothing is exported here (empty dict), even though
        # "names" lists three tables — presumably no reusable fields are
        # needed by other models; confirm before exporting case_id/need_id.
        #
        return {}
# END =========================================================================
| anurag-ks/eden | modules/s3db/dvr.py | Python | mit | 12,049 |
# Generated by Django 2.2.24 on 2021-06-29 13:16
from django.db import migrations
from django.db.models import Min
from corehq.util.django_migrations import skip_on_fresh_install
def _reset_date_modified(model_name):
    """Build a RunPython forward function that clears ``date_modified``.

    Rows sharing the *earliest* ``date_modified`` value in the given ``sms``
    model are reset to NULL. NOTE(review): presumably those rows were stamped
    in bulk by migration 0050 rather than by real edits -- confirm.
    """
    @skip_on_fresh_install
    def _reset_date(apps, schema_editor):
        model = apps.get_model('sms', model_name)
        # Earliest modification timestamp across the whole table (None if empty).
        min_date = model.objects.aggregate(min_date=Min("date_modified"))['min_date']
        if min_date:
            model.objects.filter(date_modified=min_date).update(date_modified=None)
    return _reset_date
class Migration(migrations.Migration):
    # Applied data migration -- do not edit operations in place; add a new
    # migration instead.
    dependencies = [
        ('sms', '0050_sms_email_date_modified'),
    ]
    operations = [
        # Reverse is a no-op: the cleared timestamps cannot be reconstructed.
        migrations.RunPython(_reset_date_modified('SMS'), migrations.RunPython.noop),
        migrations.RunPython(_reset_date_modified('Email'), migrations.RunPython.noop),
    ]
| dimagi/commcare-hq | corehq/apps/sms/migrations/0051_reset_modified_on.py | Python | bsd-3-clause | 873 |
from __future__ import absolute_import, unicode_literals
import unittest
from mopidy.mpd.exceptions import (
MpdAckError, MpdNoCommand, MpdNotImplemented, MpdPermissionError,
MpdSystemError, MpdUnknownCommand)
class MpdExceptionsTest(unittest.TestCase):

    """Exercise the MPD exception hierarchy and its ACK line formatting."""

    def test_key_error_wrapped_in_mpd_ack_error(self):
        # Wrapping a KeyError must carry the original message text through.
        try:
            try:
                raise KeyError('Track X not found')
            except KeyError as cause:
                raise MpdAckError(cause.message)
        except MpdAckError as exc:
            self.assertEqual('Track X not found', exc.message)

    def test_mpd_not_implemented_is_a_mpd_ack_error(self):
        # MpdNotImplemented must be catchable as its MpdAckError base class.
        try:
            raise MpdNotImplemented
        except MpdAckError as exc:
            self.assertEqual('Not implemented', exc.message)

    def test_get_mpd_ack_with_default_values(self):
        exc = MpdAckError('A description')
        self.assertEqual('ACK [0@0] {None} A description', exc.get_mpd_ack())

    def test_get_mpd_ack_with_values(self):
        try:
            raise MpdAckError('A description', index=7, command='foo')
        except MpdAckError as exc:
            self.assertEqual('ACK [0@7] {foo} A description', exc.get_mpd_ack())

    def test_mpd_unknown_command(self):
        try:
            raise MpdUnknownCommand(command='play')
        except MpdAckError as exc:
            self.assertEqual(
                'ACK [5@0] {} unknown command "play"', exc.get_mpd_ack())

    def test_mpd_no_command(self):
        try:
            raise MpdNoCommand
        except MpdAckError as exc:
            self.assertEqual(
                'ACK [5@0] {} No command given', exc.get_mpd_ack())

    def test_mpd_system_error(self):
        try:
            raise MpdSystemError('foo')
        except MpdSystemError as exc:
            self.assertEqual(
                'ACK [52@0] {None} foo', exc.get_mpd_ack())

    def test_mpd_permission_error(self):
        try:
            raise MpdPermissionError(command='foo')
        except MpdPermissionError as exc:
            self.assertEqual(
                'ACK [4@0] {foo} you don\'t have permission for "foo"',
                exc.get_mpd_ack())
| priestd09/mopidy | tests/mpd/test_exceptions.py | Python | apache-2.0 | 2,135 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import sys
import glob
import re
# read version.py file to get version and metadata information
# NOTE: execfile() is Python 2 only. It injects _t80sched_version_,
# _t80sched_description_, _t80sched_url_, _t80sched_author_,
# _t80sched_author_email_ and _t80sched_license_ into this namespace;
# all are referenced by the setup() call below.
here = os.path.abspath(os.path.dirname(__file__))
version_py = os.path.join(here, "src/t80sched/core/version.py")
execfile(version_py)
# chimera scripts
t80sched_scripts = ['src/scripts/chimera-t80sched',
                    'src/scripts/chimera-setupdb',
                    'src/scripts/chimera-qexec',
                    'src/scripts/chimera-schedgui']
# platform specific requirements
platform_deps = []  # NOTE(review): declared but never passed to setup() -- confirm intent
# go!
setup(
    name='t80sched-python',
    version=_t80sched_version_,
    description=_t80sched_description_,
    long_description=open("docs/site/index.rst").read(),
    url=_t80sched_url_,
    author=_t80sched_author_,
    author_email=_t80sched_author_email_,
    license=_t80sched_license_,
    package_dir={"": "src"},
    package_data={'': ['*.ui']},
    packages=find_packages("src", exclude=["*.tests"]),
    include_package_data=True,
    scripts=t80sched_scripts,
    tests_require=["astropy","chimera"],
)
| tribeiro/chimera-t80sched | setup.py | Python | gpl-2.0 | 1,104 |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import amulet
class TestDeploy(unittest.TestCase):
    """
    Trivial deployment test for Apache Bigtop ResourceManager.
    This charm cannot do anything useful by itself, so integration testing
    is done in the bundle.
    """
    def test_deploy(self):
        # Deploy one resourcemanager unit on xenial and wait for it to settle;
        # timeouts are generous (15/30 min) because provisioning is slow.
        self.d = amulet.Deployment(series='xenial')
        self.d.add('resourcemanager', 'hadoop-resourcemanager')
        self.d.setup(timeout=900)
        self.d.sentry.wait(timeout=1800)
        # Keep a handle to the deployed unit; the assertion is simply that
        # deployment reached a settled state without raising.
        self.unit = self.d.sentry['resourcemanager'][0]
if __name__ == '__main__':
    unittest.main()
| sekikn/bigtop | bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/tests/01-basic-deployment.py | Python | apache-2.0 | 1,381 |
#!/usr/bin/env python
#
# $Id: put.py,v 1.2 2005-01-27 04:24:40 jsommers Exp $
#
import httplib, socket, sys, getopt
def usage(proggie):
    # Print the command-line synopsis and exit successfully (Python 2 script).
    print "usage: ", proggie, "[-h <host> -i <infile> -o <targetfile>]"
    sys.exit(0)
# Parse -h host, -i input file, -o target path; any getopt failure shows usage.
try:
    opts,args = getopt.getopt(sys.argv[1:], "h:i:o:", [])
except getopt.GetoptError,e:
    print "exception while processing options: ", e
    usage (sys.argv[0])
rhost = ''
infile = ''
tfile = ''
url_list = []  # NOTE(review): never used -- candidate for removal
for o, a in opts:
    if o == "-h":
        rhost = a
    elif o == "-i":
        infile = a
    elif o == "-o":
        tfile = a
# All three options are mandatory.
if len(rhost) == 0 or len(infile) == 0 or len(tfile) == 0:
    usage(sys.argv[0])
pfile = file(infile)
# NOTE(review): reads at most 1,000,000 bytes -- larger files are silently
# truncated; confirm this cap is intentional.
fstr = pfile.read(1000000)
pfile.close()
dstport = 8180  # NOTE(review): unused -- the port is hard-coded again below
method = 'PUT'
print "sending file of length: ",len(fstr)
try:
    httpcon = httplib.HTTPConnection(rhost, 8180)
    httpcon.request(method, tfile, fstr)
    resp = httpcon.getresponse()
    httpcon.close()
    print "http returned: ",resp.status,resp.reason,resp.version,resp.msg
except:
    # Bare except: any failure (DNS, refused connection, ...) is reported the
    # same way on stderr.
    print >>sys.stderr,"error shipping file. check hostname and whether server is running."
| jsommers/harpoon | cli/put.py | Python | gpl-2.0 | 1,126 |
#!/usr/bin/env python
#
# Original code from - https://github.com/Clete2/NessusReport, modded by Lee Baird
# John Kim - additional modification completed to support UTF-8, support cli help, renaming output files
# Thanks to Securicon, LLC. for sponsoring development
import csv
import datetime
import re
import sys
import time
import utfdictcsv
import xml.etree.ElementTree as ET
################################################################
class NessusParser:
    """Thin wrapper around an ElementTree parse of a .nessus (XML v2) report."""
    def loadXML(self, filename):
        # Parse the report file and cache its root element for later queries.
        self.xml = ET.parse(filename)
        self.rootElement = self.xml.getroot()
    def getHosts(self):
        # All ReportHost elements of the (single) Report section.
        return self.rootElement.findall("./Report/ReportHost")
################################################################
    def getHostProperties(self, host):
        """Extract ip / hostname / OS for one ReportHost element.

        Falls back from 'host-ip' tag to the element's name attribute, from
        'netbios-name' to 'host-fqdn', and from 'operating-system' to 'os'.
        """
        properties = {}
        hostProperties = host.findall("./HostProperties")[0]
        _temp_ip = hostProperties.findall("./tag[@name='host-ip']")
        # NOTE(review): the "is not None" clause is redundant (findall always
        # returns a list) and is evaluated after len() anyway.
        if len(_temp_ip) > 0 and _temp_ip is not None:
            properties['host-ip'] = _temp_ip[0].text
        else:
            properties['host-ip'] = host.attrib['name']
        hostnames = hostProperties.findall("./tag[@name='netbios-name']")
        if(len(hostnames) >= 1):
            properties['netbios-name'] = hostnames[0].text
        else:
            hostnames = hostProperties.findall("./tag[@name='host-fqdn']")
            if(len(hostnames) >= 1):
                properties['netbios-name'] = hostnames[0].text
        # NOTE: local 'os' shadows the 'os' module within this method.
        os = hostProperties.findall("./tag[@name='operating-system']")
        if(len(os) >= 1):
            properties['operating-system'] = os[0].text
        else:
            os = hostProperties.findall("./tag[@name='os']")
            if(len(os) >= 1):
                properties['operating-system'] = os[0].text
        return properties
################################################################
    def getReportItems(self, host):
        # All findings attached to one ReportHost.
        return host.findall("./ReportItem")
    def getReportItemProperties(self, reportItem):
        """Return the ReportItem attributes, minus severity/pluginFamily.

        NOTE: mutates the element's own attrib dict (Py2 has_key style).
        """
        properties = reportItem.attrib
        if(properties.has_key('severity')):
            del(properties['severity'])
        if(properties.has_key('pluginFamily')):
            del(properties['pluginFamily'])
        return properties
################################################################
    def getReportItemDetails(self, reportItem):
        """Collect optional child-element texts of a ReportItem.

        Only keys whose element exists are present in the result; element
        .text may be None for empty elements -- callers must tolerate that.
        """
        details = {}
        details['description'] = reportItem.findall("./description")[0].text
        pluginElements = reportItem.findall("./plugin_output")
        if(len(pluginElements) >= 1):
            details['plugin_output'] = pluginElements[0].text
        solutionElements = reportItem.findall("./solution")
        if(len(solutionElements) >= 1):
            details['solution'] = solutionElements[0].text
        seealsoElements = reportItem.findall("./see_also")
        if(len(seealsoElements) >= 1):
            details['see_also'] = seealsoElements[0].text
        cveElements = reportItem.findall("./cve")
        if(len(cveElements) >= 1):
            details['cve'] = cveElements[0].text
        cvssElements = reportItem.findall("./cvss_base_score")
        if(len(cvssElements) >= 1):
            details['cvss_base_score'] = cvssElements[0].text
        return details
################################################################
def transformIfAvailable(inputDict, inputKey, outputDict, outputKey):
    """Copy inputDict[inputKey] to outputDict[outputKey], flattened for CSV.

    Newlines are replaced by spaces and text longer than 32,000 characters is
    truncated (Excel caps cells at 32,767 chars). The cleaned value is written
    back into inputDict as well, so repeated calls stay idempotent for the
    newline replacement. Missing keys and None values (ElementTree returns
    None for empty elements) are skipped, leaving outputDict untouched.
    """
    # dict.has_key() was removed in Python 3; the `in` operator is equivalent
    # and works on both Python 2 and 3.
    if inputKey in inputDict and inputDict[inputKey] is not None:
        inputDict[inputKey] = inputDict[inputKey].replace("\n", " ")
        if len(inputDict[inputKey]) > 32000:
            inputDict[inputKey] = inputDict[inputKey][:32000] + " [Text Cut Due To Length]"
        outputDict[outputKey] = inputDict[inputKey]
################################################################
if __name__ == "__main__":
    # Convert one or more .nessus files into a single UTF-8 CSV (nessus.csv).
    if len(sys.argv) > 1:
        header = ['CVSS Score','IP','FQDN','OS','Port','Vulnerability','Description','Proof','Solution','See Also','CVE']
        with open("nessus.csv", "wb") as outFile:
            csvWriter = utfdictcsv.DictUnicodeWriter(outFile, header, quoting=csv.QUOTE_ALL)
            csvWriter.writeheader()
            nessusParser = NessusParser()
            for fileName in sys.argv[1:]:
                # try:  (per-file error handling disabled -- see commented
                #        except at the end of the loop)
                    nessusParser.loadXML(fileName)
                    hostReports = []
                    hosts = nessusParser.getHosts()
                    for host in hosts:
                        # Get properties for this host
                        hostProperties = nessusParser.getHostProperties(host)
                        # Get all findings for this host
                        reportItems = nessusParser.getReportItems(host)
                        for reportItem in reportItems:
                            reportItemDict = {}
                            # Get the metadata and details for this report item
                            reportItemProperties = nessusParser.getReportItemProperties(reportItem)
                            reportItemDetails = nessusParser.getReportItemDetails(reportItem)
                            # Create dictionary for line
                            transformIfAvailable(reportItemDetails, "cvss_base_score", reportItemDict, header[0])
                            transformIfAvailable(hostProperties, "host-ip", reportItemDict, header[1])
                            transformIfAvailable(hostProperties, "netbios-name", reportItemDict, header[2])
                            transformIfAvailable(hostProperties, "operating-system", reportItemDict, header[3])
                            transformIfAvailable(reportItemProperties, "port", reportItemDict, header[4])
                            transformIfAvailable(reportItemProperties, "pluginName", reportItemDict, header[5])
                            transformIfAvailable(reportItemDetails, "description", reportItemDict, header[6])
                            transformIfAvailable(reportItemDetails, "plugin_output", reportItemDict, header[7])
                            transformIfAvailable(reportItemDetails, "solution", reportItemDict, header[8])
                            transformIfAvailable(reportItemDetails, "see_also", reportItemDict, header[9])
                            transformIfAvailable(reportItemDetails, "cve", reportItemDict, header[10])
                            hostReports.append(reportItemDict)
                    csvWriter.writerows(hostReports)
                # except:
                #     print "[!] Error processing {}".format(fileName)
                #     pass
            # NOTE(review): redundant -- the with-statement already closes
            # outFile on exit.
            outFile.close()
    else:
        print "\nUsage: ./parse-nessus.py input.nessus"
        # NOTE(review): the .format(sys.argv[0]) below is a no-op -- the
        # string contains no {} placeholder.
        print "Any field longer than 32,000 characters will be truncated.\n".format(sys.argv[0])
        exit()
| EricSB/discover | parsers/parse-nessus.py | Python | mit | 6,845 |
from algorithms.structures.disjoint_set import DisjointSet
def test_disjoint_set():
    """Union two pairs and verify find_set reports the merged components."""
    elements = list(range(10))
    dsu = DisjointSet(elements)

    # Initially each element is its own representative.
    assert dsu.find_set(1) != dsu.find_set(2)

    dsu.union(1, 2)
    assert dsu.find_set(1) == dsu.find_set(2)
    assert dsu.find_set(1) != dsu.find_set(3)

    dsu.union(2, 3)
    assert dsu.find_set(1) == dsu.find_set(2)
    assert dsu.find_set(2) == dsu.find_set(3)
| vadimadr/python-algorithms | tests/test_disjoint_set.py | Python | mit | 407 |
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.messages.views import SuccessMessageMixin
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import DetailView, ListView, View
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from stronghold.views import StrongholdPublicMixin
from books.forms import BookBorrowForm, BookForm, TagsForm
from books.models import Book, BookSuggestion, Tags
class HomePageView(TemplateView):
    """Landing page: staff get the dashboard, everyone else a no-access page."""

    def get(self, request, *args, **kwargs):
        template = 'staff_homepage.html' if request.user.is_staff else 'no_access.html'
        return render(request, template)
class BookListView(ListView):
    """Staff-only book catalogue with free-text search and pagination.

    ``?q=`` filters by book name, author name or tag name (case-insensitive
    substring match); ``?page=`` selects the page (10 books per page).
    """
    def get(self, request):
        books = Book.objects.all()
        query = request.GET.get("q")
        if query:
            # Match any of the three fields; distinct() because the tag join
            # can produce duplicate rows.
            books = books.filter(
                Q(book_name__icontains=query)|
                Q(author_name__icontains=query)|
                Q(tags__name__icontains=query)
            ).distinct()
        paginator = Paginator(books, 10) # Show 10 books per page
        page = request.GET.get('page')
        try:
            books = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            books = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results.
            books = paginator.page(paginator.num_pages)
        context = {
            'books' : books,
        }
        if not request.user.is_staff:
            return render(request, 'no_access.html')
        else:
            return render(request, 'staff_book_list.html', context)
class AddBookView(SuccessMessageMixin, CreateView):
    """Staff-only form view for creating a Book."""
    def dispatch(self, request, *args, **kwargs):
        # Gate every HTTP method on staff status before normal dispatch.
        if not request.user.is_staff:
            return render(request, 'no_access.html')
        return super(AddBookView, self).dispatch(request, *args, **kwargs)
    form_class = BookForm
    model = Book
    template_name = "staff_add_form.html"
    success_message = "%(book_name)s was created successfully"
    def get_context_data(self, **kwargs):
        # 'pagename' feeds the shared staff_add_form.html heading.
        context = super(AddBookView, self).get_context_data(**kwargs)
        context.update({'pagename': "Add new Book"})
        return context
    def get_success_url(self):
        return reverse('staff:book_list')
class EditBookView(SuccessMessageMixin, UpdateView):
    """Staff-only form view for editing an existing Book (pk via 'id')."""
    def dispatch(self, request, *args, **kwargs):
        # Gate every HTTP method on staff status before normal dispatch.
        if not request.user.is_staff:
            return render(request, 'no_access.html')
        return super(EditBookView, self).dispatch(request, *args, **kwargs)
    form_class = BookForm
    model = Book
    pk_url_kwarg = 'id'
    template_name = "staff_add_form.html"
    success_message = "%(book_name)s was updated successfully"
    def get_context_data(self, **kwargs):
        # 'pagename' feeds the shared staff_add_form.html heading.
        context = super(EditBookView, self).get_context_data(**kwargs)
        context.update({'pagename': "Update Book"})
        return context
    def get_success_url(self):
        return reverse('staff:book_list')
class DeleteBookView(SuccessMessageMixin, View):
    """Staff-only AJAX endpoint that deletes a Book via HTTP DELETE."""
    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_staff:
            return render(request, 'no_access.html')
        return super(DeleteBookView, self).dispatch(request, *args, **kwargs)
    def delete(self, request, id):
        # delete an object and send a confirmation response
        # (raises Book.DoesNotExist -> 500 for an unknown id)
        Book.objects.get(pk = id).delete()
        return HttpResponse('deleted!')
# def deleteBook(request, id):
# if request.method == "DELETE":
# Book.objects.get(pk=request.DELETE['pk']).delete()
# return HttpResponse('deleted!', content_type="text/plain")
# return HttpResponse('not working!')
"""
delete user from the book isntance and change its status
args = id of the user
return = redirect
"""
def releasebook(request, id):
if not request.user.is_staff:
return render(request, 'no_access.html')
else:
book = get_object_or_404(Book, id=id)
if request.method == "POST":
book.status = True
book.borrower = None
book.save()
messages.success(request, 'Book released.')
return redirect(reverse('staff:book_list'))
"""
args = None
return = list of all the users including the staff
"""
class UserListView(ListView):
    """Staff-only listing of every account (staff included)."""

    def get(self, request):
        if not request.user.is_staff:
            return render(request, 'no_access.html')
        # QuerySets are lazy, so building it here costs nothing extra.
        return render(request, 'staff_user_list.html', {'users': User.objects.all()})
"""
delete a specific user from the database
args = id of the user
return = refresh
"""
class DeleteUserView(SuccessMessageMixin, View):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_staff:
return render(request, 'no_access.html')
return super(DeleteUserView, self).dispatch(request, *args, **kwargs)
def delete(self, request, id):
# delete an object and send a confirmation response
user = User.objects.get(pk = id)
if not user.is_staff:
user.delete()
return HttpResponse('deleted!')
"""
list all the suggestions from the database
args = None
return = list
"""
class Suggestions(ListView):
def get(self, request):
suggestions = BookSuggestion.objects.all()
context = {
'suggestions' : suggestions
}
if not request.user.is_staff:
return render(request, 'no_access.html')
else:
return render(request, 'suggestions.html', context)
"""
return list of the tags from the database
args = None
return = list
"""
class Tagslist(StrongholdPublicMixin, ListView):
def get(self, request):
tags = Tags.objects.all()
context = {
'tags' : tags,
}
if not request.user.is_staff:
return render(request, 'no_access.html')
else:
return render(request, 'staff_tags_list.html', context)
"""
create a new tag and store in the database
args = data from the form
return = redirect
"""
class AddTagView(SuccessMessageMixin, CreateView):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_staff:
return render(request, 'no_access.html')
return super(AddTagView, self).dispatch(request, *args, **kwargs)
form_class = TagsForm
model = Tags
template_name = "staff_add_form.html"
success_message = "%(name)s was created successfully"
def get_context_data(self, **kwargs):
context = super(AddTagView, self).get_context_data(**kwargs)
context.update({'pagename': "Add new Tag"})
return context
def get_success_url(self):
return reverse('staff:tagslist')
"""
modify a specific tag
args = id
return = redirect
"""
class EditTagView(SuccessMessageMixin, UpdateView):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_staff:
return render(request, 'no_access.html')
return super(EditTagView, self).dispatch(request, *args, **kwargs)
form_class = TagsForm
model = Tags
pk_url_kwarg = 'id'
template_name = "staff_add_form.html"
success_message = "%(name)s was updated successfully"
def get_context_data(self, **kwargs):
context = super(EditTagView, self).get_context_data(**kwargs)
context.update({'pagename': "Update Tag"})
return context
def get_success_url(self):
return reverse('staff:tagslist')
"""
delete a specific tag
args = id
return = redirect
"""
class DeleteTagView(SuccessMessageMixin, DeleteView):
def delete(self, request, id):
# delete an object and send a confirmation response
Tags.objects.get(pk = id).delete()
return HttpResponse('deleted!')
# model = Tags
# pk_url_kwarg = 'id'
# template_name = "confirm_delete.html"
# success_message = "item was deleted successfully"
# def get_success_url(self):
# return reverse('staff:tagslist')
| revaxl/library | staff/views.py | Python | mit | 7,621 |
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import threading
import time
import uuid
from io import StringIO, UnsupportedOperation
import zmq
from zmq.eventloop.ioloop import IOLoop
from .session import extract_header
from IPython.utils import py3compat
from IPython.utils.py3compat import unicode_type
from IPython.utils.warn import warn
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Process role markers returned by _check_mp_mode(): the forking (master)
# process publishes directly; a forked child forwards over a zmq pipe.
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# Stream classes
#-----------------------------------------------------------------------------
class OutStream(object):
    """A file like object that publishes the stream to a 0MQ PUB socket."""
    # Max number of queued subprocess messages drained per flush pass.
    _subprocess_flush_limit = 256
    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.05
    topic=None
    def __init__(self, session, pub_socket, name, pipe=True):
        # session: kernel Session used to frame outgoing 'stream' messages.
        # pub_socket: zmq PUB socket the stream is published on.
        # name: stream name (e.g. 'stdout'); also forms the zmq topic.
        # pipe: when True, open a PULL socket so forked subprocesses can
        #       forward their output back to this (master) process.
        self.encoding = 'UTF-8'
        self.session = session
        self.pub_socket = pub_socket
        self.name = name
        self.topic = b'stream.' + py3compat.cast_bytes(name)
        self.parent_header = {}
        self._new_buffer()
        self._buffer_lock = threading.Lock()
        # Remember which pid/thread created the stream so forks and helper
        # threads can be detected later.
        self._master_pid = os.getpid()
        self._master_thread = threading.current_thread().ident
        self._pipe_pid = os.getpid()
        self._pipe_flag = pipe
        if pipe:
            self._setup_pipe_in()
    def _setup_pipe_in(self):
        """setup listening pipe for subprocesses"""
        ctx = self.pub_socket.context
        # use UUID to authenticate pipe messages
        self._pipe_uuid = uuid.uuid4().bytes
        self._pipe_in = ctx.socket(zmq.PULL)
        self._pipe_in.linger = 0
        try:
            self._pipe_port = self._pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            warn("Couldn't bind IOStream to 127.0.0.1: %s" % e +
                "\nsubprocess output will be unavailable."
            )
            # Degrade gracefully: disable the pipe instead of failing.
            self._pipe_flag = False
            self._pipe_in.close()
            del self._pipe_in
            return
        self._pipe_poller = zmq.Poller()
        self._pipe_poller.register(self._pipe_in, zmq.POLLIN)
    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        self._pipe_pid = os.getpid()
        self._pipe_out = ctx.socket(zmq.PUSH)
        self._pipe_out_lock = threading.Lock()
        self._pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
    def _is_master_process(self):
        # True in the process that created this stream (no fork since).
        return os.getpid() == self._master_pid
    def _is_master_thread(self):
        # True in the thread that created this stream.
        return threading.current_thread().ident == self._master_thread
    def _have_pipe_out(self):
        # True if the child-side PUSH socket belongs to the current process.
        return os.getpid() == self._pipe_pid
    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            if not self._have_pipe_out():
                # First write after a fork: drop inherited buffer contents
                # (they belong to the parent) and open our own pipe.
                self._flush_buffer()
                # setup a new out pipe
                self._setup_pipe_out()
            return CHILD
    def set_parent(self, parent):
        # Adopt the header of the request message so published output is
        # attributed to the right cell/execution.
        self.parent_header = extract_header(parent)
    def close(self):
        self.pub_socket = None
    def _flush_from_subprocesses(self):
        """flush possible pub data from subprocesses into my buffer"""
        if not self._pipe_flag or not self._is_master_process():
            return
        for i in range(self._subprocess_flush_limit):
            if self._pipe_poller.poll(0):
                msg = self._pipe_in.recv_multipart()
                if msg[0] != self._pipe_uuid:
                    # Not authenticated with our UUID: ignore.
                    continue
                else:
                    self._buffer.write(msg[1].decode(self.encoding, 'replace'))
                    # this always means a flush,
                    # so reset our timer
                    self._start = 0
            else:
                break
    def _schedule_flush(self):
        """schedule a flush in the main thread
        only works with a tornado/pyzmq eventloop running
        """
        if IOLoop.initialized():
            IOLoop.instance().add_callback(self.flush)
        else:
            # no async loop, at least force the timer
            self._start = 0
    def flush(self):
        """trigger actual zmq send"""
        if self.pub_socket is None:
            raise ValueError(u'I/O operation on closed file')
        mp_mode = self._check_mp_mode()
        if mp_mode != CHILD:
            # we are master
            if not self._is_master_thread():
                # sub-threads must not trigger flush directly,
                # but at least they can schedule an async flush, or force the timer.
                self._schedule_flush()
                return
            self._flush_from_subprocesses()
            data = self._flush_buffer()
            if data:
                content = {u'name':self.name, u'data':data}
                msg = self.session.send(self.pub_socket, u'stream', content=content,
                                       parent=self.parent_header, ident=self.topic)
                if hasattr(self.pub_socket, 'flush'):
                    # socket itself has flush (presumably ZMQStream)
                    self.pub_socket.flush()
        else:
            # Forked child: push the buffered text to the master, tagged with
            # the shared UUID; wait briefly for delivery, then give up.
            with self._pipe_out_lock:
                string = self._flush_buffer()
                tracker = self._pipe_out.send_multipart([
                    self._pipe_uuid,
                    string.encode(self.encoding, 'replace'),
                ], copy=False, track=True)
                try:
                    tracker.wait(1)
                except:
                    pass
    def isatty(self):
        return False
    def __next__(self):
        raise IOError('Read not supported on a write only stream.')
    if not py3compat.PY3:
        # Python 2 iterator protocol alias.
        next = __next__
    def read(self, size=-1):
        raise IOError('Read not supported on a write only stream.')
    def readline(self, size=-1):
        raise IOError('Read not supported on a write only stream.')
    def fileno(self):
        raise UnsupportedOperation("IOStream has no fileno.")
    def write(self, string):
        # Buffer the text; actual sending happens in flush(), either when a
        # newline arrives in a subprocess or when flush_interval has elapsed.
        if self.pub_socket is None:
            raise ValueError('I/O operation on closed file')
        else:
            # Make sure that we're handling unicode
            if not isinstance(string, unicode_type):
                string = string.decode(self.encoding, 'replace')
            is_child = (self._check_mp_mode() == CHILD)
            self._buffer.write(string)
            if is_child:
                # newlines imply flush in subprocesses
                # mp.Pool cannot be trusted to flush promptly (or ever),
                # and this helps.
                if '\n' in string:
                    self.flush()
            # do we want to check subprocess flushes on write?
            # self._flush_from_subprocesses()
            current_time = time.time()
            if self._start < 0:
                # _start < 0 means the buffer was empty: arm the flush timer.
                self._start = current_time
            elif current_time - self._start > self.flush_interval:
                self.flush()
    def writelines(self, sequence):
        if self.pub_socket is None:
            raise ValueError('I/O operation on closed file')
        else:
            for string in sequence:
                self.write(string)
    def _flush_buffer(self):
        """clear the current buffer and return the current buffer data"""
        data = u''
        if self._buffer is not None:
            data = self._buffer.getvalue()
            self._buffer.close()
        self._new_buffer()
        return data
    def _new_buffer(self):
        # Fresh buffer; _start = -1 marks the flush timer as unarmed.
        self._buffer = StringIO()
        self._start = -1
| WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/kernel/zmq/iostream.py | Python | bsd-3-clause | 8,129 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, re, pprint, time, views
from lib import *
# Python 2.3 does not have 'set' in normal namespace.
# But it can be imported from 'sets'
try:
set()
except NameError:
from sets import Set as set
# Datastructures and functions needed before plugins can be loaded
loaded_with_language = False
# Load all view plugins
def load_plugins():
global loaded_with_language
if loaded_with_language == current_language:
return
config.declare_permission_section("bi", _("BI - Check_MK Business Intelligence"))
config.declare_permission("bi.see_all",
_("See all hosts and services"),
_("With this permission set, the BI aggregation rules are applied to all "
"hosts and services - not only those the user is a contact for. If you "
"remove this permissions then the user will see incomplete aggregation "
"trees with status based only on those items."),
[ "admin", "guest" ])
# This must be set after plugin loading to make broken plugins raise
# exceptions all the time and not only the first time (when the plugins
# are loaded).
loaded_with_language = current_language
# ____ _ _
# / ___|___ _ __ ___| |_ __ _ _ __ | |_ ___
# | | / _ \| '_ \/ __| __/ _` | '_ \| __/ __|
# | |__| (_) | | | \__ \ || (_| | | | | |_\__ \
# \____\___/|_| |_|___/\__\__,_|_| |_|\__|___/
#
# type of rule parameters
SINGLE = 'single'
MULTIPLE = 'multi'
# possible aggregated states
MISSING = -2
PENDING = -1
OK = 0
WARN = 1
CRIT = 2
UNKNOWN = 3
UNAVAIL = 4
service_state_names = { OK:_("OK"), WARN:_("WARN"), CRIT:_("CRIT"), UNKNOWN:_("UNKNOWN"), PENDING:_("PENDING"), UNAVAIL:_("UNAVAILABLE")}
host_state_names = { 0:_("UP"), 1:_("DOWN"), 2:_("UNREACHABLE") }
AGGR_HOST = 0
AGGR_MULTI = 1
# character that separates sites and hosts
SITE_SEP = '#'
# ____ _ _ _ _
# / ___|___ _ __ ___ _ __ (_) | __ _| |_(_) ___ _ __
# | | / _ \| '_ ` _ \| '_ \| | |/ _` | __| |/ _ \| '_ \
# | |__| (_) | | | | | | |_) | | | (_| | |_| | (_) | | | |
# \____\___/|_| |_| |_| .__/|_|_|\__,_|\__|_|\___/|_| |_|
# |_|
# format of a node
# {
# "type" : NT_LEAF, NT_RULE, NT_REMAINING,
# "reqhosts" : [ list of required hosts ],
# "hidden" : True if hidden
#
# SPECIAL KEYS FOR NT_LEAF:
# "host" : host specification,
# "service" : service name, missing for leaf type HOST_STATE
#
# SPECIAL KEYS FOR NT_RULE:
# "title" : title
# "func" : Name of aggregation function, e.g. "count!2!1"
# "nodes" : List of subnodes
# }
# Node types of the compiled tree (see the format comment block above).
NT_LEAF = 1
NT_RULE = 2
NT_REMAINING = 3
NT_PLACEHOLDER = 4 # temporary dummy entry needed for REMAINING
# global variables
g_cache = {} # per-user cache
g_config_information = None # for invalidating cache after config change
did_compilation = False # Is set to true if anything has been compiled
# Load the static configuration of all services and hosts (including tags)
# without state.
def load_services(cache, only_hosts):
    """Populate g_services / g_services_by_hostname from livestatus.

    g_services maps (site, host) -> (tags, services, childs, parents);
    g_services_by_hostname maps host -> [(site, entry), ...].
    """
    global g_services, g_services_by_hostname
    g_services = {}
    g_services_by_hostname = {}
    # TODO: At the moment the data is always refetched. This could really
    # be optimized. Maybe create a cache which fetches data for the given
    # list of hosts, puts it to a cache and then only fetch the additionally
    # needed information which are not cached yet in future requests
    # Create optional host filter
    filter_txt = 'Filter: custom_variable_names < _REALNAME\n' # drop summary hosts
    if only_hosts:
        # Only fetch the requested hosts
        # NOTE(review): this REPLACES the summary-host filter above instead
        # of appending to it -- confirm summary hosts are wanted here.
        host_filter = []
        for site, hostname in only_hosts:
            host_filter.append('Filter: name = %s\n' % hostname)
        filter_txt = ''.join(host_filter)
        filter_txt += "Or: %d\n" % len(host_filter)
    html.live.set_prepend_site(True)
    html.live.set_auth_domain('bi')
    data = html.live.query("GET hosts\n"
                           +filter_txt+
                           "Columns: name custom_variable_names custom_variable_values services childs parents\n")
    html.live.set_prepend_site(False)
    html.live.set_auth_domain('read')
    for site, host, varnames, values, svcs, childs, parents in data:
        vars = dict(zip(varnames, values))
        # Host tags live in the TAGS custom variable, space separated.
        tags = vars.get("TAGS", "").split(" ")
        entry = (tags, svcs, childs, parents)
        g_services[(site, host)] = entry
        g_services_by_hostname.setdefault(host, []).append((site, entry))
# Keep complete list of time stamps of configuration
# and start of each site. Unreachable sites are registered
# with 0.
def cache_needs_update():
    """Return the new config fingerprint if it changed, otherwise False."""
    fingerprint = [tuple(config.modification_timestamps)]
    for site_state in html.site_status.values():
        fingerprint.append(site_state.get("program_start", 0))
    if fingerprint == g_config_information:
        return False
    return fingerprint
def reset_cache_status():
    """Clear the per-request compilation bookkeeping flags."""
    global did_compilation, used_cache
    did_compilation = False
    used_cache = False
def reused_compilation():
    # True when this request was served from cache without compiling anything.
    return used_cache and not did_compilation
# Returns a sorted list of aggregation group names
def aggregation_groups():
    """Return all BI aggregation group names, sorted case-insensitively.

    In on-demand mode every configured group is listed; otherwise the full
    forest is compiled first and only groups with members are returned.
    """
    if config.bi_precompile_on_demand:
        # on demand: show all configured groups
        group_names = set([])
        for a in config.aggregations + config.host_aggregations:
            if type(a[0]) == list:
                group_names.update(a[0])
            else:
                group_names.add(a[0])
        group_names = list(group_names)
    else:
        # classic mode: precompile all and display only groups with members
        compile_forest(config.user_id)
        group_names = list(set([ group for group, trees in g_user_cache["forest"].items() if trees ]))
    # key= replaces the cmp= form (removed in Python 3); the resulting
    # ordering -- case-insensitive ascending -- is identical.
    return sorted(group_names, key=lambda g: g.lower())
def log(s):
    """Append one message to the BI compile log file (if logging is enabled)."""
    if compile_logging():
        # Use open() in a context manager instead of the old file(...) call:
        # the handle is closed deterministically instead of leaking until GC,
        # and file() does not exist on Python 3.
        with open(config.bi_compile_log, "a") as f:
            f.write(s)
# Precompile the forest of BI rules. Forest? A collection of trees.
# The compiled forest does not contain any regular expressions anymore.
# Everything is resolved. Sites, hosts and services are hardcoded. The
# aggregation functions are still left as names. That way the forest is
# printable (and storable in Python syntax to a file).
def compile_forest(user, only_hosts = None, only_groups = None):
    """Compile the configured BI aggregations into the per-user cache.

    The result lands in the global g_user_cache and is memoized per user
    in g_cache.  only_hosts (list of (site, host) pairs) and only_groups
    restrict the compilation in "precompile on demand" mode.  Users with
    the "bi.see_all" permission share a single cache entry.
    """
    global g_cache, g_user_cache
    global used_cache, did_compilation

    # Invalidate all cached compilations when the configuration or any
    # site's program start time changed since the last run.
    new_config_information = cache_needs_update()
    if new_config_information:
        log("Configuration has changed. Forcing recompile.\n")
        g_cache = {}
        global g_config_information
        g_config_information = new_config_information

    # OPTIMIZE: All users that have the permission bi.see_all
    # can use the same cache.
    if config.may("bi.see_all"):
        user = '<<<see_all>>>'

    def empty_user_cache():
        # Fresh, completely uncompiled cache structure for one user
        return {
            "forest" : {},
            "aggregations_by_hostname" : {},
            "host_aggregations" : {},
            "affected_hosts" : {},
            "affected_services": {},
            "compiled_hosts" : set([]),
            "compiled_groups" : set([]),
            "compiled_all" : False,
        }

    # Try to get data from per-user cache:
    # make sure, BI permissions have not changed since last time.
    # g_user_cache is a global variable for all succeeding functions, so
    # that they do not need to check the user again
    cache = g_cache.get(user)
    if cache:
        g_user_cache = cache
    else:
        # Initialize empty caching structure
        cache = empty_user_cache()
        g_user_cache = cache

    if g_user_cache["compiled_all"]:
        log('PID: %d - Already compiled everything\n' % os.getpid())
        used_cache = True
        return # In this case simply skip further compilations

    if not config.aggregations and not config.host_aggregations:
        return # nothing to do, BI not used

    # If we have previously only partly compiled and now there is no
    # filter, then throw away partly compiled data.
    if (cache["compiled_hosts"] or cache["compiled_groups"]) \
        and (not config.bi_precompile_on_demand \
        or (config.bi_precompile_on_demand and not only_groups and not only_hosts)):
        log("Invalidating incomplete cache, since we compile all now.\n")
        cache = empty_user_cache()
        g_user_cache = cache

    # Reduces a list of hosts by the already compiled hosts
    def to_compile(objects, what):
        todo = []
        for obj in objects:
            if obj not in cache['compiled_' + what]:
                todo.append(obj)
        return todo

    if only_hosts and cache['compiled_hosts']:
        # if only hosts is given and there are already compiled hosts
        # check whether or not hosts are not compiled yet
        only_hosts = to_compile(only_hosts, 'hosts')
        if not only_hosts:
            log('PID: %d - All requested hosts have already been compiled\n' % os.getpid())
            used_cache = True
            return # Nothing to do - everything is cached

    if only_groups and cache['compiled_groups']:
        only_groups = to_compile(only_groups, 'groups')
        if not only_groups:
            log('PID: %d - All requested groups have already been compiled\n' % os.getpid())
            used_cache = True
            return # Nothing to do - everything is cached

    # Set a flag that anything has been compiled in this call
    did_compilation = True

    # Load all (needed) services
    # The only_hosts variable is only set in "precompile on demand" mode to filter out
    # the needed hosts/services if possible. It is used in the load_services() function
    # to reduce the amount of hosts/services. Reducing the host/services leads to faster
    # compilation.
    load_services(cache, only_hosts)

    log("This request: User: %s, Only-Groups: %r, Only-Hosts: %s PID: %d\n"
        % (user, only_groups, only_hosts, os.getpid()))

    if compile_logging():
        before = time.time()
        num_new_host_aggrs = 0
        num_new_multi_aggrs = 0

    # When only_hosts is given only use the single host aggregations for further processing.
    # The only_hosts variable is only populated for single host tables.
    if only_hosts:
        aggr_list = [(AGGR_HOST, config.host_aggregations)]
    else:
        aggr_list = [(AGGR_MULTI, config.aggregations), (AGGR_HOST, config.host_aggregations)]

    single_affected_hosts = []
    for aggr_type, aggregations in aggr_list:
        for entry in aggregations:
            # Aggregations can be prefixed with DISABLED / HARD_STATES markers
            if entry[0] == config.DISABLED:
                continue

            if entry[0] == config.HARD_STATES:
                use_hard_states = True
                entry = entry[1:]
            else:
                use_hard_states = False

            if len(entry) < 3:
                raise MKConfigError(_("<h1>Invalid aggregation <tt>%s</tt></h1>"
                        "Must have at least 3 entries (has %d)") % (entry, len(entry)))

            # First element: one group name or a list of group names
            if type(entry[0]) == list:
                groups = entry[0]
            else:
                groups = [ entry[0] ]
            groups_set = set(groups)

            if only_groups and not groups_set.intersection(only_groups):
                log('Skip aggr (No group of the aggr has been requested: %r)\n' % groups)
                continue # skip not requested groups if filtered by groups

            if len(groups_set) == len(groups_set.intersection(cache['compiled_groups'])):
                log('Skip aggr (All groups have already been compiled\n')
                continue # skip if all groups have already been compiled

            # Compile the rule (and all FOREACH incarnations) into trees
            new_entries = compile_rule_node(aggr_type, entry[1:], 0)

            for this_entry in new_entries:
                remove_empty_nodes(this_entry)
                this_entry["use_hard_states"] = use_hard_states

            # Drop aggregations that became empty after pruning
            new_entries = [ e for e in new_entries if len(e["nodes"]) > 0 ]

            if compile_logging():
                if aggr_type == AGGR_HOST:
                    num_new_host_aggrs += len(new_entries)
                else:
                    num_new_multi_aggrs += len(new_entries)

            # enter new aggregations into dictionary for these groups
            for group in groups:
                if group in cache['compiled_groups']:
                    log('Skip aggr (group %s already compiled)\n' % group)
                    continue # the group has already been compiled completely

                if group not in cache['forest']:
                    cache['forest'][group] = new_entries
                else:
                    cache['forest'][group] += new_entries

                # Update several global speed-up indices
                for aggr in new_entries:
                    req_hosts = aggr["reqhosts"]

                    # Aggregations by last part of title (assumed to be host name)
                    name = aggr["title"].split()[-1]
                    cache["aggregations_by_hostname"].setdefault(name, []).append((group, aggr))

                    # All single-host aggregations looked up per host
                    # Only process the aggregations of hosts which are mentioned in only_hosts
                    if aggr_type == AGGR_HOST:
                        # In normal cases a host aggregation has only one req_hosts item, we could use
                        # index 0 here. But clusters (which are also allowed now) have all their nodes
                        # in the list of required nodes.
                        # Before the latest change this used the last item of the req_hosts. I think it
                        # would be better to register this for all hosts mentioned in req_hosts. Give it a try...
                        # ASSERT: len(req_hosts) == 1!
                        for host in req_hosts:
                            if not only_hosts or host in only_hosts:
                                cache["host_aggregations"].setdefault(host, []).append((group, aggr))

                                # construct a list of compiled single-host aggregations for cached registration
                                single_affected_hosts.append(host)

                    # Also all other aggregations that contain exactly one hosts are considered to
                    # be "single host aggregations"
                    elif len(req_hosts) == 1:
                        cache["host_aggregations"].setdefault(req_hosts[0], []).append((group, aggr))

                    # All aggregations containing a specific host
                    for h in req_hosts:
                        cache["affected_hosts"].setdefault(h, []).append((group, aggr))

                    # All aggregations containing a specific service
                    services = find_all_leaves(aggr)
                    for s in services: # triples of site, host, service
                        cache["affected_services"].setdefault(s, []).append((group, aggr))

    # Register compiled objects
    if only_hosts:
        cache['compiled_hosts'].update(single_affected_hosts)

    elif only_groups:
        cache['compiled_groups'].update(only_groups)
        cache['compiled_hosts'].update(single_affected_hosts)

    else:
        # The list of ALL hosts
        cache['compiled_hosts'] = set(g_services.keys())
        cache['compiled_groups'] = set(cache['forest'].keys())
        cache['compiled_all'] = True

    # Remember successful compile in cache
    g_cache[user] = cache

    if compile_logging():
        # Gather statistics for the compile log
        num_total_aggr = 0
        for grp, aggrs in cache['forest'].iteritems():
            num_total_aggr += len(aggrs)

        num_host_aggr = 0
        for grp, aggrs in cache['host_aggregations'].iteritems():
            num_host_aggr += len(aggrs)

        num_services = 0
        for key, val in g_services.iteritems():
            num_services += len(val[1])

        after = time.time()

        log("This request:\n"
            " User: %s, Only-Groups: %r, Only-Hosts: %s\n"
            " PID: %d, Processed %d services on %d hosts in %.3f seconds.\n"
            "\n"
            " %d compiled multi aggrs, %d compiled host aggrs, %d compiled groups\n"
            "Cache:\n"
            " Everything compiled: %r\n"
            " %d compiled multi aggrs, %d compiled host aggrs, %d compiled groups\n"
            "Config:\n"
            " Multi-Aggregations: %d, Host-Aggregations: %d\n"
            "\n"
            % (
                user, only_groups, only_hosts,
                os.getpid(),
                num_services, len(g_services_by_hostname),
                after - before,
                num_new_multi_aggrs, num_new_host_aggrs,
                only_groups and len(only_groups) or 0,
                cache['compiled_all'],
                num_total_aggr - num_host_aggr,
                num_host_aggr,
                len(cache['compiled_groups']),
                len(config.aggregations),
                len(config.host_aggregations),
            ))
def compile_logging():
    """Compile-time logging is active when a log file path is configured."""
    if config.bi_compile_log is None:
        return False
    return True
# Execute an aggregation rule, but prepare arguments
# and iterate FOREACH first
def compile_rule_node(aggr_type, calllist, lvl):
    """Compile one configured rule call into a list of aggregation trees.

    calllist: optional FOREACH_* marker plus its parameters, followed by
              (rulename, arglist) as the last two entries.
    lvl:      current nesting depth (0 for root nodes).

    For FOREACH rules the referenced rule is instantiated once per
    matching host/service, with $1$, $2$, ... replaced by match groups.
    """
    # Lookup rule source code
    rulename, arglist = calllist[-2:]
    what = calllist[0]
    if rulename not in config.aggregation_rules:
        raise MKConfigError(_("<h1>Invalid configuration in variable <tt>aggregations</tt></h1>"
                "There is no rule named <tt>%s</tt>. Available are: <tt>%s</tt>") %
                (rulename, "</tt>, </tt>".join(config.aggregation_rules.keys())))
    rule = config.aggregation_rules[rulename]

    # Execute FOREACH: iterate over matching hosts/services.
    # Create an argument list where $1$, $2$, ... are
    # substituted with matched strings for each match.
    if what in [
            config.FOREACH_HOST,
            config.FOREACH_CHILD,
            config.FOREACH_CHILD_WITH,
            config.FOREACH_PARENT,
            config.FOREACH_SERVICE ]:
        matches = find_matching_services(aggr_type, what, calllist[1:])
        new_elements = []
        handled_args = set([]) # avoid duplicate rule incarnations
        for match in matches:
            args = [ substitute_matches(a, match) for a in arglist ]
            if tuple(args) not in handled_args:
                new_elements += compile_aggregation_rule(aggr_type, rule, args, lvl)
                handled_args.add(tuple(args))
        return new_elements
    else:
        # Plain rule call without FOREACH iteration
        return compile_aggregation_rule(aggr_type, rule, arglist, lvl)
def find_matching_services(aggr_type, what, calllist):
    """Find all hosts/services matching a FOREACH_* specification.

    Returns a sorted list of match-group tuples, one per matching object.
    Depending on `what`, a tuple may additionally carry a child or parent
    host name and/or service match groups appended to the host match.
    """
    if what == config.FOREACH_CHILD_WITH: # extract foreach child specific parameters
        required_child_tags = calllist[0]
        child_re = calllist[1]
        calllist = calllist[2:]

    # honor list of host tags preceding the host_re
    if type(calllist[0]) == list:
        required_tags = calllist[0]
        calllist = calllist[1:]
    else:
        required_tags = []

    if len(calllist) == 0:
        raise MKConfigError(_("Invalid syntax in FOREACH_..."))

    host_re = calllist[0]
    if what in [ config.FOREACH_HOST, config.FOREACH_CHILD, config.FOREACH_CHILD_WITH, config.FOREACH_PARENT ]:
        service_re = config.HOST_STATE
    else:
        service_re = calllist[1]

    matches = set([])
    honor_site = SITE_SEP in host_re

    if host_re.startswith("^(") and host_re.endswith(")$"):
        # Exact host match
        middle = host_re[2:-2]
        if middle in g_services_by_hostname:
            entries = [ ((e[0], host_re), e[1]) for e in g_services_by_hostname[middle] ]
            host_re = "(.*)"
        # NOTE(review): if `middle` is not a known host name, `entries`
        # stays unbound and the loop below raises NameError - looks like
        # a latent bug; confirm whether this path can occur.

    elif not honor_site and not '*' in host_re and not '$' in host_re \
        and not '|' in host_re and not '[' in host_re:
        # Exact host match
        entries = [ ((e[0], host_re), e[1]) for e in g_services_by_hostname.get(host_re, []) ]

    else:
        # All services
        entries = g_services.items()

    # TODO: If the host is already known, this could be done more
    # efficiently than scanning through everything.
    for (site, hostname), (tags, services, childs, parents) in entries:
        # Skip already compiled hosts
        if aggr_type == AGGR_HOST and (site, hostname) in g_user_cache['compiled_hosts']:
            continue

        host_matches = match_host(hostname, host_re, tags, required_tags, site, honor_site)
        if host_matches != None:
            # Depending on the FOREACH flavor, extend the host match tuple
            # with each child / each matching child / each parent.
            if what == config.FOREACH_CHILD:
                list_of_matches = [ host_matches + (child,) for child in childs ]

            elif what == config.FOREACH_CHILD_WITH:
                list_of_matches = []
                for child_name in childs:
                    child_tags = g_services_by_hostname[child_name][0][1][0]
                    child_matches = match_host(child_name, child_re, child_tags, required_child_tags, site, honor_site)
                    if child_matches != None:
                        list_of_matches.append(host_matches + child_matches)

            elif what == config.FOREACH_PARENT:
                list_of_matches = [ host_matches + (parent,) for parent in parents ]

            else:
                list_of_matches = [ host_matches ]

            for matched_host in list_of_matches:
                if service_re == config.HOST_STATE:
                    matches.add(matched_host)
                else:
                    # Try the service regex against each service, using the
                    # global negative cache to skip known non-matches.
                    for service in services:
                        mo = (service_re, service)
                        if mo in service_nomatch_cache:
                            continue
                        m = regex(service_re).match(service)
                        if m:
                            svc_matches = tuple(m.groups())
                            matches.add(matched_host + svc_matches)
                        else:
                            service_nomatch_cache.add(mo)

    matches = list(matches)
    matches.sort()
    return matches
def do_match(reg, text):
    """Anchored-at-start regex match: return tuple of groups, or None."""
    matched = regex(reg).match(text)
    if matched:
        return tuple(matched.groups())
    return None
def substitute_matches(arg, match):
    """Replace $1$, $2$, ... in arg with the corresponding match groups."""
    result = arg
    for position, matched_text in enumerate(match, 1):
        result = result.replace("$%d$" % position, matched_text)
    return result
# Debugging function
def render_forest():
    """Dump every compiled aggregation tree of the current user as ASCII art."""
    for group, trees in g_user_cache["forest"].items():
        html.write("<h2>%s</h2>" % group)
        for tree in trees:
            ascii = render_tree(tree)
            # BUG FIX: the closing tag was written as a second opening <pre>,
            # leaving the preformatted block unclosed in the generated HTML
            html.write("<pre>\n" + ascii + "</pre>\n")
# Debugging function
def render_tree(node, indent = ""):
    """Return a multi-line ASCII description of one compiled node.

    Recurses into rule nodes; each level increases the indentation.
    """
    h = ""
    if node["type"] == NT_LEAF: # leaf node
        h += indent + "S/H/S: %s/%s/%s%s\n" % (node["host"][0], node["host"][1], node.get("service"),
        node.get("hidden") == True and " (hidden)" or "")
    else:
        # rule node: header, attributes, then all subnodes indented
        h += indent + "Aggregation:\n"
        indent += " "
        h += indent + "Description: %s\n" % node["title"]
        h += indent + "Hidden: %s\n" % (node.get("hidden") == True and "yes" or "no")
        h += indent + "Needed Hosts: %s\n" % " ".join([("%s/%s" % h_s) for h_s in node["reqhosts"]])
        h += indent + "Aggregation: %s\n" % node["func"]
        h += indent + "Nodes:\n"
        # NOTE(review): the loop variable shadows the `node` parameter;
        # harmless here since nothing after the loop reads the parameter.
        for node in node["nodes"]:
            h += render_tree(node, indent + " ")
        h += "\n"
    return h
# Compute dictionary of arguments from arglist and
# actual values
def make_arginfo(arglist, args):
    """Map formal argument names to (expansion, value) pairs.

    A leading 'a' in the formal name marks SINGLE expansion, a trailing
    's' marks MULTIPLE; anything else is a configuration error.
    """
    arginfo = {}
    for formal, actual in zip(arglist, args):
        if formal[0] == 'a':
            expansion = SINGLE
            formal = formal[1:]
        elif formal[-1] == 's':
            expansion = MULTIPLE
            formal = formal[:-1]
        else:
            raise MKConfigError(_("Invalid argument name %s. Must begin with 'a' or end with 's'.") % formal)
        arginfo[formal] = (expansion, actual)
    return arginfo
def find_all_leaves(node):
    """Recursively collect (site, host, service) triples of all leaf nodes."""
    node_type = node["type"]

    # leaf node: exactly one triple
    if node_type == NT_LEAF:
        site, host = node["host"]
        return [ (site, host, node.get("service") ) ]

    # rule node: concatenate the leaves of all subnodes
    if node_type == NT_RULE:
        leaves = []
        for subnode in node["nodes"]:
            leaves += find_all_leaves(subnode)
        return leaves

    # place holders contribute nothing
    return []
# Removes all empty nodes from the given rule tree
def remove_empty_nodes(node):
    """Recursively prune rule nodes that end up with no subnodes (in place)."""
    if node["type"] != NT_RULE:
        # simply return leaf nodes without action
        return node
    subnodes = node["nodes"]
    # depth first: clean up the children before judging them
    for subnode in subnodes:
        remove_empty_nodes(subnode)
    # iterate backwards so deleting by index stays valid; reversed(range(...))
    # avoids materializing the reversed index list that range(...)[::-1] built
    for i in reversed(range(len(subnodes))):
        if node_is_empty(subnodes[i]):
            del subnodes[i]
# Checks whether or not a rule node has no subnodes
def node_is_empty(node):
    """A rule node is empty when its subnode list is; leaves never are."""
    if node["type"] != NT_RULE: # leaf node
        return False
    return len(node["nodes"]) == 0
# Precompile one aggregation rule. This outputs a list of trees.
# The length of this list is current either 0 or 1
def compile_aggregation_rule(aggr_type, rule, args, lvl):
    """Compile one instantiation of an aggregation rule into a tree.

    rule: either the old tuple format (description, arglist, funcname,
          nodes) or the new dict format with keys title/params/
          aggregation/nodes.
    args: actual argument values substituted for $name$ placeholders.
    lvl:  nesting depth; root nodes (lvl 0) also resolve REMAINING refs.
    """
    # When compiling root nodes we essentially create
    # complete top-level aggregations. In that case we
    # need to deal with REMAINING-entries
    if lvl == 0:
        global g_remaining_refs
        g_remaining_refs = []

    # Convert new dictionary style rule into old tuple based
    # format
    if type(rule) == dict:
        rule = (
            rule.get("title", _("Untitled BI rule")),
            rule.get("params", []),
            rule.get("aggregation", "worst"),
            rule.get("nodes", [])
        )

    if len(rule) != 4:
        raise MKConfigError(_("<b>Invalid aggregation rule</b><br><br>"
                "Aggregation rules must contain four elements: description, argument list, "
                "aggregation function and list of nodes. Your rule has %d elements: "
                "<pre>%s</pre>") % (len(rule), pprint.pformat(rule)))

    if lvl == 50:
        raise MKConfigError(_("<b>Depth limit reached</b><br><br>"
                "The nesting level of aggregations is limited to 50. You either configured "
                "too many levels or built an infinite recursion. This happened in rule <pre>%s</pre>")
                % pprint.pformat(rule))

    description, arglist, funcname, nodes = rule

    # check arguments and convert into dictionary
    if len(arglist) != len(args):
        raise MKConfigError(_("<b>Invalid rule usage</b><br><br>"
                "The rule '%s' needs %d arguments: <tt>%s</tt><br>"
                "You've specified %d arguments: <tt>%s</tt>") % (
                description, len(arglist), repr(arglist), len(args), repr(args)))

    arginfo = dict(zip(arglist, args))
    inst_description = subst_vars(description, arginfo)

    elements = []

    for node in nodes:
        # Handle HIDDEN nodes. There are compiled just as normal nodes, but
        # will not be visible in the tree view later (at least not per default).
        # The HIDDEN flag needs just to be packed into the compilation and not
        # further handled here.
        if node[0] == config.HIDDEN:
            hidden = True
            node = node[1:]
        else:
            hidden = False

        # Each node can return more than one incarnation (due to regexes in
        # leaf nodes and FOREACH in rule nodes)
        if node[1] in [ config.HOST_STATE, config.REMAINING ]:
            new_elements = compile_leaf_node(subst_vars(node[0], arginfo), node[1])
            new_new_elements = []
            for entry in new_elements:
                # Postpone: remember reference to list where we need to add
                # remaining services of host
                if entry["type"] == NT_REMAINING:
                    # create unique pointer which we find later
                    placeholder = {"type" : NT_PLACEHOLDER, "id" : str(len(g_remaining_refs)) }
                    g_remaining_refs.append((entry["host"], elements, placeholder))
                    new_new_elements.append(placeholder)
                else:
                    new_new_elements.append(entry)
            new_elements = new_new_elements

        elif type(node[-1]) != list:
            if node[0] in [
                    config.FOREACH_HOST,
                    config.FOREACH_CHILD,
                    config.FOREACH_PARENT,
                    config.FOREACH_SERVICE ]:
                # Handle case that leaf elements also need to be iterable via FOREACH_HOST
                # 1: config.FOREACH_HOST
                # 2: (['waage'], '(.*)')
                calllist = []
                for n in node[1:-2]:
                    if type(n) in [ str, unicode, list ]:
                        n = subst_vars(n, arginfo)
                    calllist.append(n)
                matches = find_matching_services(aggr_type, node[0], calllist)
                new_elements = []
                handled_args = set([]) # avoid duplicate rule incarnations
                for match in matches:
                    # match groups become $1$, $2$, ... for the leaf spec
                    sub_arginfo = dict([(str(n+1), x) for (n,x) in enumerate(match)])
                    if tuple(args) + match not in handled_args:
                        new_elements += compile_leaf_node(subst_vars(node[-2], sub_arginfo), subst_vars(node[-1], sub_arginfo))
                        handled_args.add(tuple(args) + match)
                host_name, service_description = node[-2:]
            else:
                # This is a plain leaf node with just host/service
                new_elements = compile_leaf_node(subst_vars(node[0], arginfo), subst_vars(node[1], arginfo))

        else:
            # substitute our arguments in rule arguments
            # rule_args:
            # ['$1$']
            # rule_parts:
            # (<class _mp_84b7bd024cff73bf04ba9045f980becb.FOREACH_HOST at 0x7f03600dc8d8>, ['waage'], '(.*)', 'host')
            rule_args = [ subst_vars(a, arginfo) for a in node[-1] ]
            rule_parts = tuple([ subst_vars(part, arginfo) for part in node[:-1] ])
            new_elements = compile_rule_node(aggr_type, rule_parts + (rule_args,), lvl + 1)

        if hidden:
            for element in new_elements:
                element["hidden"] = True

        elements += new_elements

    # The hosts this aggregation depends on = union over all subnodes
    needed_hosts = set([])
    for element in elements:
        needed_hosts.update(element.get("reqhosts", []))

    aggregation = { "type" : NT_RULE,
                    "reqhosts" : needed_hosts,
                    "title" : inst_description,
                    "func" : funcname,
                    "nodes" : elements}

    # Handle REMAINING references, if we are a root node
    if lvl == 0:
        for hostspec, ref, placeholder in g_remaining_refs:
            new_entries = find_remaining_services(hostspec, aggregation)
            for entry in new_entries:
                aggregation['reqhosts'].update(entry['reqhosts'])
            # splice the computed leaf nodes in where the placeholder sat
            where_to_put = ref.index(placeholder)
            ref[where_to_put:where_to_put+1] = new_entries

    aggregation['reqhosts'] = list(aggregation['reqhosts'])

    return [ aggregation ]
def find_remaining_services(hostspec, aggregation):
    """Create leaf nodes for all services of hostspec not used in aggregation."""
    tags, all_services, childs, parents = g_services[hostspec]

    # Start with every service of the host, then discard those that are
    # already covered by some leaf of the aggregation.
    unused_services = set(all_services)
    for site, host, service in find_all_leaves(aggregation):
        if (site, host) == hostspec:
            unused_services.discard(service)

    return [ {
        "type"     : NT_LEAF,
        "host"     : hostspec,
        "reqhosts" : [hostspec],
        "service"  : service,
        "title"    : "%s - %s" % (hostspec[1], service)}
        for service in sorted(unused_services) ]
# Helper function that finds all occurrances of a variable
# enclosed with $ and $. Returns a list of positions.
def find_variables(pattern, varname):
    """Return all positions where $varname$ occurs in pattern."""
    token = '$' + varname + '$'
    positions = []
    pos = pattern.find(token)
    while pos >= 0:
        positions.append(pos)
        pos = pattern.find(token, pos + 1)
    return positions
# replace variables in a string
def subst_vars(pattern, arginfo):
    """Replace every $name$ occurrence in pattern (or a list of patterns)."""
    if type(pattern) == list:
        return [ subst_vars(element, arginfo) for element in pattern ]
    for name, value in arginfo.items():
        # only plain strings can carry $...$ placeholders
        if type(pattern) in (str, unicode):
            pattern = pattern.replace('$' + name + '$', value)
    return pattern
def match_host_tags(have_tags, required_tags):
    """Check a tag list against requirements; '!tag' means tag must be absent."""
    for required in required_tags:
        negate = required.startswith('!')
        if negate:
            required = required[1:]
        # fail when presence equals the negation flag
        if (required in have_tags) == negate:
            return False
    return True
def match_host(hostname, host_re, tags, required_tags, site, honor_site):
    """Match one host against tag requirements and a host regex.

    Returns the tuple of regex match groups on success, None otherwise.
    '(.*)' is short-circuited to match any host.
    """
    if not match_host_tags(tags, required_tags):
        return None

    if host_re == '(.*)':
        return (hostname, )

    # Force a '$' anchor at the end. Users might be surprised
    # to get a prefix match on host names. This is almost never what
    # they want. For services this is useful, however.
    anchored = host_re if host_re.endswith("$") else host_re + "$"

    # In order to distinguish hosts with the same name on different
    # sites we prepend the site to the host name. If the host specification
    # does not contain the site separator - though - we ignore the site
    # and match the rule for all sites.
    subject = "%s%s%s" % (site, SITE_SEP, hostname) if honor_site else hostname
    return do_match(anchored, subject)
def compile_leaf_node(host_re, service_re = config.HOST_STATE):
    """Compile a host/service leaf spec into a list of leaf node dicts.

    host_re may be a regex, '@all', a '...|@all' tag spec, or an exact
    host name (fast path).  service_re may be a regex, HOST_STATE (host
    state leaf) or REMAINING (placeholder resolved later by the root).
    """
    found = []
    honor_site = SITE_SEP in host_re

    # Fast path: no regex meta characters and no site prefix -> direct
    # lookup by host name instead of scanning all hosts
    if not honor_site and not '*' in host_re and not '$' in host_re \
        and not '|' in host_re and '[' not in host_re:
        entries = [ ((e[0], host_re), e[1]) for e in g_services_by_hostname.get(host_re, []) ]
    else:
        entries = g_services.items()

    # TODO: If we already know the host we deal with, we could avoid this loop
    for (site, hostname), (tags, services, childs, parents) in entries:
        # If host ends with '|@all', we need to check host tags instead
        # of regexes.
        if host_re.endswith('|@all'):
            # NOTE(review): match_host_tags() is declared as
            # (have_tags, required_tags); here a *string* (host_re without
            # the '|@all' suffix) is passed as have_tags and the host's tag
            # list as required_tags, so the string is iterated per character.
            # This looks like swapped/odd arguments - confirm intent.
            if not match_host_tags(host_re[:-5], tags):
                continue
        elif host_re != '@all':
            # For regex to have '$' anchor for end. Users might be surprised
            # to get a prefix match on host names. This is almost never what
            # they want. For services this is useful, however.
            if host_re.endswith("$"):
                anchored = host_re
            else:
                anchored = host_re + "$"

            # In order to distinguish hosts with the same name on different
            # sites we prepend the site to the host name. If the host specification
            # does not contain the site separator - though - we ignore the site
            # an match the rule for all sites.
            if honor_site:
                if not regex(anchored).match("%s%s%s" % (site, SITE_SEP, hostname)):
                    continue
            else:
                if not regex(anchored).match(hostname):
                    continue

        if service_re == config.HOST_STATE:
            # leaf reflecting the host state itself
            found.append({"type"     : NT_LEAF,
                          "reqhosts" : [(site, hostname)],
                          "host"     : (site, hostname),
                          "title"    : hostname})

        elif service_re == config.REMAINING:
            # placeholder: resolved into "all unused services" at root level
            found.append({"type"     : NT_REMAINING,
                          "reqhosts" : [(site, hostname)],
                          "host"     : (site, hostname)})

        else:
            # found.append({"type" : NT_LEAF,
            # "reqhosts" : [(site, hostname)],
            # "host" : (site, hostname),
            # "service" : "FOO",
            # "title" : "Foo bar",
            # })
            # continue
            # one leaf per service matching the service regex; the global
            # negative cache skips (regex, service) pairs known not to match
            for service in services:
                mo = (service_re, service)
                if mo in service_nomatch_cache:
                    continue
                m = regex(service_re).match(service)
                if m:
                    found.append({"type"     : NT_LEAF,
                                  "reqhosts" : [(site, hostname)],
                                  "host"     : (site, hostname),
                                  "service"  : service,
                                  "title"    : "%s - %s" % (hostname, service)} )
                else:
                    service_nomatch_cache.add(mo)

    found.sort()
    return found
service_nomatch_cache = set([])
# _____ _ _
# | ____|_ _____ ___ _ _| |_(_) ___ _ __
# | _| \ \/ / _ \/ __| | | | __| |/ _ \| '_ \
# | |___ > < __/ (__| |_| | |_| | (_) | | | |
# |_____/_/\_\___|\___|\__,_|\__|_|\___/|_| |_|
#
# + services + states
# multisite.d/*.mk =========> compiled tree ========> executed tree
# compile execute
# Format of executed tree:
# leaf: ( state, assumed_state, compiled_node )
# rule: ( state, assumed_state, compiled_node, nodes )
# Format of state and assumed_state:
# { "state" : OK, WARN ...
# "output" : aggregated output or service output }
# Execution of the trees. Returns a tree object reflecting
# the states of all nodes
def execute_tree(tree, status_info = None):
    """Execute one compiled aggregation tree, fetching status if not given."""
    use_hard_states = tree["use_hard_states"]
    if status_info is None:
        # Fetch current state for every host this tree depends on
        status_info = get_status_info(tree["reqhosts"])
    return execute_node(tree, status_info, use_hard_states)
def execute_node(node, status_info, use_hard_states):
    """Dispatch execution to the leaf or rule node implementation."""
    if node["type"] == NT_LEAF:
        return execute_leaf_node(node, status_info, use_hard_states)
    return execute_rule_node(node, status_info, use_hard_states)
def execute_leaf_node(node, status_info, use_hard_states):
    """Compute the state of one leaf (host-state or service leaf).

    Returns (state, assumed_state, node) where each state is a dict with
    keys state/output/in_downtime/acknowledged and assumed_state is None
    unless the user configured an assumption for this object.
    """
    site, host = node["host"]
    service = node.get("service")

    # Get current state of host and services
    status = status_info.get((site, host))
    if status == None:
        return ({
            "state"        : MISSING,
            "output"       : _("Host %s not found") % host,
            "in_downtime"  : False,
            "acknowledged" : False,
        }, None, node)

    host_state, host_hard_state, host_output, host_in_downtime, host_acknowledged, service_state = status

    # Get state assumption from user
    if service:
        key = (site, host, service)
    else:
        key = (site, host)
    state_assumption = g_assumptions.get(key)

    # assemble state
    if service:
        for entry in service_state: # list of all services of that host
            if entry[0] == service:
                state, has_been_checked, output, hard_state, attempt, max_attempts, downtime_depth, acknowledged = entry[1:9]
                if has_been_checked == 0:
                    output = _("This service has not been checked yet")
                    state = PENDING
                if use_hard_states:
                    st = hard_state
                else:
                    st = state
                state = {
                    "state"        : st,
                    "output"       : output,
                    "in_downtime"  : downtime_depth > 0,
                    # normalize livestatus value to a real bool
                    "acknowledged" : not not acknowledged,
                }
                if state_assumption != None:
                    assumed_state = {
                        "state"        : state_assumption,
                        "output"       : _("Assumed to be %s") % service_state_names[state_assumption],
                        "in_downtime"  : downtime_depth > 0,
                        "acknowledged" : not not acknowledged,
                    }
                else:
                    assumed_state = None
                return (state, assumed_state, node)

        # Service not found in the host's service list
        return ({
            "state"        : MISSING,
            "output"       : _("This host has no such service"),
            "in_downtime"  : False,
            "acknowledged" : False,
        }, None, node)

    else:
        # Host-state leaf
        if use_hard_states:
            st = host_hard_state
        else:
            st = host_state
        # Map the numeric host state to BI state constants; DOWN (1) is
        # treated as CRIT, UNREACHABLE (2) as UNKNOWN, -1 as PENDING.
        aggr_state = {0:OK, 1:CRIT, 2:UNKNOWN, -1:PENDING}[st]
        state = {
            "state"        : aggr_state,
            "output"       : host_output,
            "in_downtime"  : host_in_downtime,
            "acknowledged" : host_acknowledged,
        }
        if state_assumption != None:
            assumed_state = {
                "state"        : state_assumption,
                "output"       : _("Assumed to be %s") % host_state_names[state_assumption],
                "in_downtime"  : host_in_downtime,
                "acknowledged" : host_acknowledged,
            }
        else:
            assumed_state = None
        return (state, assumed_state, node)
def execute_rule_node(node, status_info, use_hard_states):
    """Execute a rule node: run all subnodes, then aggregate their states.

    Returns (state, assumed_state, node, subtrees).  Downtime and
    acknowledgement are computed by re-running the aggregation function
    over specially prepared state lists (downtimes as CRIT, acked
    problems as OK).
    """
    # get aggregation function
    funcspec = node["func"]
    parts = funcspec.split('!')
    funcname = parts[0]
    funcargs = parts[1:]
    func = config.aggregation_functions.get(funcname)
    if not func:
        raise MKConfigError(_("Undefined aggregation function '%s'. Available are: %s") %
                (funcname, ", ".join(config.aggregation_functions.keys())))

    # prepare information for aggregation function
    subtrees = []
    node_states = []
    assumed_states = []
    downtime_states = []
    ack_states = [] # Needed for computing the acknowledgement of non-OK nodes
    one_assumption = False
    for n in node["nodes"]:
        result = execute_node(n, status_info, use_hard_states) # state, assumed_state, node [, subtrees]
        subtrees.append(result)

        # Assume items in downtime as CRIT when computing downtime state
        downtime_states.append(({"state": result[0]["in_downtime"] and 2 or 0, "output" : ""}, result[2]))

        # Assume non-OK nodes that are acked as OK
        if result[0]["acknowledged"]:
            acked_state = 0
        else:
            acked_state = result[0]["state"]
        ack_states.append(({"state": acked_state, "output" : ""}, result[2]))

        node_states.append((result[0], result[2]))
        if result[1] != None:
            assumed_states.append((result[1], result[2]))
            one_assumption = True
        else:
            # no assumption, take real state into assumption array
            assumed_states.append(node_states[-1])

    downtime_state = func(*([downtime_states] + funcargs))
    state = func(*([node_states] + funcargs))
    state["in_downtime"] = downtime_state["state"] >= 2

    if state["state"] > 0: # Non-OK-State -> compute acknowledgedment
        ack_state = func(*([ack_states] + funcargs))
        state["acknowledged"] = ack_state["state"] == 0 # would be OK if acked problems would be OK
    else:
        state["acknowledged"] = False

    if one_assumption:
        # at least one subnode has an assumption -> aggregate assumed states too
        assumed_state = func(*([assumed_states] + funcargs))
        assumed_state["in_downtime"] = state["in_downtime"]
        assumed_state["acknowledged"] = state["acknowledged"]
    else:
        assumed_state = None

    return (state, assumed_state, node, subtrees)
# Get all status information we need for the aggregation from
# a known lists of lists (list of site/host pairs)
def get_status_info(required_hosts):
    """Query livestatus for the state of the given (site, host) pairs.

    Returns a dict mapping (site, host) to the tuple of remaining
    queried columns.
    """
    # Query each site only for hosts that that site provides
    site_hosts = {}
    for site, host in required_hosts:
        site_hosts.setdefault(site, []).append(host)

    tuples = []
    for site, hosts in site_hosts.items():
        filter_txt = "".join([ "Filter: name = %s\n" % host for host in hosts ])
        if len(hosts) > 1:
            # combine the single name filters with a logical OR
            filter_txt += "Or: %d\n" % len(hosts)
        html.live.set_auth_domain('bi')
        data = html.live.query(
            "GET hosts\n"
            "Columns: name state hard_state plugin_output scheduled_downtime_depth acknowledged services_with_fullstate\n"
            + filter_txt)
        html.live.set_auth_domain('read')
        tuples += [ ((site, row[0]), row[1:]) for row in data ]

    return dict(tuples)
# This variant of the function is configured not with a list of
# hosts but with a livestatus filter header and a list of columns
# that need to be fetched in any case
def get_status_info_filtered(filter_header, only_sites, limit, add_columns, fetch_parents = True, bygroup=False):
    """Fetch host status rows matching a livestatus filter header.

    Returns a list of dicts keyed by column name (plus "site"). When
    fetch_parents is set, the parents of the matched hosts are fetched
    with a second query as well - needed so cluster hosts (which have
    their nodes as parents) work with host_aggregations in on-demand
    compile mode.
    """
    columns = [ "name", "state", "hard_state", "plugin_output", "scheduled_downtime_depth",
                "acknowledged", "services_with_fullstate", "parents" ] + add_columns

    query = "GET hosts%s\n" % (bygroup and "bygroup" or "")
    query += "Columns: " + (" ".join(columns)) + "\n"
    query += filter_header

    if config.debug_livestatus_queries \
        and html.output_format == "html" and 'W' in html.display_options:
        html.write('<div class="livestatus message" onmouseover="this.style.display=\'none\';">'
                   '<tt>%s</tt></div>\n' % (query.replace('\n', '<br>\n')))

    html.live.set_only_sites(only_sites)
    html.live.set_prepend_site(True)
    html.live.set_auth_domain('bi')
    data = html.live.query(query)
    html.live.set_prepend_site(False)
    html.live.set_only_sites(None)
    html.live.set_auth_domain('read')

    headers = [ "site" ] + columns
    hostnames = [ row[1] for row in data ]
    rows = [ dict(zip(headers, row)) for row in data]

    # on demand compile: if parents have been found, also fetch data of the parents.
    # This is needed to allow cluster hosts (which have the nodes as parents) in the
    # host_aggregation construct.
    if fetch_parents:
        parent_filter = []
        for row in data:
            # BUG FIX: the site is prepended to each row, so "parents" sits at
            # index 8; index 7 is services_with_fullstate (a list of tuples)
            parent_filter += [ 'Filter: name = %s\n' % p for p in row[8] ]
        if parent_filter: # guard: an empty "Or: 0" header would be invalid
            parent_filter_txt = ''.join(parent_filter)
            parent_filter_txt += 'Or: %d\n' % len(parent_filter)
            # BUG FIX: the recursive call used to pass filter_header again,
            # re-fetching the same hosts instead of the parents; it must use
            # the parent name filter built above.
            for row in get_status_info_filtered(parent_filter_txt, only_sites, limit,
                                                add_columns, False, bygroup):
                if row['name'] not in hostnames:
                    rows.append(row)
    return rows
# _ _____ _ _
# / \ __ _ __ _ _ __ | ___| _ _ __ ___| |_(_) ___ _ __ ___
# / _ \ / _` |/ _` | '__| | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|
# / ___ \ (_| | (_| | | _ | _|| |_| | | | | (__| |_| | (_) | | | \__ \
# /_/ \_\__, |\__, |_|(_) |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
# |___/ |___/
# API for aggregation functions
# it is called with at least one argument: a list of node infos.
# Each node info is a pair of the node state and the compiled node information.
# The node state is a dictionary with at least "state" and "output", where
# "state" is the Nagios state. It is allowed to place arbitrary additional
# information to the array, e.g. downtime & acknowledgement information.
# The compiled node information is a dictionary as created by the rule
# compiler. It contains "type" (NT_LEAF, NT_RULE), "reqhosts" and "title". For rule
# node it contains also "func". For leaf nodes it contains
# host" and (if not a host leaf) "service".
#
# The aggregation function must return one state dictionary containing
# at least "state" and "output".
# Function for sorting states. Pending should be slightly
# worst then OK. CRIT is worse than UNKNOWN.
def state_weight(s):
    """Map a BI state to a sortable badness weight.

    PENDING is ranked slightly worse than OK, and CRIT worse than
    UNKNOWN; all other states keep their plain numeric value.
    """
    special_weights = {CRIT: 10.0, PENDING: 0.5}
    if s in special_weights:
        return special_weights[s]
    return float(s)
def x_best_state(l, x):
    """Return the x'th best state out of the state list *l*.

    A negative *x* selects from the worst end instead.  When fewer than
    abs(x) states are available, the last one in sort order is used.
    """
    ranked = sorted((state_weight(s), s) for s in l)
    if x < 0:
        ranked.reverse()
    index = min(abs(x), len(ranked))
    return ranked[index - 1][1]
def aggr_nth_state(nodelist, n, worst_state, ignore_states = None):
    # Pick the n'th best (n > 0) or n'th worst (n < 0) state out of the
    # subnode states and cap the result at worst_state.
    # nodelist is a list of (state-dict, compiled-node) pairs.
    states = [ i[0]["state"] for i in nodelist if not ignore_states or i[0]["state"] not in ignore_states ]
    # In case of the ignored states it might happen that the states list is empty. Use the
    # OK state in this case.
    if not states:
        state = OK
    else:
        state = x_best_state(states, n)
    # limit to worst state
    if state_weight(state) > state_weight(worst_state):
        state = worst_state
    return { "state" : state, "output" : "" }
def aggr_worst(nodes, n = 1, worst_state = CRIT, ignore_states = None):
    # Aggregation "worst": the n'th worst subnode state, capped at worst_state.
    return aggr_nth_state(nodes, -int(n), int(worst_state), ignore_states)
def aggr_best(nodes, n = 1, worst_state = CRIT, ignore_states = None):
    # Aggregation "best": the n'th best subnode state, capped at worst_state.
    return aggr_nth_state(nodes, int(n), int(worst_state), ignore_states)
# Make the standard aggregation functions referencable by name in BI rules.
config.aggregation_functions["worst"] = aggr_worst
config.aggregation_functions["best"] = aggr_best
def aggr_countok_convert(num, count):
    """Convert a count_ok threshold spec into an absolute node count.

    *num* may be an absolute count (e.g. 2 or "2") or a percentage
    string (e.g. "70%"), which is resolved relative to *count*.
    """
    spec = str(num)
    if spec.endswith('%'):
        return int(spec[:-1]) / 100.0 * count
    return int(num)
def aggr_countok(nodes, needed_for_ok=2, needed_for_warn=1):
    """Aggregate by counting OK subnodes against two thresholds.

    The thresholds may be absolute counts or percentage strings and are
    resolved against the number of non-PENDING nodes.  If every node is
    PENDING, the aggregate is PENDING as well (and not CRIT).
    """
    states = [node[0]["state"] for node in nodes]
    num_ok = len([s for s in states if s == 0])
    num_nodes = num_ok + len([s for s in states if s > 0])
    # PENDING nodes (negative state) do not take part in the counting at
    # all.  Example: with a 50% threshold and 10 nodes that are all
    # PENDING, the outcome must be PENDING, not CRIT.
    if num_nodes == 0:
        return {"state": -1, "output": ""}
    # Thresholds can be specified absolute (e.g. '2') or as a
    # percentage (e.g. '70%').
    if num_ok >= aggr_countok_convert(needed_for_ok, num_nodes):
        return {"state": 0, "output": ""}
    if num_ok >= aggr_countok_convert(needed_for_warn, num_nodes):
        return {"state": 1, "output": ""}
    return {"state": 2, "output": ""}
# Register the count_ok aggregation for use in BI rule definitions.
config.aggregation_functions["count_ok"] = aggr_countok
def aggr_running_on(nodes, regex):
    # Aggregation "running_on": parse the first subnode's output with
    # *regex* to find out which host a clustered service currently runs
    # on, then use the state of that host's subnode as the aggregate.
    first_check = nodes[0]
    # extract hostname we run on
    mo = re.match(regex, first_check[0]["output"])
    # if not found, then do normal aggregation with 'worst'
    if not mo or len(mo.groups()) == 0:
        state = config.aggregation_functions['worst'](nodes[1:])
        state["output"] += _(", running nowhere")
        return state
    running_on = mo.groups()[0]
    for state, node in nodes[1:]:
        for site, host in node["reqhosts"]:
            if host == running_on:
                state["output"] += _(", running on %s") % running_on
                return state
    # host we run on not found. Strange...
    return {"state": UNKNOWN, "output": _("running on unknown host '%s'") % running_on }
# Register the running_on aggregation for use in BI rule definitions.
config.aggregation_functions['running_on'] = aggr_running_on
# ____
# | _ \ __ _ __ _ ___ ___
# | |_) / _` |/ _` |/ _ \/ __|
# | __/ (_| | (_| | __/\__ \
# |_| \__,_|\__, |\___||___/
# |___/
# Just for debugging
def page_debug():
    # Debug page: compile the current user's BI forest and render it raw.
    compile_forest(config.user_id)
    html.header("BI Debug")
    render_forest()
    html.footer()
# Just for debugging, as well
def page_all():
    # Debug page: execute and dump every aggregation tree of every group.
    html.header("All")
    compile_forest(config.user_id)
    load_assumptions()
    for group, trees in g_user_cache["forest"].items():
        html.write("<h2>%s</h2>" % group)
        for inst_args, tree in trees:
            state = execute_tree(tree)
            debug(state)
    html.footer()
def ajax_set_assumption():
    # AJAX endpoint: set or clear the assumed state of one host/service.
    # Expects the HTML variables "site", "host", optionally "service",
    # and "state"; a state of 'none' removes the assumption.
    site = html.var_utf8("site")
    host = html.var_utf8("host")
    service = html.var_utf8("service")
    if service:
        key = (site, host, service)
    else:
        key = (site, host)
    state = html.var("state")
    load_assumptions()
    if state == 'none':
        # Tolerate stale/duplicate requests: clearing an assumption that
        # does not exist must not raise a KeyError.
        g_assumptions.pop(key, None)
    else:
        g_assumptions[key] = int(state)
    save_assumptions()
def ajax_save_treestate():
    # AJAX endpoint: persist the open/closed state of one BI tree node.
    # The "path" variable has the form "<expansion level>:<node path>".
    path_id = html.var_utf8("path")
    current_ex_level, path = path_id.split(":", 1)
    current_ex_level = int(current_ex_level)
    saved_ex_level = load_ex_level()
    # A changed expansion level invalidates all previously stored states.
    if saved_ex_level != current_ex_level:
        html.set_tree_states('bi', {})
    html.set_tree_state('bi', path, html.var("state") == "open")
    html.save_tree_states()
    save_ex_level(current_ex_level)
def ajax_render_tree():
    # AJAX endpoint: render one aggregation tree (identified by group and
    # title) as foldable HTML, e.g. for lazy expansion of closed subtrees.
    aggr_group = html.var_utf8("group")
    reqhosts = [ tuple(sitehost.split('#')) for sitehost in html.var("reqhosts").split(',') ]
    aggr_title = html.var_utf8("title")
    omit_root = not not html.var("omit_root")
    boxes = not not html.var("boxes")
    only_problems = not not html.var("only_problems")
    # Make sure that BI aggregates are available
    if config.bi_precompile_on_demand:
        compile_forest(config.user_id, only_hosts = reqhosts, only_groups = [ aggr_group ])
    else:
        compile_forest(config.user_id)
    # Load current assumptions
    load_assumptions()
    # Now look for our aggregation
    if aggr_group not in g_user_cache["forest"]:
        raise MKGeneralException(_("Unknown BI Aggregation group %s. Available are: %s") % (
            aggr_group, ", ".join(g_user_cache["forest"].keys())))
    trees = g_user_cache["forest"][aggr_group]
    for tree in trees:
        if tree["title"] == aggr_title:
            row = create_aggregation_row(tree)
            row["aggr_group"] = aggr_group
            # TODO: pass omit_root, boxes, only_problems as HTML variables
            tdclass, htmlcode = render_tree_foldable(row, boxes=boxes, omit_root=omit_root,
                                                     expansion_level=load_ex_level(), only_problems=only_problems, lazy=False)
            html.write(htmlcode)
            return
    raise MKGeneralException(_("Unknown BI Aggregation %s") % aggr_title)
def render_tree_foldable(row, boxes, omit_root, expansion_level, only_problems, lazy):
    # Render one aggregation tree as foldable HTML.
    #
    # row             -- aggregation row as built by create_aggregation_row()
    # boxes           -- render as "BI boxes" instead of an indented tree
    # omit_root       -- suppress rendering of the root node itself
    # expansion_level -- number of levels that start in the "open" state
    # only_problems   -- drop subtrees whose effective state is OK/PENDING
    # lazy            -- omit the HTML of closed subtrees (loaded on demand)
    #
    # Returns a pair of (td css class, html code).
    saved_expansion_level = load_ex_level()
    treestate = html.get_tree_states('bi')
    # A changed expansion level invalidates all stored open/closed states.
    if expansion_level != saved_expansion_level:
        treestate = {}
        html.set_tree_states('bi', treestate)
        html.save_tree_states()
    def render_subtree(tree, path, show_host):
        # Leaf nodes are 3-tuples, rule nodes 4-tuples (children in tree[3]).
        is_leaf = len(tree) == 3
        path_id = "/".join(path)
        is_open = treestate.get(path_id)
        if is_open == None:
            is_open = len(path) <= expansion_level
        # Make sure that in case of BI Boxes (omit root) the root level is *always* visible
        if not is_open and omit_root and len(path) == 1:
            is_open = True
        h = ""
        state = tree[0]
        omit_content = lazy and not is_open
        mousecode = 'onclick="bi_toggle_%s(this, %d);" ' % (boxes and "box" or "subtree", omit_content)
        # Variant: BI-Boxes
        if boxes:
            # Check if we have an assumed state: comparing assumed state (tree[1]) with state (tree[0])
            if tree[1] and tree[0] != tree[1]:
                addclass = " " + _("assumed")
                effective_state = tree[1]
            else:
                addclass = ""
                effective_state = tree[0]
            if is_leaf:
                leaf = "leaf"
                mc = ""
            else:
                leaf = "noleaf"
                mc = mousecode
            omit = omit_root and len(path) == 1
            if not omit:
                h += '<span id="%d:%s" %s class="bibox_box %s %s state state%s%s">' % (
                    expansion_level or 0, path_id, mc, leaf, is_open and "open" or "closed", effective_state["state"], addclass)
                if is_leaf:
                    h += aggr_render_leaf(tree, show_host, bare = True) # .replace(" ", " ")
                else:
                    # NOTE(review): the replace() target below was probably the
                    # '&nbsp;' HTML entity and may have been mangled -- verify.
                    h += tree[2]["title"].replace(" ", " ")
                h += '</span> '
            if not is_leaf and not omit_content:
                h += '<span class="bibox" style="%s">' % ((not is_open and not omit) and "display: none;" or "")
                parts = [] # NOTE(review): apparently unused -- candidate for removal
                for node in tree[3]:
                    new_path = path + [node[2]["title"]]
                    h += render_subtree(node, new_path, show_host)
                h += '</span>'
            return h
        # Variant: foldable trees
        else:
            if is_leaf: # leaf
                return aggr_render_leaf(tree, show_host, bare = boxes)
            h += '<span class=title>'
            is_empty = len(tree[3]) == 0
            if is_empty:
                style = ''
                mc = ''
            elif is_open:
                style = ''
                mc = mousecode + 'src="images/tree_black_90.png" '
            else:
                style = 'style="display: none" '
                mc = mousecode + 'src="images/tree_black_00.png" '
            h += aggr_render_node(tree, tree[2]["title"], mc, show_host)
            if not is_empty:
                h += '<ul id="%d:%s" %sclass="subtree">' % (expansion_level or 0, path_id, style)
                if not omit_content:
                    for node in tree[3]:
                        estate = node[1] != None and node[1] or node[0] # NOTE(review): apparently unused
                        if not node[2].get("hidden"):
                            new_path = path + [node[2]["title"]]
                            h += '<li>' + render_subtree(node, new_path, show_host) + '</li>\n'
                h += '</ul>'
            return h + '</span>\n'
    tree = row["aggr_treestate"]
    if only_problems:
        tree = filter_tree_only_problems(tree)
    affected_hosts = row["aggr_hosts"]
    title = row["aggr_tree"]["title"]
    group = row["aggr_group"]
    # The container id encodes everything needed to re-render this tree
    # via the ajax_render_tree() endpoint.
    url_id = html.urlencode_vars([
        ( "group", group ),
        ( "title", title ),
        ( "omit_root", omit_root and "yes" or ""),
        ( "boxes", boxes and "yes" or ""),
        ( "only_problems", only_problems and "yes" or ""),
        ( "reqhosts", ",".join('%s#%s' % sitehost for sitehost in affected_hosts) ),
    ])
    htmlcode = '<div id="%s" class=bi_tree_container>' % html.attrencode(url_id) + \
               render_subtree(tree, [tree[2]["title"]], len(affected_hosts) > 1) + \
               '</div>'
    return "aggrtree" + (boxes and "_box" or ""), htmlcode
def aggr_render_node(tree, title, mousecode, show_host):
    # Render a single (rule or decorated leaf) node: state badge, title
    # with optional downtime/ack icons, and the formatted plugin output.
    # Check if we have an assumed state: comparing assumed state (tree[1]) with state (tree[0])
    if tree[1] and tree[0] != tree[1]:
        addclass = " " + _("assumed")
        effective_state = tree[1]
    else:
        addclass = ""
        effective_state = tree[0]
    if tree[0]["in_downtime"]:
        title = ('<img class="icon bi" src="images/icon_downtime.png" title="%s">' % \
                 _("This element is currently in a scheduled downtime")) + title
    if tree[0]["acknowledged"]:
        title = ('<img class="icon bi" src="images/icon_ack.png" title="%s">' % \
                 _("This problem has been acknowledged")) + title
    h = '<span class="content state state%d%s">%s</span>\n' \
         % (effective_state["state"], addclass, render_bi_state(effective_state["state"]))
    if mousecode:
        h += '<img class=opentree %s>' % mousecode
        h += '<span class="content name" %s>%s</span>' % (mousecode, title)
    else:
        h += title
    output = format_plugin_output(effective_state["output"])
    if output:
        # NOTE(review): the bullet literal below was probably the '&diams;'
        # HTML entity before extraction -- verify against upstream.
        output = "<b class=bullet>♦</b>" + output
    else:
        output = ""
    h += '<span class="content output">%s</span>\n' % output
    return h
def render_assume_icon(site, host, service):
    # Render the clickable "assume state" icon for a host or service leaf.
    # The icon shows the currently assumed state (or "none") and toggles
    # it via the toggle_assumption() JavaScript function.
    if service:
        key = (site, host, service)
    else:
        key = (site, host)
    ass = g_assumptions.get(key)
    # TODO: Non-Ascii-Characters do not work yet!
    mousecode = \
        u'onmouseover="this.style.cursor=\'pointer\';" ' \
        'onmouseout="this.style.cursor=\'auto\';" ' \
        'title="%s" ' \
        'onclick="toggle_assumption(this, \'%s\', \'%s\', \'%s\');" ' % \
        (_("Assume another state for this item (reload page to activate)"),
        # FIXME: this must be encoded properly for JavaScript.
        # The input consists of UTF-8 encoded str objects.
        site, host, service != None and service.replace('\\', '\\\\') or '')
    current = str(ass).lower()
    return u'<img state="%s" class=assumption %s src="images/assume_%s.png">\n' % (current, mousecode, current)
def aggr_render_leaf(tree, show_host, bare = False):
    # Render a leaf node (host or service) as HTML with links to the
    # corresponding status views. "bare" skips icon and state decoration.
    site, host = tree[2]["host"]
    service = tree[2].get("service")
    if bare:
        content = u""
    else:
        content = u"" + render_assume_icon(site, host, service)
    # Four cases:
    # (1) host name, then "Host status"  (show_host == True, service == None)
    # (2) host name, then service name   (show_host == True, service != None)
    # (3) "Host status" only             (show_host == False, service == None)
    # (4) service name only              (show_host == False, service != None)
    if show_host or not service:
        host_url = html.makeuri_contextless([("view_name", "hoststatus"), ("site", site), ("host", host)], filename="view.py")
    if service:
        service_url = html.makeuri_contextless([("view_name", "service"), ("site", site), ("host", host), ("service", service)], filename="view.py")
    if show_host:
        # NOTE(review): the bullet literal and replace() targets below were
        # probably HTML entities ('&diams;', '&nbsp;') before extraction.
        content += '<a href="%s">%s</a><b class=bullet>♦</b>' % (host_url, host.replace(" ", " "))
    if not service:
        content += '<a href="%s">%s</a>' % (host_url, _("Host status"))
    else:
        content += '<a href="%s">%s</a>' % (service_url, service.replace(" ", " "))
    if bare:
        return content
    else:
        return aggr_render_node(tree, content, None, show_host)
def render_bi_state(state):
    # Two-letter display representation of a BI state ("??" if unknown).
    return { PENDING: _("PD"),
             OK: _("OK"),
             WARN: _("WA"),
             CRIT: _("CR"),
             UNKNOWN: _("UN"),
             MISSING: _("MI"),
             UNAVAIL: _("NA"),
    }.get(state, _("??"))
# Convert tree to tree contain only node in non-OK state
def filter_tree_only_problems(tree):
    """Return a copy of *tree* reduced to subtrees in a non-OK state.

    Subtrees whose effective (assumed or real) state is OK or PENDING
    are dropped; remaining rule nodes are filtered recursively.
    """
    state, assumed_state, node, subtrees = tree
    kept = []
    for subtree in subtrees:
        effective_state = subtree[1] != None and subtree[1] or subtree[0]
        if effective_state["state"] in [ OK, PENDING ]:
            continue
        if len(subtree) == 3:
            # Leaf node: keep it unchanged.
            kept.append(subtree)
        else:
            # Rule node: filter its children recursively.
            kept.append(filter_tree_only_problems(subtree))
    return state, assumed_state, node, kept
def page_timeline():
    # Page: availability timeline for one BI aggregation, identified by
    # the HTML variables "av_aggr_group" and "av_aggr_name".
    aggr_group = html.var("av_aggr_group")
    aggr_name = html.var("av_aggr_name")
    # First compile the required BI aggregates.
    if config.bi_precompile_on_demand:
        compile_forest(config.user_id, only_groups = [ aggr_group ])
    else:
        compile_forest(config.user_id)
    # In the resulting collection of BI aggregates find
    # our tree
    for tree in g_user_cache["forest"][aggr_group]:
        if tree["title"] == aggr_name:
            break
    else:
        raise MKGeneralException("No aggregation with the name %s" %
                                 aggr_name)
    row = { "aggr_tree" : tree, "aggr_group" : aggr_group }
    views.render_bi_availability(aggr_name, [row])
# ____ _
# | _ \ __ _| |_ __ _ ___ ___ _ _ _ __ ___ ___ ___
# | | | |/ _` | __/ _` / __|/ _ \| | | | '__/ __/ _ \/ __|
# | |_| | (_| | || (_| \__ \ (_) | |_| | | | (_| __/\__ \
# |____/ \__,_|\__\__,_|___/\___/ \__,_|_| \___\___||___/
#
def create_aggregation_row(tree, status_info = None):
    # Execute one compiled aggregation tree and wrap the result into the
    # row dictionary expected by the multisite datasources/painters.
    tree_state = execute_tree(tree, status_info)
    state, assumed_state, node, subtrees = tree_state
    eff_state = state
    if assumed_state != None:
        eff_state = assumed_state
    return {
        "aggr_tree" : tree,
        "aggr_treestate" : tree_state,
        "aggr_state" : state, # state disregarding assumptions
        "aggr_assumed_state" : assumed_state, # is None, if there are no assumptions
        "aggr_effective_state" : eff_state, # is assumed_state, if there are assumptions, else real state
        "aggr_name" : node["title"],
        "aggr_output" : eff_state["output"],
        "aggr_hosts" : node["reqhosts"],
        "aggr_function" : node["func"],
    }
def table(columns, add_headers, only_sites, limit, filters):
    # Datasource callback for the "aggr" table: compute one row per BI
    # aggregation visible to the current user, honoring the group and
    # service filters for performance.
    load_assumptions() # user specific, always loaded
    # Ideally we would already know all filters here, so that we do not
    # compute aggregations needlessly.
    rows = []
    # Apply group filter. This is important for performance. We
    # must not compute any aggregations from other groups and filter
    # later out again.
    only_group = None
    only_service = None
    # Renamed loop variable (was "filter") to avoid shadowing the builtin
    # and to be consistent with singlehost_table(), which uses "filt".
    for filt in filters:
        if filt.name == "aggr_group":
            val = filt.selected_group()
            if val:
                only_group = val
        elif filt.name == "aggr_service":
            only_service = filt.service_spec()
    if config.bi_precompile_on_demand and only_group:
        # optimized mode: if aggregation group known only precompile this one
        compile_forest(config.user_id, only_groups = [ only_group ])
    else:
        # classic mode: precompile everything
        compile_forest(config.user_id)
    # TODO: Optimization of affected_hosts filter!
    if only_service:
        affected = g_user_cache["affected_services"].get(only_service)
        if affected == None:
            items = []
        else:
            # Group the aggregations affected by the service by their
            # aggregation group.
            by_groups = {}
            for group, aggr in affected:
                entries = by_groups.get(group, [])
                entries.append(aggr)
                by_groups[group] = entries
            items = by_groups.items()
    else:
        items = g_user_cache["forest"].items()
    for group, trees in items:
        if only_group not in [ None, group ]:
            continue
        for tree in trees:
            row = create_aggregation_row(tree)
            row["aggr_group"] = group
            rows.append(row)
            if not html.check_limit(rows, limit):
                return rows
    return rows
# Table of all host aggregations, i.e. aggregations using data from exactly one host
def hostname_table(columns, add_headers, only_sites, limit, filters):
    # Datasource: aggregations joined to hosts by host name (may span hosts).
    return singlehost_table(columns, add_headers, only_sites, limit, filters, True, bygroup=False)
def hostname_by_group_table(columns, add_headers, only_sites, limit, filters):
    # Datasource: like hostname_table, but queried per hostgroup.
    return singlehost_table(columns, add_headers, only_sites, limit, filters, True, bygroup=True)
def host_table(columns, add_headers, only_sites, limit, filters):
    # Datasource: aggregations using data from exactly one host.
    return singlehost_table(columns, add_headers, only_sites, limit, filters, False, bygroup=False)
def singlehost_table(columns, add_headers, only_sites, limit, filters, joinbyname, bygroup):
    # Backend shared by the single-host datasources: fetch host status
    # rows via Livestatus, compile the matching part of the BI forest and
    # produce one result row per (host row, aggregation) pair.
    #
    # joinbyname -- join aggregations to hosts by host *name*; such
    #               aggregations may contain states of several hosts
    # bygroup    -- query the hostsbygroup table instead of hosts
    log("--------------------------------------------------------------------\n")
    log("* Starting to compute singlehost_table (joinbyname = %s)\n" % joinbyname)
    load_assumptions() # user specific, always loaded
    log("* Assumptions are loaded.\n")
    # Create livestatus filter for filtering out hosts. We can
    # simply use all those filters since we have a 1:n mapping between
    # hosts and host aggregations
    filter_code = ""
    for filt in filters:
        header = filt.filter("bi_host_aggregations")
        if not header.startswith("Sites:"):
            filter_code += header
    log("* Getting status information about hosts...\n")
    host_columns = filter(lambda c: c.startswith("host_"), columns)
    hostrows = get_status_info_filtered(filter_code, only_sites, limit, host_columns, config.bi_precompile_on_demand, bygroup)
    log("* Got %d host rows\n" % len(hostrows))
    # if limit:
    #     views.check_limit(hostrows, limit)
    # Apply aggregation group filter. This is important for performance. We
    # must not compute any aggregations from other aggregation groups and filter
    # them later out again.
    only_groups = None
    for filt in filters:
        if filt.name == "aggr_group":
            val = filt.selected_group()
            if val:
                only_groups = [ filt.selected_group() ]
    if config.bi_precompile_on_demand:
        log("* Compiling forest on demand...\n")
        compile_forest(config.user_id, only_groups = only_groups,
                       only_hosts = [ (h['site'], h['name']) for h in hostrows ])
    else:
        log("* Compiling forest...\n")
        compile_forest(config.user_id)
    # rows by site/host - needed for later cluster state gathering
    if config.bi_precompile_on_demand and not joinbyname:
        row_dict = dict([ ((r['site'], r['name']), r) for r in hostrows])
    rows = []
    # Now compute aggregations of these hosts
    log("* Assembling host rows...\n")
    # Special optimization for joinbyname
    if joinbyname:
        rows_by_host = {}
        for hostrow in hostrows:
            site = hostrow["site"]
            host = hostrow["name"]
            rows_by_host[(site, host)] = hostrow
    for hostrow in hostrows:
        site = hostrow["site"]
        host = hostrow["name"]
        # In case of joinbyname we deal with aggregations that bare the
        # name of one host, but might contain states of multiple hosts.
        # status_info cannot be filled from one row in that case. We
        # try to optimize by assuming that all data that we need is being
        # displayed in the same view and the information thus being present
        # in some of the other hostrows.
        if joinbyname:
            status_info = {}
            aggrs = g_user_cache["aggregations_by_hostname"].get(host, [])
            # collect all the required host of all matching aggregations
            for a in aggrs:
                reqhosts = a[1]["reqhosts"]
                for sitehost in reqhosts:
                    if sitehost not in rows_by_host:
                        # This one is missing. Darn. Cancel it.
                        status_info = None
                        break
                    else:
                        row = rows_by_host[sitehost]
                        status_info[sitehost] = [
                            row["state"],
                            row["hard_state"],
                            row["plugin_output"],
                            not not hostrow["acknowledged"],
                            hostrow["scheduled_downtime_depth"] > 0,
                            row["services_with_fullstate"] ]
                if status_info == None:
                    break
        else:
            aggrs = g_user_cache["host_aggregations"].get((site, host), [])
            status_info = { (site, host) : [
                hostrow["state"],
                hostrow["hard_state"],
                hostrow["plugin_output"],
                not not hostrow["acknowledged"],
                hostrow["scheduled_downtime_depth"] > 0,
                hostrow["services_with_fullstate"] ] }
        for group, aggregation in aggrs:
            row = hostrow.copy()
            # on demand compile: host aggregations of clusters need data of several hosts.
            # It is not enough to only process the hostrow. The status_info construct must
            # also include the data of the other required hosts.
            if config.bi_precompile_on_demand and not joinbyname and len(aggregation['reqhosts']) > 1:
                status_info = {}
                for site, host in aggregation['reqhosts']:
                    this_row = row_dict.get((site, host))
                    if this_row:
                        status_info[(site, host)] = [
                            this_row['state'],
                            this_row['hard_state'],
                            this_row['plugin_output'],
                            not not this_row["acknowledged"],
                            this_row["scheduled_downtime_depth"] > 0,
                            this_row['services_with_fullstate'],
                        ]
            row.update(create_aggregation_row(aggregation, status_info))
            row["aggr_group"] = group
            rows.append(row)
            if not html.check_limit(rows, limit):
                return rows
    log("* Assembled %d rows.\n" % len(rows))
    return rows
# _ _ _
# | | | | ___| |_ __ ___ _ __ ___
# | |_| |/ _ \ | '_ \ / _ \ '__/ __|
# | _ | __/ | |_) | __/ | \__ \
# |_| |_|\___|_| .__/ \___|_| |___/
# |_|
def debug(x):
    """Render any Python object pretty-printed into the current page."""
    import pprint
    html.write("<pre>%s</pre>\n" % pprint.pformat(x))
def load_assumptions():
    # Load the current user's assumed states into the global g_assumptions.
    global g_assumptions
    g_assumptions = config.load_user_file("bi_assumptions", {})
def save_assumptions():
    # Persist the global assumption dictionary for the current user.
    config.save_user_file("bi_assumptions", g_assumptions)
def load_ex_level():
    # Load the user's saved tree expansion level (None if never saved).
    return config.load_user_file("bi_treestate", (None, ))[0]
def save_ex_level(current_ex_level):
    # Persist the current tree expansion level for the current user.
    config.save_user_file("bi_treestate", (current_ex_level, ))
def status_tree_depth(tree):
    """Return the depth of a status tree; a leaf counts as depth 1.

    Leaf nodes are 3-tuples; rule nodes are 4-tuples whose children
    live in tree[3].
    """
    if len(tree) == 3:
        return 1
    children = tree[3]
    if not children:
        return 1
    return 1 + max(status_tree_depth(child) for child in children)
def is_part_of_aggregation(what, site, host, service):
    # Return whether the given host (what == "host") or service (any other
    # value) appears in at least one BI aggregation of the current user.
    compile_forest(config.user_id)
    if what == "host":
        return (site, host) in g_user_cache["affected_hosts"]
    else:
        return (site, host, service) in g_user_cache["affected_services"]
def get_state_name(node):
    # Translate a node's numeric state into its display name: host state
    # names for host leaves, service state names in all other cases.
    if node[1]['type'] == NT_LEAF:
        if 'service' in node[1]:
            return service_state_names[node[0]['state']]
        else:
            return host_state_names[node[0]['state']]
    else:
        return service_state_names[node[0]['state']]
| alberts/check_mk | web/htdocs/bi.py | Python | gpl-2.0 | 76,013 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module is part of the Clemson ACM Auto Grader
Copyright (c) 2016, Robert Underwood
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module is responsible for cloning and updating repositories.
"""
import json
import logging
import subprocess
LOGGER = logging.getLogger(__name__)
def update(settings, student):
    """
    Dispatch to the configured updater ("git", "hg", "noop", "svn" or
    "script") to fetch the latest student submission.
    return:
        bool updated - whether or not changes were made to the repository
    """
    method = settings["update"]["method"]
    dispatch = {
        "git": update_git,
        "hg": update_hg,
        "noop": update_noop,
        "svn": update_svn,
        "script": update_script,
    }
    updater = dispatch[method]
    return updater(settings, student)
def update_hg(settings, student):
    """
    Update a Mercurial repository with the latest submission.

    Returns True when incoming changesets were found and pulled,
    False when the repository was already up to date.
    """
    LOGGER.info('Beginning a HG update for student %s', student['username'])
    timeout = settings['update']['timeout']

    # "hg incoming" exits 0 when there are incoming changesets and
    # non-zero when there are none, so a CalledProcessError means
    # "nothing new".
    cmd = """
    hg incoming;
    """
    # BUG FIX: 'changed' was previously False on both paths, so callers
    # were always told that nothing had changed.
    changed = True
    try:
        subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                              shell=True, timeout=timeout, cwd=student['directory'])
    except subprocess.CalledProcessError:
        changed = False

    # Download and apply updates unconditionally; --clean discards any
    # local modifications in the working copy.
    cmd = """
    hg pull;
    hg update --clean;
    """
    subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                          shell=True, timeout=timeout, cwd=student['directory'])
    return changed
def update_git(settings, student):
    """
    Update a Git repository with the latest submission.

    Returns True when new commits were pulled, False when the repository
    was already up to date.
    """
    LOGGER.info('Beginning a git update for student %s', student['username'])
    timeout = settings['update']['timeout']
    cmd = """
    git pull -f -Xtheirs;
    """
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL,
                                  shell=True, timeout=timeout, cwd=student['directory'])
    # BUG FIX: this previously returned True when the output contained
    # "Already up-to-date", i.e. exactly when *nothing* had changed,
    # which inverted the contract documented on update().
    return "Already up-to-date" not in str(out)
def update_noop(settings, student):
    """
    No-op updater: performs no repository operation and always reports
    that changes are present.
    """
    username = student['username']
    LOGGER.info('Beginning a NOOP update for student %s', username)
    return True
def update_svn(settings, student):
    """
    Update a SVN repository with the latest submission.

    Not implemented yet: always raises NotImplementedError.
    """
    LOGGER.info('Beginning a svn update for student %s', student['username'])
    timeout = settings['update']['timeout']
    raise NotImplementedError
def update_script(settings, student):
    """
    Update a submission by running a user-supplied script.

    The script must print a JSON object with a boolean "changed" key on
    stdout; that value is returned.
    """
    LOGGER.info('Beginning a script update for student %s', student['username'])
    timeout = settings['update']['timeout']
    # NOTE(review): when dispatched from update(), settings['update']['method']
    # is the literal string "script" -- verify whether the script command is
    # meant to live under a separate settings key.
    cmd = settings['update']['method']
    # BUG FIX: subprocess.check_output() captures stdout itself and raises
    # ValueError if a stdout argument is passed, so only stderr is redirected.
    output = subprocess.check_output(cmd, stderr=subprocess.DEVNULL,
                                     shell=True, timeout=timeout, cwd=student['directory'])
    ret = json.loads(output)
    return ret['changed']
| robertu94/autograder | autograder/source/update.py | Python | bsd-2-clause | 4,236 |
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Components.FileList import FileList
from Components.Sources.StaticText import StaticText
from Components.MediaPlayer import PlayList
from Components.config import config, getConfigListEntry, ConfigSubsection, configfile, ConfigText, ConfigYesNo, ConfigDirectory
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
# Persistent MediaPlayer user settings (enigma2 config mechanism).
config.mediaplayer = ConfigSubsection()
config.mediaplayer.repeat = ConfigYesNo(default=False)
config.mediaplayer.savePlaylistOnExit = ConfigYesNo(default=True)
config.mediaplayer.saveDirOnExit = ConfigYesNo(default=False)
config.mediaplayer.defaultDir = ConfigDirectory()
config.mediaplayer.useAlternateUserAgent = ConfigYesNo(default=False)
config.mediaplayer.alternateUserAgent = ConfigText(default="")
config.mediaplayer.sortPlaylists = ConfigYesNo(default=False)
config.mediaplayer.onMainMenu = ConfigYesNo(default=False)
class DirectoryBrowser(Screen, HelpableScreen):
	# Simple file browser used to pick a directory (e.g. the media player
	# start directory).  Closes with the chosen path, or False on cancel.
	def __init__(self, session, currDir):
		Screen.__init__(self, session)
		# for the skin: first try MediaPlayerDirectoryBrowser, then FileBrowser, this allows individual skinning
		self.skinName = ["MediaPlayerDirectoryBrowser", "FileBrowser" ]
		HelpableScreen.__init__(self)
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("Use"))
		self.filelist = FileList(currDir, matchingPattern="")
		self["filelist"] = self.filelist
		self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
			{
				"green": self.use,
				"red": self.exit,
				"ok": self.ok,
				"cancel": self.exit
			})
		self.onLayoutFinish.append(self.layoutFinished)
	def layoutFinished(self):
		# Set the window title once the layout is available.
		self.setTitle(_("Directory browser"))
	def ok(self):
		# OK only descends into directories; files cannot be opened here.
		if self.filelist.canDescent():
			self.filelist.descent()
	def use(self):
		# Green button: close returning the selected directory.  When a
		# directory entry is highlighted, descend into it first so the
		# returned path is the entry itself, not its parent.
		if self["filelist"].getCurrentDirectory() is not None:
			if self.filelist.canDescent() and self["filelist"].getFilename() and len(self["filelist"].getFilename()) > len(self["filelist"].getCurrentDirectory()):
				self.filelist.descent()
			self.close(self["filelist"].getCurrentDirectory())
		else:
			self.close(self["filelist"].getFilename())
	def exit(self):
		# Cancelled: signal the caller with False instead of a path.
		self.close(False)
class MediaPlayerSettings(Screen,ConfigListScreen):
	# Setup screen for the config.mediaplayer.* options.  The visible
	# entries depend on saveDirOnExit: the start directory entry is only
	# offered when the last directory is NOT saved automatically.
	def __init__(self, session, parent):
		Screen.__init__(self, session)
		# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
		self.skinName = ["MediaPlayerSettings", "Setup" ]
		self.setup_title = _("Edit settings")
		self.onChangedEntry = [ ]
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("Save"))
		ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
		self.parent = parent
		self.initConfigList()
		# Rebuild the list whenever saveDirOnExit is toggled.
		config.mediaplayer.saveDirOnExit.addNotifier(self.initConfigList)
		self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
		{
			"green": self.save,
			"red": self.cancel,
			"cancel": self.cancel,
			"ok": self.ok,
		}, -2)
	def layoutFinished(self):
		self.setTitle(self.setup_title)
	def initConfigList(self, element=None):
		# (Re)build the list of config entries; also used as a notifier
		# callback, hence the unused *element* parameter.
		print "[initConfigList]", element
		try:
			self.list = []
			self.list.append(getConfigListEntry(_("repeat playlist"), config.mediaplayer.repeat))
			self.list.append(getConfigListEntry(_("save playlist on exit"), config.mediaplayer.savePlaylistOnExit))
			self.list.append(getConfigListEntry(_("save last directory on exit"), config.mediaplayer.saveDirOnExit))
			if not config.mediaplayer.saveDirOnExit.getValue():
				self.list.append(getConfigListEntry(_("start directory"), config.mediaplayer.defaultDir))
			self.list.append(getConfigListEntry(_("sorting of playlists"), config.mediaplayer.sortPlaylists))
			self.list.append(getConfigListEntry(_("show mediaplayer on mainmenu"), config.mediaplayer.onMainMenu))
			self["config"].setList(self.list)
		except KeyError:
			print "keyError"
	def changedConfigList(self):
		self.initConfigList()
	def ok(self):
		# OK on the start directory entry opens the directory browser.
		if self["config"].getCurrent()[1] == config.mediaplayer.defaultDir:
			self.session.openWithCallback(self.DirectoryBrowserClosed, DirectoryBrowser, self.parent.filelist.getCurrentDirectory())
	def DirectoryBrowserClosed(self, path):
		# Browser callback: path is False when the browser was cancelled.
		print "PathBrowserClosed:" + str(path)
		if path != False:
			config.mediaplayer.defaultDir.setValue(path)
	def save(self):
		# Persist all shown entries and close the screen.
		for x in self["config"].list:
			x[1].save()
		self.close()
	def cancel(self):
		self.close()
	# for summary:
	def changedEntry(self):
		for x in self.onChangedEntry:
			x()
	def getCurrentEntry(self):
		return self["config"].getCurrent()[0]
	def getCurrentValue(self):
		return str(self["config"].getCurrent()[1].getText())
	def createSummary(self):
		from Screens.Setup import SetupSummary
		return SetupSummary
| noox-/stbgui-1 | lib/python/Plugins/Extensions/MediaPlayer/settings.py | Python | gpl-2.0 | 4,818 |
#!/usr/bin/python3
from mobileapi import app
import os
# By placing our config in ENV variables we can adjust them
# in production with differnet values
# This sets defaults that are good for development
# Development defaults; production can override them via the environment.
APP_PORT = int(os.environ.get('APP_PORT', 5017))  # 5017 is the Mobile API port
# Debug stays on unless APP_DEBUG is explicitly set to '0'.
APP_DEBUG = os.environ.get('APP_DEBUG', '1') != '0'

app.run(host='0.0.0.0', port=APP_PORT, debug=APP_DEBUG, threaded=True)
| roguefalcon/rpi_docker_images | hackmt_2018_code/mobileapi/run.py | Python | gpl-3.0 | 563 |
import relayManager
import dronekit
class ShotManager():
    # Minimal shot manager: holds the vehicle and its relay manager.
    def __init__(self):
        # see the shotlist in app/shots/shots.p
        print "init"
    def Start(self, vehicle):
        # Attach the vehicle and set up the relay manager for it.
        self.vehicle = vehicle
        # Initialize relayManager
        self.relayManager = relayManager.RelayManager(self)
# Connect to the local MAVLink endpoint and start the shot manager.
target = 'udp:127.0.0.1:14551' #'tcp:127.0.0.1:5760'
print 'Connecting to ' + target + '...'
vehicle = dronekit.connect(target, wait_ready=True)
sm = ShotManager()
sm.Start(vehicle)
| mapossum/SeymourSolo | tester.py | Python | gpl-3.0 | 495 |
"""
sphinx.search.tr
~~~~~~~~~~~~~~~~
Turkish search language: includes the JS Turkish stemmer.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from typing import Dict, Set
import snowballstemmer
from sphinx.search import SearchLanguage
class SearchTurkish(SearchLanguage):
    """Turkish search language, backed by the Snowball stemmer.

    The JavaScript stemmer referenced by ``js_stemmer_rawcode`` keeps
    client-side search consistent with the stemming done here.
    """
    lang = 'tr'
    language_name = 'Turkish'
    js_stemmer_rawcode = 'turkish-stemmer.js'
    stopwords: Set[str] = set()
    def init(self, options: Dict) -> None:
        # Create the Snowball Turkish stemmer once per language object.
        self.stemmer = snowballstemmer.stemmer('turkish')
    def stem(self, word: str) -> str:
        # Normalize to lower case before handing the word to the stemmer.
        return self.stemmer.stemWord(word.lower())
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/sphinx/search/tr.py | Python | apache-2.0 | 678 |
from mpd import MPDClient
from select import select
import time, re
import argparse
class MPDBookmark(object):
    # Watch an MPD server and bookmark playback positions via stickers.
    # Songs whose *field* tag (default "album") matches *motif* (default
    # "Podcast") get a 'last_up' sticker holding the elapsed seconds, so
    # playback can resume where it previously stopped.
    def __init__(self, host="localhost", port=6600, password=None,
                 motif="Podcast", field="album"):
        # Connect to MPD (optionally authenticating) and enter the loop.
        self.client = MPDClient()
        try:
            self.client.connect(host, port)
            if password:
                self.client.password(password)
        except :
            print "merde"
            print "host = ", host
            print "port = ", port
            print "pass = ", password
            assert False
        self.motif=motif
        self.field=field
        self.boucle()
    def stats(self):
        # Print MPD server version and current status (debug helper).
        print "==========================================="
        print " Version :"
        print self.client.mpd_version
        print "==========================================="
        print "fin du test"
        print self.client.status()
    def wait_action(self):
        # Block (at most 60s) until the player subsystem changes, using
        # MPD's idle command.
        self.client.send_idle('player')
        select([self.client], [], [], 60)
        return self.client.fetch_idle()
    def verif_motif(self, song):
        # True if the song carries the watched tag and it matches motif.
        return self.field in song and re.match(self.motif, song[self.field])
    def update_song(self, song, ts):
        # Store the elapsed position of *song* (which started playing at
        # timestamp *ts*) in its 'last_up' sticker, if it matches motif.
        new_ts=time.time()
        if 'title' in song :
            print "Update song : ", song['title'],
            print "( ",int(new_ts-ts),
            print "/",song['time']," )"
        if self.verif_motif(song):
            last_up=int(new_ts-ts)
            self.client.sticker_set('song', song['file'],
                                    'last_up', last_up)
    def start_song(self, new_song):
        # When a bookmarked song starts, seek to the saved position;
        # restart from 0 if the bookmark is within 4s of the song's end.
        if ('file' in new_song) and 'last_up' in self.client.sticker_list('song', new_song['file']):
            last_up=int(self.client.sticker_get('song',
                                                new_song['file'],
                                                'last_up'))
            if abs(int(new_song['time'])-last_up)<=4 :
                last_up=0
            self.client.seekcur(last_up)
    def boucle(self):
        # Main loop: on every player event, bookmark the song that just
        # finished and restore the position of the newly started one.
        state=self.client.status()['state']
        song=self.client.currentsong()
        print "song : ", song
        ts=time.time()
        if 'elapsed' in song :
            ts-=float( song['elapsed'])
        while 1 :
            ret=self.wait_action()
            new_state=self.client.status()['state']
            new_song=self.client.currentsong()
            if new_song!=song and new_song!={}:
                self.start_song(new_song)
                if song !={}:
                    print "update ",
                    self.update_song(song, ts)
            state= new_state
            song= new_song
            if 'elapsed' in self.client.status():
                ts=time.time()-float( self.client.status()['elapsed'])
# Helpers for manual testing
def select_N_song(client, N=3):
    """Return the first ``N`` song entries known to *client*.

    Assumes *client* is already connected.  Entries without a ``file``
    key (e.g. directory entries) are skipped.
    """
    songs = []
    for entry in client.listall(''):
        if 'file' not in entry:
            continue
        songs.append(entry)
        if len(songs) >= N:
            break
    return songs
if __name__ == '__main__':
    # Command-line front end: every option maps 1:1 onto an MPDBookmark
    # constructor argument.
    parser=argparse.ArgumentParser(description='MPD Bookmark is a simple script witch monitor MPD and keep a trace of where the listening of a file ended.')
    parser.add_argument('-f','--field',
                        help='A field either song, file or any tag',
                        default='album')
    parser.add_argument('-m','--motif', help='A regular expression',
                        default='Podcast')
    parser.add_argument('-i','--host', help='Host of MPD', default='localhost')
    # Parse the port as an integer up front (it was previously passed to
    # the MPD client as the string '6600'); argparse now rejects
    # non-numeric values with a clear error message.
    parser.add_argument('-p','--port', help='Port of MPD', type=int,
                        default=6600)
    parser.add_argument('-pw','--password', help='Password of MPD',
                        default=None)
    args=parser.parse_args()
    w=MPDBookmark(host=args.host,
                  port=args.port,
                  password=args.password,
                  motif=args.motif,
                  field=args.field)
| RaoulChartreuse/mpd_bookmark | mpd_bookmark.py | Python | gpl-2.0 | 4,058 |
# Demo script (Python 2): prints a few fixed test strings.
print "Hello world!"
print "master"
print "test"
| dys152/jdtest | jd.py | Python | apache-2.0 | 49 |
# -*- coding: utf-8 -*-
"""QGIS Unit test utils for provider tests.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '2015-04-27'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsRectangle, QgsFeatureRequest, QgsFeature, QgsGeometry, NULL
class ProviderTestCase(object):
def assert_query(self, provider, expression, expected):
result = set([f['pk'] for f in provider.getFeatures(QgsFeatureRequest().setFilterExpression(expression))])
assert set(expected) == result, 'Expected {} and got {} when testing expression "{}"'.format(set(expected), result, expression)
'''
This is a collection of tests for vector data providers and kept generic.
To make use of it, subclass it and set self.provider to a provider you want to test.
Make sure that your provider uses the default dataset by converting one of the provided datasets from the folder
tests/testdata/provider to a dataset your provider is able to handle.
To test expression compilation, add the methods `enableCompiler()` and `disableCompiler()` to your subclass.
If these methods are present, the tests will ensure that the result of server side and client side expression
evaluation are equal.
'''
def runGetFeatureTests(self, provider):
assert len([f for f in provider.getFeatures()]) == 5
self.assert_query(provider, 'name ILIKE \'QGIS\'', [])
self.assert_query(provider, '"name" IS NULL', [5])
self.assert_query(provider, '"name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(provider, '"name" NOT LIKE \'Ap%\'', [1, 3, 4])
self.assert_query(provider, '"name" NOT ILIKE \'QGIS\'', [1, 2, 3, 4])
self.assert_query(provider, '"name" NOT ILIKE \'pEAR\'', [1, 2, 4])
self.assert_query(provider, 'name = \'Apple\'', [2])
self.assert_query(provider, 'name <> \'Apple\'', [1, 3, 4])
self.assert_query(provider, 'name = \'apple\'', [])
self.assert_query(provider, '"name" <> \'apple\'', [1, 2, 3, 4])
self.assert_query(provider, '(name = \'Apple\') is not null', [1, 2, 3, 4])
self.assert_query(provider, 'name LIKE \'Apple\'', [2])
self.assert_query(provider, 'name LIKE \'aPple\'', [])
self.assert_query(provider, 'name ILIKE \'aPple\'', [2])
self.assert_query(provider, 'name ILIKE \'%pp%\'', [2])
self.assert_query(provider, 'cnt > 0', [1, 2, 3, 4])
self.assert_query(provider, 'cnt < 0', [5])
self.assert_query(provider, 'cnt >= 100', [1, 2, 3, 4])
self.assert_query(provider, 'cnt <= 100', [1, 5])
self.assert_query(provider, 'pk IN (1, 2, 4, 8)', [1, 2, 4])
self.assert_query(provider, 'cnt = 50 * 2', [1])
self.assert_query(provider, 'cnt = 99 + 1', [1])
self.assert_query(provider, 'cnt = 101 - 1', [1])
self.assert_query(provider, 'cnt - 1 = 99', [1])
self.assert_query(provider, 'cnt + 1 = 101', [1])
self.assert_query(provider, 'cnt = 1100 % 1000', [1])
self.assert_query(provider, '"name" || \' \' || "name" = \'Orange Orange\'', [1])
self.assert_query(provider, '"name" || \' \' || "cnt" = \'Orange 100\'', [1])
self.assert_query(provider, '\'x\' || "name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(provider, '\'x\' || "name" IS NULL', [5])
self.assert_query(provider, 'cnt = 10 ^ 2', [1])
self.assert_query(provider, '"name" ~ \'[OP]ra[gne]+\'', [1])
self.assert_query(provider, '"name"="name2"', [2, 4]) # mix of matched and non-matched case sensitive names
self.assert_query(provider, 'true', [1, 2, 3, 4, 5])
self.assert_query(provider, 'false', [])
# Three value logic
self.assert_query(provider, 'false and false', [])
self.assert_query(provider, 'false and true', [])
self.assert_query(provider, 'false and NULL', [])
self.assert_query(provider, 'true and false', [])
self.assert_query(provider, 'true and true', [1, 2, 3, 4, 5])
self.assert_query(provider, 'true and NULL', [])
self.assert_query(provider, 'NULL and false', [])
self.assert_query(provider, 'NULL and true', [])
self.assert_query(provider, 'NULL and NULL', [])
self.assert_query(provider, 'false or false', [])
self.assert_query(provider, 'false or true', [1, 2, 3, 4, 5])
self.assert_query(provider, 'false or NULL', [])
self.assert_query(provider, 'true or false', [1, 2, 3, 4, 5])
self.assert_query(provider, 'true or true', [1, 2, 3, 4, 5])
self.assert_query(provider, 'true or NULL', [1, 2, 3, 4, 5])
self.assert_query(provider, 'NULL or false', [])
self.assert_query(provider, 'NULL or true', [1, 2, 3, 4, 5])
self.assert_query(provider, 'NULL or NULL', [])
self.assert_query(provider, 'not true', [])
self.assert_query(provider, 'not false', [1, 2, 3, 4, 5])
self.assert_query(provider, 'not null', [])
# not
self.assert_query(provider, 'not name = \'Apple\'', [1, 3, 4])
self.assert_query(provider, 'not name IS NULL', [1, 2, 3, 4])
self.assert_query(provider, 'not name = \'Apple\' or name = \'Apple\'', [1, 2, 3, 4])
self.assert_query(provider, 'not name = \'Apple\' or not name = \'Apple\'', [1, 3, 4])
self.assert_query(provider, 'not name = \'Apple\' and pk = 4', [4])
self.assert_query(provider, 'not name = \'Apple\' and not pk = 4', [1, 3])
self.assert_query(provider, 'not pk IN (1, 2, 4, 8)', [3, 5])
# type conversion - QGIS expressions do not mind that we are comparing a string
# against numeric literals
self.assert_query(provider, 'num_char IN (2, 4, 5)', [2, 4, 5])
def testGetFeaturesUncompiled(self):
try:
self.disableCompiler()
except AttributeError:
pass
self.runGetFeatureTests(self.provider)
def testGetFeaturesCompiled(self):
try:
self.enableCompiler()
self.runGetFeatureTests(self.provider)
except AttributeError:
print 'Provider does not support compiling'
def testSubsetString(self):
if not self.provider.supportsSubsetString():
print 'Provider does not support subset strings'
return
subset = self.getSubsetString()
self.provider.setSubsetString(subset)
self.assertEqual(self.provider.subsetString(), subset)
result = set([f['pk'] for f in self.provider.getFeatures()])
self.provider.setSubsetString(None)
expected = set([2, 3, 4])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected), result, subset)
# Subset string AND filter rect
self.provider.setSubsetString(subset)
extent = QgsRectangle(-70, 70, -60, 75)
result = set([f['pk'] for f in self.provider.getFeatures(QgsFeatureRequest().setFilterRect(extent))])
self.provider.setSubsetString(None)
expected = set([2])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected), result, subset)
# Subset string AND filter rect, version 2
self.provider.setSubsetString(subset)
extent = QgsRectangle(-71, 65, -60, 80)
result = set([f['pk'] for f in self.provider.getFeatures(QgsFeatureRequest().setFilterRect(extent))])
self.provider.setSubsetString(None)
expected = set([2, 4])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected), result, subset)
# Subset string AND expression
self.provider.setSubsetString(subset)
result = set([f['pk'] for f in self.provider.getFeatures(QgsFeatureRequest().setFilterExpression('length("name")=5'))])
self.provider.setSubsetString(None)
expected = set([2, 4])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected), result, subset)
def getSubsetString(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"cnt" > 100 and "cnt" < 410'
def testOrderByUncompiled(self):
try:
self.disableCompiler()
except AttributeError:
pass
self.runOrderByTests()
def testOrderByCompiled(self):
try:
self.enableCompiler()
self.runOrderByTests()
except AttributeError:
print 'Provider does not support compiling'
def runOrderByTests(self):
request = QgsFeatureRequest().addOrderBy('cnt')
values = [f['cnt'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [-200, 100, 200, 300, 400])
request = QgsFeatureRequest().addOrderBy('cnt', False)
values = [f['cnt'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [400, 300, 200, 100, -200])
request = QgsFeatureRequest().addOrderBy('name')
values = [f['name'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, ['Apple', 'Honey', 'Orange', 'Pear', NULL])
request = QgsFeatureRequest().addOrderBy('name', True, True)
values = [f['name'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [NULL, 'Apple', 'Honey', 'Orange', 'Pear'])
request = QgsFeatureRequest().addOrderBy('name', False)
values = [f['name'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [NULL, 'Pear', 'Orange', 'Honey', 'Apple'])
request = QgsFeatureRequest().addOrderBy('name', False, False)
values = [f['name'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, ['Pear', 'Orange', 'Honey', 'Apple', NULL])
# Case sensitivity
request = QgsFeatureRequest().addOrderBy('name2')
values = [f['name2'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, ['Apple', 'Honey', 'NuLl', 'oranGe', 'PEaR'])
# Combination with LIMIT
request = QgsFeatureRequest().addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [5, 4])
# A slightly more complex expression
request = QgsFeatureRequest().addOrderBy('pk*2', False)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [5, 4, 3, 2, 1])
# Order reversing expression
request = QgsFeatureRequest().addOrderBy('pk*-1', False)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [1, 2, 3, 4, 5])
# Type dependent expression
request = QgsFeatureRequest().addOrderBy('num_char*2', False)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [5, 4, 3, 2, 1])
# Order by guaranteed to fail
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(set(values), set([5, 4, 3, 2, 1]))
# Multiple order bys and boolean
request = QgsFeatureRequest().addOrderBy('pk > 2').addOrderBy('pk', False)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [2, 1, 5, 4, 3])
# Multiple order bys, one bad, and a limit
request = QgsFeatureRequest().addOrderBy('pk', False).addOrderBy('not a valid expression*', False).setLimit(2)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [5, 4])
# Bad expression first
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False).addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.provider.getFeatures(request)]
self.assertEquals(values, [5, 4])
# Combination with subset of attributes
request = QgsFeatureRequest().addOrderBy('num_char', False).setSubsetOfAttributes(['pk'], self.vl.fields())
values = [f['pk'] for f in self.vl.getFeatures(request)]
self.assertEquals(values, [5, 4, 3, 2, 1])
def testGetFeaturesFidTests(self):
fids = [f.id() for f in self.provider.getFeatures()]
assert len(fids) == 5, 'Expected 5 features, got {} instead'.format(len(fids))
for id in fids:
result = [f.id() for f in self.provider.getFeatures(QgsFeatureRequest().setFilterFid(id))]
expected = [id]
assert result == expected, 'Expected {} and got {} when testing for feature ID filter'.format(expected, result)
def testGetFeaturesFidsTests(self):
fids = [f.id() for f in self.provider.getFeatures()]
result = set([f.id() for f in self.provider.getFeatures(QgsFeatureRequest().setFilterFids([fids[0], fids[2]]))])
expected = set([fids[0], fids[2]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
result = set([f.id() for f in self.provider.getFeatures(QgsFeatureRequest().setFilterFids([fids[1], fids[3], fids[4]]))])
expected = set([fids[1], fids[3], fids[4]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
result = set([f.id() for f in self.provider.getFeatures(QgsFeatureRequest().setFilterFids([]))])
expected = set([])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
def testGetFeaturesFilterRectTests(self):
extent = QgsRectangle(-70, 67, -60, 80)
features = [f['pk'] for f in self.provider.getFeatures(QgsFeatureRequest().setFilterRect(extent))]
assert set(features) == set([2, 4]), 'Got {} instead'.format(features)
def testGetFeaturesPolyFilterRectTests(self):
""" Test fetching features from a polygon layer with filter rect"""
try:
if not self.poly_provider:
return
except:
return
extent = QgsRectangle(-73, 70, -63, 80)
features = [f['pk'] for f in self.poly_provider.getFeatures(QgsFeatureRequest().setFilterRect(extent))]
# Some providers may return the exact intersection matches (2, 3) even without the ExactIntersect flag, so we accept that too
assert set(features) == set([2, 3]) or set(features) == set([1, 2, 3]), 'Got {} instead'.format(features)
# Test with exact intersection
features = [f['pk'] for f in self.poly_provider.getFeatures(QgsFeatureRequest().setFilterRect(extent).setFlags(QgsFeatureRequest.ExactIntersect))]
assert set(features) == set([2, 3]), 'Got {} instead'.format(features)
def testRectAndExpression(self):
extent = QgsRectangle(-70, 67, -60, 80)
result = set([f['pk'] for f in self.provider.getFeatures(
QgsFeatureRequest().setFilterExpression('"cnt">200').setFilterRect(extent))])
expected = [4]
assert set(expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(set(expected), result)
def testGetFeaturesLimit(self):
it = self.provider.getFeatures(QgsFeatureRequest().setLimit(2))
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features, got {} instead'.format(len(features))
# fetch one feature
feature = QgsFeature()
assert not it.nextFeature(feature), 'Expected no feature after limit, got one'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
it.rewind()
assert it.nextFeature(feature), 'Expected feature after rewind, got none'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
# test with expression, both with and without compilation
try:
self.disableCompiler()
except AttributeError:
pass
it = self.provider.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(features)
try:
self.enableCompiler()
except AttributeError:
pass
it = self.provider.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(features)
# limit to more features than exist
it = self.provider.getFeatures(QgsFeatureRequest().setLimit(3).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(features)
# limit to less features than possible
it = self.provider.getFeatures(QgsFeatureRequest().setLimit(1).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert 1 in features or 5 in features, 'Expected either 1 or 5 for expression and feature limit, Got {} instead'.format(features)
def testMinValue(self):
self.assertEqual(self.provider.minimumValue(1), -200)
self.assertEqual(self.provider.minimumValue(2), 'Apple')
def testMaxValue(self):
self.assertEqual(self.provider.maximumValue(1), 400)
self.assertEqual(self.provider.maximumValue(2), 'Pear')
def testExtent(self):
reference = QgsGeometry.fromRect(
QgsRectangle(-71.123, 66.33, -65.32, 78.3))
provider_extent = QgsGeometry.fromRect(self.provider.extent())
assert QgsGeometry.compare(provider_extent.asPolygon(), reference.asPolygon(), 0.00001), 'Expected {}, got {}'.format(reference.exportToWkt(), provider_extent.exportToWkt())
def testUnique(self):
self.assertEqual(set(self.provider.uniqueValues(1)), set([-200, 100, 200, 300, 400]))
assert set([u'Apple', u'Honey', u'Orange', u'Pear', NULL]) == set(self.provider.uniqueValues(2)), 'Got {}'.format(set(self.provider.uniqueValues(2)))
def testFeatureCount(self):
assert self.provider.featureCount() == 5, 'Got {}'.format(self.provider.featureCount())
#Add a subset string and test feature count
subset = self.getSubsetString()
self.provider.setSubsetString(subset)
count = self.provider.featureCount()
self.provider.setSubsetString(None)
assert count == 3, 'Got {}'.format(count)
def testClosedIterators(self):
""" Test behaviour of closed iterators """
# Test retrieving feature after closing iterator
f_it = self.provider.getFeatures(QgsFeatureRequest())
fet = QgsFeature()
assert f_it.nextFeature(fet), 'Could not fetch feature'
assert fet.isValid(), 'Feature is not valid'
assert f_it.close(), 'Could not close iterator'
self.assertFalse(f_it.nextFeature(fet), 'Fetched feature after iterator closed, expected nextFeature() to return False')
self.assertFalse(fet.isValid(), 'Valid feature fetched from closed iterator, should be invalid')
# Test rewinding closed iterator
self.assertFalse(f_it.rewind(), 'Rewinding closed iterator successful, should not be allowed')
def testGetFeaturesSubsetAttributes(self):
""" Test that expected results are returned when using subsets of attributes """
tests = {'pk': set([1, 2, 3, 4, 5]),
'cnt': set([-200, 300, 100, 200, 400]),
'name': set(['Pear', 'Orange', 'Apple', 'Honey', NULL]),
'name2': set(['NuLl', 'PEaR', 'oranGe', 'Apple', 'Honey'])}
for field, expected in tests.iteritems():
result = set([f[field] for f in self.provider.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([field], self.provider.fields()))])
self.assertEqual(result, expected, 'Expected {}, got {}'.format(expected, result))
def testGetFeaturesSubsetAttributes2(self):
""" Test that other fields are NULL wen fetching subsets of attributes """
for field_to_fetch in ['pk', 'cnt', 'name', 'name2']:
for f in self.provider.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([field_to_fetch], self.provider.fields())):
# Check that all other fields are NULL
for other_field in [field.name() for field in self.provider.fields() if field.name() != field_to_fetch]:
self.assertEqual(f[other_field], NULL, 'Value for field "{}" was present when it should not have been fetched by request'.format(other_field))
def testGetFeaturesNoGeometry(self):
""" Test that no geometry is present when fetching features without geometry"""
for f in self.provider.getFeatures(QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)):
self.assertFalse(f.constGeometry(), 'Expected no geometry, got one')
def testGetFeaturesNoGeometry(self):
""" Test that geometry is present when fetching features without setting NoGeometry flag"""
for f in self.provider.getFeatures(QgsFeatureRequest()):
if f['pk'] == 3:
# no geometry for this feature
continue
assert f.constGeometry(), 'Expected geometry, got none'
| SebDieBln/QGIS | tests/src/python/providertestbase.py | Python | gpl-2.0 | 22,341 |
import random
import threading
import time
import unittest
from traits.api import Enum, HasStrictTraits
from traits.util.async_trait_wait import wait_for_condition
class TrafficLights(HasStrictTraits):
    """Test fixture: a traits object whose ``colour`` cycles like a UK
    traffic light, mutated from a background thread."""
    # Current light colour; changing it fires trait-change notifications.
    colour = Enum('Green', 'Amber', 'Red', 'RedAndAmber')

    # Fixed transition table: each colour maps to its successor.
    _next_colour = {
        'Green': 'Amber',
        'Amber': 'Red',
        'Red': 'RedAndAmber',
        'RedAndAmber': 'Green',
    }

    def make_random_changes(self, change_count):
        """Advance the light ``change_count`` times with random short delays,
        so listeners observe asynchronous trait changes."""
        for _ in xrange(change_count):
            time.sleep(random.uniform(0.1, 0.3))
            self.colour = self._next_colour[self.colour]
class TestAsyncTraitWait(unittest.TestCase):
    """Tests for ``traits.util.async_trait_wait.wait_for_condition``."""

    def test_wait_for_condition_success(self):
        # Two changes take Green -> Amber -> Red, so the wait should
        # return once the background thread finishes.
        lights = TrafficLights(colour='Green')
        t = threading.Thread(target=lights.make_random_changes, args=(2,))
        t.start()
        wait_for_condition(
            condition=lambda l: l.colour == 'Red',
            obj=lights,
            trait='colour',
        )
        self.assertEqual(lights.colour, 'Red')
        t.join()

    def test_wait_for_condition_failure(self):
        # Only two changes are made (Green -> Amber -> Red); RedAndAmber is
        # never reached, so the wait must time out with RuntimeError.
        lights = TrafficLights(colour='Green')
        t = threading.Thread(target=lights.make_random_changes, args=(2,))
        t.start()
        self.assertRaises(
            RuntimeError,
            wait_for_condition,
            condition=lambda l: l.colour == 'RedAndAmber',
            obj=lights,
            trait='colour',
            timeout=5.0,
        )
        t.join()

    def test_traits_handler_cleaned_up(self):
        # An older version of wait_for_condition failed to clean up
        # the trait handler, leading to possibly evaluation of the
        # condition after the 'wait_for_condition' call had returned.
        self.lights = TrafficLights(colour='Green')
        t = threading.Thread(target=self.lights.make_random_changes, args=(3,))
        t.start()
        wait_for_condition(
            condition=lambda l: self.lights.colour == 'Red',
            obj=self.lights,
            trait='colour',
        )
        del self.lights
        # If the condition gets evaluated again past this point, we'll
        # see an AttributeError from the failed self.lights lookup.
        # assertSucceeds!
        t.join()
if __name__ == '__main__':
unittest.main()
| burnpanck/traits | traits/util/tests/test_async_trait_wait.py | Python | bsd-3-clause | 2,297 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from docopt import docopt
from models import ChecklistTemplate, Checklist, User
from app import app
__doc__ = """utils.py
Usage:
utils.py delete (checklist | template) <owner> <slug>
utils.py admin <username> (grant | revoke)
utils.py createuser <username>
"""
DOCUMENTS = {
'checklist': Checklist,
'template': ChecklistTemplate,
}
def delete(document, owner, slug):
    """Delete every *document* (Checklist or ChecklistTemplate class) whose
    owner and slug match the given values."""
    document.objects(owner=owner, slug=slug).delete()
def admin(username, is_admin):
    """Set the admin flag of *username*, creating the user on the fly if it
    does not exist yet."""
    try:
        user = User.objects.get(name=username)
    except User.DoesNotExist:
        # Unknown user: create a fresh record so the flag can be stored.
        user = User(name=username)
    user.is_admin = is_admin
    user.save()
def createuser(username):
    """Create and persist a new User with the given name."""
    User.objects.create(name=username)
def main():
    """Parse the docopt command line and dispatch to the matching helper."""
    args = docopt(__doc__)
    if args['delete']:
        # Choose the document class from the sub-command flag.
        doc_key = 'checklist' if args['checklist'] else 'template'
        delete(DOCUMENTS[doc_key], args['<owner>'], args['<slug>'])
    elif args['admin']:
        # 'grant' is True to grant, False (i.e. 'revoke') to revoke.
        admin(args['<username>'], args['grant'])
    elif args['createuser']:
        createuser(args['<username>'])
if __name__ == '__main__':
main()
| ojii/checklists.ojii.ch | utils.py | Python | bsd-3-clause | 1,159 |
#!/usr/bin/env python
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import shutil
PACKAGE_NAME = 'com.actionlauncher.api'


def main():
    """Post-process every javadoc HTML file under the root dir (sys.argv[1]):
    inject pretty-print assets and tidy boilerplate, then copy index.html."""
    root = sys.argv[1]
    for path, _, files in os.walk(root):
        for f in [f for f in files if f.endswith('.html')]:
            # `with` guarantees the handles are closed even if a sub fails.
            with open(os.path.join(path, f), 'r') as fp:
                html = fp.read()
            # Relative path from this file back to the documentation root.
            toroot = '.'
            if path.startswith(root):
                subpath = path[len(root):]
                toroot = '../' * (subpath.count('/') + 1)
            html = process(toroot, html)
            if f.endswith('package-summary.html'):
                html = process_package_summary(toroot, html)
            with open(os.path.join(path, f), 'w') as fp:
                fp.write(html)
    shutil.copy('index.html', root)


def process(toroot, html):
    """Apply the generic tweaks shared by all javadoc pages.

    *toroot* is the relative prefix ('.', '../', '../../', ...) used to
    reference the shared prettify resources from this page.
    """
    re_flags = re.I | re.M | re.S
    # Collapse doubled horizontal rules.
    html = re.sub(r'<HR>\s+<HR>', '', html, 0, re_flags)
    # Hook prettyPrint() into the standard javadoc onload handler.
    html = re.sub(r'windowTitle\(\);', 'windowTitle();prettyPrint();', html, 0, re_flags)
    html = re.sub(r'\s+</PRE>', '</PRE>', html, 0, re_flags)
    # Escape the package name: its '.' characters are regex metacharacters
    # and previously matched any character at those positions.
    html = re.sub(re.escape(PACKAGE_NAME) + '</font>',
                  '<A HREF="package-summary.html" STYLE="border:0">' + PACKAGE_NAME + '</A></FONT>',
                  html, 0, re_flags)
    html = re.sub(r'<HEAD>', '''<HEAD>
<LINK REL="stylesheet" TYPE="text/css" HREF="http://fonts.googleapis.com/css?family=Roboto:400,700,300|Inconsolata">
<LINK REL="stylesheet" TYPE="text/css" HREF="%(root)sresources/prettify.css">
<SCRIPT SRC="%(root)sresources/prettify.js"></SCRIPT>
''' % dict(root=toroot), html, 0, re_flags)
    return html
def process_package_summary(toroot, html):
    """Strip javadoc boilerplate specific to package-summary pages."""
    flags = re.I | re.M | re.S
    substitutions = (
        # Drop the "See:" block.
        (r'<B>See:</B>\n<br>', '\n'),
        # Drop indented link lines (runs of spaces followed by text).
        (r' ( )+[^\n]+\n', '\n'),
        # Normalise the "<package> description" heading.
        (r'\n[^\n]+\s+description\n', '\nDescription\n'),
    )
    for pattern, replacement in substitutions:
        html = re.sub(pattern, replacement, html, 0, flags)
    return html
if __name__ == '__main__':
main()
| dhootha/ActionLauncherApi | api/javadoc-scripts/tweak_javadoc_html.py | Python | apache-2.0 | 2,517 |
"""Sigmoid
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
X = tf.placeholder(tf.float32)
with tf.name_scope('Sigmoid'):
fx = tf.sigmoid(X)
tf.summary.scalar("f(x)", tf.squeeze(fx))
init = tf.global_variables_initializer()
with tf.Session() as sess:
# Output graph
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("log/Sigmoid/", graph = sess.graph)
# Run the initializer
sess.run(init)
for step in range(-10,11):
a = tf.convert_to_tensor(step, dtype=tf.float32)
a_r = sess.run([a])
print(sess.run(a), sess.run(fx, feed_dict={X: a_r}))
sess.run(fx, feed_dict={X: a_r})
summary = sess.run(merged, feed_dict={X: sess.run([a])})
writer.add_summary(summary, step)
# with tf.Session() as sess:
# for step in range(-10,11):
# X = tf.convert_to_tensor(step, dtype=tf.float32)
# # X = tf.random_uniform([1,1], minval=1.0, maxval=3.0, seed=step) //Or use random number
# print(sess.run(X), sess.run(tf.sigmoid(X))) | KarateJB/Python.Practice | src/TensorFlow/venv/Lab/Tutorials/Activation/Sigmoid.py | Python | mit | 1,078 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.views import server_diagnostics
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.policies import server_diagnostics as sd_policies
class ServerDiagnosticsController(wsgi.Controller):
    """API controller returning hypervisor diagnostics for a server."""
    _view_builder_class = server_diagnostics.ViewBuilder

    def __init__(self, *args, **kwargs):
        super(ServerDiagnosticsController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @extensions.expected_errors((400, 404, 409, 501))
    def index(self, req, server_id):
        """Return diagnostics for *server_id*.

        From microversion 2.48 the response uses the standardized
        Diagnostics format; older requests get the raw driver dict.
        """
        context = req.environ["nova.context"]
        # Policy check happens before the instance lookup.
        context.can(sd_policies.BASE_POLICY_NAME)
        instance = common.get_instance(self.compute_api, context, server_id)

        try:
            if api_version_request.is_supported(req, min_version='2.48'):
                diagnostics = self.compute_api.get_instance_diagnostics(
                    context, instance)
                return self._view_builder.instance_diagnostics(diagnostics)

            return self.compute_api.get_diagnostics(context, instance)
        except exception.InstanceInvalidState as state_error:
            # Maps to HTTP 409 with a state-specific message.
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'get_diagnostics', server_id)
        except exception.InstanceNotReady as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceDiagnosticsNotSupported:
            # NOTE(snikitin): During upgrade we may face situation when env
            # has new API and old compute. New compute returns a
            # Diagnostics object. Old compute returns a dictionary. So we
            # can't perform a request correctly if compute is too old.
            msg = _('Compute node is too old. You must complete the '
                    'upgrade process to be able to get standardized '
                    'diagnostics data which is available since v2.48. However '
                    'you are still able to get diagnostics data in '
                    'non-standardized format which is available until v2.47.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except NotImplementedError:
            # Driver does not implement diagnostics at all -> HTTP 501.
            common.raise_feature_not_supported()
| Juniper/nova | nova/api/openstack/compute/server_diagnostics.py | Python | apache-2.0 | 3,069 |
"""
@author: Fabrice Douchant
@contact: vamp.higher@gmail.com
@license: GNU GPLv3 (LICENSE.txt)
mylogging is a module that provides standard logging configuration.
It will log into console (stdout, stderr), file (rotating), smtp for exception.
You should use it once in your program like this:
from myPyApps import mylogging
mylogging.configure_logging()
"""
import logging.handlers, logging.config, io, sys
from myPyApps import myconfig
# add new classes to logging
class MaxLevelFilter(logging.Filter):
    """
    Logging filter enforcing an upper level bound, so a handler only emits
    records in the range [handler level, max_level]. Without a max_level it
    is a transparent pass-through.
    """
    def __init__(self, name='', max_level=None):
        logging.Filter.__init__(self, name)
        self.max_level = max_level
    def filter(self, rec):
        # No ceiling configured (None, or a falsy 0): let everything through.
        if not self.max_level:
            return True
        return rec.levelno <= self.max_level
class StreamMaxLevelHandler(logging.StreamHandler):
    """
    StreamHandler restricted to records in the range [level, max_level];
    the upper bound is enforced by an attached MaxLevelFilter.
    """
    def __init__(self, stream=None, max_level=None):
        super(StreamMaxLevelHandler, self).__init__(stream)
        self.addFilter(MaxLevelFilter('maxlevelfilter', max_level))
class MySMTPHandler(logging.handlers.SMTPHandler):
    """
    Like a SMTPHandler except if connect to SMTP fails, it will only alert
    once and will log into a lower level (not just displaying the error like
    in SMTPHandler). Also provides emit_email() to send an arbitrary message
    with a custom subject through the same handler.
    """
    def __init__(self, *args, **kwargs):
        logging.handlers.SMTPHandler.__init__(self, *args, **kwargs)
        # True until the first delivery failure has been reported.
        self.first_error = True
    def handleError(self, *args, **kwargs):
        # don't spam in logs: only the very first failure is reported
        if self.first_error:
            # just try to use lowest level
            logging.handlers.SMTPHandler.handleError(self, *args, **kwargs)
            try:
                import traceback
                # sys.stderr.write works on Python 2 and 3; the previous
                # "print >> sys.stderr" form is Python-2-only syntax.
                sys.stderr.write(
                    "Error with SMTP handler. Please check your configuration file. "
                    "Or disable email 'mylogging.configure_logging(mail=False)'.\n"
                    + traceback.format_exc())
            except Exception:
                # Reporting the error must never raise from a logging handler.
                pass
        self.first_error = False
    def emit_email(self, record, subject=None):
        """
        Send an email with record text and subject
        """
        # thread safe: hold the handler lock while subject/formatter are
        # temporarily swapped out
        self.acquire()
        try:
            # backup handler's instance variables
            bkp_subject = self.subject
            bkp_formatter = self.formatter
            # change instance variables
            self.subject = subject
            self.formatter = logging.Formatter("%(message)s")
            try:
                self.emit(record)
            finally:
                self.subject = bkp_subject
                self.formatter = bkp_formatter
        finally:
            # Release unconditionally: the previous code skipped release()
            # when emit()/backup raised, leaking the handler lock.
            self.release()
class MyLogger(logging.Logger):
    """Logger subclass adding direct e-mail delivery via SMTP handlers."""
    def send_email(self, msg, subject=None):
        """
        Forward msg (with an optional subject) to every root handler that
        exposes an emit_email method, i.e. to all SMTP handlers.
        """
        record = logging.LogRecord(self.name, logging.INFO, None, None, msg, None, None)
        for handler in logging.root.handlers:
            emitter = getattr(handler, 'emit_email', None)
            if emitter is not None:
                emitter(record, subject)
# override some logging variables: install MyLogger as the class used for
# every logger created from now on ...
logging.setLoggerClass(MyLogger)
# ... and rebuild the manager so loggers created before this import are
# re-created as MyLogger instances on their next getLogger() lookup.
logging.Logger.manager = logging.Manager(logging.root)
def getLogger(name=None):
    """
    Return the logger registered under name, the root logger when name is
    None. Because this module installs MyLogger as the logger class, the
    loggers obtained here are MyLogger instances.
    @param name: the name of the logger. If None, the root logger is returned
    """
    named_logger = logging.getLogger(name)
    return named_logger
def configure_logging(mail=True, verbose=False, config_path=None):
    """
    Method to use to init logging, then you may use logging usually.
    @param mail: set to False to disable email logging
    @param verbose: set to True to force stdout to log DEBUG messages
    @param config_path: to give another way to find logging configuration.
        Default is to take logging.default and user defined logging.cfg in
        HOME, script, module dir
    """
    # override default config for further use
    MyLogger.default_config = myconfig.MyConfigParser('logging', config_path=config_path or myconfig.DEFAULT_PATH)
    result = io.StringIO()
    MyLogger.default_config.write(result)
    # rewind io so fileConfig reads the rendered configuration from the start
    result.seek(0)
    try:
        logging.config.fileConfig(result, disable_existing_loggers=False)
    except IOError as e:
        logging.exception("Error configuring mylogging: %s. Please check that log folder exists." % e)
        # bare raise keeps the original traceback (``raise e`` resets it)
        raise
    except Exception as e:
        # already default format
        logging.exception("Error configuring mylogging: " + str(e))
        raise
    # start a fresh log file on every run
    for h in [h for h in logging.root.handlers
              if isinstance(h, logging.handlers.RotatingFileHandler)]:
        try:
            h.doRollover()
        except OSError:
            # Previously caught WindowsError, which only exists on Windows
            # and raised NameError elsewhere; OSError is its cross-platform
            # base class.
            logging.error("Could not rollover " + str(h))
    if verbose:
        logging.info("Force stdout to display debug messages")
        for h in logging.root.handlers:
            if isinstance(h, StreamMaxLevelHandler) and h.stream == sys.stdout:
                h.setLevel(logging.DEBUG)
    # disable mail
    if not mail:
        logging.info("Disable SMTP")
        # Materialise a real list: on Python 3 ``filter`` returns a lazy
        # iterator, which would leave logging.root.handlers unusable.
        logging.root.handlers = [h for h in logging.root.handlers
                                 if not isinstance(h, logging.handlers.SMTPHandler)]
| fdouchant/myPyApps | myPyApps/mylogging.py | Python | gpl-3.0 | 5,351 |
"""
Django settings for becours project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'v8rea$)b8+a)1vbdbdn727zw7#hj$4czarlp)*j&ei@eh%=!9^'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is only valid while DEBUG is True; add real hostnames for prod.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'cuser',
    'booking',
    'accounting',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'booking.middleware.CuserMiddleware',
]

ROOT_URLCONF = 'becours.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'becours/templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'becours.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# NOTE(review): empty password assumes local trust authentication — confirm
# before deploying outside a development machine.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': 'localhost',
        'USER': 'postgres',
        'PASSWORD': '',
        'NAME': 'becours',
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'fr-fr'

TIME_ZONE = 'Europe/Paris'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'becours', 'static'),
)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Named URL pattern used for redirects to the login page.
LOGIN_URL = 'auth:login'

USE_THOUSAND_SEPARATOR = True
| eedf/becours | becours/settings.py | Python | mit | 3,562 |
import os
# Directory holding the Zeeman-effect measurement files, relative to the
# script's working directory.
zeeman_data_root = '../../zeeman_data'

csv_files = [os.path.join(zeeman_data_root, i)
             for i in os.listdir(zeeman_data_root) if i.endswith('.csv')]

# Maps csv file path -> tuple of columns: each file is parsed as rows of
# comma-separated floats and transposed with zip(*rows).
csv_data = {}
for name in csv_files:
    with open(name) as f:
        data = list(zip(*[[float(i) for i in line.strip().split(',')]
                          for line in f if line.strip()]))
        csv_data[name] = data

# Calibration table: coil current -> magnetic field strength
# (units presumably amps -> tesla — TODO confirm against the lab notes).
field_map = {1.0: 0.239, 2.0: 0.417, 3.0: 0.566, 4.0: 0.755,
             5.0: 0.902, 6.0: 1.015, 7.0: 1.211, 8.0: 1.256,
             9.0: 1.275, 10.0: 1.295}

points_files = [os.path.join(zeeman_data_root, i)
                for i in os.listdir(zeeman_data_root) if i.endswith('_points.txt')]

# Maps points file path -> list of (field, delta_lambda / lambda^2) pairs.
points_data = {}
for i in points_files:
    data = []
    with open(i) as f:
        it = iter(f)
        # Each *_points.txt holds "current,diff" rows, then a sentinel line
        # starting with '-', then the wavelength value on the following line.
        # NOTE(review): a file missing the '-' sentinel would raise
        # StopIteration here — input files are assumed well-formed.
        while True:
            x = next(it)
            if x[0] == '-':
                x = float(next(it))
                # adjust to (delta lambda) / (lambda^2). divide by 2 because
                # the `diff` is double the split (peak to peak after split)
                data = [(field_map[j[0]], 0.5 * j[1] / (x*x)) for j in data]
                break
            data.append([float(j) for j in x.split(',')])
    points_data[i] = data
| JonNRb/physics506 | src/zeeman/data.py | Python | mit | 1,228 |
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for :class:`ironic.conductor.task_manager`."""
import futurist
import mock
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import fsm
from ironic.common import states
from ironic.conductor import notification_utils
from ironic.conductor import task_manager
from ironic import objects
from ironic.objects import fields
from ironic.tests import base as tests_base
from ironic.tests.unit.db import base as tests_db_base
from ironic.tests.unit.objects import utils as obj_utils
@mock.patch.object(objects.Node, 'get')
@mock.patch.object(objects.Node, 'release')
@mock.patch.object(objects.Node, 'reserve')
@mock.patch.object(driver_factory, 'build_driver_for_task')
@mock.patch.object(objects.Port, 'list_by_node_id')
@mock.patch.object(objects.Portgroup, 'list_by_node_id')
@mock.patch.object(objects.VolumeConnector, 'list_by_node_id')
@mock.patch.object(objects.VolumeTarget, 'list_by_node_id')
class TaskManagerTestCase(tests_db_base.DbTestCase):
def setUp(self):
super(TaskManagerTestCase, self).setUp()
self.host = 'test-host'
self.config(host=self.host)
self.config(node_locked_retry_attempts=1, group='conductor')
self.config(node_locked_retry_interval=0, group='conductor')
self.node = obj_utils.create_test_node(self.context)
self.future_mock = mock.Mock(spec=['cancel', 'add_done_callback'])
def test_excl_lock(self, get_voltgt_mock, get_volconn_mock,
get_portgroups_mock, get_ports_mock,
build_driver_mock, reserve_mock, release_mock,
node_get_mock):
reserve_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'fake-node-id') as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_portgroups_mock.return_value, task.portgroups)
self.assertEqual(get_volconn_mock.return_value,
task.volume_connectors)
self.assertEqual(get_voltgt_mock.return_value, task.volume_targets)
self.assertEqual(build_driver_mock.return_value, task.driver)
self.assertFalse(task.shared)
build_driver_mock.assert_called_once_with(task, driver_name=None)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
def test_excl_lock_with_driver(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
reserve_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'fake-node-id',
driver_name='fake-driver') as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_portgroups_mock.return_value, task.portgroups)
self.assertEqual(get_volconn_mock.return_value,
task.volume_connectors)
self.assertEqual(get_voltgt_mock.return_value, task.volume_targets)
self.assertEqual(build_driver_mock.return_value, task.driver)
self.assertFalse(task.shared)
build_driver_mock.assert_called_once_with(
task, driver_name='fake-driver')
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
def test_excl_nested_acquire(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node2 = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake')
reserve_mock.return_value = self.node
get_ports_mock.return_value = mock.sentinel.ports1
get_portgroups_mock.return_value = mock.sentinel.portgroups1
get_volconn_mock.return_value = mock.sentinel.volconn1
get_voltgt_mock.return_value = mock.sentinel.voltgt1
build_driver_mock.return_value = mock.sentinel.driver1
with task_manager.TaskManager(self.context, 'node-id1') as task:
reserve_mock.return_value = node2
get_ports_mock.return_value = mock.sentinel.ports2
get_portgroups_mock.return_value = mock.sentinel.portgroups2
get_volconn_mock.return_value = mock.sentinel.volconn2
get_voltgt_mock.return_value = mock.sentinel.voltgt2
build_driver_mock.return_value = mock.sentinel.driver2
with task_manager.TaskManager(self.context, 'node-id2') as task2:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(mock.sentinel.ports1, task.ports)
self.assertEqual(mock.sentinel.portgroups1, task.portgroups)
self.assertEqual(mock.sentinel.volconn1,
task.volume_connectors)
self.assertEqual(mock.sentinel.voltgt1, task.volume_targets)
self.assertEqual(mock.sentinel.driver1, task.driver)
self.assertFalse(task.shared)
self.assertEqual(self.context, task2.context)
self.assertEqual(node2, task2.node)
self.assertEqual(mock.sentinel.ports2, task2.ports)
self.assertEqual(mock.sentinel.portgroups2, task2.portgroups)
self.assertEqual(mock.sentinel.volconn2,
task2.volume_connectors)
self.assertEqual(mock.sentinel.voltgt2, task2.volume_targets)
self.assertEqual(mock.sentinel.driver2, task2.driver)
self.assertFalse(task2.shared)
self.assertEqual([mock.call(task, driver_name=None),
mock.call(task2, driver_name=None)],
build_driver_mock.call_args_list)
self.assertEqual([mock.call(self.context, 'node-id1'),
mock.call(self.context, 'node-id2')],
node_get_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.host, 'node-id1'),
mock.call(self.context, self.host, 'node-id2')],
reserve_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.id),
mock.call(self.context, node2.id)],
get_ports_mock.call_args_list)
# release should be in reverse order
self.assertEqual([mock.call(self.context, self.host, node2.id),
mock.call(self.context, self.host, self.node.id)],
release_mock.call_args_list)
def test_excl_lock_exception_then_lock(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
retry_attempts = 3
self.config(node_locked_retry_attempts=retry_attempts,
group='conductor')
# Fail on the first lock attempt, succeed on the second.
reserve_mock.side_effect = [exception.NodeLocked(node='foo',
host='foo'),
self.node]
with task_manager.TaskManager(self.context, 'fake-node-id') as task:
self.assertFalse(task.shared)
expected_calls = [mock.call(self.context, self.host,
'fake-node-id')] * 2
reserve_mock.assert_has_calls(expected_calls)
self.assertEqual(2, reserve_mock.call_count)
def test_excl_lock_reserve_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
retry_attempts = 3
self.config(node_locked_retry_attempts=retry_attempts,
group='conductor')
reserve_mock.side_effect = exception.NodeLocked(node='foo',
host='foo')
self.assertRaises(exception.NodeLocked,
task_manager.TaskManager,
self.context,
'fake-node-id')
node_get_mock.assert_called_with(self.context, 'fake-node-id')
reserve_mock.assert_called_with(self.context, self.host,
'fake-node-id')
self.assertEqual(retry_attempts, reserve_mock.call_count)
self.assertFalse(get_ports_mock.called)
self.assertFalse(get_portgroups_mock.called)
self.assertFalse(get_volconn_mock.called)
self.assertFalse(get_voltgt_mock.called)
self.assertFalse(build_driver_mock.called)
self.assertFalse(release_mock.called)
def test_excl_lock_get_ports_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
reserve_mock.return_value = self.node
get_ports_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id')
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(build_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
def test_excl_lock_get_portgroups_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
reserve_mock.return_value = self.node
get_portgroups_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id')
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(build_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
def test_excl_lock_get_volconn_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
reserve_mock.return_value = self.node
get_volconn_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(get_voltgt_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
def test_excl_lock_get_voltgt_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
reserve_mock.return_value = self.node
get_voltgt_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(build_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
def test_excl_lock_build_driver_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
reserve_mock.return_value = self.node
build_driver_mock.side_effect = (
exception.DriverNotFound(driver_name='foo'))
self.assertRaises(exception.DriverNotFound,
task_manager.TaskManager,
self.context,
'fake-node-id')
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
build_driver_mock.assert_called_once_with(mock.ANY, driver_name=None)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
def test_shared_lock(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'fake-node-id',
shared=True) as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_portgroups_mock.return_value, task.portgroups)
self.assertEqual(get_volconn_mock.return_value,
task.volume_connectors)
self.assertEqual(get_voltgt_mock.return_value, task.volume_targets)
self.assertEqual(build_driver_mock.return_value, task.driver)
self.assertTrue(task.shared)
build_driver_mock.assert_called_once_with(task, driver_name=None)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
def test_shared_lock_with_driver(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
with task_manager.TaskManager(self.context,
'fake-node-id',
shared=True,
driver_name='fake-driver') as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_portgroups_mock.return_value, task.portgroups)
self.assertEqual(get_volconn_mock.return_value,
task.volume_connectors)
self.assertEqual(get_voltgt_mock.return_value, task.volume_targets)
self.assertEqual(build_driver_mock.return_value, task.driver)
self.assertTrue(task.shared)
build_driver_mock.assert_called_once_with(
task, driver_name='fake-driver')
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
def test_shared_lock_node_get_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.side_effect = exception.NodeNotFound(node='foo')
self.assertRaises(exception.NodeNotFound,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
self.assertFalse(get_ports_mock.called)
self.assertFalse(get_portgroups_mock.called)
self.assertFalse(get_volconn_mock.called)
self.assertFalse(get_voltgt_mock.called)
self.assertFalse(build_driver_mock.called)
def test_shared_lock_get_ports_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
get_ports_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(build_driver_mock.called)
def test_shared_lock_get_portgroups_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
get_portgroups_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(build_driver_mock.called)
def test_shared_lock_get_volconn_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
get_volconn_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(get_voltgt_mock.called)
def test_shared_lock_get_voltgt_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
get_voltgt_mock.side_effect = exception.IronicException('foo')
self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
self.assertFalse(build_driver_mock.called)
def test_shared_lock_build_driver_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
build_driver_mock.side_effect = (
exception.DriverNotFound(driver_name='foo'))
self.assertRaises(exception.DriverNotFound,
task_manager.TaskManager,
self.context,
'fake-node-id',
shared=True)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
build_driver_mock.assert_called_once_with(mock.ANY, driver_name=None)
def test_upgrade_lock(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
node_get_mock.return_value = self.node
reserve_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'fake-node-id',
shared=True, purpose='ham') as task:
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(get_ports_mock.return_value, task.ports)
self.assertEqual(get_portgroups_mock.return_value, task.portgroups)
self.assertEqual(get_volconn_mock.return_value,
task.volume_connectors)
self.assertEqual(get_voltgt_mock.return_value, task.volume_targets)
self.assertEqual(build_driver_mock.return_value, task.driver)
self.assertTrue(task.shared)
self.assertFalse(reserve_mock.called)
task.upgrade_lock()
self.assertFalse(task.shared)
self.assertEqual('ham', task._purpose)
# second upgrade does nothing except changes the purpose
task.upgrade_lock(purpose='spam')
self.assertFalse(task.shared)
self.assertEqual('spam', task._purpose)
build_driver_mock.assert_called_once_with(mock.ANY,
driver_name=None)
# make sure reserve() was called only once
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
def test_upgrade_lock_refreshes_fsm(self, get_voltgt_mock,
get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock,
node_get_mock):
reserve_mock.return_value = self.node
node_get_mock.return_value = self.node
with task_manager.acquire(self.context, 'fake-node-id',
shared=True) as task1:
self.assertEqual(states.AVAILABLE, task1.node.provision_state)
with task_manager.acquire(self.context, 'fake-node-id',
shared=False) as task2:
# move the node to manageable
task2.process_event('manage')
self.assertEqual(states.MANAGEABLE, task1.node.provision_state)
# now upgrade our shared task and try to go to cleaning
# this will explode if task1's FSM doesn't get refreshed
task1.upgrade_lock()
task1.process_event('provide')
self.assertEqual(states.CLEANING, task1.node.provision_state)
@mock.patch.object(task_manager.TaskManager,
'_notify_provision_state_change', autospec=True)
def test_spawn_after(
self, notify_mock, get_voltgt_mock, get_volconn_mock,
get_portgroups_mock, get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
spawn_mock = mock.Mock(return_value=self.future_mock)
task_release_mock = mock.Mock()
reserve_mock.return_value = self.node
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task.release_resources = task_release_mock
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
self.future_mock.add_done_callback.assert_called_once_with(
task._thread_release_resources)
self.assertFalse(self.future_mock.cancel.called)
# Since we mocked link(), we're testing that __exit__ didn't
# release resources pending the finishing of the background
# thread
self.assertFalse(task_release_mock.called)
notify_mock.assert_called_once_with(task)
def test_spawn_after_exception_while_yielded(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
spawn_mock = mock.Mock()
task_release_mock = mock.Mock()
reserve_mock.return_value = self.node
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task.release_resources = task_release_mock
raise exception.IronicException('foo')
self.assertRaises(exception.IronicException, _test_it)
self.assertFalse(spawn_mock.called)
task_release_mock.assert_called_once_with()
    @mock.patch.object(task_manager.TaskManager,
                       '_notify_provision_state_change', autospec=True)
    def test_spawn_after_spawn_fails(
            self, notify_mock, get_voltgt_mock, get_volconn_mock,
            get_portgroups_mock, get_ports_mock, build_driver_mock,
            reserve_mock, release_mock, node_get_mock):
        # If the spawn call itself raises, the exception propagates from
        # __exit__, resources are released, and no notification is sent.
        spawn_mock = mock.Mock(side_effect=exception.IronicException('foo'))
        task_release_mock = mock.Mock()
        reserve_mock.return_value = self.node
        def _test_it():
            with task_manager.TaskManager(self.context, 'node-id') as task:
                task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
                task.release_resources = task_release_mock
        self.assertRaises(exception.IronicException, _test_it)
        spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
        task_release_mock.assert_called_once_with()
        self.assertFalse(notify_mock.called)
    def test_spawn_after_link_fails(
            self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
            get_ports_mock, build_driver_mock,
            reserve_mock, release_mock, node_get_mock):
        # If attaching the done-callback to the future fails, the already
        # started future must be cancelled and resources released.
        self.future_mock.add_done_callback.side_effect = (
            exception.IronicException('foo'))
        spawn_mock = mock.Mock(return_value=self.future_mock)
        task_release_mock = mock.Mock()
        thr_release_mock = mock.Mock(spec_set=[])
        reserve_mock.return_value = self.node
        def _test_it():
            with task_manager.TaskManager(self.context, 'node-id') as task:
                task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
                task._thread_release_resources = thr_release_mock
                task.release_resources = task_release_mock
        self.assertRaises(exception.IronicException, _test_it)
        spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
        self.future_mock.add_done_callback.assert_called_once_with(
            thr_release_mock)
        self.future_mock.cancel.assert_called_once_with()
        task_release_mock.assert_called_once_with()
    def test_spawn_after_on_error_hook(
            self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
            get_ports_mock, build_driver_mock,
            reserve_mock, release_mock, node_get_mock):
        # When the spawn fails, the registered error hook is called with
        # the exception instance plus the arguments given at registration.
        expected_exception = exception.IronicException('foo')
        spawn_mock = mock.Mock(side_effect=expected_exception)
        task_release_mock = mock.Mock()
        on_error_handler = mock.Mock()
        reserve_mock.return_value = self.node
        def _test_it():
            with task_manager.TaskManager(self.context, 'node-id') as task:
                task.set_spawn_error_hook(on_error_handler, 'fake-argument')
                task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
                task.release_resources = task_release_mock
        self.assertRaises(exception.IronicException, _test_it)
        spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
        task_release_mock.assert_called_once_with()
        on_error_handler.assert_called_once_with(expected_exception,
                                                 'fake-argument')
    def test_spawn_after_on_error_hook_exception(
            self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
            get_ports_mock, build_driver_mock,
            reserve_mock, release_mock, node_get_mock):
        # A failing error hook must not mask the original spawn exception:
        # the IronicException (not the hook's Exception) propagates.
        expected_exception = exception.IronicException('foo')
        spawn_mock = mock.Mock(side_effect=expected_exception)
        task_release_mock = mock.Mock()
        # Raise an exception within the on_error handler
        on_error_handler = mock.Mock(side_effect=Exception('unexpected'))
        on_error_handler.__name__ = 'foo_method'
        reserve_mock.return_value = self.node
        def _test_it():
            with task_manager.TaskManager(self.context, 'node-id') as task:
                task.set_spawn_error_hook(on_error_handler, 'fake-argument')
                task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
                task.release_resources = task_release_mock
        # Make sure the original exception is the one raised
        self.assertRaises(exception.IronicException, _test_it)
        spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
        task_release_mock.assert_called_once_with()
        on_error_handler.assert_called_once_with(expected_exception,
                                                 'fake-argument')
    @mock.patch.object(states.machine, 'copy')
    def test_init_prepares_fsm(
            self, copy_mock, get_volconn_mock, get_voltgt_mock,
            get_portgroups_mock, get_ports_mock,
            build_driver_mock, reserve_mock, release_mock, node_get_mock):
        # TaskManager.__init__ must copy the shared state machine and
        # initialize the copy from the node's current provision states.
        m = mock.Mock(spec=fsm.FSM)
        reserve_mock.return_value = self.node
        copy_mock.return_value = m
        t = task_manager.TaskManager('fake', 'fake')
        copy_mock.assert_called_once_with()
        self.assertIs(m, t.fsm)
        m.initialize.assert_called_once_with(
            start_state=self.node.provision_state,
            target_state=self.node.target_provision_state)
class TaskManagerStateModelTestCases(tests_base.TestCase):
    """Tests for TaskManager's provision-state-model helpers
    (release_resources and process_event), run against mocked FSM and
    node objects so only the unbound methods under test execute.
    """
    def setUp(self):
        super(TaskManagerStateModelTestCases, self).setUp()
        self.fsm = mock.Mock(spec=fsm.FSM)
        self.node = mock.Mock(spec=objects.Node)
        self.task = mock.Mock(spec=task_manager.TaskManager)
        self.task.fsm = self.fsm
        self.task.node = self.node
    def test_release_clears_resources(self):
        # release_resources() must drop every cached reference so nothing
        # keeps the node/driver/ports alive after the lock is released.
        t = self.task
        t.release_resources = task_manager.TaskManager.release_resources
        t.driver = mock.Mock()
        t.ports = mock.Mock()
        t.portgroups = mock.Mock()
        t.volume_connectors = mock.Mock()
        t.volume_targets = mock.Mock()
        t.shared = True
        t._purpose = 'purpose'
        t._debug_timer = mock.Mock()
        t.release_resources(t)
        self.assertIsNone(t.node)
        self.assertIsNone(t.driver)
        self.assertIsNone(t.ports)
        self.assertIsNone(t.portgroups)
        self.assertIsNone(t.volume_connectors)
        self.assertIsNone(t.volume_targets)
        self.assertIsNone(t.fsm)
    def test_process_event_fsm_raises(self):
        # An invalid FSM transition propagates and neither spawns a
        # worker nor saves the node.
        self.task.process_event = task_manager.TaskManager.process_event
        self.fsm.process_event.side_effect = exception.InvalidState('test')
        self.assertRaises(
            exception.InvalidState,
            self.task.process_event,
            self.task, 'fake')
        self.assertEqual(0, self.task.spawn_after.call_count)
        self.assertFalse(self.task.node.save.called)
    def test_process_event_sets_callback(self):
        # A callback with args/kwargs is scheduled via spawn_after and
        # the node is saved with last_error left as None.
        cb = mock.Mock()
        arg = mock.Mock()
        kwarg = mock.Mock()
        self.task.process_event = task_manager.TaskManager.process_event
        self.task.process_event(
            self.task, 'fake', callback=cb, call_args=[arg],
            call_kwargs={'mock': kwarg})
        self.fsm.process_event.assert_called_once_with('fake',
                                                       target_state=None)
        self.task.spawn_after.assert_called_with(cb, arg, mock=kwarg)
        self.assertEqual(1, self.task.node.save.call_count)
        self.assertIsNone(self.node.last_error)
    def test_process_event_sets_callback_and_error_handler(self):
        # The err_handler must be registered as the spawn error hook with
        # the node's pre-transition states; the node's provision states
        # must then change as a result of processing the event.
        arg = mock.Mock()
        cb = mock.Mock()
        er = mock.Mock()
        kwarg = mock.Mock()
        provision_state = 'provision_state'
        target_provision_state = 'target'
        self.node.provision_state = provision_state
        self.node.target_provision_state = target_provision_state
        self.task.process_event = task_manager.TaskManager.process_event
        self.task.process_event(
            self.task, 'fake', callback=cb, call_args=[arg],
            call_kwargs={'mock': kwarg}, err_handler=er)
        self.task.set_spawn_error_hook.assert_called_once_with(
            er, self.node, provision_state, target_provision_state)
        self.fsm.process_event.assert_called_once_with('fake',
                                                       target_state=None)
        self.task.spawn_after.assert_called_with(cb, arg, mock=kwarg)
        self.assertEqual(1, self.task.node.save.call_count)
        self.assertIsNone(self.node.last_error)
        self.assertNotEqual(provision_state, self.node.provision_state)
        self.assertNotEqual(target_provision_state,
                            self.node.target_provision_state)
    def test_process_event_sets_target_state(self):
        # An explicit target_state is forwarded to the FSM and the node's
        # provision states change.
        event = 'fake'
        tgt_state = 'target'
        provision_state = 'provision_state'
        target_provision_state = 'target_provision_state'
        self.node.provision_state = provision_state
        self.node.target_provision_state = target_provision_state
        self.task.process_event = task_manager.TaskManager.process_event
        self.task.process_event(self.task, event, target_state=tgt_state)
        self.fsm.process_event.assert_called_once_with(event,
                                                       target_state=tgt_state)
        self.assertEqual(1, self.task.node.save.call_count)
        self.assertNotEqual(provision_state, self.node.provision_state)
        self.assertNotEqual(target_provision_state,
                            self.node.target_provision_state)
    def test_process_event_callback_stable_state(self):
        # With a callback pending, the target state is kept even when the
        # node sits in a stable state (async work still to come).
        callback = mock.Mock()
        for state in states.STABLE_STATES:
            self.node.provision_state = state
            self.node.target_provision_state = 'target'
            self.task.process_event = task_manager.TaskManager.process_event
            self.task.process_event(self.task, 'fake', callback=callback)
            # assert the target state is set when callback is passed
            self.assertNotEqual(states.NOSTATE,
                                self.task.node.target_provision_state)
    def test_process_event_no_callback_stable_state(self):
        # Without a callback, entering a stable state clears the target.
        for state in states.STABLE_STATES:
            self.node.provision_state = state
            self.node.target_provision_state = 'target'
            self.task.process_event = task_manager.TaskManager.process_event
            self.task.process_event(self.task, 'fake')
            # assert the target state was cleared when moving to a
            # stable state
            self.assertEqual(states.NOSTATE,
                             self.task.node.target_provision_state)
    def test_process_event_no_callback_notify(self):
        # With no callback, the state-change notification is emitted
        # synchronously by process_event itself.
        self.task.process_event = task_manager.TaskManager.process_event
        self.task.process_event(self.task, 'fake')
        self.task._notify_provision_state_change.assert_called_once_with()
@task_manager.require_exclusive_lock
def _req_excl_lock_method(*args, **kwargs):
    # Probe function: echoes its arguments so the decorator tests below
    # can verify require_exclusive_lock passes them through untouched.
    return (args, kwargs)
class ExclusiveLockDecoratorTestCase(tests_base.TestCase):
    """Tests for the require_exclusive_lock decorator, covering the task
    object appearing as either the first or a later positional argument.
    """
    def setUp(self):
        super(ExclusiveLockDecoratorTestCase, self).setUp()
        self.task = mock.Mock(spec=task_manager.TaskManager)
        self.task.context = self.context
        self.args_task_first = (self.task, 1, 2)
        self.args_task_second = (1, self.task, 2)
        self.kwargs = dict(cat='meow', dog='wuff')
    def test_with_excl_lock_task_first_arg(self):
        # Exclusive lock held: the decorated call passes args through.
        self.task.shared = False
        (args, kwargs) = _req_excl_lock_method(*self.args_task_first,
                                               **self.kwargs)
        self.assertEqual(self.args_task_first, args)
        self.assertEqual(self.kwargs, kwargs)
    def test_with_excl_lock_task_second_arg(self):
        # The decorator finds the task even when it is not the first arg.
        self.task.shared = False
        (args, kwargs) = _req_excl_lock_method(*self.args_task_second,
                                               **self.kwargs)
        self.assertEqual(self.args_task_second, args)
        self.assertEqual(self.kwargs, kwargs)
    def test_with_shared_lock_task_first_arg(self):
        # A shared lock must be rejected with ExclusiveLockRequired.
        self.task.shared = True
        self.assertRaises(exception.ExclusiveLockRequired,
                          _req_excl_lock_method,
                          *self.args_task_first,
                          **self.kwargs)
    def test_with_shared_lock_task_second_arg(self):
        # Same rejection when the task is a later positional argument.
        self.task.shared = True
        self.assertRaises(exception.ExclusiveLockRequired,
                          _req_excl_lock_method,
                          *self.args_task_second,
                          **self.kwargs)
class ThreadExceptionTestCase(tests_base.TestCase):
    """Tests for TaskManager._write_exception, which records a failed
    worker thread's exception in the node's last_error field.
    """
    def setUp(self):
        super(ThreadExceptionTestCase, self).setUp()
        self.node = mock.Mock(spec=objects.Node)
        self.node.last_error = None
        self.task = mock.Mock(spec=task_manager.TaskManager)
        self.task.node = self.node
        self.task._write_exception = task_manager.TaskManager._write_exception
        self.future_mock = mock.Mock(spec_set=['exception'])
        def async_method_foo():
            pass
        self.task._spawn_args = (async_method_foo,)
    def test_set_node_last_error(self):
        # The exception message and the spawned method's name are both
        # recorded in last_error, and the node is saved.
        self.future_mock.exception.return_value = Exception('fiasco')
        self.task._write_exception(self.task, self.future_mock)
        self.node.save.assert_called_once_with()
        self.assertIn('fiasco', self.node.last_error)
        self.assertIn('async_method_foo', self.node.last_error)
    def test_set_node_last_error_exists(self):
        # A pre-existing last_error is preserved, not overwritten.
        self.future_mock.exception.return_value = Exception('fiasco')
        self.node.last_error = 'oops'
        self.task._write_exception(self.task, self.future_mock)
        self.assertFalse(self.node.save.called)
        self.assertFalse(self.future_mock.exception.called)
        self.assertEqual('oops', self.node.last_error)
    def test_set_node_last_error_no_error(self):
        # A future that finished cleanly leaves last_error untouched.
        self.future_mock.exception.return_value = None
        self.task._write_exception(self.task, self.future_mock)
        self.assertFalse(self.node.save.called)
        self.future_mock.exception.assert_called_once_with()
        self.assertIsNone(self.node.last_error)
    @mock.patch.object(task_manager.LOG, 'exception', spec_set=True,
                       autospec=True)
    def test_set_node_last_error_cancelled(self, log_mock):
        # A cancelled future is only logged; last_error stays None.
        self.future_mock.exception.side_effect = futurist.CancelledError()
        self.task._write_exception(self.task, self.future_mock)
        self.assertFalse(self.node.save.called)
        self.future_mock.exception.assert_called_once_with()
        self.assertIsNone(self.node.last_error)
        self.assertTrue(log_mock.called)
@mock.patch.object(notification_utils, 'emit_provision_set_notification',
                   autospec=True)
class ProvisionNotifyTestCase(tests_base.TestCase):
    """Tests for TaskManager._notify_provision_state_change, checking the
    notification level/status emitted for each kind of state transition.
    """
    def setUp(self):
        super(ProvisionNotifyTestCase, self).setUp()
        self.node = mock.Mock(spec=objects.Node)
        self.task = mock.Mock(spec=task_manager.TaskManager)
        self.task.node = self.node
        notifier = task_manager.TaskManager._notify_provision_state_change
        self.task.notifier = notifier
        self.task._prev_target_provision_state = 'oldtarget'
        self.task._event = 'event'
    def test_notify_no_state_change(self, emit_mock):
        # No pending event means there is nothing to notify about.
        self.task._event = None
        self.task.notifier(self.task)
        self.assertFalse(emit_mock.called)
    def test_notify_error_state(self, emit_mock):
        # A 'fail' event emits an ERROR notification and clears the event.
        self.task._event = 'fail'
        self.task._prev_provision_state = 'fake'
        self.task.notifier(self.task)
        emit_mock.assert_called_once_with(self.task,
                                          fields.NotificationLevel.ERROR,
                                          fields.NotificationStatus.ERROR,
                                          'fake', 'oldtarget', 'fail')
        self.assertIsNone(self.task._event)
    def test_notify_unstable_to_unstable(self, emit_mock):
        # unstable -> unstable transition is reported as INFO/SUCCESS.
        self.node.provision_state = states.DEPLOYING
        self.task._prev_provision_state = states.DEPLOYWAIT
        self.task.notifier(self.task)
        emit_mock.assert_called_once_with(self.task,
                                          fields.NotificationLevel.INFO,
                                          fields.NotificationStatus.SUCCESS,
                                          states.DEPLOYWAIT,
                                          'oldtarget', 'event')
    def test_notify_stable_to_unstable(self, emit_mock):
        # stable -> unstable marks the START of an operation.
        self.node.provision_state = states.DEPLOYING
        self.task._prev_provision_state = states.AVAILABLE
        self.task.notifier(self.task)
        emit_mock.assert_called_once_with(self.task,
                                          fields.NotificationLevel.INFO,
                                          fields.NotificationStatus.START,
                                          states.AVAILABLE,
                                          'oldtarget', 'event')
    def test_notify_unstable_to_stable(self, emit_mock):
        # unstable -> stable marks the END of an operation.
        self.node.provision_state = states.ACTIVE
        self.task._prev_provision_state = states.DEPLOYING
        self.task.notifier(self.task)
        emit_mock.assert_called_once_with(self.task,
                                          fields.NotificationLevel.INFO,
                                          fields.NotificationStatus.END,
                                          states.DEPLOYING,
                                          'oldtarget', 'event')
    def test_notify_stable_to_stable(self, emit_mock):
        # stable -> stable is a single-shot SUCCESS notification.
        self.node.provision_state = states.MANAGEABLE
        self.task._prev_provision_state = states.AVAILABLE
        self.task.notifier(self.task)
        emit_mock.assert_called_once_with(self.task,
                                          fields.NotificationLevel.INFO,
                                          fields.NotificationStatus.SUCCESS,
                                          states.AVAILABLE,
                                          'oldtarget', 'event')
    def test_notify_resource_released(self, emit_mock):
        # After resources are released (task.node is None) the notifier
        # must emit using the saved node copy, not the original task.
        node = mock.Mock(spec=objects.Node)
        node.provision_state = states.DEPLOYING
        node.target_provision_state = states.ACTIVE
        task = mock.Mock(spec=task_manager.TaskManager)
        task._prev_provision_state = states.AVAILABLE
        task._prev_target_provision_state = states.NOSTATE
        task._event = 'event'
        task.node = None
        task._saved_node = node
        notifier = task_manager.TaskManager._notify_provision_state_change
        task.notifier = notifier
        task.notifier(task)
        task_arg = emit_mock.call_args[0][0]
        self.assertEqual(node, task_arg.node)
        self.assertIsNot(task, task_arg)
    def test_notify_only_once(self, emit_mock):
        # The event is consumed on the first emit; a second call no-ops.
        self.node.provision_state = states.DEPLOYING
        self.task._prev_provision_state = states.AVAILABLE
        self.task.notifier(self.task)
        self.assertIsNone(self.task._event)
        self.task.notifier(self.task)
        self.assertEqual(1, emit_mock.call_count)
        self.assertIsNone(self.task._event)
| ruyang/ironic | ironic/tests/unit/conductor/test_task_manager.py | Python | apache-2.0 | 49,512 |
# -*- coding: utf-8 -*-
"""
Allele frequency calculations.
See also the examples at:
- http://nbviewer.ipython.org/github/alimanfoo/anhima/blob/master/examples/af.ipynb
""" # noqa
from __future__ import division, print_function, absolute_import
# third party dependencies
import numpy as np
# internal dependencies
import anhima
def _check_genotypes(genotypes):
"""
Internal function to check the genotypes input argument meets
expectations.
"""
genotypes = np.asarray(genotypes)
assert genotypes.ndim >= 2
if genotypes.ndim == 2:
# assume haploid, add ploidy dimension
genotypes = genotypes[..., np.newaxis]
return genotypes
def is_variant(genotypes):
    """Find variants with at least one non-reference allele observation.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).

    Returns
    -------
    is_variant : ndarray, bool
        An array of shape (n_variants,), True where at least one
        non-reference allele was observed for the variant.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    g = _check_genotypes(genotypes)
    # a site is variant when any call anywhere carries an allele index > 0
    return np.any(g > 0, axis=(1, 2))
def count_variant(genotypes):
    """Count variants with at least one non-reference allele observed.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) where each
        element of the array is an integer corresponding to an allele index
        (-1 = missing, 0 = reference allele, 1 = first alternate allele,
        2 = second alternate allele, etc.).

    Returns
    -------
    n : int
        The number of variants with at least one non-reference allele.

    Notes
    -----
    Applicable to polyploid genotype calls.
    Applicable to multiallelic variants.
    """
    return np.count_nonzero(is_variant(genotypes))
def is_non_variant(genotypes):
    """Find variants at which no non-reference allele was observed.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).

    Returns
    -------
    is_non_variant : ndarray, bool
        An array of shape (n_variants,), True where every call is either
        missing or the reference allele.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    g = _check_genotypes(genotypes)
    # equivalent to "all calls <= 0": no call carries an alternate allele
    return ~np.any(g > 0, axis=(1, 2))
def count_non_variant(genotypes):
    """Count variants with no non-reference alleles observed.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).

    Returns
    -------
    n : int
        The number of non-variant sites.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    flags = is_non_variant(genotypes)
    return np.count_nonzero(flags)
def is_singleton(genotypes, allele=1):
    """Find variants where `allele` was observed exactly once.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    allele : int, optional
        The allele to find singletons of.

    Returns
    -------
    is_singleton : ndarray, bool
        An array of shape (n_variants,), True where a single instance of
        `allele` was observed.

    Notes
    -----
    Applicable to polyploid genotype calls; for multiallelic variants the
    test is with respect to the specific `allele` given.
    """
    g = _check_genotypes(genotypes)
    # tally occurrences of the requested allele across samples and ploidy
    hits = (g == allele).sum(axis=(1, 2))
    return hits == 1
def count_singletons(genotypes, allele=1):
    """Count variants where `allele` was observed exactly once.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    allele : int, optional
        The allele to find singletons of.

    Returns
    -------
    n : int
        The number of singleton variants.

    Notes
    -----
    Applicable to polyploid genotype calls; for multiallelic variants the
    test is with respect to the specific `allele` given.
    """
    flags = is_singleton(genotypes, allele)
    return np.count_nonzero(flags)
def is_doubleton(genotypes, allele=1):
    """Find variants where `allele` was observed exactly twice.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    allele : int, optional
        The allele to find doubletons of.

    Returns
    -------
    is_doubleton : ndarray, bool
        An array of shape (n_variants,), True where exactly two instances
        of `allele` were observed.

    Notes
    -----
    Applicable to polyploid genotype calls; for multiallelic variants the
    test is with respect to the specific `allele` given.
    """
    g = _check_genotypes(genotypes)
    # tally occurrences of the requested allele across samples and ploidy
    hits = (g == allele).sum(axis=(1, 2))
    return hits == 2
def count_doubletons(genotypes, allele=1):
    """Count variants where `allele` was observed exactly twice.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    allele : int, optional
        The allele to find doubletons of.

    Returns
    -------
    n : int
        The number of doubleton variants.

    Notes
    -----
    Applicable to polyploid genotype calls; for multiallelic variants the
    test is with respect to the specific `allele` given.
    """
    flags = is_doubleton(genotypes, allele)
    return np.count_nonzero(flags)
def allele_number(genotypes):
    """Count non-missing allele calls per variant.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).

    Returns
    -------
    an : ndarray, int
        An array of shape (n_variants,) holding the total number of
        non-missing allele calls per variant.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    g = _check_genotypes(genotypes)
    # missing calls are encoded as negative values
    return (g >= 0).sum(axis=(1, 2))
def allele_count(genotypes, allele=1):
    """Count observations of `allele` per variant.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    allele : int, optional
        The allele to count.

    Returns
    -------
    ac : ndarray, int
        An array of shape (n_variants,) holding, per variant, the number
        of times `allele` was observed.

    Notes
    -----
    Applicable to polyploid genotype calls; for multiallelic variants the
    count is for the specific `allele` given.
    """
    g = _check_genotypes(genotypes)
    # collapse the samples and ploidy axes into a per-variant tally
    return (g == allele).sum(axis=(1, 2))
def allele_frequency(genotypes, allele=1):
    """Calculate frequency of the given allele per variant.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) where each
        element of the array is an integer corresponding to an allele index
        (-1 = missing, 0 = reference allele, 1 = first alternate allele,
        2 = second alternate allele, etc.).
    allele : int, optional
        The allele to calculate the frequency of.

    Returns
    -------
    an : ndarray, int
        An array of shape (n_variants,) counting the total number of
        non-missing alleles observed.
    ac : ndarray, int
        An array of shape (n_variants,) counting the number of
        times the given `allele` was observed.
    af : ndarray, float
        An array of shape (n_variants,) containing the allele frequency.

    Notes
    -----
    Applicable to polyploid genotype calls.
    Applicable to multiallelic variants, but note that this function
    calculates the frequency of a specific `allele`.
    """
    # count non-missing alleles
    an = allele_number(genotypes)
    # count alleles
    ac = allele_count(genotypes, allele=allele)
    # calculate allele frequency, accounting for missingness; np.errstate
    # restores the FP error state even if an exception is raised, unlike
    # the previous np.seterr save/restore pattern
    with np.errstate(invalid='ignore'):
        af = np.where(an > 0, ac / an, 0)
    return an, ac, af
def allele_counts(genotypes, alleles=None):
    """Calculate per-variant counts for each requested allele.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    alleles : sequence of ints, optional
        The alleles to count. If not given, all alleles (0 up to the
        maximum index observed) are counted.

    Returns
    -------
    ac : ndarray, int
        An array of shape (n_variants, n_alleles) counting, per variant,
        the observations of each requested allele.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    g = _check_genotypes(genotypes)
    n_variants = g.shape[0]
    if alleles is None:
        # default: count every allele index up to the maximum observed
        alleles = range(np.amax(g) + 1)
    ac = np.zeros((n_variants, len(alleles)), dtype='i4')
    # fill one output column per allele, writing directly into the slice
    for col, a in enumerate(alleles):
        np.sum(g == a, axis=(1, 2), out=ac[:, col])
    return ac
def allele_frequencies(genotypes, alleles=None):
    """Calculate allele frequencies per variant.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) where each
        element of the array is an integer corresponding to an allele index
        (-1 = missing, 0 = reference allele, 1 = first alternate allele,
        2 = second alternate allele, etc.).
    alleles : sequence of ints, optional
        The alleles to calculate the frequency of. If not specified, all
        alleles will be counted.

    Returns
    -------
    an : ndarray, int
        An array of shape (n_variants,) counting the total number of
        non-missing alleles observed.
    ac : ndarray, int
        An array of shape (n_variants, n_alleles) counting the number of
        times the given `alleles` were observed.
    af : ndarray, float
        An array of shape (n_variants, n_alleles) containing the allele
        frequencies.

    Notes
    -----
    Applicable to polyploid genotype calls.
    Applicable to multiallelic variants.
    """
    # count non-missing alleles, shaped (n_variants, 1) so it broadcasts
    # against the (n_variants, n_alleles) counts
    an = allele_number(genotypes)[:, np.newaxis]
    # count alleles
    ac = allele_counts(genotypes, alleles=alleles)
    # calculate allele frequencies, accounting for missingness; np.errstate
    # restores the FP error state even if an exception is raised, unlike
    # the previous np.seterr save/restore pattern
    with np.errstate(invalid='ignore'):
        af = np.where(an > 0, ac / an, 0)
    return an, ac, af
def allelism(genotypes):
    """Determine how many distinct alleles were observed per variant.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).

    Returns
    -------
    n : ndarray, int
        An array of shape (n_variants,) holding the number of distinct
        alleles observed per variant.

    See Also
    --------
    max_allele

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    g = _check_genotypes(genotypes)
    # an allele contributes to allelism if it was observed at least once
    observed = allele_counts(g) > 0
    return observed.sum(axis=1)
def is_non_segregating(genotypes, allele=None):
    """Find non-segregating variants (fixed for a single allele).

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    allele : int, optional
        If given, find variants fixed with respect to `allele`; otherwise
        find variants fixed for any allele.

    Returns
    -------
    is_non_segregating : ndarray, bool
        An array of shape (n_variants,), True where all calls for the
        variant are either missing or the same allele.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    g = _check_genotypes(genotypes)
    if allele is not None:
        # fixed w.r.t. a specific allele: every call missing or == allele
        return np.all((g < 0) | (g == allele), axis=(1, 2))
    # fixed for any allele: exactly one distinct allele observed
    return allelism(g) == 1
def count_non_segregating(genotypes, allele=None):
    """Count non-segregating variants.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).
    allele : int, optional
        If given, count variants fixed with respect to `allele`.

    Returns
    -------
    n : int
        The number of non-segregating variants.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    flags = is_non_segregating(genotypes, allele=allele)
    return np.count_nonzero(flags)
def is_segregating(genotypes):
    """Find segregating variants (more than one allele observed).

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).

    Returns
    -------
    is_segregating : ndarray, bool
        An array of shape (n_variants,), True where more than one
        distinct allele was observed for the variant.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    g = _check_genotypes(genotypes)
    # a variant segregates when two or more distinct alleles were seen
    return allelism(g) > 1
def count_segregating(genotypes):
    """Count segregating variants.

    Parameters
    ----------
    genotypes : array_like
        An array of shape (n_variants, n_samples, ploidy) of integer
        allele indices (-1 = missing, 0 = reference, >0 = alternate).

    Returns
    -------
    n : int
        The number of segregating variants.

    Notes
    -----
    Applicable to polyploid genotype calls and multiallelic variants.
    """
    flags = is_segregating(genotypes)
    return np.count_nonzero(flags)
def maximum_likelihood_ancestry(genotypes, qa, qb, filter_size=0):
    """Given alternate allele frequencies in two populations `qa` and `qb`,
    predict the ancestry for a set of `genotypes`.
    Parameters
    ----------
    genotypes : array_like
        An array of diploid genotype calls of shape (n_variants, n_samples,
        2) where each element of the array is an integer corresponding to an
        allele index (-1 = missing, 0 = reference allele, 1 = first alternate
        allele, 2 = second alternate allele, etc.).
    qa : array_like, float
        A 1-dimensional array of shape (n_variants, ) containing alternate
        allele frequencies for population A. Not modified; copied internally.
    qb : array_like, float
        A 1-dimensional array of shape (n_variants, ) containing alternate
        allele frequencies for population B. Not modified; copied internally.
    filter_size : int, optional
        Sum likelihoods in a moving window of size `filter_size`.
    Returns
    -------
    ancestry : ndarray, int, shape (n_variants, n_samples)
        An array containing the ancestry predictions, where 0 = AA (both
        alleles derive from population A), 1 = AB (hybrid ancestry) and 2 =
        BB (both alleles derive from population B), and -1 = ambiguous (models
        are equally likely).
    confidence : ndarray, float, shape (n_variants, n_samples)
        The confidence in the ancestry prediction (natural logarithm of the
        likelihood ratio for the two most likely models).
    Notes
    -----
    Where allele frequencies are similar between populations A and B,
    ancestry predictions will have low confidence, because different ancestry
    models will have similar likelihoods. Greater confidence will be obtained by
    filtering variants to select those where the difference in allele
    frequencies is greater. E.g.::
        >>> flt = np.abs(qa - qb) > .5
        >>> genotypes_flt = genotypes[flt]
        >>> qa_flt = qa[flt]
        >>> qb_flt = qb[flt]
        >>> ancestry, confidence = maximum_likelihood_ancestry(genotypes_flt, qa_flt, qb_flt)
    """  # noqa
    # check inputs
    genotypes = _check_genotypes(genotypes)
    # require biallelic genotypes
    assert np.amax(genotypes) < 2
    n_variants, n_samples, ploidy = genotypes.shape
    # require diploid genotypes
    assert ploidy == 2
    # Copy inputs into float arrays: the zero-frequency workaround below
    # assigns into qa/qb, and np.asarray would alias the caller's arrays
    # (mutating them as a side effect) and, for integer inputs, would
    # silently truncate the epsilon back to zero.
    qa = np.array(qa, dtype='f8')
    qb = np.array(qb, dtype='f8')
    assert qa.ndim == qb.ndim == 1
    assert n_variants == qa.shape[0] == qb.shape[0]
    # calculate reference allele frequencies, assuming biallelic variants
    pa = 1 - qa
    pb = 1 - qb
    # work around zero frequencies which cause problems when calculating logs
    pa[pa == 0] = np.exp(-250)
    qa[qa == 0] = np.exp(-250)
    pb[pb == 0] = np.exp(-250)
    qb[qb == 0] = np.exp(-250)
    # calculate likelihoods
    logpa = np.log(pa)
    logqa = np.log(qa)
    logpb = np.log(pb)
    logqb = np.log(qb)
    # set up likelihoods array
    n_models = 3
    n_gn_states = 3
    log_likelihoods = np.empty((n_variants, n_samples, n_models, n_gn_states),
                               dtype='f8')
    # probability of genotype (e.g., 0 = hom ref) given model (e.g., 0 = aa)
    log_likelihoods[:, :, 0, 0] = (2 * logpa)[:, np.newaxis]
    log_likelihoods[:, :, 1, 0] = (np.log(2) + logpa + logqa)[:, np.newaxis]
    log_likelihoods[:, :, 2, 0] = (2 * logqa)[:, np.newaxis]
    log_likelihoods[:, :, 0, 1] = (logpa + logpb)[:, np.newaxis]
    log_likelihoods[:, :, 1, 1] = (np.logaddexp(logpa + logqb,
                                                logqa + logpb)[:, np.newaxis])
    log_likelihoods[:, :, 2, 1] = (logqa + logqb)[:, np.newaxis]
    log_likelihoods[:, :, 0, 2] = (2 * logpb)[:, np.newaxis]
    log_likelihoods[:, :, 1, 2] = (np.log(2) + logpb + logqb)[:, np.newaxis]
    log_likelihoods[:, :, 2, 2] = (2 * logqb)[:, np.newaxis]
    # transform genotypes for convenience
    gn = anhima.gt.as_012(genotypes)
    # calculate actual model likelihoods for each genotype call
    model_likelihoods = np.empty((n_variants, n_samples, n_models), dtype='f8')
    model_likelihoods.fill(-250)
    for model in 0, 1, 2:
        for gn_state in 0, 1, 2:
            model_likelihoods[:, :, model][gn == gn_state] = \
                log_likelihoods[:, :, model, gn_state][gn == gn_state]
    # optionally combine likelihoods in a moving window
    if filter_size:
        model_likelihoods = np.apply_along_axis(np.convolve,
                                                0,
                                                model_likelihoods,
                                                np.ones((filter_size,)))
        # remove convolution edges; guard against filter_size == 1 where the
        # original slice [0:-0] would have produced an empty array
        trim = filter_size // 2
        if trim:
            model_likelihoods = model_likelihoods[trim:-trim, ...]
    # predict ancestry as model with highest likelihood
    ancestry = np.argmax(model_likelihoods, axis=2)
    # calculate confidence by comparing first and second most likely models
    model_likelihoods.sort(axis=2)
    confidence = model_likelihoods[:, :, 2] - model_likelihoods[:, :, 1]
    # rescind prediction where confidence is zero (models are equally likely)
    ancestry[confidence == 0] = -1
    return ancestry, confidence
| alimanfoo/anhima | anhima/af.py | Python | mit | 23,513 |
from sites import Site
# import urllib
import requests
import datetime
from bs4 import BeautifulSoup
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
FROM_ADDR = "rbl-check@umbc.edu"
class SiteList(object):
    """Downloads and parses a page listing RBL (DNS blacklist) services,
    tracks which RBLs list our mail servers, and renders/sends an HTML
    report by email.
    """
    def __init__(self, list_url):
        # URL of the page enumerating all known RBL services
        self.list_url = list_url
        # one Site entry per (blacklist RBL, mail IP) pair
        self.site_list = []
        # raw HTML of the downloaded list page
        self.html = ""
        # rendered HTML report body (filled by toHTML)
        self.email = ""
        # RBLs we are currently listed on
        self.listed = set()
        # previously saved blacklist entries (see findBlacklistedFromFile)
        self.blacklist = set()
        # RBLs that offer no delisting procedure
        self.no_delisting = set()
    def downloadList(self):
        """Fetch the RBL list page into self.html.
        Uses a browser User-Agent because the list site blocks the default
        urllib agent.
        """
        header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.62 Safari/537.36'}
        session = requests.Session()
        session.headers.update(header)
        response = session.get(self.list_url)
        self.html = response.text
    def grind(self, ipList):
        """Parse the downloaded HTML table and append one Site entry per
        (blacklist RBL, mail IP) pair to self.site_list.
        Each table row is expected to contain cells:
        index | name/link | dns zone | ipv4 | ipv6 | dom | list kind | info
        Only rows whose kind cell is 'b' (blacklist) are kept.
        """
        soup = BeautifulSoup(self.html)
        for row in soup.table.find_all('tr'):
            if not ipList:
                # nothing to record; matches original no-op behaviour
                break
            # parse each row once instead of once per IP
            cells = row.find_all('td')
            name = cells[1].text
            url = cells[1].a['href']
            dns_zone = cells[2].string
            if cells[3].text == "ipv4":
                zone_type = cells[3].text
            elif cells[4].text == "ipv6":
                zone_type = cells[4].text
            elif cells[5].text == "dom":
                zone_type = cells[5].text
            else:
                zone_type = None
            # keep only services flagged as blacklists ('b'); everything
            # else (e.g. whitelists) is discarded
            if cells[6].text == 'b':
                for ip in ipList:
                    self.site_list.append(Site(name, url, dns_zone, zone_type, ip))
    def findBlacklistedFromFile(self, filename):
        """Load previously saved blacklist entries from a tab-separated file.
        Expected field order per line:
        mail_ip \\t dns_zone \\t name \\t URL \\t type
        """
        # use a context manager so the handle is closed even on error
        # (the original leaked the file object)
        with open(filename, 'r') as savefile:
            for line in savefile:
                if line != "":
                    fields = line.encode('utf-8').strip().split("\t")
                    self.blacklist.add(Site(fields[2], fields[3], fields[1], fields[4], fields[0]))
    def _siteTable(self, sites):
        # render an iterable of Site entries as an HTML table
        parts = ["<table border=1>"]
        parts.append("<tr><td><b>Our Mail IP</b></td><td><b>Dns Zone</b></td><td><b>Name/Link</b></td><td><b>Type</b></td></tr>")
        for site in sites:
            parts.append("<tr>")
            parts.append(" <td>%s</td>" % site.mail_ip)
            parts.append(" <td>%s</td>" % site.dns_zone)
            parts.append(" <td><a href=\"%s\">%s</a></td>" % (site.url, site.name))
            parts.append(" <td>%s</td>" % site.zone_type)
            parts.append("</tr>")
        parts.append("</table>")
        return "".join(parts)
    def toHTML(self, ip_list):
        """Build the HTML report into self.email.
        Lists the RBLs we are listed on and the RBLs we cannot request
        delisting from, plus summary counts.
        """
        if self.site_list is not None:
            html = "<html>"
            html += "Source URL:%s <br>" % self.list_url
            html += "Mail Servers Checked: %s<br>" % str(ip_list)
            # table of RBLs we are listed on, sorted by our mail IP
            sorted_listed = sorted(self.listed, key=lambda x: x.mail_ip)
            if len(sorted_listed) != 0:
                html += "<h2>We're listed on these RBLs</h2>"
                html += self._siteTable(sorted_listed)
            else:
                html += "<br><b>We aren't listed on anything we can request delisting from!</b><br>"
            html += "<br>"
            html += "Listed on %s RBLs<br>" % len(self.listed)
            html += "RBLs checked: %s<br>" % len(self.site_list)
            html += "<br>"
            # table of RBLs without a delisting procedure
            sorted_no_delisting = sorted(self.no_delisting, key=lambda x: x.mail_ip)
            if len(sorted_no_delisting) != 0:
                html += "<br><h2>We cannot request delisting from these %s RBLs</h2><br>" % len(self.no_delisting)
                html += self._siteTable(sorted_no_delisting)
            else:
                html += "<b>We can request delisting from everything!</b>"
            # always close the document (previously only closed when the
            # no-delisting table was present)
            html += "</html>"
        else:
            html = "We tried to download the list of DNS Blacklist services from %s, but the site was down!" % self.list_url
        self.email = html
    def sendMail(self, toaddrs):
        """Send the rendered report via the local SMTP server."""
        msg = MIMEMultipart('alternative')
        msg['Subject'] = "Blacklist Check Results for %s" % datetime.datetime.now().strftime("%x %H:%M")
        # attach the recipient header (previously computed but never used)
        msg['To'] = ', '.join(toaddrs)
        msg.attach(MIMEText(self.email, 'plain', _charset='utf-8'))
        msg.attach(MIMEText(self.email, 'html', _charset='utf-8'))
        send = smtplib.SMTP('localhost')
        send.sendmail(FROM_ADDR, toaddrs, msg.as_string())
        send.quit()
    def __str__(self):
        # return a string (the original printed each site and returned None,
        # which made str(instance) raise TypeError)
        return "\n".join(str(site) for site in self.site_list)
| jeefberkey/rblcheck | list_gatherer.py | Python | apache-2.0 | 4,954 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the update tool."""
from __future__ import unicode_literals
import os
import sys
import unittest
from tools import update
from tests import test_lib
@unittest.skipIf(
    os.environ.get('TRAVIS_OS_NAME') == 'osx',
    'TLS 1.2 not supported by macOS on Travis')
class GithubRepoDownloadHelperTest(test_lib.BaseTestCase):
  """Tests for the GitHub repo download helper class."""
  # NOTE(review): this test exercises the live GitHub releases page, so it
  # requires network access — confirm this is intended for CI.
  _DOWNLOAD_URL = 'https://github.com/ForensicArtifacts/artifacts/releases'
  _PROJECT_NAME = 'artifacts'
  _PROJECT_VERSION = '20180628'
  def testGetPackageDownloadURLs(self):
    """Tests the GetPackageDownloadURLs function."""
    download_helper = update.GithubRepoDownloadHelper(self._DOWNLOAD_URL)
    package_download_urls = download_helper.GetPackageDownloadURLs(
        preferred_machine_type='x86', preferred_operating_system='Windows')
    # The helper only supports Python 2.7 and 3.6; on any other interpreter
    # version it is expected to return None instead of a list of URLs.
    if (sys.version_info[0] not in (2, 3) or
        (sys.version_info[0] == 2 and sys.version_info[1] != 7) or
        (sys.version_info[0] == 3 and sys.version_info[1] != 6)):
      # Python versions other than 2.7 and 3.6 are not supported.
      self.assertIsNone(package_download_urls)
    else:
      self.assertIsNotNone(package_download_urls)
      # Expect the win32 MSI for the pinned project version to be present.
      expected_url = (
          'https://github.com/log2timeline/l2tbinaries/raw/master/win32/'
          '{0:s}-{1:s}.1.win32.msi').format(
              self._PROJECT_NAME, self._PROJECT_VERSION)
      self.assertIn(expected_url, package_download_urls)
@unittest.skipIf(
    os.environ.get('TRAVIS_OS_NAME') == 'osx',
    'TLS 1.2 not supported by macOS on Travis')
class DependencyUpdaterTest(test_lib.BaseTestCase):
  """Tests for the dependency updater class."""
  # pylint: disable=protected-access
  # NOTE(review): depends on the published l2tbinaries package index, so it
  # requires network access — confirm this is intended for CI.
  _PROJECT_NAME = 'dfvfs'
  _PROJECT_VERSION = '20180510'
  def testGetPackageFilenamesAndVersions(self):
    """Tests the GetPackageFilenamesAndVersions function."""
    dependency_updater = update.DependencyUpdater(
        preferred_machine_type='x86', preferred_operating_system='Windows')
    package_filenames, package_versions = (
        dependency_updater._GetPackageFilenamesAndVersions([]))
    # The updater only supports Python 2.7 and 3.6; on any other interpreter
    # version both mappings are expected to be None.
    if (sys.version_info[0] not in (2, 3) or
        (sys.version_info[0] == 2 and sys.version_info[1] != 7) or
        (sys.version_info[0] == 3 and sys.version_info[1] != 6)):
      # Python versions other than 2.7 and 3.6 are not supported.
      self.assertIsNone(package_filenames)
      self.assertIsNone(package_versions)
    else:
      self.assertIsNotNone(package_filenames)
      self.assertIsNotNone(package_versions)
      # Filename maps project name -> MSI filename for the pinned version.
      self.assertEqual(
          package_filenames.get(self._PROJECT_NAME, None),
          '{0:s}-{1:s}.1.win32.msi'.format(
              self._PROJECT_NAME, self._PROJECT_VERSION))
      # Version maps project name -> [version, release] components.
      self.assertEqual(
          package_versions.get(self._PROJECT_NAME, None),
          [self._PROJECT_VERSION, '1'])
# Allow running this test module directly: python tests/update.py
if __name__ == '__main__':
  unittest.main()
| rgayon/l2tdevtools | tests/update.py | Python | apache-2.0 | 2,954 |
# -*- coding: utf-8 -*-
# Copyright (c) 2003 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter the data for a switch operation.
"""
from __future__ import unicode_literals
from PyQt5.QtWidgets import QDialog
from .Ui_SvnSwitchDialog import Ui_SvnSwitchDialog
class SvnSwitchDialog(QDialog, Ui_SvnSwitchDialog):
    """
    Class implementing a dialog to enter the data for a switch operation.
    """
    def __init__(self, taglist, reposURL, standardLayout, parent=None):
        """
        Constructor

        @param taglist list of previously entered tags (list of strings)
        @param reposURL repository path (string) or None
        @param standardLayout flag indicating the layout of the
            repository (boolean)
        @param parent parent widget (QWidget)
        """
        super(SvnSwitchDialog, self).__init__(parent)
        self.setupUi(self)
        # offer the previously entered tags, alphabetically sorted
        self.tagCombo.clear()
        self.tagCombo.addItems(sorted(taglist))
        # preselect the current repository URL, if one was given
        if reposURL is not None and reposURL != "":
            self.tagCombo.setEditText(reposURL)
        # tag-type selection only makes sense for a standard repo layout
        if not standardLayout:
            self.TagTypeGroup.setEnabled(False)
        # shrink the dialog to a sensible minimum size
        hint = self.minimumSizeHint()
        self.resize(max(self.width(), hint.width()), hint.height())

    def getParameters(self):
        """
        Public method to retrieve the tag data.

        @return tuple of string and int (tag, tag type)
        """
        tag = self.tagCombo.currentText()
        if not tag:
            # an empty tag overrides whichever button is checked
            return (tag, 4)
        if self.regularButton.isChecked():
            return (tag, 1)
        if self.branchButton.isChecked():
            return (tag, 2)
        return (tag, 0)
| davy39/eric | Plugins/VcsPlugins/vcsSubversion/SvnSwitchDialog.py | Python | gpl-3.0 | 1,777 |
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.post_list),
url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_edit, name='post_edit'),
url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),
url(r'^post/(?P<pk>\d+)/publish/$', views.post_publish, name='post_publish'),
url(r'^post/(?P<pk>\d+)/remove/$', views.post_remove, name='post_remove'),
] | pyladiespoa/site | blog/urls.py | Python | gpl-2.0 | 512 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import sys
import unittest
try:
reload
except NameError:
# For Python3 (though importlib should be used, silly 3.3).
from imp import reload
from oauth2client.client import HAS_OPENSSL
from oauth2client.client import SignedJwtAssertionCredentials
from oauth2client import crypt
def datafile(filename):
    """Return the raw bytes of a fixture file from the ``data`` directory
    next to this module.

    The file is opened with a context manager so the handle is always
    closed, even if the read raises (the original leaked the handle on
    error).
    """
    path = os.path.join(os.path.dirname(__file__), 'data', filename)
    with open(path, 'rb') as f:
        return f.read()
class Test_pkcs12_key_as_pem(unittest.TestCase):
  """Tests for crypt.pkcs12_key_as_pem()."""
  def _make_signed_jwt_creds(self, private_key_file='privatekey.p12',
                             private_key=None):
    # Helper: build credentials from a PKCS#12 fixture file (or from the
    # explicit key bytes, when given).
    private_key = private_key or datafile(private_key_file)
    return SignedJwtAssertionCredentials(
        'some_account@example.com',
        private_key,
        scope='read+write',
        sub='joe@example.org')
  def test_succeeds(self):
    # Conversion should produce one of the two known-good PEM encodings.
    self.assertEqual(True, HAS_OPENSSL)
    credentials = self._make_signed_jwt_creds()
    pem_contents = crypt.pkcs12_key_as_pem(credentials.private_key,
                                           credentials.private_key_password)
    pkcs12_key_as_pem = datafile('pem_from_pkcs12.pem')
    pkcs12_key_as_pem = crypt._parse_pem_key(pkcs12_key_as_pem)
    alternate_pem = datafile('pem_from_pkcs12_alternate.pem')
    self.assertTrue(pem_contents in [pkcs12_key_as_pem, alternate_pem])
  def test_without_openssl(self):
    # Simulate an environment without pyOpenSSL: evict OpenSSL from
    # sys.modules, make imports fail, and reload crypt so it takes its
    # no-OpenSSL code path. The finally block restores the real modules.
    import imp
    imp_find_module = imp.find_module
    orig_sys_path = sys.path
    def find_module(module_name):
      raise ImportError('No module named %s' % module_name)
    try:
      for m in list(sys.modules):
        if m.startswith('OpenSSL'):
          sys.modules.pop(m)
      sys.path = []
      imp.find_module = find_module
      reload(crypt)
      # Without OpenSSL the conversion must raise NotImplementedError.
      self.assertRaises(NotImplementedError, crypt.pkcs12_key_as_pem,
                        'FOO', 'BAR')
    finally:
      # Restore the import machinery and re-import OpenSSL, then reload
      # crypt so subsequent tests see the fully-featured module again.
      sys.path = orig_sys_path
      imp.find_module = imp_find_module
      import OpenSSL
      reload(crypt)
| Chilledheart/chromium | tools/telemetry/third_party/gsutilz/third_party/oauth2client/tests/test_crypt.py | Python | bsd-3-clause | 2,865 |
#!/usr/bin/python
import os
import requests
import json
import subprocess
import time
def userInput(config):
def defaultValue(config, key):
return config[key] if key in config else ""
res = {}
invalid = {}
res['user_name'] = raw_input("your GitHub username (default = " + defaultValue(config, 'user_name') + "): ")
res['project_name'] = raw_input("name of your SAC project on GitHub (default = " + defaultValue(config, 'project_name') + "): ")
res['project_branch'] = raw_input("current working branch of your SAC project on GitHub (default = " + defaultValue(config, 'project_branch') + "): ")
res['google_client_id'] = raw_input("generated Google client id (default = " + defaultValue(config, 'google_client_id') + "): ")
res['google_client_secret'] = raw_input("generated Google client secret (default = " + defaultValue(config, 'google_client_secret') + "): ")
res['google_api_key'] = raw_input("generated Google API key (default = " + defaultValue(config, 'google_api_key') + "): ")
res['sac_app_id'] = raw_input("generated SAC app ID (default = " + defaultValue(config, 'sac_app_id') + "): ")
res['sac_app_secret'] = raw_input("generated SAC app secret (default = " + defaultValue(config, 'sac_app_secret') + "): ")
for k, v in res.items():
if v == "": res[k] = defaultValue(config, k)
invalid = [k for k, v in res.items() if v == ""]
if len(invalid) > 0:
print "invalid configuration: properties", invalid, "must be set"
res = userInput(config)
return res
config_file = open('config.json', 'r')
config = json.load(config_file)
invalid_properties = [k for k, v in config.items() if v == ""]
print "current configuration:", json.dumps(config, indent=2)
if len(invalid_properties) > 0:
print "invalid configuration: properties", invalid_properties, "must be set"
config = userInput(config)
if 'user_name' and 'project_name' and 'project_branch' and 'google_client_id' and 'google_client_secret' and 'google_api_key' and 'sac_app_id' and 'sac_app_secret' in config:
reply = str(raw_input('config file complete. do you wish to reset it? (y/n): ')).lower().strip()
if reply[0] == 'y':
config = userInput(config)
else:
config = userInput(config)
with open('config.json', 'w') as config_file:
json.dump(config, config_file, indent=2)
p = subprocess.Popen(["ngrok", "http", "8080"])
time.sleep(1.5)
# retrieving public url for exposed localhost:8080
headers = {'Content-Type': 'application/json'}
r = requests.get('http://127.0.0.1:4040/api/tunnels', headers=headers)
public_url = json.loads(r.text)['tunnels'][0]['public_url']
# setting environment variables with user input
os.environ["CRAFT_DEMO_SAC_USER"] = config['user_name']
os.environ["CRAFT_DEMO_SAC_PROJECT"] = config['project_name']
os.environ["CRAFT_DEMO_SAC_VERSION"] = config['project_branch']
os.environ["CRAFT_DEMO_SAC_GOOGLE_CLIENT_ID"] = config['google_client_id']
os.environ["CRAFT_DEMO_SAC_GOOGLE_CLIENT_SECRET"] = config['google_client_secret']
os.environ["CRAFT_DEMO_SAC_GOOGLE_API_KEY"] = config['google_api_key']
os.environ["CRAFT_DEMO_SAC_APP_ID"] = config['sac_app_id']
os.environ["CRAFT_DEMO_SAC_APP_SECRET"] = config['sac_app_secret']
os.environ["CRAFT_DEMO_SAC_PORT"] = '8080'
os.environ["CRAFT_DEMO_SAC_URL"] = 'http://localhost:8080'
os.environ["CRAFT_DEMO_SAC_WS_URL"] = 'ws://localhost:8080'
os.environ["CRAFT_RUNTIME_SERVER_URL"] = 'http://runtime.craft.ai'
os.environ["CRAFT_DEMO_SAC_ACTIONS_URL"] = public_url
subprocess.call(["python", "-u", "src/server/main.py"])
p.terminate()
| kau-masa/Komi_SmartAlarmClock | local_demo.py | Python | bsd-3-clause | 3,511 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from os import listdir
from argparse import ArgumentParser
import pandas as pd
def _parse_args():
parser = ArgumentParser()
parser.add_argument('--input_folder', type=str, required=True,
help="Path to the folder of parquet files.")
parser.add_argument('--output_folder', type=str, default=".",
help="The path to save the preprocessed data to parquet files. ")
args = parser.parse_args()
return args
if __name__ == '__main__':
    args = _parse_args()
    # process every parquet file found directly in the input folder
    input_files = [f for f in listdir(args.input_folder) if f.endswith(".parquet")]
    for f in input_files:
        df = pd.read_parquet(os.path.join(args.input_folder, f))
        # Normalize two misspelled source column names: "text_ tokens" has a
        # stray space and "retweet_timestampe" a trailing 'e'. Other dataset
        # typos (e.g. "enaging...") are deliberately left untouched here.
        df = df.rename(columns={"text_ tokens": "text_tokens"})
        df = df.rename(columns={"retweet_timestampe": "retweet_timestamp"})
        # write the cleaned frame under the same file name
        df.to_parquet(os.path.join(args.output_folder, "%s" % f))
| intel-analytics/BigDL | python/friesian/example/wnd/train/convert_train.py | Python | apache-2.0 | 1,560 |
"""
Shared methods for Index subclasses backed by ExtensionArray.
"""
from __future__ import annotations
from typing import (
Hashable,
TypeVar,
)
import numpy as np
from pandas._typing import ArrayLike
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
is_dtype_equal,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.array_algos.putmask import validate_putmask
from pandas.core.arrays import (
Categorical,
DatetimeArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.base import Index
from pandas.core.ops import get_op_result_name
_T = TypeVar("_T", bound="NDArrayBackedExtensionIndex")
def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False):
    """
    Make an alias for a method of the underlying ExtensionArray.
    Parameters
    ----------
    name : str
        Name of an attribute the class should inherit from its EA parent.
    delegate : class
    cache : bool, default False
        Whether to convert wrapped properties into cache_readonly
    wrap : bool, default False
        Whether to wrap the inherited result in an Index.
    Returns
    -------
    attribute, method, property, or cache_readonly
    """
    attr = getattr(delegate, name)
    if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":
        # getset_descriptor i.e. property defined in cython class
        if cache:
            # expose as a compute-once cached property on the Index
            def cached(self):
                return getattr(self._data, name)
            cached.__name__ = name
            cached.__doc__ = attr.__doc__
            method = cache_readonly(cached)
        else:
            # read-write property delegating both get and set to self._data
            def fget(self):
                result = getattr(self._data, name)
                if wrap:
                    # re-box array-like results: same EA type -> same Index
                    # type; DataFrame -> indexed by self; else plain Index
                    if isinstance(result, type(self._data)):
                        return type(self)._simple_new(result, name=self.name)
                    elif isinstance(result, ABCDataFrame):
                        return result.set_index(self)
                    return Index(result, name=self.name)
                return result
            def fset(self, value):
                setattr(self._data, name, value)
            fget.__name__ = name
            fget.__doc__ = attr.__doc__
            method = property(fget, fset)
    elif not callable(attr):
        # just a normal attribute, no wrapping
        method = attr
    else:
        # plain method: forward to the EA, optionally re-boxing the result
        def method(self, *args, **kwargs):
            if "inplace" in kwargs:
                raise ValueError(f"cannot use inplace with {type(self).__name__}")
            result = attr(self._data, *args, **kwargs)
            if wrap:
                if isinstance(result, type(self._data)):
                    return type(self)._simple_new(result, name=self.name)
                elif isinstance(result, ABCDataFrame):
                    return result.set_index(self)
                return Index(result, name=self.name)
            return result
        method.__name__ = name
        method.__doc__ = attr.__doc__
    return method
def inherit_names(names: list[str], delegate, cache: bool = False, wrap: bool = False):
    """
    Class decorator that pins attributes from an ExtensionArray onto an
    Index subclass.
    Parameters
    ----------
    names : List[str]
    delegate : class
    cache : bool, default False
    wrap : bool, default False
        Whether to wrap the inherited result in an Index.
    """
    def wrapper(cls):
        for attr_name in names:
            setattr(
                cls,
                attr_name,
                inherit_from_data(attr_name, delegate, cache=cache, wrap=wrap),
            )
        return cls
    return wrapper
def _make_wrapped_comparison_op(opname: str):
    """
    Build a comparison dunder that dispatches to ``self._data``.
    """
    def wrapper(self, other):
        if isinstance(other, ABCSeries):
            # the arrays defer to Series for comparison ops but the indexes
            # don't, so unwrap the Series to its values here
            other = other._values
        target = _maybe_unwrap_index(other)
        return getattr(self._data, opname)(target)
    wrapper.__name__ = opname
    return wrapper
def _make_wrapped_arith_op(opname: str):
    """
    Build an arithmetic dunder that defers to the underlying ExtensionArray.
    """
    def method(self, other):
        if (
            isinstance(other, Index)
            and is_object_dtype(other.dtype)
            and type(other) is not Index
        ):
            # Give object-dtype Index *subclasses* a chance to implement ops
            # before we unwrap them.
            # See https://github.com/pandas-dev/pandas/issues/31109
            return NotImplemented
        try:
            meth = getattr(self._data, opname)
        except AttributeError as err:
            # e.g. Categorical, IntervalArray
            cls = type(self).__name__
            raise TypeError(
                f"cannot perform {opname} with this index type: {cls}"
            ) from err
        unwrapped = _maybe_unwrap_index(other)
        return _wrap_arithmetic_op(self, other, meth(unwrapped))
    method.__name__ = opname
    return method
def _wrap_arithmetic_op(self, other, result):
    """
    Box the raw result of an arithmetic op into an appropriately-named Index.
    """
    if result is NotImplemented:
        return NotImplemented
    if isinstance(result, tuple):
        # divmod / rdivmod hand back a pair; wrap each element separately
        assert len(result) == 2
        quotient = _wrap_arithmetic_op(self, other, result[0])
        remainder = _wrap_arithmetic_op(self, other, result[1])
        return (quotient, remainder)
    if not isinstance(result, Index):
        # Index.__new__ will choose appropriate subclass for dtype
        result = Index(result)
    result.name = get_op_result_name(self, other)
    return result
def _maybe_unwrap_index(obj):
"""
If operating against another Index object, we need to unwrap the underlying
data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray
implementation, otherwise we will incorrectly return NotImplemented.
Parameters
----------
obj : object
Returns
-------
unwrapped object
"""
if isinstance(obj, Index):
return obj._data
return obj
class ExtensionIndex(Index):
"""
Index subclass for indexes backed by ExtensionArray.
"""
# The base class already passes through to _data:
# size, __len__, dtype
_data: IntervalArray | NDArrayBackedExtensionArray
_data_cls: (
type[Categorical]
| type[DatetimeArray]
| type[TimedeltaArray]
| type[PeriodArray]
| type[IntervalArray]
)
@classmethod
def _simple_new(
cls,
array: IntervalArray | NDArrayBackedExtensionArray,
name: Hashable = None,
):
"""
Construct from an ExtensionArray of the appropriate type.
Parameters
----------
array : ExtensionArray
name : Label, default None
Attached as result.name
"""
assert isinstance(array, cls._data_cls), type(array)
result = object.__new__(cls)
result._data = array
result._name = name
result._cache = {}
result._reset_identity()
return result
__eq__ = _make_wrapped_comparison_op("__eq__")
__ne__ = _make_wrapped_comparison_op("__ne__")
__lt__ = _make_wrapped_comparison_op("__lt__")
__gt__ = _make_wrapped_comparison_op("__gt__")
__le__ = _make_wrapped_comparison_op("__le__")
__ge__ = _make_wrapped_comparison_op("__ge__")
__add__ = _make_wrapped_arith_op("__add__")
__sub__ = _make_wrapped_arith_op("__sub__")
__radd__ = _make_wrapped_arith_op("__radd__")
__rsub__ = _make_wrapped_arith_op("__rsub__")
__pow__ = _make_wrapped_arith_op("__pow__")
__rpow__ = _make_wrapped_arith_op("__rpow__")
__mul__ = _make_wrapped_arith_op("__mul__")
__rmul__ = _make_wrapped_arith_op("__rmul__")
__floordiv__ = _make_wrapped_arith_op("__floordiv__")
__rfloordiv__ = _make_wrapped_arith_op("__rfloordiv__")
__mod__ = _make_wrapped_arith_op("__mod__")
__rmod__ = _make_wrapped_arith_op("__rmod__")
__divmod__ = _make_wrapped_arith_op("__divmod__")
__rdivmod__ = _make_wrapped_arith_op("__rdivmod__")
__truediv__ = _make_wrapped_arith_op("__truediv__")
__rtruediv__ = _make_wrapped_arith_op("__rtruediv__")
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
# ---------------------------------------------------------------------
# NDarray-Like Methods
def __getitem__(self, key):
result = self._data[key]
if isinstance(result, type(self._data)):
if result.ndim == 1:
return type(self)(result, name=self._name)
# Unpack to ndarray for MPL compat
result = result._ndarray
# Includes cases where we get a 2D ndarray back for MPL compat
deprecate_ndim_indexing(result)
return result
def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
# overriding IndexOpsMixin improves performance GH#38083
return self._data.searchsorted(value, side=side, sorter=sorter)
def putmask(self, mask, value) -> Index:
mask, noop = validate_putmask(self._data, mask)
if noop:
return self.copy()
try:
self._validate_fill_value(value)
except (ValueError, TypeError):
dtype = self._find_common_type_compat(value)
return self.astype(dtype).putmask(mask, value)
arr = self._data.copy()
arr.putmask(mask, value)
return type(self)._simple_new(arr, name=self.name)
# ---------------------------------------------------------------------
def _get_engine_target(self) -> np.ndarray:
return np.asarray(self._data)
def _from_join_target(self, result: np.ndarray) -> ArrayLike:
# ATM this is only for IntervalIndex, implicit assumption
# about _get_engine_target
return type(self._data)._from_sequence(result, dtype=self.dtype)
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
arr = self._data.delete(loc)
return type(self)._simple_new(arr, name=self.name)
def repeat(self, repeats, axis=None):
nv.validate_repeat((), {"axis": axis})
result = self._data.repeat(repeats, axis=axis)
return type(self)._simple_new(result, name=self.name)
def insert(self, loc: int, item) -> Index:
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
try:
result = self._data.insert(loc, item)
except (ValueError, TypeError):
# e.g. trying to insert an integer into a DatetimeIndex
# We cannot keep the same dtype, so cast to the (often object)
# minimal shared dtype before doing the insert.
dtype = self._find_common_type_compat(item)
return self.astype(dtype).insert(loc, item)
else:
return type(self)._simple_new(result, name=self.name)
def _validate_fill_value(self, value):
"""
Convert value to be insertable to underlying array.
"""
return self._data._validate_setitem_value(value)
@doc(Index.map)
def map(self, mapper, na_action=None):
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
try:
result = mapper(self)
# Try to use this result if we can
if isinstance(result, np.ndarray):
result = Index(result)
if not isinstance(result, Index):
raise TypeError("The map function must return an Index object")
return result
except Exception:
return self.astype(object).map(mapper)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True) -> Index:
dtype = pandas_dtype(dtype)
if is_dtype_equal(self.dtype, dtype):
if not copy:
# Ensure that self.astype(self.dtype) is self
return self
return self.copy()
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Literal['M8[ns]']")
if (
isinstance(self.dtype, np.dtype)
and isinstance(dtype, np.dtype)
and dtype.kind == "M"
and dtype != "M8[ns]" # type: ignore[comparison-overlap]
):
# For now Datetime supports this by unwrapping ndarray, but DTI doesn't
raise TypeError(f"Cannot cast {type(self).__name__} to dtype")
with rewrite_exception(type(self._data).__name__, type(self).__name__):
new_values = self._data.astype(dtype, copy=copy)
# pass copy=False because any copying will be done in the
# _data.astype call above
return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False)
    @cache_readonly
    def _isnan(self) -> np.ndarray:
        # Boolean mask of missing values, computed once and cached.
        # error: Incompatible return value type (got "ExtensionArray", expected
        # "ndarray")
        return self._data.isna()  # type: ignore[return-value]
@doc(Index.equals)
def equals(self, other) -> bool:
# Dispatch to the ExtensionArray's .equals method.
if self.is_(other):
return True
if not isinstance(other, type(self)):
return False
return self._data.equals(other._data)
class NDArrayBackedExtensionIndex(ExtensionIndex):
    """
    Index subclass for indexes backed by NDArrayBackedExtensionArray.
    """

    _data: NDArrayBackedExtensionArray

    @classmethod
    def _simple_new(
        cls,
        values: NDArrayBackedExtensionArray,
        name: Hashable = None,
    ):
        index = super()._simple_new(values, name)

        # For groupby perf. See note in indexes/base about _index_data
        index._index_data = values._ndarray
        return index

    def _get_engine_target(self) -> np.ndarray:
        # Already ndarray-backed; hand the raw buffer to the engine.
        return self._data._ndarray

    def _from_join_target(self, result: np.ndarray) -> ArrayLike:
        assert result.dtype == self._data._ndarray.dtype
        return self._data._from_backing_data(result)
| gfyoung/pandas | pandas/core/indexes/extension.py | Python | bsd-3-clause | 14,958 |
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
from pyxb.exceptions_ import *
import unittest
import pyxb.binding.datatypes as xsd
class _TestIntegerType (object):
    """Base class for testing any datatype that descends from integer.

    Subclasses should define class variables:
    THIS_TYPE = the xsd datatype class
    PARENT_TYPE = the next dominating type in the hierarchy
    MIN_IN_RANGE = the minimum expressible value
    MAX_IN_RANGE = the maximum expressible value

    Optional values to set:
    ZERO_IN_RANGE = False if zero not valid for subclass; default is True
    """

    # Bounds of the subtype's value space; None means unbounded on that side.
    MIN_IN_RANGE = None
    ZERO_IN_RANGE = True
    MAX_IN_RANGE = None

    def testParentage (self):
        # The subtype must report PARENT_TYPE as its XSD base type.
        self.assertTrue(self.PARENT_TYPE == self.THIS_TYPE.XsdSuperType())

    def testRange (self):
        # Values just outside the range must be rejected, unless the parent
        # type cannot represent them either (listed in PARENT_EXCLUDE).
        if self.MIN_IN_RANGE is not None:
            if not ((self.MIN_IN_RANGE-1) in self.PARENT_EXCLUDE):
                self.assertRaises(SimpleTypeValueError, self.THIS_TYPE, self.MIN_IN_RANGE - 1)
            self.assertEqual(self.MIN_IN_RANGE, self.THIS_TYPE(self.MIN_IN_RANGE))
        if self.ZERO_IN_RANGE:
            self.assertEqual(0, self.THIS_TYPE(0))
        if self.MAX_IN_RANGE is not None:
            self.assertEqual(self.MAX_IN_RANGE, self.THIS_TYPE(self.MAX_IN_RANGE))
            if not ((self.MAX_IN_RANGE+1) in self.PARENT_EXCLUDE):
                self.assertRaises(SimpleTypeValueError, self.THIS_TYPE, self.MAX_IN_RANGE+1)

    # Values invalid even for PARENT_TYPE (e.g. -1 when the parent is an
    # unsigned type); boundary probes skip these.
    PARENT_EXCLUDE = []

    def testStringConversion (self):
        # Exercise construction from both the lexical (string) form and a
        # parent-typed value, at the range boundaries and at zero.
        numbers = [ ]
        if self.MIN_IN_RANGE is not None:
            numbers.extend([self.MIN_IN_RANGE-1, self.MIN_IN_RANGE])
        if self.ZERO_IN_RANGE:
            numbers.append(0)
        if self.MAX_IN_RANGE is not None:
            numbers.extend([self.MAX_IN_RANGE, self.MAX_IN_RANGE+1])
        for n in numbers:
            s = '%d' % (n,)
            p = None
            if not (n in self.PARENT_EXCLUDE):
                p = self.PARENT_TYPE(n)
                self.assertEqual(n, p)
            if ((self.MIN_IN_RANGE is None) or (self.MIN_IN_RANGE <= n)) \
                    and ((self.MAX_IN_RANGE is None) or (n <= self.MAX_IN_RANGE)):
                # In range: construction succeeds from both forms and the
                # lexical representation round-trips.
                bs = self.THIS_TYPE(s)
                self.assertEqual(n, bs)
                self.assertEqual(s, bs.xsdLiteral())
                bp = self.THIS_TYPE(p)
                self.assertEqual(n, bp)
            else:
                # Out of range: both forms must be rejected.
                self.assertRaises(SimpleTypeValueError, self.THIS_TYPE, s)
                if p is not None:
                    self.assertRaises(SimpleTypeValueError, self.THIS_TYPE, p)
class Test_byte (unittest.TestCase, _TestIntegerType):
    """xsd:byte -- xsd:short restricted to the signed 8-bit range."""

    THIS_TYPE = xsd.byte
    PARENT_TYPE = xsd.short
    MIN_IN_RANGE = -(1 << 7)      # -128
    MAX_IN_RANGE = (1 << 7) - 1   # 127
class Test_unsignedByte (unittest.TestCase, _TestIntegerType):
    """xsd:unsignedByte -- xsd:unsignedShort restricted to 8 bits."""

    THIS_TYPE = xsd.unsignedByte
    PARENT_TYPE = xsd.unsignedShort
    # -1 is invalid for the unsigned parent type as well.
    PARENT_EXCLUDE = [ -1 ]
    MIN_IN_RANGE = 0
    MAX_IN_RANGE = (1 << 8) - 1   # 255
class Test_short (unittest.TestCase, _TestIntegerType):
    """xsd:short -- xsd:int restricted to the signed 16-bit range."""

    THIS_TYPE = xsd.short
    PARENT_TYPE = xsd.int
    MIN_IN_RANGE = -(1 << 15)      # -32768
    MAX_IN_RANGE = (1 << 15) - 1   # 32767
class Test_unsignedShort (unittest.TestCase, _TestIntegerType):
    """xsd:unsignedShort -- xsd:unsignedInt restricted to 16 bits."""

    THIS_TYPE = xsd.unsignedShort
    PARENT_TYPE = xsd.unsignedInt
    PARENT_EXCLUDE = [ -1 ]
    MIN_IN_RANGE = 0
    MAX_IN_RANGE = (1 << 16) - 1   # 65535
class Test_int (unittest.TestCase, _TestIntegerType):
    """xsd:int -- xsd:long restricted to the signed 32-bit range."""

    THIS_TYPE = xsd.int
    PARENT_TYPE = xsd.long
    MIN_IN_RANGE = -(1 << 31)      # -2147483648
    MAX_IN_RANGE = (1 << 31) - 1   # 2147483647
class Test_unsignedInt (unittest.TestCase, _TestIntegerType):
    """xsd:unsignedInt -- xsd:unsignedLong restricted to 32 bits."""

    THIS_TYPE = xsd.unsignedInt
    PARENT_TYPE = xsd.unsignedLong
    PARENT_EXCLUDE = [ -1 ]
    MIN_IN_RANGE = 0
    MAX_IN_RANGE = (1 << 32) - 1   # 4294967295
class Test_long (unittest.TestCase, _TestIntegerType):
    """xsd:long -- xsd:integer restricted to the signed 64-bit range."""

    THIS_TYPE = xsd.long
    PARENT_TYPE = xsd.integer
    MIN_IN_RANGE = -(1 << 63)      # -9223372036854775808
    MAX_IN_RANGE = (1 << 63) - 1   # 9223372036854775807
class Test_unsignedLong (unittest.TestCase, _TestIntegerType):
    """xsd:unsignedLong -- nonNegativeInteger restricted to 64 bits."""

    THIS_TYPE = xsd.unsignedLong
    PARENT_TYPE = xsd.nonNegativeInteger
    PARENT_EXCLUDE = [ -1 ]
    MIN_IN_RANGE = 0
    MAX_IN_RANGE = (1 << 64) - 1   # 18446744073709551615
class Test_negativeInteger (unittest.TestCase, _TestIntegerType):
    """xsd:negativeInteger -- nonPositiveInteger with zero excluded."""

    THIS_TYPE = xsd.negativeInteger
    PARENT_TYPE = xsd.nonPositiveInteger
    ZERO_IN_RANGE = False
    MAX_IN_RANGE = -1
class Test_nonPositiveInteger (unittest.TestCase, _TestIntegerType):
    """xsd:nonPositiveInteger -- integers bounded above by zero."""

    THIS_TYPE = xsd.nonPositiveInteger
    PARENT_TYPE = xsd.integer
    MAX_IN_RANGE = 0
class Test_nonNegativeInteger (unittest.TestCase, _TestIntegerType):
    """xsd:nonNegativeInteger -- integers bounded below by zero."""

    THIS_TYPE = xsd.nonNegativeInteger
    PARENT_TYPE = xsd.integer
    MIN_IN_RANGE = 0
class Test_positiveInteger (unittest.TestCase, _TestIntegerType):
    """xsd:positiveInteger -- nonNegativeInteger with zero excluded."""

    THIS_TYPE = xsd.positiveInteger
    PARENT_TYPE = xsd.nonNegativeInteger
    ZERO_IN_RANGE = False
    MIN_IN_RANGE = 1
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| CantemoInternal/pyxb | tests/datatypes/test-IntegerTypes.py | Python | apache-2.0 | 4,944 |
import unittest
import time
from datetime import datetime
from app import create_app, db
from app.models import User, AnonymousUser, Role, Permission, Follow
class UserModelTestCase(unittest.TestCase):
    """Unit tests for the User model: password handling, confirmation /
    reset / email-change tokens, roles, timestamps, gravatar URLs and the
    follower relationship."""

    def setUp(self):
        # Fresh application context and database schema per test, with the
        # default roles seeded.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_password_setter(self):
        u = User(password='cat')
        self.assertTrue(u.password_hash is not None)

    def test_no_password_getter(self):
        # The plaintext password must never be readable back.
        u = User(password='cat')
        with self.assertRaises(AttributeError):
            u.password

    def test_password_verification(self):
        u = User(password='cat')
        self.assertTrue(u.verify_password('cat'))
        self.assertFalse(u.verify_password('dog'))

    def test_password_salts_are_random(self):
        # Same plaintext for two users -> different hashes (per-user salt).
        u = User(password='cat')
        u2 = User(password='cat')
        self.assertTrue(u.password_hash != u2.password_hash)

    def test_valid_confirmation_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_confirmation_token()
        self.assertTrue(u.confirm(token))

    def test_invalid_confirmation_token(self):
        # A token generated for one user must not confirm another.
        u1 = User(password='cat')
        u2 = User(password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_confirmation_token()
        self.assertFalse(u2.confirm(token))

    def test_expired_confirmation_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        # Token expires after 1 second; sleep past it so confirm must fail.
        token = u.generate_confirmation_token(1)
        time.sleep(2)
        self.assertFalse(u.confirm(token))

    def test_valid_reset_token(self):
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_reset_token()
        self.assertTrue(u.reset_password(token, 'dog'))
        self.assertTrue(u.verify_password('dog'))

    def test_invalid_reset_token(self):
        # A reset token for one user must not reset another's password.
        u1 = User(password='cat')
        u2 = User(password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_reset_token()
        self.assertFalse(u2.reset_password(token, 'horse'))
        self.assertTrue(u2.verify_password('dog'))

    def test_valid_email_change_token(self):
        u = User(email='john@example.com', password='cat')
        db.session.add(u)
        db.session.commit()
        token = u.generate_email_change_token('susan@example.org')
        self.assertTrue(u.change_email(token))
        self.assertTrue(u.email == 'susan@example.org')

    def test_invalid_email_change_token(self):
        # A token generated for one user must not change another's email.
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u1.generate_email_change_token('david@example.net')
        self.assertFalse(u2.change_email(token))
        self.assertTrue(u2.email == 'susan@example.org')

    def test_duplicate_email_change_token(self):
        # Changing to an address already registered must be rejected.
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        token = u2.generate_email_change_token('john@example.com')
        self.assertFalse(u2.change_email(token))
        self.assertTrue(u2.email == 'susan@example.org')

    def test_roles_and_permissions(self):
        # Default role can write articles but not moderate comments.
        u = User(email='john@example.com', password='cat')
        self.assertTrue(u.can(Permission.WRITE_ARTICLES))
        self.assertFalse(u.can(Permission.MODERATE_COMMENTS))

    def test_anonymous_user(self):
        u = AnonymousUser()
        self.assertFalse(u.can(Permission.FOLLOW))

    def test_timestamps(self):
        # member_since and last_seen are set close to "now" on creation.
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        self.assertTrue(
            (datetime.utcnow() - u.member_since).total_seconds() < 3)
        self.assertTrue(
            (datetime.utcnow() - u.last_seen).total_seconds() < 3)

    def test_ping(self):
        # ping() refreshes last_seen.
        u = User(password='cat')
        db.session.add(u)
        db.session.commit()
        time.sleep(2)
        last_seen_before = u.last_seen
        u.ping()
        self.assertTrue(u.last_seen > last_seen_before)

    def test_gravatar(self):
        # The hex digest below is the MD5 hash of 'john@example.com'.
        u = User(email='john@example.com', password='cat')
        with self.app.test_request_context('/'):
            gravatar = u.gravatar()
            gravatar_256 = u.gravatar(size=256)
            gravatar_pg = u.gravatar(rating='pg')
            gravatar_retro = u.gravatar(default='retro')
        with self.app.test_request_context('/', base_url='https://example.com'):
            # HTTPS requests must yield the secure gravatar host.
            gravatar_ssl = u.gravatar()
        self.assertTrue('http://www.gravatar.com/avatar/' +
                        'd4c74594d841139328695756648b6bd6'in gravatar)
        self.assertTrue('s=256' in gravatar_256)
        self.assertTrue('r=pg' in gravatar_pg)
        self.assertTrue('d=retro' in gravatar_retro)
        self.assertTrue('https://secure.gravatar.com/avatar/' +
                        'd4c74594d841139328695756648b6bd6' in gravatar_ssl)

    def test_follows(self):
        u1 = User(email='john@example.com', password='cat')
        u2 = User(email='susan@example.org', password='dog')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        self.assertFalse(u1.is_following(u2))
        self.assertFalse(u1.is_followed_by(u2))
        timestamp_before = datetime.utcnow()
        u1.follow(u2)
        db.session.add(u1)
        db.session.commit()
        timestamp_after = datetime.utcnow()
        self.assertTrue(u1.is_following(u2))
        self.assertFalse(u1.is_followed_by(u2))
        self.assertTrue(u2.is_followed_by(u1))
        # count() include yourself!
        self.assertTrue(u1.followed.count() == 2)
        self.assertTrue(u2.followers.count() == 2)
        f = u1.followed.all()[-1]
        self.assertTrue(f.followed == u2)
        self.assertTrue(timestamp_before <= f.timestamp <= timestamp_after)
        f = u2.followers.all()[-1]
        self.assertTrue(f.follower == u1)
        u1.unfollow(u2)
        db.session.add(u1)
        db.session.commit()
        # After unfollow only the implicit self-follow rows remain.
        self.assertTrue(u1.followed.count() == 1)
        self.assertTrue(u2.followers.count() == 1)
        self.assertTrue(Follow.query.count() == 2)
        u2.follow(u1)
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        # Deleting a user cascades to its Follow rows.
        db.session.delete(u2)
        db.session.commit()
        self.assertTrue(Follow.query.count() == 1)
| caser789/xuejiao-blog | tests/test_user_model.py | Python | mit | 6,887 |
# Authors:
# Thierry Bordaz <tbordaz@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import six
from ipalib import api, errors
from ipalib import (
Flag, Int, Password, Str, Bool, StrEnum, DateTime, DNParam)
from ipalib.parameters import Principal, Certificate
from ipalib.plugable import Registry
from .baseldap import (
DN, LDAPObject, LDAPCreate, LDAPUpdate, LDAPSearch, LDAPDelete,
LDAPRetrieve, LDAPAddAttribute, LDAPModAttribute, LDAPRemoveAttribute,
LDAPAddMember, LDAPRemoveMember,
LDAPAddAttributeViaOption, LDAPRemoveAttributeViaOption,
add_missing_object_class)
from ipaserver.plugins.service import (validate_realm, normalize_principal)
from ipaserver.plugins.config import check_fips_auth_opts
from ipalib.request import context
from ipalib import _
from ipalib.constants import PATTERN_GROUPUSER_NAME
from ipapython import kerberos
from ipapython.ipautil import ipa_generate_password, TMP_PWD_ENTROPY_BITS
from ipapython.ipavalidate import Email
from ipalib.util import (
normalize_sshpubkey,
validate_sshpubkey,
convert_sshpubkey_post,
remove_sshpubkey_from_output_post,
remove_sshpubkey_from_output_list_post,
add_sshpubkey_to_attrs_pre,
set_krbcanonicalname,
check_principal_realm_in_trust_namespace,
ensure_last_krbprincipalname,
ensure_krbcanonicalname_set
)
if six.PY3:
unicode = str
# Module docstring is assigned at runtime so it can be translated.
__doc__ = _("""
Baseuser
This contains common definitions for user/stageuser
""")

register = Registry()

# Magic userclass value: request that no User Private Group be created.
NO_UPG_MAGIC = '__no_upg__'

# Extra virtual output attributes shared by user and stageuser commands.
baseuser_output_params = (
    Flag('has_keytab',
        label=_('Kerberos keys available'),
    ),
)

# DN of the 'UPG Definition' Managed Entries entry (User Private Groups).
UPG_DEFINITION_DN = DN(('cn', 'UPG Definition'),
                       ('cn', 'Definitions'),
                       ('cn', 'Managed Entries'),
                       ('cn', 'etc'),
                       api.env.basedn)
def validate_nsaccountlock(entry_attrs):
    """Validate an ``nsaccountlock`` value if present.

    Accepts a real boolean (bool/Bool) or the strings TRUE/FALSE in any
    letter case; any other value raises a validation error.
    """
    if 'nsaccountlock' not in entry_attrs:
        return
    value = entry_attrs['nsaccountlock']
    if isinstance(value, (bool, Bool)):
        return
    if not isinstance(value, six.string_types):
        # Neither boolean nor string: treated as multi-valued input.
        raise errors.OnlyOneValueAllowed(attr='nsaccountlock')
    if value.lower() not in ('true', 'false'):
        raise errors.ValidationError(name='nsaccountlock',
                                     error=_('must be TRUE or FALSE'))
def radius_dn2pk(api, entry_attrs):
    """Replace a RADIUS proxy config link DN with its primary key, in place."""
    link = entry_attrs.get('ipatokenradiusconfiglink', None)
    if not link:
        return
    radius_obj = api.Object['radiusproxy']
    entry_attrs['ipatokenradiusconfiglink'] = [
        radius_obj.get_primary_key_from_dn(link[0])
    ]
def convert_nsaccountlock(entry_attrs):
    """Normalize ``nsaccountlock`` in-place to a Python boolean.

    A missing attribute means the account is not locked (False); otherwise
    the first LDAP value is converted through the Bool parameter type.
    """
    # Idiom fix: use ``'x' not in d`` rather than ``not 'x' in d``.
    if 'nsaccountlock' not in entry_attrs:
        entry_attrs['nsaccountlock'] = False
    else:
        nsaccountlock = Bool('temp')
        entry_attrs['nsaccountlock'] = nsaccountlock.convert(
            entry_attrs['nsaccountlock'][0])
def normalize_user_principal(value):
    """Return the canonical form of a user principal.

    The first (username) component is lowercased; any remaining components
    and the realm are preserved unchanged.
    """
    principal = kerberos.Principal(normalize_principal(value))
    components = (principal.username.lower(),) + principal.components[1:]
    return unicode(kerberos.Principal(components, realm=principal.realm))
def fix_addressbook_permission_bindrule(name, template, is_new,
                                        anonymous_read_aci,
                                        **other_options):
    """Fix bind rule type for Read User Addressbook/IPA Attributes permission

    When upgrading from an old IPA that had the global read ACI,
    or when installing the first replica with granular read permissions,
    we need to keep allowing anonymous access to many user attributes.
    This fixup_function changes the bind rule type accordingly.
    """
    if not (is_new and anonymous_read_aci):
        return
    template['ipapermbindruletype'] = 'anonymous'
class baseuser(LDAPObject):
    """
    baseuser object.
    """

    # Containers for the three user life-cycle states: staged, active and
    # preserved (deleted).
    stage_container_dn = api.env.container_stageuser
    active_container_dn = api.env.container_user
    delete_container_dn = api.env.container_deleteuser

    object_class = ['posixaccount']
    object_class_config = 'ipauserobjectclasses'
    # Auxiliary objectclasses added on demand (see check_objectclass in
    # baseuser_mod).
    possible_objectclasses = [
        'meporiginentry', 'ipauserauthtypeclass', 'ipauser',
        'ipatokenradiusproxyuser', 'ipacertmapobject'
    ]
    disallow_object_classes = ['krbticketpolicyaux']
    permission_filter_objectclasses = ['posixaccount']
    search_attributes_config = 'ipausersearchfields'
    default_attributes = [
        'uid', 'givenname', 'sn', 'homedirectory', 'loginshell',
        'uidnumber', 'gidnumber', 'mail', 'ou',
        'telephonenumber', 'title', 'memberof', 'nsaccountlock',
        'memberofindirect', 'ipauserauthtype', 'userclass',
        'ipatokenradiusconfiglink', 'ipatokenradiususername',
        'krbprincipalexpiration', 'usercertificate;binary',
        'krbprincipalname', 'krbcanonicalname',
        'ipacertmapdata'
    ]
    search_display_attributes = [
        'uid', 'givenname', 'sn', 'homedirectory', 'krbcanonicalname',
        'krbprincipalname', 'loginshell',
        'mail', 'telephonenumber', 'title', 'nsaccountlock',
        'uidnumber', 'gidnumber', 'sshpubkeyfp',
    ]
    uuid_attribute = 'ipauniqueid'
    attribute_members = {
        'manager': ['user'],
        'memberof': ['group', 'netgroup', 'role', 'hbacrule', 'sudorule'],
        'memberofindirect': ['group', 'netgroup', 'role', 'hbacrule', 'sudorule'],
    }
    allow_rename = True
    bindable = True
    # (attribute, virtual output flag) pairs reported by get_password_attributes.
    password_attributes = [('userpassword', 'has_password'),
                          ('krbprincipalkey', 'has_keytab')]
    label = _('Users')
    label_singular = _('User')

    takes_params = (
        Str('uid',
            pattern=PATTERN_GROUPUSER_NAME,
            pattern_errmsg='may only include letters, numbers, _, -, . and $',
            maxlength=255,
            cli_name='login',
            label=_('User login'),
            primary_key=True,
            default_from=lambda givenname, sn: givenname[0] + sn,
            normalizer=lambda value: value.lower(),
        ),
        Str('givenname',
            cli_name='first',
            label=_('First name'),
        ),
        Str('sn',
            cli_name='last',
            label=_('Last name'),
        ),
        Str('cn',
            label=_('Full name'),
            default_from=lambda givenname, sn: '%s %s' % (givenname, sn),
            autofill=True,
        ),
        Str('displayname?',
            label=_('Display name'),
            default_from=lambda givenname, sn: '%s %s' % (givenname, sn),
            autofill=True,
        ),
        Str('initials?',
            label=_('Initials'),
            default_from=lambda givenname, sn: '%c%c' % (givenname[0], sn[0]),
            autofill=True,
        ),
        Str('homedirectory?',
            cli_name='homedir',
            label=_('Home directory'),
        ),
        Str('gecos?',
            label=_('GECOS'),
            default_from=lambda givenname, sn: '%s %s' % (givenname, sn),
            autofill=True,
        ),
        Str('loginshell?',
            cli_name='shell',
            label=_('Login shell'),
        ),
        Principal(
            'krbcanonicalname?',
            validate_realm,
            label=_('Principal name'),
            flags={'no_option', 'no_create', 'no_update', 'no_search'},
            normalizer=normalize_user_principal
        ),
        Principal(
            'krbprincipalname*',
            validate_realm,
            cli_name='principal',
            label=_('Principal alias'),
            default_from=lambda uid: kerberos.Principal(
                uid.lower(), realm=api.env.realm),
            autofill=True,
            normalizer=normalize_user_principal,
        ),
        DateTime('krbprincipalexpiration?',
            cli_name='principal_expiration',
            label=_('Kerberos principal expiration'),
        ),
        DateTime('krbpasswordexpiration?',
            cli_name='password_expiration',
            label=_('User password expiration'),
        ),
        Str('mail*',
            cli_name='email',
            label=_('Email address'),
        ),
        Password('userpassword?',
            cli_name='password',
            label=_('Password'),
            doc=_('Prompt to set the user password'),
            # FIXME: This is temporary till bug is fixed causing updates to
            # bomb out via the webUI.
            exclude='webui',
        ),
        Flag('random?',
            doc=_('Generate a random user password'),
            flags=('no_search', 'virtual_attribute'),
            default=False,
        ),
        Str('randompassword?',
            label=_('Random password'),
            flags=('no_create', 'no_update', 'no_search', 'virtual_attribute'),
        ),
        Int('uidnumber?',
            cli_name='uid',
            label=_('UID'),
            doc=_('User ID Number (system will assign one if not provided)'),
            minvalue=1,
        ),
        Int('gidnumber?',
            label=_('GID'),
            doc=_('Group ID Number'),
            minvalue=1,
        ),
        Str('street?',
            cli_name='street',
            label=_('Street address'),
        ),
        Str('l?',
            cli_name='city',
            label=_('City'),
        ),
        Str('st?',
            cli_name='state',
            label=_('State/Province'),
        ),
        Str('postalcode?',
            label=_('ZIP'),
        ),
        Str('telephonenumber*',
            cli_name='phone',
            label=_('Telephone Number')
        ),
        Str('mobile*',
            label=_('Mobile Telephone Number')
        ),
        Str('pager*',
            label=_('Pager Number')
        ),
        Str('facsimiletelephonenumber*',
            cli_name='fax',
            label=_('Fax Number'),
        ),
        Str('ou?',
            cli_name='orgunit',
            label=_('Org. Unit'),
        ),
        Str('title?',
            label=_('Job Title'),
        ),
        # keep backward compatibility using single value manager option
        Str('manager?',
            label=_('Manager'),
        ),
        Str('carlicense*',
            label=_('Car License'),
        ),
        Str('ipasshpubkey*', validate_sshpubkey,
            cli_name='sshpubkey',
            label=_('SSH public key'),
            normalizer=normalize_sshpubkey,
            flags=['no_search'],
        ),
        Str('sshpubkeyfp*',
            label=_('SSH public key fingerprint'),
            flags={'virtual_attribute', 'no_create', 'no_update', 'no_search'},
        ),
        StrEnum('ipauserauthtype*',
            cli_name='user_auth_type',
            label=_('User authentication types'),
            doc=_('Types of supported user authentication'),
            values=(u'password', u'radius', u'otp'),
        ),
        Str('userclass*',
            cli_name='class',
            label=_('Class'),
            doc=_('User category (semantics placed on this attribute are for '
                  'local interpretation)'),
        ),
        Str('ipatokenradiusconfiglink?',
            cli_name='radius',
            label=_('RADIUS proxy configuration'),
        ),
        Str('ipatokenradiususername?',
            cli_name='radius_username',
            label=_('RADIUS proxy username'),
        ),
        Str('departmentnumber*',
            label=_('Department Number'),
        ),
        Str('employeenumber?',
            label=_('Employee Number'),
        ),
        Str('employeetype?',
            label=_('Employee Type'),
        ),
        Str('preferredlanguage?',
            label=_('Preferred Language'),
            pattern='^(([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})?(;q\=((0(\.[0-9]{0,3})?)|(1(\.0{0,3})?)))?' \
                + '(\s*,\s*[a-zA-Z]{1,8}(-[a-zA-Z]{1,8})?(;q\=((0(\.[0-9]{0,3})?)|(1(\.0{0,3})?)))?)*)|(\*))$',
            pattern_errmsg='must match RFC 2068 - 14.4, e.g., "da, en-gb;q=0.8, en;q=0.7"',
        ),
        Certificate('usercertificate*',
            cli_name='certificate',
            label=_('Certificate'),
            doc=_('Base-64 encoded user certificate'),
        ),
        Str(
            'ipacertmapdata*',
            cli_name='certmapdata',
            label=_('Certificate mapping data'),
            doc=_('Certificate mapping data'),
            flags=['no_create', 'no_update', 'no_search'],
        ),
    )

    def normalize_and_validate_email(self, email, config=None):
        """Append the default e-mail domain to bare addresses and validate
        each address; raises ValidationError on malformed input."""
        if not config:
            config = self.backend.get_ipa_config()

        # check if default email domain should be added
        defaultdomain = config.get('ipadefaultemaildomain', [None])[0]
        if email:
            norm_email = []
            if not isinstance(email, (list, tuple)):
                email = [email]
            for m in email:
                if isinstance(m, six.string_types):
                    if '@' not in m and defaultdomain:
                        m = m + u'@' + defaultdomain
                    if not Email(m):
                        raise errors.ValidationError(name='email', error=_('invalid e-mail format: %(email)s') % dict(email=m))
                    norm_email.append(m)
                else:
                    if not Email(m):
                        raise errors.ValidationError(name='email', error=_('invalid e-mail format: %(email)s') % dict(email=m))
                    norm_email.append(m)
            return norm_email

        return email

    def normalize_manager(self, manager, container):
        """
        Given a userid verify the user's existence (in the appropriate containter) and return the dn.
        """
        if not manager:
            return None

        if not isinstance(manager, list):
            manager = [manager]

        try:
            container_dn = DN(container, api.env.basedn)
            for i, mgr in enumerate(manager):
                if isinstance(mgr, DN) and mgr.endswith(container_dn):
                    continue
                entry_attrs = self.backend.find_entry_by_attr(
                        self.primary_key.name, mgr, self.object_class, [''],
                        container_dn
                    )
                manager[i] = entry_attrs.dn
        except errors.NotFound:
            # NOTE: reports only the manager that failed the lookup.
            raise errors.NotFound(reason=_('manager %(manager)s not found') % dict(manager=mgr))

        return manager

    def _user_status(self, user, container):
        # True when the user entry lives under the given container DN.
        assert isinstance(user, DN)
        return user.endswith(container)

    def active_user(self, user):
        assert isinstance(user, DN)
        return self._user_status(user, DN(self.active_container_dn, api.env.basedn))

    def stage_user(self, user):
        assert isinstance(user, DN)
        return self._user_status(user, DN(self.stage_container_dn, api.env.basedn))

    def delete_user(self, user):
        assert isinstance(user, DN)
        return self._user_status(user, DN(self.delete_container_dn, api.env.basedn))

    def convert_usercertificate_pre(self, entry_attrs):
        # LDAP stores the certificate under the ';binary' transfer option.
        if 'usercertificate' in entry_attrs:
            entry_attrs['usercertificate;binary'] = entry_attrs.pop(
                'usercertificate')

    def convert_usercertificate_post(self, entry_attrs, **options):
        # Undo the ';binary' renaming for API output.
        if 'usercertificate;binary' in entry_attrs:
            entry_attrs['usercertificate'] = entry_attrs.pop(
                'usercertificate;binary')

    def convert_attribute_members(self, entry_attrs, *keys, **options):
        super(baseuser, self).convert_attribute_members(
            entry_attrs, *keys, **options)

        if options.get("raw", False):
            return

        # due the backward compatibility, managers have to be returned in
        # 'manager' attribute instead of 'manager_user'
        try:
            entry_attrs['failed_manager'] = entry_attrs.pop('manager')
        except KeyError:
            pass

        try:
            entry_attrs['manager'] = entry_attrs.pop('manager_user')
        except KeyError:
            pass
class baseuser_add(LDAPCreate):
    """
    Prototype command plugin to be implemented by real plugin
    """
    def pre_common_callback(self, ldap, dn, entry_attrs, attrs_list, *keys,
                            **options):
        """Shared pre-add processing for user/stageuser creation."""
        assert isinstance(dn, DN)
        set_krbcanonicalname(entry_attrs)
        check_fips_auth_opts(fips_mode=self.api.env.fips_mode, **options)
        self.obj.convert_usercertificate_pre(entry_attrs)

    def post_common_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Shared post-add normalization of the output entry."""
        assert isinstance(dn, DN)
        self.obj.convert_usercertificate_post(entry_attrs, **options)
        self.obj.get_password_attributes(ldap, dn, entry_attrs)
        convert_sshpubkey_post(entry_attrs)
        radius_dn2pk(self.api, entry_attrs)
class baseuser_del(LDAPDelete):
    """
    Prototype command plugin to be implemented by real plugin
    """
    # Intentionally empty: the concrete user-del/stageuser-del commands add
    # behavior; LDAPDelete supplies the generic delete machinery.
class baseuser_mod(LDAPUpdate):
    """
    Prototype command plugin to be implemented by real plugin
    """
    def check_namelength(self, ldap, **options):
        """Reject a rename whose new login exceeds ipamaxusernamelength."""
        if options.get('rename') is not None:
            config = ldap.get_ipa_config()
            if 'ipamaxusernamelength' in config:
                if len(options['rename']) > int(config.get('ipamaxusernamelength')[0]):
                    raise errors.ValidationError(
                        name=self.obj.primary_key.cli_name,
                        error=_('can be at most %(len)d characters') % dict(
                            len = int(config.get('ipamaxusernamelength')[0])
                        )
                    )

    def preserve_krbprincipalname_pre(self, ldap, entry_attrs, *keys, **options):
        """
        preserve user principal aliases during rename operation. This is the
        pre-callback part of this. Another method called during post-callback
        shall insert the principals back
        """
        if options.get('rename', None) is None:
            return
        try:
            old_entry = ldap.get_entry(
                entry_attrs.dn, attrs_list=(
                    'krbprincipalname', 'krbcanonicalname'))
            if 'krbcanonicalname' not in old_entry:
                return
        except errors.NotFound:
            self.obj.handle_not_found(*keys)

        # Stash the aliases on the request context for the post-callback.
        self.context.krbprincipalname = old_entry.get(
            'krbprincipalname', [])

    def preserve_krbprincipalname_post(self, ldap, entry_attrs, **options):
        """
        Insert the preserved aliases back to the user entry during rename
        operation
        """
        if options.get('rename', None) is None or not hasattr(
                self.context, 'krbprincipalname'):
            return

        obj_pkey = self.obj.get_primary_key_from_dn(entry_attrs.dn)
        canonical_name = entry_attrs['krbcanonicalname'][0]

        # Re-add every alias except the (new) canonical name itself.
        principals_to_add = tuple(p for p in self.context.krbprincipalname if
                                  p != canonical_name)

        if principals_to_add:
            result = self.api.Command.user_add_principal(
                obj_pkey, principals_to_add)['result']
            entry_attrs['krbprincipalname'] = result.get('krbprincipalname', [])

    def check_mail(self, entry_attrs):
        # Normalize/validate e-mail addresses (adds the default domain).
        if 'mail' in entry_attrs:
            entry_attrs['mail'] = self.obj.normalize_and_validate_email(entry_attrs['mail'])

    def check_manager(self, entry_attrs, container):
        # Resolve bare manager uids into DNs under the given container.
        if 'manager' in entry_attrs:
            entry_attrs['manager'] = self.obj.normalize_manager(entry_attrs['manager'], container)

    def check_userpassword(self, entry_attrs, **options):
        # --random generates a password unless one was given explicitly.
        if 'userpassword' not in entry_attrs and options.get('random'):
            entry_attrs['userpassword'] = ipa_generate_password(
                entropy_bits=TMP_PWD_ENTROPY_BITS)
            # save the password so it can be displayed in post_callback
            setattr(context, 'randompassword', entry_attrs['userpassword'])

    def check_objectclass(self, ldap, dn, entry_attrs):
        """Add the auxiliary objectclass required by any optional attribute
        being set (SSH keys, auth type, userclass, RADIUS link)."""
        if ('ipasshpubkey' in entry_attrs or 'ipauserauthtype' in entry_attrs
            or 'userclass' in entry_attrs or 'ipatokenradiusconfiglink' in entry_attrs):
            if 'objectclass' in entry_attrs:
                obj_classes = entry_attrs['objectclass']
            else:
                _entry_attrs = ldap.get_entry(dn, ['objectclass'])
                obj_classes = entry_attrs['objectclass'] = _entry_attrs['objectclass']

            # IMPORTANT: compare objectclasses as case insensitive
            obj_classes = [o.lower() for o in obj_classes]

            if 'ipasshpubkey' in entry_attrs and 'ipasshuser' not in obj_classes:
                entry_attrs['objectclass'].append('ipasshuser')

            if 'ipauserauthtype' in entry_attrs and 'ipauserauthtypeclass' not in obj_classes:
                entry_attrs['objectclass'].append('ipauserauthtypeclass')

            if 'userclass' in entry_attrs and 'ipauser' not in obj_classes:
                entry_attrs['objectclass'].append('ipauser')

            if 'ipatokenradiusconfiglink' in entry_attrs:
                cl = entry_attrs['ipatokenradiusconfiglink']
                if cl:
                    if 'ipatokenradiusproxyuser' not in obj_classes:
                        entry_attrs['objectclass'].append('ipatokenradiusproxyuser')

                    answer = self.api.Object['radiusproxy'].get_dn_if_exists(cl)
                    entry_attrs['ipatokenradiusconfiglink'] = answer

    def pre_common_callback(self, ldap, dn, entry_attrs, attrs_list, *keys,
                            **options):
        """Shared pre-mod processing for user/stageuser updates."""
        assert isinstance(dn, DN)
        add_sshpubkey_to_attrs_pre(self.context, attrs_list)

        check_fips_auth_opts(fips_mode=self.api.env.fips_mode, **options)
        self.check_namelength(ldap, **options)

        self.check_mail(entry_attrs)

        self.check_manager(entry_attrs, self.obj.active_container_dn)

        self.check_userpassword(entry_attrs, **options)

        self.check_objectclass(ldap, dn, entry_attrs)
        self.obj.convert_usercertificate_pre(entry_attrs)
        self.preserve_krbprincipalname_pre(ldap, entry_attrs, *keys, **options)

    def post_common_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Shared post-mod normalization of the output entry."""
        assert isinstance(dn, DN)
        self.preserve_krbprincipalname_post(ldap, entry_attrs, **options)
        if options.get('random', False):
            try:
                entry_attrs['randompassword'] = unicode(getattr(context, 'randompassword'))
            except AttributeError:
                # if both randompassword and userpassword options were used
                pass
        convert_nsaccountlock(entry_attrs)
        self.obj.get_password_attributes(ldap, dn, entry_attrs)
        self.obj.convert_usercertificate_post(entry_attrs, **options)
        convert_sshpubkey_post(entry_attrs)
        remove_sshpubkey_from_output_post(self.context, entry_attrs)
        radius_dn2pk(self.api, entry_attrs)
class baseuser_find(LDAPSearch):
    """
    Prototype command plugin to be implemented by real plugin
    """
    def args_options_2_entry(self, *args, **options):
        """Normalize link-type options before generic option handling."""
        newoptions = {}
        self.common_enhance_options(newoptions, **options)
        options.update(newoptions)
        return super(baseuser_find, self).args_options_2_entry(
            *args, **options)

    def common_enhance_options(self, newoptions, **options):
        """Rewrite bare names in search options into full DNs."""
        # assure the manager attr is a dn, not just a bare uid
        manager = options.get('manager')
        if manager is not None:
            newoptions['manager'] = self.obj.normalize_manager(
                manager, self.obj.active_container_dn)

        # Ensure that the RADIUS config link is a dn, not just the name
        cl = 'ipatokenradiusconfiglink'
        if cl in options:
            newoptions[cl] = self.api.Object['radiusproxy'].get_dn(options[cl])

    def pre_common_callback(self, ldap, filters, attrs_list, base_dn, scope,
                            *args, **options):
        # Request the SSH key attribute only when the client asked for it.
        add_sshpubkey_to_attrs_pre(self.context, attrs_list)

    def post_common_callback(self, ldap, entries, lockout=False, **options):
        """Convert raw LDAP entries for output.

        Callers pass ``lockout=True`` to force the nsaccountlock flag on
        every returned entry; otherwise the stored value is normalized.
        """
        for attrs in entries:
            self.obj.convert_usercertificate_post(attrs, **options)
            # Idiom fix: no parentheses around a simple ``if`` condition.
            if lockout:
                attrs['nsaccountlock'] = True
            else:
                convert_nsaccountlock(attrs)
            convert_sshpubkey_post(attrs)
        remove_sshpubkey_from_output_list_post(self.context, entries)
class baseuser_show(LDAPRetrieve):
    """
    Prototype command plugin to be implemented by real plugin
    """
    def pre_common_callback(self, ldap, dn, attrs_list, *keys, **options):
        # Request the SSH key attribute only when the client asked for it.
        assert isinstance(dn, DN)
        add_sshpubkey_to_attrs_pre(self.context, attrs_list)

    def post_common_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Shared post-show normalization of the output entry."""
        assert isinstance(dn, DN)
        self.obj.get_password_attributes(ldap, dn, entry_attrs)
        self.obj.convert_usercertificate_post(entry_attrs, **options)
        convert_sshpubkey_post(entry_attrs)
        remove_sshpubkey_from_output_post(self.context, entry_attrs)
        radius_dn2pk(self.api, entry_attrs)
class baseuser_add_manager(LDAPAddMember):
    # 'manager' is handled by the member-management machinery of
    # LDAPAddMember rather than as a plain attribute.
    member_attributes = ['manager']
class baseuser_remove_manager(LDAPRemoveMember):
    # Counterpart of baseuser_add_manager: removes 'manager' values via
    # the LDAPRemoveMember machinery.
    member_attributes = ['manager']
class baseuser_add_principal(LDAPAddAttribute):
    # Adds krbprincipalname (principal alias) values to a user entry.
    attribute = 'krbprincipalname'
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        # Validate the principal's realm against trusted-domain namespaces
        # and make sure krbcanonicalname is set before adding the alias.
        check_principal_realm_in_trust_namespace(self.api, *keys)
        ensure_krbcanonicalname_set(ldap, entry_attrs)
        return dn
class baseuser_remove_principal(LDAPRemoveAttribute):
    # Removes krbprincipalname (principal alias) values from a user entry.
    attribute = 'krbprincipalname'
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        # Guard against removing the entry's last principal name
        # (see ensure_last_krbprincipalname).
        ensure_last_krbprincipalname(ldap, entry_attrs, *keys)
        return dn
class baseuser_add_cert(LDAPAddAttributeViaOption):
    # Adds usercertificate values supplied via a command option.
    attribute = 'usercertificate'
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys,
                     **options):
        # Convert certificate values before they are written to LDAP
        # (see convert_usercertificate_pre).
        self.obj.convert_usercertificate_pre(entry_attrs)
        return dn
    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        assert isinstance(dn, DN)
        # Convert the stored values back for command output.
        self.obj.convert_usercertificate_post(entry_attrs, **options)
        return dn
class baseuser_remove_cert(LDAPRemoveAttributeViaOption):
    # Removes usercertificate values supplied via a command option; uses
    # the same pre/post conversions as baseuser_add_cert.
    attribute = 'usercertificate'
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys,
                     **options):
        self.obj.convert_usercertificate_pre(entry_attrs)
        return dn
    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        assert isinstance(dn, DN)
        self.obj.convert_usercertificate_post(entry_attrs, **options)
        return dn
class ModCertMapData(LDAPModAttribute):
    # Shared base for commands editing the ipacertmapdata attribute.
    # The mapping value can be given directly as an argument, derived from
    # --subject/--issuer, or extracted from one or more --certificate
    # options (see _convert_options_to_certmap and pre_callback).
    attribute = 'ipacertmapdata'
    takes_options = (
        DNParam(
            'issuer?',
            cli_name='issuer',
            label=_('Issuer'),
            doc=_('Issuer of the certificate'),
            flags=['virtual_attribute']
        ),
        DNParam(
            'subject?',
            cli_name='subject',
            label=_('Subject'),
            doc=_('Subject of the certificate'),
            flags=['virtual_attribute']
        ),
        Certificate(
            'certificate*',
            cli_name='certificate',
            label=_('Certificate'),
            doc=_('Base-64 encoded user certificate'),
            flags=['virtual_attribute']
        ),
    )
    @staticmethod
    def _build_mapdata(subject, issuer):
        # Both DNs are rendered in X500 ordering (most-specific RDN first).
        return u'X509:<I>{issuer}<S>{subject}'.format(
            issuer=issuer.x500_text(), subject=subject.x500_text())
    @classmethod
    def _convert_options_to_certmap(cls, entry_attrs, issuer=None,
                                    subject=None, certificates=()):
        """
        Converts options to ipacertmapdata

        When --subject --issuer or --certificate options are used,
        the value for ipacertmapdata is built from extracting subject and
        issuer, converting their values to X500 ordering and using the
        format X509:<I>issuer<S>subject

        For instance:
        X509:<I>O=DOMAIN,CN=Certificate Authority<S>O=DOMAIN,CN=user

        A list of values can be returned if --certificate is used multiple
        times, or in conjunction with --subject --issuer.
        """
        data = []
        # Keep any mapping values that were passed directly as arguments.
        data.extend(entry_attrs.get(cls.attribute, list()))
        if issuer or subject:
            data.append(cls._build_mapdata(subject, issuer))
        for cert in certificates:
            issuer = DN(cert.issuer)
            subject = DN(cert.subject)
            if not subject:
                # A mapping without a subject would be meaningless.
                raise errors.ValidationError(
                    name='certificate',
                    error=_('cannot have an empty subject'))
            data.append(cls._build_mapdata(subject, issuer))
        entry_attrs[cls.attribute] = data
    def get_args(self):
        # ipacertmapdata is not mandatory as it can be built
        # from the values subject+issuer or from reading certificate
        for arg in super(ModCertMapData, self).get_args():
            if arg.name == 'ipacertmapdata':
                yield arg.clone(required=False, alwaysask=False)
            else:
                yield arg.clone()
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys,
                     **options):
        # The 3 valid calls are
        # ipa user-add-certmapdata LOGIN --subject xx --issuer yy
        # ipa user-add-certmapdata LOGIN [DATA] --certificate xx
        # ipa user-add-certmapdata LOGIN DATA
        # Check that at least one of the 3 formats is used
        try:
            certmapdatas = keys[1] or []
        except IndexError:
            certmapdatas = []
        issuer = options.get('issuer')
        subject = options.get('subject')
        certificates = options.get('certificate', [])
        # If only LOGIN is supplied, then we need either subject or issuer or
        # certificate
        if (not certmapdatas and not issuer and not subject and
                not certificates):
            raise errors.RequirementError(name='ipacertmapdata')
        # If subject or issuer is provided, other options are not allowed
        if subject or issuer:
            if certificates:
                raise errors.MutuallyExclusiveError(
                    reason=_('cannot specify both subject/issuer '
                             'and certificate'))
            if certmapdatas:
                raise errors.MutuallyExclusiveError(
                    reason=_('cannot specify both subject/issuer '
                             'and ipacertmapdata'))
            # If subject or issuer is provided, then the other one is required
            if not subject:
                raise errors.RequirementError(name='subject')
            if not issuer:
                raise errors.RequirementError(name='issuer')
        # if the command is called with --subject --issuer or --certificate
        # we need to add ipacertmapdata to the attrs_list in order to
        # display the resulting value in the command output
        if 'ipacertmapdata' not in attrs_list:
            attrs_list.append('ipacertmapdata')
        self._convert_options_to_certmap(
            entry_attrs,
            issuer=issuer,
            subject=subject,
            certificates=certificates)
        return dn
class baseuser_add_certmapdata(ModCertMapData, LDAPAddAttribute):
    __doc__ = _("Add one or more certificate mappings to the user entry.")
    msg_summary = _('Added certificate mappings to user "%(value)s"')
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys,
                     **options):
        # Option validation and conversion to ipacertmapdata happens in
        # ModCertMapData.pre_callback.
        dn = super(baseuser_add_certmapdata, self).pre_callback(
            ldap, dn, entry_attrs, attrs_list, *keys, **options)
        # The objectclass ipacertmapobject may not be present on
        # existing user entries. We need to add it if we define a new
        # value for ipacertmapdata
        add_missing_object_class(ldap, u'ipacertmapobject', dn)
        return dn
class baseuser_remove_certmapdata(ModCertMapData,
                                  LDAPRemoveAttribute):
    # Option validation and conversion are inherited from
    # ModCertMapData.pre_callback.
    __doc__ = _("Remove one or more certificate mappings from the user entry.")
    msg_summary = _('Removed certificate mappings from user "%(value)s"')
| apophys/freeipa | ipaserver/plugins/baseuser.py | Python | gpl-3.0 | 33,160 |
#!/usr/bin/env python
"""Certificate Transparency log client."""
| raffaelespazzoli/origin | vendor/github.com/google/certificate-transparency/python/ct/__init__.py | Python | apache-2.0 | 66 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-01 00:39
from __future__ import unicode_literals
from django.db import migrations
import versatileimagefield.fields
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares Image.image_ppoi as a
    # non-editable PPOIField (default '0.5x0.5', max_length 20).
    dependencies = [
        ('products', '0016_auto_20160601_0035'),
    ]
    operations = [
        migrations.AlterField(
            model_name='image',
            name='image_ppoi',
            field=versatileimagefield.fields.PPOIField(default='0.5x0.5', editable=False, max_length=20, verbose_name='Point of Interest'),
        ),
    ]
| libretees/libreshop | libreshop/products/migrations/0017_auto_20160601_0039.py | Python | gpl-3.0 | 567 |
import json
import os
import psycopg2 as dbapi2
import re
from flask import Flask, request, render_template, redirect
from flask.helpers import url_for
app = Flask(__name__)
class activities:
    # Request handlers for the events table. NOTE: methods intentionally
    # take ``config`` (a psycopg2 connection string) as their first
    # positional argument instead of ``self`` — callers invoke them as
    # plain functions on the class. Each call opens a short-lived
    # connection. All SQL is parameterized so user-supplied form values
    # cannot inject SQL; leftover debug print() calls were removed.
    def saveevent(config):
        # Insert a new event from the submitted form; returns an HTML
        # snippet describing the outcome.
        event_name = None
        event_location = None
        event_date = None
        event_category = None
        if request.method == 'POST':
            event_name = request.form['eventname_text']
            event_location = request.form['eventloc_text']
            event_date = request.form['eventdate_text']
            event_category = request.form['eventcat_text']
        with dbapi2.connect(config) as connection:
            cursor = connection.cursor()
            try:
                query = """INSERT INTO activities(event_name, event_location, event_date, event_category) VALUES (%s, %s, %s, %s)"""
                cursor.execute(query, (event_name, event_location, event_date, event_category))
                connection.commit()
                return 'Your activity has been successfully posted <a href="http://localhost:5000">Home</a>'
            except Exception:
                # Most likely a constraint violation on insert.
                return 'Your activity cannot be added due to foreign key constraints! <a href="http://localhost:5000">Home</a>'
    def events_db(config):
        # List all distinct events for the events page.
        with dbapi2.connect(config) as connection:
            if request.method == 'GET':
                cursor = connection.cursor()
                query = "SELECT DISTINCT event_name, event_location, event_date, event_category from ACTIVITIES"
                cursor.execute(query)
                connection.commit()
                return render_template('events.html', events_list=cursor)
    def events_db_delete(config, deleteevent):
        # Delete the named event; deleteevent is bound as a parameter.
        with dbapi2.connect(config) as connection:
            cursor = connection.cursor()
            query = "DELETE FROM ACTIVITIES where event_name = %s"
            cursor.execute(query, (deleteevent,))
            connection.commit()
            return redirect(url_for('events'))
    def events_db_update(config, updateevent):
        # Show the update form for the named event.
        # SECURITY FIX: the query previously interpolated updateevent with
        # "'%s'" % updateevent, allowing SQL injection; it is now bound.
        with dbapi2.connect(config) as connection:
            cursor = connection.cursor()
            query = "SELECT event_name from activities where event_name = %s"
            cursor.execute(query, (updateevent,))
            connection.commit()
            return render_template('events_update.html', events_updates=cursor)
    def events_db_update_apply(config, updateevent):
        # Rename an event to the value submitted in the form.
        # SECURITY FIX: both values were previously %-interpolated into the
        # SQL string (injectable); they are now bound parameters.
        with dbapi2.connect(config) as connection:
            cursor = connection.cursor()
            try:
                new_name = request.form['event_name']
                query = "UPDATE activities set event_name = %s where event_name = %s"
                cursor.execute(query, (new_name, updateevent))
                connection.commit()
                return redirect(url_for('events'))
            except Exception:
                return 'Value cannot be NULL! <a href="http://localhost:5000">Home</a>'
| itucsdb1601/itucsdb1601 | events.py | Python | gpl-3.0 | 3,125 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.
Examples:
.. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code:: python
def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here create two duplicate
instances with same primary key, must catch the exception out of context
managed by a single session:
.. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code:: python
def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
* In almost all cases you should use `query.soft_delete()`. Some examples:
.. code:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
.. code:: python
def soft_delete_bar_model():
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
def soft_delete_multi_models():
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import logging
import re
import time
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from nova.openstack.common.db import exception
from nova.openstack.common.gettextutils import _LE, _LW, _LI
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.

    The foreign key constraints are disabled by default in SQLite,
    so the foreign key constraints will be enabled here for every
    database connection
    """
    def connect(self, dbapi_con, con_record):
        # Invoked by the pool once per newly created DBAPI connection.
        dbapi_con.execute('pragma foreign_keys=ON')
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
# Engine name -> tuple of regexes that extract the offending column info
# from that backend's duplicate-key error message (formats shown above).
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    In this function will be raised DBDuplicateEntry exception if integrity
    error wrap unique constraint violation.
    """
    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` it is table name and columns `c1`, `c2`
        #                  are in UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                # Strip the "<table>_" prefix and "_key" suffix from the
                # postgres constraint name to recover the column name.
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        return columns[len(uniqbase):].split("0")[1:]
    if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
        # Unknown backend: its message cannot be parsed, so do nothing.
        return
    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        # Message does not look like a duplicate-entry error; the caller
        # (_wrap_db_error) will fall back to a generic DBError.
        return
    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
    # columns so we have to omit that from the DBDuplicateEntry error.
    columns = ''
    if engine_name != 'ibm_db_sa':
        columns = match.group(1)
    if engine_name == "sqlite":
        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
    """Decorator translating SQLAlchemy errors into db.exception types.

    Wraps a Session (or SessionTransaction) method so that backend
    exceptions surface as the portable exceptions defined in
    nova.openstack.common.db.exception.
    """
    #TODO(rpodolyaka): in a subsequent commit make this a class decorator to
    # ensure it can only applied to Session subclasses instances (as we use
    # Session instance bind attribute below)
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            # Lost connections and deadlocks both arrive as
            # OperationalError; check for each specifically first.
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # note(boris-42): We should catch unique constraint violation and
        # wrap it by our own DBDuplicateEntry exception. Unique constraint
        # violation is wrapped by IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also in some tables (for example
            # instance_types) there are more than one unique constraint. This
            # means we should get names of columns, which values violate
            # unique constraint, from error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except exception.DBError:
            # note(zzzeek) - if _wrap_db_error is applied to nested functions,
            # ensure an existing DBError is propagated outwards
            raise
        except Exception as e:
            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode.

    Registered as a 'connect' listener only when sqlite_synchronous is
    False (see create_engine); trades durability for speed.
    """
    dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
    """Ensure other greenthreads get a chance to be executed.

    If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
    execute instead of time.sleep(0).

    Force a context switch. With common database backends (eg MySQLdb and
    sqlite), there is no implicit yield caused by network I/O since they are
    implemented by C libraries that eventlet cannot monkey patch.
    """
    # Registered as a 'checkin' pool listener in create_engine.
    time.sleep(0)
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
cursor = dbapi_conn.cursor()
try:
ping_sql = 'select 1'
if engine.name == 'ibm_db_sa':
# DB2 requires a table expression
ping_sql = 'select 1 from (values (1)) AS t1'
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg)
else:
raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
    """Force the MySQL session into TRADITIONAL sql_mode.

    MySQL's default mode silently truncates over-long strings on
    INSERT/UPDATE, emitting only a warning — a data-corruption hazard.
    TRADITIONAL mode turns such truncations into hard errors.
    """
    _set_session_sql_mode(dbapi_con, connection_rec, connection_proxy,
                          sql_mode='TRADITIONAL')
def _set_session_sql_mode(dbapi_con, connection_rec,
                          connection_proxy, sql_mode=None):
    """Apply and then verify the MySQL session sql_mode.

    A non-None *sql_mode* is written to the session. Passing '' (empty
    string) clears the session mode, overriding any server default, while
    None leaves the server-side setting untouched. The effective mode is
    then read back and logged; a warning is emitted unless a strict mode
    (TRADITIONAL or STRICT_ALL_TABLES) is in force.
    """
    cursor = dbapi_con.cursor()
    if sql_mode is not None:
        cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
    # Read back the mode actually in effect: even when unset by our own
    # config, the server configuration may impose a specific SQL mode.
    cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
    row = cursor.fetchone()
    if row is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return
    realmode = row[1]
    LOG.info(_LI('MySQL server mode set to %s') % realmode)
    # 'TRADITIONAL' enables several other strict modes, so substring
    # matching is required here.
    effective = realmode.upper()
    if 'TRADITIONAL' not in effective and 'STRICT_ALL_TABLES' not in effective:
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES")
                    % realmode)
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def _raise_if_db_connection_lost(error, engine):
# NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires connection and cursor in incoming parameters,
# but we have no possibility to create connection if DB
# is not available, so in such case reconnect fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass to function None as placeholder
# instead of connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  mysql_traditional_mode=False, idle_timeout=3600,
                  connection_debug=0, max_pool_size=None, max_overflow=None,
                  pool_timeout=None, sqlite_synchronous=True,
                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine.

    Configures pooling, SQL debug logging and per-backend connection
    listeners, then performs an initial connect with retries:
    max_retries == -1 retries forever, sleeping retry_interval seconds
    between attempts.
    """
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }
    logger = logging.getLogger('sqlalchemy.engine')
    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)
    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool
        # In-memory sqlite: keep a single shared connection, usable from
        # any thread.
        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout
    engine = sqlalchemy.create_engine(sql_connection, **engine_args)
    # Give other greenthreads a chance to run whenever a connection is
    # returned to the pool.
    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
    if engine.name in ['mysql', 'ibm_db_sa']:
        # Liveness-check connections on checkout (see _ping_listener).
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            # mysql_traditional_mode is a legacy alias for
            # mysql_sql_mode='TRADITIONAL'.
            if mysql_traditional_mode:
                mysql_sql_mode = 'TRADITIONAL'
            if mysql_sql_mode:
                mode_callback = functools.partial(_set_session_sql_mode,
                                                  sql_mode=mysql_sql_mode)
                sqlalchemy.event.listen(engine, 'checkout', mode_callback)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()
    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise
        remaining = max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                # Give up when attempts are exhausted or the failure is
                # not a connection-type error.
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
    def soft_delete(self, synchronize_session='evaluate'):
        # Bulk-mark matching rows as deleted: 'deleted' takes the row's own
        # id, 'updated_at' is reassigned to itself (i.e. left unchanged)
        # and 'deleted_at' records the deletion time.
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
    # Each public entry point is wrapped with _wrap_db_error so that raw
    # SQLAlchemy/DBAPI exceptions are translated into this library's own
    # DB error types instead of leaking to callers.
    @_wrap_db_error
    def query(self, *args, **kwargs):
        """Delegate to the base query(), translating DB errors."""
        return super(Session, self).query(*args, **kwargs)
    @_wrap_db_error
    def flush(self, *args, **kwargs):
        """Delegate to the base flush(), translating DB errors."""
        return super(Session, self).flush(*args, **kwargs)
    @_wrap_db_error
    def execute(self, *args, **kwargs):
        """Delegate to the base execute(), translating DB errors."""
        return super(Session, self).execute(*args, **kwargs)
    @_wrap_db_error
    def commit(self, *args, **kwargs):
        """Delegate to the base commit(), translating DB errors."""
        return super(Session, self).commit(*args, **kwargs)
    def begin(self, **kw):
        """Begin a transaction, returning a wrapped transaction object.

        The returned SessionTransaction has its class swapped to
        SessionTransactionWrapper so that its commit()/rollback() also go
        through _wrap_db_error.
        """
        trans = super(Session, self).begin(**kw)
        trans.__class__ = SessionTransactionWrapper
        return trans
class SessionTransactionWrapper(sqlalchemy.orm.session.SessionTransaction):
    """SessionTransaction variant whose commit/rollback translate DB errors.

    Instances are never constructed directly: Session.begin() creates a
    plain SessionTransaction and then reassigns its __class__ to this one.
    """
    @property
    def bind(self):
        # Expose the owning session's bind (engine/connection), matching
        # what callers expect from a SessionTransaction.
        return self.session.bind
    @_wrap_db_error
    def commit(self, *args, **kwargs):
        """Delegate to the base commit(), translating DB errors."""
        return super(SessionTransactionWrapper, self).commit(*args, **kwargs)
    @_wrap_db_error
    def rollback(self, *args, **kwargs):
        """Delegate to the base rollback(), translating DB errors."""
        return super(SessionTransactionWrapper, self).rollback(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine.

    The factory produces this module's Session subclass (with Query as its
    query class) so that DB errors raised by session operations are
    translated by _wrap_db_error.
    """
    maker_kwargs = {
        'bind': engine,
        'class_': Session,
        'query_cls': Query,
        'autocommit': autocommit,
        'expire_on_commit': expire_on_commit,
    }
    return sqlalchemy.orm.sessionmaker(**maker_kwargs)
def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    # (filename suffix, function name) pairs for frames that are just
    # plumbing around the query and add no debugging value.
    skipped_frames = (
        ('session.py', '_do_query'),
        ('api.py', 'wrapper'),
        ('utils.py', '_inner'),
        ('exception.py', '_wrap'),
    )

    def _do_query(self, q):
        entries = []
        for fname, lineno, func, text in traceback.extract_stack():
            if any(fname.endswith(sfx) and func == name
                   for sfx, name in skipped_frames):
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if fname.endswith('db/api.py'):
                continue
            # only trace inside nova
            idx = fname.rfind('nova')
            if idx == -1:
                continue
            entries.append("File:%s:%s Method:%s() Line:%s"
                           % (fname[idx:], lineno, func, text))
        if entries:
            qq = "%s /* %s */" % (q, " | ".join(entries))
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
class EngineFacade(object):
    """A helper class for removing of global engine instances from nova.db.

    As a library, nova.db can't decide where to store/when to create engine
    and sessionmaker instances, so this must be left for a target application.

    On the other hand, in order to simplify the adoption of nova.db changes,
    we'll provide a helper class, which creates engine and sessionmaker
    on its instantiation and provides get_engine()/get_session() methods
    that are compatible with corresponding utility functions that currently
    exist in target projects, e.g. in Nova.

    engine/sessionmaker instances will still be global (and they are meant to
    be global), but they will be stored in the app context, rather that in the
    nova.db context.

    Note: using of this helper is completely optional and you are encouraged to
    integrate engine/sessionmaker instances into your apps any way you like
    (e.g. one might want to bind a session to a request context). Two important
    things to remember:

    1. An Engine instance is effectively a pool of DB connections, so it's
       meant to be shared (and it's thread-safe).
    2. A Session instance is not meant to be shared and represents a DB
       transactional context (i.e. it's not thread-safe). sessionmaker is
       a factory of sessions.
    """

    def __init__(self, sql_connection,
                 sqlite_fk=False, mysql_sql_mode=None,
                 autocommit=True, expire_on_commit=False, **kwargs):
        """Initialize engine and sessionmaker instances.

        :param sqlite_fk: enable foreign keys in SQLite
        :type sqlite_fk: bool

        :param mysql_sql_mode: set SQL mode in MySQL
        :type mysql_sql_mode: string

        :param autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :param expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        Keyword arguments:

        :keyword idle_timeout: timeout before idle sql connections are reaped
                               (defaults to 3600)
        :keyword connection_debug: verbosity of SQL debugging information.
                                   0=None, 100=Everything (defaults to 0)
        :keyword max_pool_size: maximum number of SQL connections to keep open
                                in a pool (defaults to SQLAlchemy settings)
        :keyword max_overflow: if set, use this value for max_overflow with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword pool_timeout: if set, use this value for pool_timeout with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
                                     (defaults to True)
        :keyword connection_trace: add python stack traces to SQL as comment
                                   strings (defaults to False)
        :keyword max_retries: maximum db connection retries during startup.
                              (setting -1 implies an infinite retry count)
                              (defaults to 10)
        :keyword retry_interval: interval between retries of opening a sql
                                 connection (defaults to 10)
        """
        super(EngineFacade, self).__init__()

        self._engine = create_engine(
            sql_connection=sql_connection,
            sqlite_fk=sqlite_fk,
            mysql_sql_mode=mysql_sql_mode,
            idle_timeout=kwargs.get('idle_timeout', 3600),
            connection_debug=kwargs.get('connection_debug', 0),
            max_pool_size=kwargs.get('max_pool_size'),
            max_overflow=kwargs.get('max_overflow'),
            pool_timeout=kwargs.get('pool_timeout'),
            sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
            connection_trace=kwargs.get('connection_trace', False),
            max_retries=kwargs.get('max_retries', 10),
            retry_interval=kwargs.get('retry_interval', 10))
        self._session_maker = get_maker(
            engine=self._engine,
            autocommit=autocommit,
            expire_on_commit=expire_on_commit)

    def get_engine(self):
        """Get the engine instance (note, that it's shared)."""
        return self._engine

    def get_session(self, **kwargs):
        """Get a Session instance.

        If passed, keyword arguments values override the ones used when the
        sessionmaker instance was created.

        :keyword autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :keyword expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool
        """
        # Iterate over a snapshot of the keys: deleting entries from a dict
        # while iterating it directly raises "RuntimeError: dictionary
        # changed size during iteration".
        for arg in list(kwargs):
            if arg not in ('autocommit', 'expire_on_commit'):
                del kwargs[arg]

        return self._session_maker(**kwargs)
| luogangyi/bcec-nova | nova/openstack/common/db/sqlalchemy/session.py | Python | apache-2.0 | 34,679 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.