| id (int64, 0-458k) | file_name (string, length 4-119) | file_path (string, length 14-227) | content (string, length 24-9.96M) | size (int64, 24-9.96M) | language (stringclasses, 1 value) | extension (stringclasses, 14 values) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, length 7-101) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (stringclasses, 12 values) | repo_extraction_date (stringclasses, 433 values) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 6,400 | test_contactdowntimes.py | shinken-solutions_shinken/test/test_contactdowntimes.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test host- and service-downtimes.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestContactDowntime(ShinkenTest):
def test_contact_downtime(self):
self.print_header()
# schedule a 10-minute downtime
# downtime must be active
# consume a good result, sleep for a minute
# downtime must be active
# consume a bad result
# downtime must be active
# no notification must be found in broks
duration = 600
now = time.time()
# downtime valid for the next 10 minutes
test_contact = self.sched.contacts.find_by_name('test_contact')
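# Wire format (standard Nagios-style external command):
# [<timestamp>] SCHEDULE_CONTACT_DOWNTIME;<contact_name>;<start_time>;<end_time>;<author>;<comment>
# where start/end are epoch seconds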
cmd = "[%lu] SCHEDULE_CONTACT_DOWNTIME;test_contact;%d;%d;lausser;blablub" % (now, now, now + duration)
self.sched.run_external_command(cmd)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# Change the notif interval, so we can notify as soon as we want
svc.notification_interval = 0.001
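# notification_interval is in scheduling time units (minutes with the default
# interval_length of 60s), so 0.001 effectively allows immediate renotification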
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
#time.sleep(20)
# We loop; the downtime will be checked and activated
self.scheduler_loop(1, [[svc, 0, 'OK'], [host, 0, 'UP']])
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;STARTED')
self.show_and_clear_logs()
print("downtime was scheduled. check its activity and the comment\n"*5)
self.assertEqual(1, len(self.sched.contact_downtimes))
self.assertEqual(1, len(test_contact.downtimes))
self.assertIn(test_contact.downtimes[0], self.sched.contact_downtimes.values())
self.assertTrue(test_contact.downtimes[0].is_in_effect)
self.assertFalse(test_contact.downtimes[0].can_be_deleted)
# Ok, the downtime is defined as it should be; now check it does its job:
# no notifications must be raised for this contact during the downtime
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
# We should NOT see any service notification
self.assert_no_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
# Now we shorten the downtime a lot so it will stop at now + 1 sec.
test_contact.downtimes[0].end_time = time.time() + 1
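# end_time is an absolute epoch timestamp, so pulling it to now + 1 lets the
# scheduler loop after the sleep(2) below see the downtime as expired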
time.sleep(2)
# We invalidate it with a scheduler loop
self.scheduler_loop(1, [])
# So we should be out now, with a log
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;STOPPED')
self.show_and_clear_logs()
print("\n\nDowntime was ended. Check it is really stopped")
self.assertEqual(0, len(self.sched.contact_downtimes))
self.assertEqual(0, len(test_contact.downtimes))
for n in svc.notifications_in_progress.values():
print("NOTIF", n, n.t_to_go, time.time())
# Now we want this contact to be really notified!
# The downtime is over, so the contact should get notifications again
time.sleep(1)
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
self.assert_any_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
for n in svc.notifications_in_progress.values():
print("NOTIF", n, n.t_to_go, time.time(), time.time() - n.t_to_go)
def test_contact_downtime_and_cancel(self):
self.print_header()
# schedule a 10-minute downtime
# downtime must be active
# consume a good result, sleep for a minute
# downtime must be active
# consume a bad result
# downtime must be active
# no notification must be found in broks
duration = 600
now = time.time()
# downtime valid for the next 10 minutes
test_contact = self.sched.contacts.find_by_name('test_contact')
cmd = "[%lu] SCHEDULE_CONTACT_DOWNTIME;test_contact;%d;%d;lausser;blablub" % (now, now, now + duration)
self.sched.run_external_command(cmd)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# Change the notif interval, so we can notify as soon as we want
svc.notification_interval = 0.001
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
#time.sleep(20)
# We loop; the downtime will be checked and activated
self.scheduler_loop(1, [[svc, 0, 'OK'], [host, 0, 'UP']])
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;STARTED')
self.show_and_clear_logs()
print("downtime was scheduled. check its activity and the comment")
self.assertEqual(1, len(self.sched.contact_downtimes))
self.assertEqual(1, len(test_contact.downtimes))
self.assertIn(test_contact.downtimes[0], self.sched.contact_downtimes.values())
self.assertTrue(test_contact.downtimes[0].is_in_effect)
self.assertFalse(test_contact.downtimes[0].can_be_deleted)
time.sleep(1)
# Ok, the downtime is defined as it should be; now check it does its job:
# no notifications must be raised for this contact during the downtime
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
# We should NOT see any service notification
self.assert_no_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
downtime_id = test_contact.downtimes[0].id
# OK, Now we cancel this downtime, we do not need it anymore
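# DEL_CONTACT_DOWNTIME takes the downtime id; as the assertions below show, it
# only flags the downtime (can_be_deleted), and the actual removal happens on
# the next scheduler loop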
cmd = "[%lu] DEL_CONTACT_DOWNTIME;%d" % (now, downtime_id)
self.sched.run_external_command(cmd)
# We check that the downtime is tagged for removal
self.assertTrue(test_contact.downtimes[0].can_be_deleted)
# We really delete it
self.scheduler_loop(1, [])
# So we should be out now, with a log
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;CANCELLED')
self.show_and_clear_logs()
print("Downtime was cancelled")
self.assertEqual(0, len(self.sched.contact_downtimes))
self.assertEqual(0, len(test_contact.downtimes))
time.sleep(1)
# Now we want this contact to be really notified!
# The downtime was cancelled, so notifications should be raised again
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
self.assert_any_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
if __name__ == '__main__':
unittest.main()
| 7,796 | Python | .py | 151 | 43.748344 | 111 | 0.661536 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,401 | test_multiple_not_hostgroups.py | shinken-solutions_shinken/test/test_multiple_not_hostgroups.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestMultipleNotHG(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_multiple_not_hostgroups.cfg')
def test_dummy(self):
for s in self.sched.services:
print("SERVICES", s.get_full_name())
svc = self.sched.services.find_srv_by_name_and_hostname("hst_in_BIG", "THE_SERVICE")
self.assertIsNot(svc, None)
svc = self.sched.services.find_srv_by_name_and_hostname("hst_in_IncludeLast", "THE_SERVICE")
self.assertIsNot(svc, None)
svc = self.sched.services.find_srv_by_name_and_hostname("hst_in_NotOne", "THE_SERVICE")
self.assertIs(None, svc)
svc = self.sched.services.find_srv_by_name_and_hostname("hst_in_NotTwo", "THE_SERVICE")
self.assertIs(None, svc)
if __name__ == '__main__':
unittest.main()
| 1,819 | Python | .py | 40 | 41.525 | 100 | 0.72238 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,402 | test_servicegroups.py | shinken-solutions_shinken/test/test_servicegroups.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
from shinken_test import *
class TestServicegroup(ShinkenTest):
def setUp(self):
self.setup_with_file("etc/shinken_servicegroups_generated.cfg")
def test_servicegroup(self):
self.assertEqual(True, self.conf.conf_is_correct)
sgs = []
for name in ["MYSVCGP", "MYSVCGP2", "MYSVCGP3", "MYSVCGP4"]:
sg = self.sched.servicegroups.find_by_name(name)
sgs.append(sg)
self.assertIsNot(sg, None)
svc3 = self.sched.services.find_srv_by_name_and_hostname("fake host", "fake svc3")
svc4 = self.sched.services.find_srv_by_name_and_hostname("fake host", "fake svc4")
self.assertIn(svc3, sgs[0].members)
self.assertIn(svc3, sgs[1].members)
self.assertIn(svc4, sgs[2].members)
self.assertIn(svc4, sgs[3].members)
self.assertIn(sgs[0].get_name(), [sg.get_name() for sg in svc3.servicegroups])
self.assertIn(sgs[1].get_name(), [sg.get_name() for sg in svc3.servicegroups])
self.assertIn(sgs[2].get_name(), [sg.get_name() for sg in svc4.servicegroups])
self.assertIn(sgs[3].get_name(), [sg.get_name() for sg in svc4.servicegroups])
if __name__ == '__main__':
unittest.main()
| 2,242 | Python | .py | 48 | 42.145833 | 90 | 0.704077 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,403 | test_no_event_handler_during_downtime.py | shinken-solutions_shinken/test/test_no_event_handler_during_downtime.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestNoEventHandlerDuringDowntime(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_no_event_handler_during_downtime.cfg')
def test_no_event_handler_during_downtime(self):
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'OK | value1=0 value2=0']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
now = time.time()
# downtime valid for the next hour
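# Field order (Nagios-style): SCHEDULE_SVC_DOWNTIME;<host>;<service>;<start>;<end>;
# <fixed>;<trigger_id>;<duration>;<author>;<comment> - here a fixed (1) downtime
# with no trigger (0) and a 3600s duration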
cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + 3600, 3600)
self.sched.run_external_command(cmd)
# Make a loop to activate the downtime
self.scheduler_loop(1, [])
# We check that the downtime is really active
self.assert_any_log_match('SERVICE DOWNTIME ALERT.*;STARTED')
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'OK | value1=0 value2=0']])
# There should be NO event handlers during a downtime!
self.assert_no_log_match('SERVICE EVENT HANDLER.*;CRITICAL')
if __name__ == '__main__':
unittest.main()
| 2,783 | Python | .py | 56 | 44.5 | 133 | 0.681047 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,404 | test_bad_hostgroup.py | shinken-solutions_shinken/test/test_bad_hostgroup.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestBadHostGroupConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_hg_conf.cfg')
def test_bad_conf(self):
self.assertFalse(self.conf.conf_is_correct)
self.assert_any_log_match("itemgroup::.* as hostgroup, got unknown member BADMEMBERHG")
self.assert_no_log_match("itemgroup::.* as servicegroup, got unknown member BADMEMBERHG")
if __name__ == '__main__':
unittest.main()
| 1,402 | Python | .py | 32 | 41.15625 | 97 | 0.749449 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,405 | test_bad_servicedependencies.py | shinken-solutions_shinken/test/test_bad_servicedependencies.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestBadServiceDependencies(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_servicedependencies.cfg')
def test_bad_conf(self):
self.assertFalse(self.conf.conf_is_correct)
self.assert_any_log_match("hostdependencies conf incorrect!!")
self.assert_any_log_match("servicedependencies conf incorrect!!")
self.assert_any_log_match("The host object 'fake host' is part of a circular parent/child chain!")
if __name__ == '__main__':
unittest.main()
| 1,586 | Python | .py | 36 | 41.361111 | 107 | 0.74562 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,406 | test_modulemanager.py | shinken-solutions_shinken/test/test_modulemanager.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
from shinken_test import (
ShinkenTest, time_hacker, unittest
)
from shinken.modulesmanager import ModulesManager
from shinken.objects.module import Module
modules_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'modules')
class TestModuleManager(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_1r_1h_1s.cfg')
time_hacker.set_real_time()
# Try to see if the module manager can manage modules
def test_modulemanager(self):
mod = Module({'module_name': 'DummyExternal', 'module_type': 'dummy_broker_external'})
self.modulemanager = ModulesManager('broker', "var/lib/shinken/modules", [])
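# A Module is built from a plain dict of config properties; ModulesManager takes
# the daemon type ('broker') and the modules directory (the third argument, an
# empty list here, is presumably an initial module list - an assumption)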
self.modulemanager.set_modules([mod])
self.modulemanager.load_and_init()
# And start external ones, like our LiveStatus
self.modulemanager.start_external_instances()
print("I correctly loaded the modules: %s " % ([inst.get_name() for inst in self.modulemanager.instances]))
print("*** First kill ****")
# Now I will try to kill the external (dummy) module
ls = self.modulemanager.instances[0]
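# __kill is name-mangled by Python: the double-underscore method defined on
# BaseModule has to be reached from outside as _BaseModule__kill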
ls._BaseModule__kill()
time.sleep(1)
print("Check alive?")
print("Is alive?", ls.process.is_alive())
# Should be dead
self.assertFalse(ls.process.is_alive())
self.modulemanager.check_alive_instances()
self.modulemanager.try_to_restart_deads()
# In fact it's too early, so it won't do it
# Here the inst should still be dead
print("Is alive?", ls.process.is_alive())
self.assertFalse(ls.process.is_alive())
# So we lie
ls.last_init_try = -5
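# last_init_try appears to hold the timestamp of the last (re)start attempt;
# forcing it into the past makes the retry delay look elapsed, so
# try_to_restart_deads() will actually attempt the restart this time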
self.modulemanager.check_alive_instances()
self.modulemanager.try_to_restart_deads()
# This time the retry delay looks elapsed, so the restart is attempted
# Here the inst should be alive again
print("Is alive?", ls.process.is_alive())
self.assertTrue(ls.process.is_alive())
# Nothing more should remain in the module manager's to_restart list
self.assertEqual([], self.modulemanager.to_restart)
# Now we look for time restart so we kill it again
ls._BaseModule__kill()
time.sleep(1)
self.assertFalse(ls.process.is_alive())
# Should be too early
self.modulemanager.check_alive_instances()
self.modulemanager.try_to_restart_deads()
print("Is alive or not", ls.process.is_alive())
self.assertFalse(ls.process.is_alive())
# We lie for the test again
ls.last_init_try = -5
self.modulemanager.check_alive_instances()
self.modulemanager.try_to_restart_deads()
# Here the inst should be alive again
print("Is alive?", ls.process.is_alive())
self.assertTrue(ls.process.is_alive())
# And we clear all now
print("Ask to die")
self.modulemanager.stop_all()
print("Died")
if __name__ == '__main__':
unittest.main()
| 3,989 | Python | .py | 92 | 37.108696 | 115 | 0.677594 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,407 | test_scheduler_init.py | shinken-solutions_shinken/test/test_scheduler_init.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import signal
import subprocess
from time import sleep
from shinken_test import *
import shinken.log as shinken_log
from shinken.daemons.schedulerdaemon import Shinken
from shinken.daemons.arbiterdaemon import Arbiter
daemons_config = {
Shinken: "etc/test_scheduler_init/schedulerd.ini",
Arbiter: ["etc/test_scheduler_init/shinken.cfg"]
}
class testSchedulerInit(ShinkenTest):
def setUp(self):
time_hacker.set_real_time()
def create_daemon(self):
cls = Shinken
return cls(daemons_config[cls], False, True, True, None, '')
def _get_subproc_data(self, proc):
try:
proc.terminate() # make sure the proc has exited..
proc.wait()
except Exception as err:
print("prob on terminate and wait subproc: %s" % err)
data = {}
data['out'] = proc.stdout.read()
data['err'] = proc.stderr.read()
data['rc'] = proc.returncode
return data
def test_scheduler_init(self):
#shinken_log.local_log = None # otherwise we get some "trash" logs..
d = self.create_daemon()
d.load_config_file()
d.http_backend = 'wsgiref'
d.do_daemon_init_and_start(fake=True)
d.load_modules_manager()
# Test registered function list
d.http_daemon.register(d.interface)
reg_list = d.http_daemon.registered_fun
expected_list = ['get_external_commands', 'get_running_id', 'got_conf', 'have_conf',
'ping', 'push_broks', 'push_host_names', 'put_conf', 'remove_from_conf',
'run_external_commands', 'set_log_level', 'wait_new_conf', 'what_i_managed']
for fun in expected_list:
assert(fun in reg_list)
# Launch an arbiter so that the scheduler gets a conf and inits
# notice: make this process a session leader with preexec_fn=os.setsid so that
# killing it also kills its children
args = [sys.executable, "../bin/shinken-arbiter.py", "-c", daemons_config[Arbiter][0], "-d"]
print("Launching sub arbiter with", args)
self.arb_proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid
)
# Ok, now the conf
d.wait_for_initial_conf(timeout=20)
self.assertTrue(d.new_conf)
d.setup_new_conf()
# Test the registered function list again, to check that no functions were overridden
reg_list = d.http_daemon.registered_fun
expected_list = ['get_external_commands', 'get_running_id', 'got_conf', 'have_conf',
'ping', 'push_broks', 'push_host_names', 'put_conf', 'remove_from_conf',
'run_external_commands', 'set_log_level', 'wait_new_conf', 'what_i_managed',
'get_checks', 'put_results', 'fill_initial_broks', 'get_broks']
for fun in expected_list:
assert(fun in reg_list)
# Test that the use_ssl parameter generates the right uri
for poller in d.pollers.values():
if poller['use_ssl']:
assert poller['uri'] == 'https://localhost:7771/'
else:
assert poller['uri'] == 'http://localhost:7771/'
# Test reactionners are initialized like pollers
assert d.reactionners != {} # Previously this stayed {} forever
for reactionner in d.reactionners.values():
assert reactionner['uri'] == 'http://localhost:7769/' # Test dummy value
# I want a simple init
d.must_run = False
d.sched.must_run = False
d.sched.run()
# Test whether the 'con' key is present. A passive daemon should have one
for poller in d.pollers.values():
assert 'con' not in poller # Ensure the 'con' key is absent: the daemon is not passive, so we did not try to connect
for reactionner in d.reactionners.values():
assert reactionner['con'] is None # Previously only pollers were initialized (should be None); here the daemon is passive
# "Clean" shutdown
sleep(2)
try:
with open("tmp/arbiterd.pid", "r") as f:
pid = int(f.read())
print("KILLING %d" % pid)
d.do_stop()
time.sleep(3)
os.kill(pid, signal.SIGTERM)
except Exception as err:
proc = self.arb_proc
data = self._get_subproc_data(proc)
data.update(err=err)
self.assertTrue(False,
"Could not read pid file or so : %(err)s\n"
"rc=%(rc)s\nstdout=%(out)s\nstderr=%(err)s" % data)
if __name__ == '__main__':
unittest.main()
| 5,681 | Python | .py | 127 | 36.23622 | 122 | 0.625294 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,408 | test_end_parsing_types.py | shinken-solutions_shinken/test/test_end_parsing_types.py |
#!/usr/bin/env python
# Copyright (C) 2009-2015:
# Coavoux Sebastien <s.coavoux@free.fr>
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import unittest
from shinken_test import time_hacker
from shinken.log import logger
from shinken.objects.config import Config
from shinken.brok import Brok
from shinken.external_command import ExternalCommand
from shinken.property import UnusedProp, StringProp, IntegerProp, \
BoolProp, CharProp, DictProp, FloatProp, ListProp, AddrProp, ToGuessProp
class TestEndParsingType(unittest.TestCase):
def map_type(self, obj):
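# Map each Shinken property class to the Python type(s) its parsed value
# should have after config compilation; used by the assertIsInstance checks below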
# TODO: Replace all str with unicode when done in property.default attribute
# TODO: Fix ToGuessProp as it may be a list.
if isinstance(obj, ListProp):
return list
if isinstance(obj, StringProp):
return six.string_types
if isinstance(obj, UnusedProp):
return six.string_types
if isinstance(obj, BoolProp):
return bool
if isinstance(obj, IntegerProp):
return int
if isinstance(obj, FloatProp):
return float
if isinstance(obj, CharProp):
return six.string_types
if isinstance(obj, DictProp):
return dict
if isinstance(obj, AddrProp):
return six.string_types
if isinstance(obj, ToGuessProp):
return six.string_types
def print_header(self):
print("\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#")
print("#" + self.id().center(78) + "#")
print("#" + " " * 78 + "#\n" + "#" * 80 + "\n")
def add(self, b):
if isinstance(b, Brok):
self.broks[b.id] = b
return
if isinstance(b, ExternalCommand):
self.sched.run_external_command(b.cmd_line)
def test_types(self):
path = 'etc/shinken_1r_1h_1s.cfg'
time_hacker.set_my_time()
self.print_header()
# i am arbiter-like
self.broks = {}
self.me = None
self.log = logger
self.log.setLevel("INFO")
self.log.load_obj(self)
self.config_files = [path]
self.conf = Config()
buf = self.conf.read_config(self.config_files)
raw_objects = self.conf.read_config_buf(buf)
self.conf.create_objects_for_type(raw_objects, 'arbiter')
self.conf.create_objects_for_type(raw_objects, 'module')
self.conf.early_arbiter_linking()
self.conf.create_objects(raw_objects)
self.conf.instance_id = 0
self.conf.instance_name = 'test'
# Hack push_flavor, that is set by the dispatcher
self.conf.push_flavor = 0
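# The calls below replay, in order, the compilation pipeline the arbiter
# normally runs on a config: template linking/inheritance, explosion of
# duplicates, defaults, linkify, dependencies, then the correctness check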
self.conf.load_triggers()
self.conf.linkify_templates()
self.conf.apply_inheritance()
self.conf.explode()
self.conf.apply_implicit_inheritance()
self.conf.fill_default()
self.conf.remove_templates()
self.conf.override_properties()
self.conf.linkify()
self.conf.apply_dependencies()
self.conf.explode_global_conf()
self.conf.propagate_timezone_option()
self.conf.create_business_rules()
self.conf.create_business_rules_dependencies()
self.conf.is_correct()
# Cannot do it for all obj for now. We have to ensure unicode everywhere first
for objs in [self.conf.arbiters]:
for obj in objs:
#print("=== obj : %s ===" % obj.__class__)
for prop in obj.properties:
if hasattr(obj, prop):
value = getattr(obj, prop)
# We should get rid of None, maybe use the "neutral" value for the type
if value is not None:
#print("TESTING %s with value %s" % (prop, value))
self.assertIsInstance(value, self.map_type(obj.properties[prop]))
else:
print("Skipping %s " % prop)
#print("===")
# Manual check of several attr for self.conf.contacts
# because contacts contains unicode attr
for contact in self.conf.contacts:
for prop in ["notificationways", "host_notification_commands", "service_notification_commands"]:
if hasattr(contact, prop):
value = getattr(contact, prop)
# We should get rid of None, maybe use the "neutral" value for the type
if value is not None:
print("TESTING %s with value %s" % (prop, value))
self.assertIsInstance(value, self.map_type(contact.properties[prop]))
else:
print("Skipping %s " % prop)
# Same here
for notifway in self.conf.notificationways:
for prop in ["host_notification_commands", "service_notification_commands"]:
if hasattr(notifway, prop):
value = getattr(notifway, prop)
# We should get rid of None, maybe use the "neutral" value for the type
if value is not None:
print("TESTING %s with value %s" % (prop, value))
self.assertIsInstance(value, self.map_type(notifway.properties[prop]))
else:
print("Skipping %s " % prop)
if __name__ == '__main__':
unittest.main()
| 6,247 | Python | .py | 141 | 34.035461 | 108 | 0.605692 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,409 | test_initial_state.py | shinken-solutions_shinken/test/test_initial_state.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test object properties overriding.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import unittest, ShinkenTest
import re
class TestInitialState(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_initial_state.cfg')
def test_initial_state(self):
host0 = self.sched.hosts.find_by_name("test_host_0")
host1 = self.sched.hosts.find_by_name("test_host_1")
svc00 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_service_0")
svc01 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_service_1")
svc10 = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_service_0")
svc11 = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_service_1")
self.assertIsNotNone(host0)
self.assertIsNotNone(host1)
self.assertIsNotNone(svc00)
self.assertIsNotNone(svc01)
self.assertIsNotNone(svc10)
self.assertIsNotNone(svc11)
self.assertEqual(host0.state, "PENDING")
self.assertEqual(host0.state_id, 0)
self.assertEqual(host0.output, "")
self.assertEqual(host1.state, "DOWN")
self.assertEqual(host1.state_id, 1)
self.assertEqual(host1.output, "No host result received")
self.assertEqual(svc00.state, "PENDING")
self.assertEqual(svc00.state_id, 0)
self.assertEqual(svc00.output, "")
self.assertEqual(svc01.state, "CRITICAL")
self.assertEqual(svc01.state_id, 2)
self.assertEqual(svc01.output, "No sevrvice result received")
self.assertEqual(svc10.state, "PENDING")
self.assertEqual(svc10.state_id, 0)
self.assertEqual(svc10.output, "")
self.assertEqual(svc11.state, "CRITICAL")
self.assertEqual(svc11.state_id, 2)
self.assertEqual(svc11.output, "No sevrvice result received")
self.scheduler_loop(1, [
[host0, 0, 'UP test_host_0'],
[host1, 0, 'UP test_host_1'],
[svc00, 0, 'OK test_host_0/test_service_0'],
[svc01, 0, 'OK test_host_0/test_service_1'],
[svc10, 0, 'OK test_host_1/test_service_0'],
[svc11, 0, 'OK test_host_1/test_service_1'],
], do_sleep=True)
self.assertEqual(host0.state, "UP")
self.assertEqual(host0.state_id, 0)
self.assertEqual(host0.output, "UP test_host_0")
self.assertEqual(host1.state, "UP")
self.assertEqual(host1.state_id, 0)
self.assertEqual(host1.output, "UP test_host_1")
self.assertEqual(svc00.state, "OK")
self.assertEqual(svc00.state_id, 0)
self.assertEqual(svc00.output, "OK test_host_0/test_service_0")
self.assertEqual(svc01.state, "OK")
self.assertEqual(svc01.state_id, 0)
self.assertEqual(svc01.output, "OK test_host_0/test_service_1")
self.assertEqual(svc10.state, "OK")
self.assertEqual(svc10.state_id, 0)
self.assertEqual(svc10.output, "OK test_host_1/test_service_0")
self.assertEqual(svc11.state, "OK")
self.assertEqual(svc11.state_id, 0)
self.assertEqual(svc11.output, "OK test_host_1/test_service_1")
class TestInitialStateBadConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_initial_state_bad.cfg')
def test_bad_conf(self):
self.assertFalse(self.conf.conf_is_correct)
# Get the arbiter's log broks
#[b.prepare() for b in self.broks]
logs = [b.data['log'] for b in self.broks if b.type == 'log']
self.assertEqual(1, len([log for log in logs if re.search('invalid initial_state: a, should be one of d, o, u', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('invalid initial_state: a, should be one of c, o, u, w', log)]) )
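# For reference: hosts accept initial_state o/d/u (UP/DOWN/UNREACHABLE) and
# services o/w/c/u (OK/WARNING/CRITICAL/UNKNOWN), which is exactly what the
# two messages above enumerate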
if __name__ == '__main__':
unittest.main()
| 4,781 | Python | .py | 97 | 42.371134 | 131 | 0.674882 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,410 | test_srv_nohost.py | shinken-solutions_shinken/test/test_srv_nohost.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestServiceWhitNoHost(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_srv_nohost.cfg')
# Nagios allows services with no host to exist; it will just drop them
def test_ServiceWhitNoHost(self):
self.assertTrue(self.sched.conf.is_correct)
if __name__ == '__main__':
unittest.main()
| 1,330 | Python | .py | 32 | 39.3125 | 82 | 0.756589 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,411 | test_macromodulations.py | shinken-solutions_shinken/test/test_macromodulations.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestMacroModulations(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_macromodulations.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("host_modulated")
self.assertIsNot(host, None)
print(host.macromodulations)
mod = self.sched.macromodulations.find_by_name("MODULATION")
self.assertIsNot(mod, None)
self.assertIn(mod, host.macromodulations)
c = None
for c in host.get_checks_in_progress():
print(c.command)
# The host has 2 modulations: the first with the value MODULATED
# and the second with NOT_THE_GOOD. Both are currently active, but we want the first one
self.assertEqual('plugins/nothing MODULATED', c.command)
if __name__ == '__main__':
unittest.main()
| 2,022 | Python | .py | 48 | 37.125 | 99 | 0.706932 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,412 | test_notif_too_much.py | shinken-solutions_shinken/test/test_notif_too_much.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestNotifTooMuch(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_notif_too_much.cfg')
# The goal of this test is to check that we manage this case:
# 2 notification ways on one contact; for a given timeperiod one should
# activate and the other should not
def test_notif_too_much(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
test_contact = self.sched.contacts.find_by_name('test_contact')
self.assertIsNot(test_contact, None)
self.scheduler_loop(1, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
self.scheduler_loop(1, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
# We should NOT see a notification sent via notify-service2: it is the right contact
# but NOT the right period for that notification way. So the 24x7 one fires, not the 'never' one :)
self.assert_any_log_match('SERVICE NOTIFICATION.*;notify-service')
self.assert_no_log_match('SERVICE NOTIFICATION.*;notify-service2')
if __name__ == '__main__':
unittest.main()
| 2,880 | Python | .py | 58 | 44.448276 | 134 | 0.683986 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,413 | test_discovery_def.py | shinken-solutions_shinken/test/test_discovery_def.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestDiscoveryConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_discovery_def.cfg')
def test_look_for_discorule(self):
genhttp = self.sched.conf.discoveryrules.find_by_name('GenHttp')
self.assertIsNotNone(genhttp)
self.assertEqual('service', genhttp.creation_type)
self.assertEqual('80,443', genhttp.matches['openports'])
self.assertEqual('windows', genhttp.matches['os'])
key = 'osversion'
value = '2003'
# Should not match this
self.assertEqual(False, genhttp.is_matching(key, value) )
# But should match this one
key = 'openports'
value = '80'
self.assertEqual(True, genhttp.is_matching(key, value) )
# Now look for a list of matchings
l = {'openports': '80', 'os': 'windows'}
# should match this
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
# Match this one too
l = {'openports': '80', 'os': 'windows', 'super': 'man'}
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
# But not this one
l = {'openports': '80'}
self.assertEqual(False, genhttp.is_matching_disco_datas(l))
# Now search the NOT rule
genhttpnowin = self.sched.conf.discoveryrules.find_by_name('GenHttpNotWindows')
# Should manage this
l = {'openports': '80', 'os': 'linux'}
self.assertEqual(True, genhttpnowin.is_matching_disco_datas(l))
# But NOT this one
l = {'openports': '80', 'os': 'windows'}
print("Should NOT match")
self.assertEqual(False, genhttpnowin.is_matching_disco_datas(l))
# Now look for strict rule application
genhttpstrict = self.sched.conf.discoveryrules.find_by_name('GenHttpStrict')
self.assertIsNot(genhttpstrict, None)
key = 'openports'
value = '80,443'
self.assertEqual(True, genhttpstrict.is_matching(key, value) )
# But NOT this one
key = 'openports'
value = '800'
self.assertEqual(False, genhttpstrict.is_matching(key, value) )
# Look for good definition and call of a discoveryrun
def test_look_for_discorun(self):
nmap = self.sched.conf.discoveryruns.find_by_name('nmap')
self.assertIsNotNone(nmap)
nmapcmd = self.sched.conf.commands.find_by_name('nmap_runner')
self.assertIsNotNone(nmapcmd)
self.assertIsNotNone(nmap.discoveryrun_command)
# Launch it
nmap.launch()
for i in range(1, 5):
nmap.check_finished()
if nmap.is_finished():
break
time.sleep(1)
print("Exit status", nmap.current_launch.exit_status)
print("Output", nmap.current_launch.output)
print("LongOutput", nmap.current_launch.long_output)
def test_look_for_host_discorule(self):
genhttp = self.sched.conf.discoveryrules.find_by_name('GenHttpHost')
self.assertIsNotNone(genhttp)
self.assertEqual('host', genhttp.creation_type)
self.assertEqual('^80$', genhttp.matches['openports'])
key = 'osversion'
value = '2003'
# Should not match this
self.assertEqual(False, genhttp.is_matching(key, value) )
# But should match this one
key = 'openports'
value = '80'
self.assertEqual(True, genhttp.is_matching(key, value) )
# Now look for a list of matchings
l = {'openports': '80', 'os': 'windows'}
# should match this
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
# Match this one too
l = {'openports': '80', 'os': 'windows', 'super': 'man'}
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
# And this last one
l = {'openports': '80'}
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
print("Writing properties")
print(genhttp.writing_properties)
def test_look_for_host_discorule_and_delete(self):
genhttp = self.sched.conf.discoveryrules.find_by_name('GenHttpHostRemoveLinux')
self.assertIsNotNone(genhttp)
self.assertEqual('host', genhttp.creation_type)
self.assertEqual('^80$', genhttp.matches['openports'])
key = 'os'
value = 'linux'
# Should not match this
self.assertEqual(False, genhttp.is_matching(key, value) )
# But should match this one
key = 'openports'
value = '80'
self.assertEqual(True, genhttp.is_matching(key, value) )
# Now look for a list of matchings
l = {'openports': '80', 'os': 'linux'}
# should match this
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
# Match this one too
l = {'openports': '80', 'os': 'linux', 'super': 'man'}
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
# And this last one
l = {'openports': '80'}
self.assertEqual(True, genhttp.is_matching_disco_datas(l))
print("Writing properties")
print(genhttp.writing_properties)
def test_discorun_matches(self):
linux = self.sched.conf.discoveryruns.find_by_name('linux')
self.assertIsNotNone(linux)
print(linux.__dict__)
self.assertEqual({u'osvendor': u'linux'}, linux.matches)
key = 'osvendor'
value = 'microsoft'
# Should not match this
self.assertEqual(False, linux.is_matching(key, value) )
key = 'osvendor'
value = 'linux'
# Should match this
self.assertEqual(True, linux.is_matching(key, value) )
# Now look for a list of matchings
l = {'openports': '80', 'osvendor': 'linux'}
# should match this
self.assertEqual(True, linux.is_matching_disco_datas(l))
if __name__ == '__main__':
unittest.main()
| 6,907 | Python | .py | 156 | 36.416667 | 87 | 0.646147 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,414 | test_business_correlator.py | shinken-solutions_shinken/test/test_business_correlator.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from shinken_test import *
class TestBusinesscorrel(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_business_correlator.cfg')
# We will try a simple bd1 OR db2
def test_simple_or_business_correlator(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('|', bp_rule.operand)
# We check for good parent/child links
# So svc_cor should be a son of svc_bd1 and svc_bd2
# and bd1 and bd2 should be parents of svc_cor
self.assertIn(svc_cor, svc_bd1.child_dependencies)
self.assertIn(svc_cor, svc_bd2.child_dependencies)
self.assertIn(svc_bd1, svc_cor.parent_dependencies)
self.assertIn(svc_bd2, svc_cor.parent_dependencies)
sons = bp_rule.sons
print("Sons,", sons)
# We've got 2 sons, 2 services nodes
self.assertEqual(2, len(sons))
self.assertEqual('service', sons[0].operand)
self.assertEqual(svc_bd1, sons[0].sons[0])
self.assertEqual('service', sons[1].operand)
self.assertEqual(svc_bd2, sons[1].sons[0])
# Now start working on the states
self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual('OK', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('SOFT', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
# The business rule must still be 0
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(2, svc_bd1.last_hard_state_id)
# The rule must still be 0 (it is an OR and bd2 is still OK)
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(2, svc_bd2.last_hard_state_id)
# And now the state of the rule must be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
# And If we set one WARNING?
self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(1, svc_bd2.last_hard_state_id)
# Must be WARNING (the best non-0 value)
state = bp_rule.get_state()
self.assertEqual(1, state)
# We will try a simple bd1 AND db2
def test_simple_and_business_correlator(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_And")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('&', bp_rule.operand)
sons = bp_rule.sons
print("Sons,", sons)
# We've got 2 sons, 2 services nodes
self.assertEqual(2, len(sons))
self.assertEqual('service', sons[0].operand)
self.assertEqual(svc_bd1, sons[0].sons[0])
self.assertEqual('service', sons[1].operand)
self.assertEqual(svc_bd2, sons[1].sons[0])
# Now start working on the states
self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual('OK', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('SOFT', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
# The business rule must still be 0
# because we want HARD states
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(2, svc_bd1.last_hard_state_id)
# The rule must go CRITICAL
state = bp_rule.get_state()
self.assertEqual(2, state)
# Now we also set bd2 as WARNING/HARD...
self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(1, svc_bd2.last_hard_state_id)
# And now the state of the rule must be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
# And If we set one WARNING too?
self.scheduler_loop(2, [[svc_bd1, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(1, svc_bd1.last_hard_state_id)
        # Must be WARNING (an AND keeps the worst value; both are WARNING here)
state = bp_rule.get_state()
self.assertEqual(1, state)
    # We will try a simple 1of: db1 OR/AND db2
def test_simple_1of_business_correlator(self):
self.run_simple_1of_business_correlator()
    # We will try a simple -1of: db1 OR/AND db2
def test_simple_1of_neg_business_correlator(self):
self.run_simple_1of_business_correlator(with_neg=True)
    # We will try a simple 50%of: db1 OR/AND db2
def test_simple_1of_pct_business_correlator(self):
self.run_simple_1of_business_correlator(with_pct=True)
    # We will try a simple -50%of: db1 OR/AND db2
def test_simple_1of_pct_neg_business_correlator(self):
self.run_simple_1of_business_correlator(with_pct=True, with_neg=True)
def run_simple_1of_business_correlator(self, with_pct=False, with_neg=False):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
if with_pct is True:
if with_neg is True:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of_pct_neg")
else:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of_pct")
else:
if with_neg is True:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of_neg")
else:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('of:', bp_rule.operand)
        # Simple 1of: so in fact a triple ('1','2','2') (1of, plus MAX,MAX for the WARNING/CRITICAL thresholds)
if with_pct is True:
if with_neg is True:
self.assertEqual(('-50%', '2', '2'), bp_rule.of_values)
else:
self.assertEqual(('50%', '2', '2'), bp_rule.of_values)
else:
if with_neg is True:
self.assertEqual(('-1', '2', '2'), bp_rule.of_values)
else:
self.assertEqual(('1', '2', '2'), bp_rule.of_values)
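        # Editor's note (hedged): the of_values triple reads as (X, Y, Z):
        # X sons must be OK for the rule to be OK, while Y and Z act as
        # WARNING/CRITICAL thresholds when the rule runs in "multiple
        # levels" mode (is_of_mul). A leading '-' appears to mean "all but
        # that many" and percentages are resolved against the son count;
        # only the parsed triple itself is asserted here.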
sons = bp_rule.sons
print("Sons,", sons)
        # We've got 2 sons, 2 service nodes
self.assertEqual(2, len(sons))
self.assertEqual('service', sons[0].operand)
self.assertEqual(svc_bd1, sons[0].sons[0])
self.assertEqual('service', sons[1].operand)
self.assertEqual(svc_bd2, sons[1].sons[0])
        # Now start working on the states
self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual('OK', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('SOFT', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
# The business rule must still be 0
        # because we want HARD states
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(2, svc_bd1.last_hard_state_id)
        # The rule must still be OK (one OK son out of 2 is enough)
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we also set bd2 as CRITICAL/HARD...
self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(2, svc_bd2.last_hard_state_id)
        # And now the state of the rule must be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
        # And if we set one to WARNING now?
self.scheduler_loop(2, [[svc_bd1, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(1, svc_bd1.last_hard_state_id)
        # Must be WARNING (no OK son left, but not all sons are CRITICAL either)
state = bp_rule.get_state()
self.assertEqual(1, state)
# We will try a simple 1of: test_router_0 OR/AND test_host_0
def test_simple_1of_business_correlator_with_hosts(self):
self.run_simple_1of_business_correlator_with_hosts()
# We will try a simple -1of: test_router_0 OR/AND test_host_0
def test_simple_1of_neg_business_correlator_with_hosts(self):
self.run_simple_1of_business_correlator_with_hosts(with_neg=True)
# We will try a simple 50%of: test_router_0 OR/AND test_host_0
def test_simple_1of_pct_business_correlator_with_hosts(self):
self.run_simple_1of_business_correlator_with_hosts(with_pct=True)
# We will try a simple -50%of: test_router_0 OR/AND test_host_0
def test_simple_1of_pct_neg_business_correlator_with_hosts(self):
self.run_simple_1of_business_correlator_with_hosts(with_pct=True, with_neg=True)
def run_simple_1of_business_correlator_with_hosts(self, with_pct=False, with_neg=False):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
if with_pct is True:
if with_neg is True:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of_with_host_pct_neg")
else:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of_with_host_pct")
else:
if with_neg is True:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of_with_host_neg")
else:
svc_cor = self.sched.services.find_srv_by_name_and_hostname(
"test_host_0", "Simple_1Of_with_host")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('of:', bp_rule.operand)
        # Simple 1of: so in fact a triple ('1','2','2') (1of, plus MAX,MAX for the WARNING/CRITICAL thresholds)
if with_pct is True:
if with_neg is True:
self.assertEqual(('-50%', '2', '2'), bp_rule.of_values)
else:
self.assertEqual(('50%', '2', '2'), bp_rule.of_values)
else:
if with_neg is True:
self.assertEqual(('-1', '2', '2'), bp_rule.of_values)
else:
self.assertEqual(('1', '2', '2'), bp_rule.of_values)
sons = bp_rule.sons
print("Sons,", sons)
        # We've got 2 sons, 2 host nodes
self.assertEqual(2, len(sons))
self.assertEqual('host', sons[0].operand)
self.assertEqual(host, sons[0].sons[0])
self.assertEqual('host', sons[1].operand)
self.assertEqual(router, sons[1].sons[0])
    # We will try a simple db1 OR db2, but this time we will
# schedule a real check and see if it's good
def test_simple_or_business_correlator_with_schedule(self):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('|', bp_rule.operand)
sons = bp_rule.sons
print("Sons,", sons)
        # We've got 2 sons, 2 service nodes
self.assertEqual(2, len(sons))
self.assertEqual('service', sons[0].operand)
self.assertEqual(svc_bd1, sons[0].sons[0])
self.assertEqual('service', sons[1].operand)
self.assertEqual(svc_bd2, sons[1].sons[0])
        # Now start working on the states
self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual('OK', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
state = bp_rule.get_state()
self.assertEqual(0, state)
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
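        # Editor's note (hedged): business rules are evaluated through
        # "internal" checks -- launch_check() queues an action flagged
        # c.internal and the scheduler computes its result from the rule
        # tree instead of forking a plugin, hence the recurring 2-loop
        # pattern in this test.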
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('SOFT', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
# The business rule must still be 0
state = bp_rule.get_state()
self.assertEqual(0, state)
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(2, svc_bd1.last_hard_state_id)
        # The rule must still be 0 (there is an OR inside)
state = bp_rule.get_state()
self.assertEqual(0, state)
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
# Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(2, svc_bd2.last_hard_state_id)
# And now the state of the rule must be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
# And now we must be CRITICAL/SOFT!
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('CRITICAL', svc_cor.state)
self.assertEqual('SOFT', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
        # OK, recheck again: go HARD!
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('CRITICAL', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(2, svc_cor.last_hard_state_id)
        # And if we set one back to WARNING?
self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(1, svc_bd2.last_hard_state_id)
        # Must be WARNING (an OR keeps the best non-OK value)
state = bp_rule.get_state()
self.assertEqual(1, state)
        # And in a HARD state this time
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('WARNING', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(1, svc_cor.last_hard_state_id)
print("All elements", bp_rule.list_all_elements())
print("IMPACT:", svc_bd2.impacts)
for i in svc_bd2.impacts:
print(i.get_name())
        # Assert that Simple_Or is an impact of the problem bd2
self.assertIn(svc_cor, svc_bd2.impacts)
# and bd1 too
self.assertIn(svc_cor, svc_bd1.impacts)
def test_dep_node_list_elements(self):
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('|', bp_rule.operand)
print("All elements", bp_rule.list_all_elements())
all_elt = bp_rule.list_all_elements()
self.assertIn(svc_bd2, all_elt)
self.assertIn(svc_bd1, all_elt)
print("DBG: bd2 depend_on_me", svc_bd2.act_depend_of_me)
# We will try a full ERP rule and
# schedule a real check and see if it's good
def test_full_erp_rule_with_schedule(self):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
svc_web1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "web1")
self.assertEqual(False, svc_web1.got_business_rule)
self.assertIs(None, svc_web1.business_rule)
svc_web2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "web2")
self.assertEqual(False, svc_web2.got_business_rule)
self.assertIs(None, svc_web2.business_rule)
svc_lvs1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs1")
self.assertEqual(False, svc_lvs1.got_business_rule)
self.assertIs(None, svc_lvs1.business_rule)
svc_lvs2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs2")
self.assertEqual(False, svc_lvs2.got_business_rule)
self.assertIs(None, svc_lvs2.business_rule)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "ERP")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('&', bp_rule.operand)
sons = bp_rule.sons
print("Sons,", sons)
        # We've got 3 sons, one OR sub-rule each
self.assertEqual(3, len(sons))
bd_node = sons[0]
self.assertEqual('|', bd_node.operand)
self.assertEqual(svc_bd1, bd_node.sons[0].sons[0])
self.assertEqual(svc_bd2, bd_node.sons[1].sons[0])
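        # Editor's note (hedged): only this first layer is asserted; given
        # the service lookups above, the full ERP rule is presumably
        # (db1|db2) & (web1|web2) & (lvs1|lvs2), one OR sub-rule per layer.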
        # Now start working on the states
self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual('OK', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
state = bp_rule.get_state()
self.assertEqual(0, state)
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('SOFT', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
# The business rule must still be 0
state = bp_rule.get_state()
self.assertEqual(0, state)
print("Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("ERP: Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(2, svc_bd1.last_hard_state_id)
        # The rule must still be 0 (there is an OR inside)
state = bp_rule.get_state()
self.assertEqual(0, state)
print("ERP: Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("ERP: Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
# Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(2, svc_bd2.last_hard_state_id)
# And now the state of the rule must be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
# And now we must be CRITICAL/SOFT!
print("ERP: Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("ERP: Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('CRITICAL', svc_cor.state)
self.assertEqual('SOFT', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
        # OK, recheck again: go HARD!
print("ERP: Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("ERP: Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('CRITICAL', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(2, svc_cor.last_hard_state_id)
        # And if we set one back to WARNING?
self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(1, svc_bd2.last_hard_state_id)
        # Must be WARNING (an OR keeps the best non-OK value)
state = bp_rule.get_state()
self.assertEqual(1, state)
        # And in a HARD state this time
print("ERP: Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("ERP: Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('WARNING', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(1, svc_cor.last_hard_state_id)
print("All elements", bp_rule.list_all_elements())
print("IMPACT:", svc_bd2.impacts)
for i in svc_bd2.impacts:
print(i.get_name())
        # Assert that ERP is an impact of the problem bd2
self.assertIn(svc_cor, svc_bd2.impacts)
# and bd1 too
self.assertIn(svc_cor, svc_bd1.impacts)
# And now all is green :)
self.scheduler_loop(2, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | value1=1 value2=2']])
print("ERP: Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("ERP: Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
# And no more in impact
self.assertNotIn(svc_cor, svc_bd2.impacts)
self.assertNotIn(svc_cor, svc_bd1.impacts)
        # And what if we set 2 services from distinct sub-rules CRITICAL?
        # Each OR layer still has one OK member, so ERP should stay OK
self.scheduler_loop(2, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2'], [svc_web1, 2, 'CRITICAL | value1=1 value2=2']])
print("ERP: Launch internal check")
svc_cor.launch_check(now-1)
c = svc_cor.actions[0]
self.assertEqual(True, c.internal)
self.assertTrue(c.is_launchable(now))
        # Ask the scheduler to launch this check, with 2 loops:
        # one to launch the check, another to integrate its result
        self.scheduler_loop(2, [])
        # The check should no longer be queued
self.assertEqual(0, len(svc_cor.actions))
print("ERP: Look at svc_cor state", svc_cor.state)
# What is the svc_cor state now?
self.assertEqual('OK', svc_cor.state)
self.assertEqual('HARD', svc_cor.state_type)
self.assertEqual(0, svc_cor.last_hard_state_id)
    # We will try a complex ABCof: rule over services A to E
def test_complex_ABCof_business_correlator(self):
self.run_complex_ABCof_business_correlator(with_pct=False)
    # Same ABCof: rule, expressed with percentage thresholds
def test_complex_ABCof_pct_business_correlator(self):
self.run_complex_ABCof_business_correlator(with_pct=True)
def run_complex_ABCof_business_correlator(self, with_pct=False):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
A = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "A")
self.assertEqual(False, A.got_business_rule)
self.assertIs(None, A.business_rule)
B = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "B")
self.assertEqual(False, B.got_business_rule)
self.assertIs(None, B.business_rule)
C = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "C")
self.assertEqual(False, C.got_business_rule)
self.assertIs(None, C.business_rule)
D = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "D")
self.assertEqual(False, D.got_business_rule)
self.assertIs(None, D.business_rule)
E = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "E")
self.assertEqual(False, E.got_business_rule)
self.assertIs(None, E.business_rule)
if with_pct == False:
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Complex_ABCOf")
else:
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Complex_ABCOf_pct")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('of:', bp_rule.operand)
if with_pct == False:
self.assertEqual(('5', '1', '1'), bp_rule.of_values)
else:
self.assertEqual(('100%', '20%', '20%'), bp_rule.of_values)
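        # Editor's note: with 5 sons, ('100%', '20%', '20%') should resolve
        # to the same thresholds as ('5', '1', '1'), so both variants of
        # this test exercise the same rule.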
sons = bp_rule.sons
print("Sons,", sons)
        # We've got 5 sons, 5 service nodes
self.assertEqual(5, len(sons))
self.assertEqual('service', sons[0].operand)
self.assertEqual(A, sons[0].sons[0])
self.assertEqual('service', sons[1].operand)
self.assertEqual(B, sons[1].sons[0])
self.assertEqual('service', sons[2].operand)
self.assertEqual(C, sons[2].sons[0])
self.assertEqual('service', sons[3].operand)
self.assertEqual(D, sons[3].sons[0])
self.assertEqual('service', sons[4].operand)
self.assertEqual(E, sons[4].sons[0])
        # Now start working on the states
self.scheduler_loop(1, [[A, 0, 'OK'], [B, 0, 'OK'], [C, 0, 'OK'], [D, 0, 'OK'], [E, 0, 'OK']])
self.assertEqual('OK', A.state)
self.assertEqual('HARD', A.state_type)
self.assertEqual('OK', B.state)
self.assertEqual('HARD', B.state_type)
self.assertEqual('OK', C.state)
self.assertEqual('HARD', C.state_type)
self.assertEqual('OK', D.state)
self.assertEqual('HARD', D.state_type)
self.assertEqual('OK', E.state)
self.assertEqual('HARD', E.state_type)
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we set the A as soft/CRITICAL
self.scheduler_loop(1, [[A, 2, 'CRITICAL']])
self.assertEqual('CRITICAL', A.state)
self.assertEqual('SOFT', A.state_type)
self.assertEqual(0, A.last_hard_state_id)
# The business rule must still be 0
        # because we want HARD states
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we get A CRITICAL/HARD
self.scheduler_loop(1, [[A, 2, 'CRITICAL']])
self.assertEqual('CRITICAL', A.state)
self.assertEqual('HARD', A.state_type)
self.assertEqual(2, A.last_hard_state_id)
        # The rule must go CRITICAL (4 OK sons < 5 required, and 1 CRITICAL reaches the threshold)
state = bp_rule.get_state()
self.assertEqual(2, state)
# Now we also set B as CRITICAL/HARD...
self.scheduler_loop(2, [[B, 2, 'CRITICAL']])
self.assertEqual('CRITICAL', B.state)
self.assertEqual('HARD', B.state_type)
self.assertEqual(2, B.last_hard_state_id)
        # And the state of the rule must still be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
        # And if we set A and B to WARNING now?
self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 1, 'WARNING']])
self.assertEqual('WARNING', A.state)
self.assertEqual('HARD', A.state_type)
self.assertEqual(1, A.last_hard_state_id)
self.assertEqual('WARNING', B.state)
self.assertEqual('HARD', B.state_type)
self.assertEqual(1, B.last_hard_state_id)
        # Must be WARNING (both WARNING sons reach the warning threshold, none is CRITICAL)
state = bp_rule.get_state()
print("state", state)
self.assertEqual(1, state)
# Ok now more fun, with changing of_values and states
### W O O O O
# 4 of: -> Ok (we got 4 OK, and not 4 warn or crit, so it's OK)
# 5,1,1 -> Warning (at least one warning, and no crit -> warning)
# 5,2,1 -> OK (we want warning only if we got 2 bad states, so not here)
self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 0, 'OK']])
# 4 of: -> 4,5,5
if with_pct == False:
bp_rule.of_values = ('4', '5', '5')
else:
bp_rule.of_values = ('80%', '100%', '100%')
bp_rule.is_of_mul = False
self.assertEqual(0, bp_rule.get_state())
# 5,1,1
if with_pct == False:
bp_rule.of_values = ('5', '1', '1')
else:
bp_rule.of_values = ('100%', '20%', '20%')
bp_rule.is_of_mul = True
self.assertEqual(1, bp_rule.get_state())
# 5,2,1
if with_pct == False:
bp_rule.of_values = ('5', '2', '1')
else:
bp_rule.of_values = ('100%', '40%', '20%')
bp_rule.is_of_mul = True
self.assertEqual(0, bp_rule.get_state())
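        # Editor's note (hedged sketch, not the real dependency-node code):
        # in "multiple levels" mode the assertions of this test behave like
        #   def xof_mul(states, y, z):   # states: 0=OK, 1=WARNING, 2=CRITICAL
        #       if states.count(2) >= z:
        #           return 2
        #       if states.count(1) >= y:
        #           return 1
        #       return 0
        # e.g. [1, 0, 0, 0, 0] with (y=2, z=1) stays 0, as just asserted.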
        ### W C O O O
        # 4 of: -> Critical (not 4 OK, so we take the worst state: CRITICAL)
        # 4,1,1 -> Critical (2 states reach the warning threshold, but one reaches critical, so the worst state wins)
self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 2, 'Crit']])
# 4 of: -> 4,5,5
if with_pct == False:
bp_rule.of_values = ('4', '5', '5')
else:
bp_rule.of_values = ('80%', '100%', '100%')
bp_rule.is_of_mul = False
self.assertEqual(2, bp_rule.get_state())
# 4,1,1
if with_pct == False:
bp_rule.of_values = ('4', '1', '1')
else:
bp_rule.of_values = ('40%', '20%', '20%')
bp_rule.is_of_mul = True
self.assertEqual(2, bp_rule.get_state())
        ### W C C O O
        # * 2 of: OK
        # * 4,1,1 -> Critical (same as before)
        # * 4,1,3 -> Warning (the warning threshold is reached, but the critical one is not)
self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 2, 'Crit'], [C, 2, 'Crit']])
# * 2 of: 2,5,5
if with_pct == False:
bp_rule.of_values = ('2', '5', '5')
else:
bp_rule.of_values = ('40%', '100%', '100%')
bp_rule.is_of_mul = False
self.assertEqual(0, bp_rule.get_state())
# * 4,1,1
if with_pct == False:
bp_rule.of_values = ('4', '1', '1')
else:
bp_rule.of_values = ('80%', '20%', '20%')
bp_rule.is_of_mul = True
self.assertEqual(2, bp_rule.get_state())
# * 4,1,3
if with_pct == False:
bp_rule.of_values = ('4', '1', '3')
else:
bp_rule.of_values = ('80%', '20%', '60%')
bp_rule.is_of_mul = True
self.assertEqual(1, bp_rule.get_state())
    # We will try a simple db1 AND NOT db2
def test_simple_and_not_business_correlator(self):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_And_not")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('&', bp_rule.operand)
sons = bp_rule.sons
print("Sons,", sons)
        # We've got 2 sons, 2 service nodes
self.assertEqual(2, len(sons))
self.assertEqual('service', sons[0].operand)
self.assertEqual(svc_bd1, sons[0].sons[0])
self.assertEqual('service', sons[1].operand)
self.assertEqual(svc_bd2, sons[1].sons[0])
        # Now start working on the states
self.scheduler_loop(2, [[svc_bd1, 0, 'OK | value1=1 value2=2'], [svc_bd2, 2, 'CRITICAL | rtt=10']])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual('CRITICAL', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
        # db2 is negated (NOT), so its CRITICAL counts as OK here
state = bp_rule.get_state()
self.assertEqual(0, state)
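        # Editor's note (hedged): the NOT on db2 appears to invert its
        # contribution -- a non-OK db2 counts as OK and an OK db2 counts as
        # CRITICAL -- which is what the remaining assertions exercise.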
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('SOFT', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
# The business rule must still be 0
        # because we want HARD states
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(2, svc_bd1.last_hard_state_id)
# The rule must go CRITICAL
state = bp_rule.get_state()
self.assertEqual(2, state)
# Now we also set bd2 as WARNING/HARD...
self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(1, svc_bd2.last_hard_state_id)
# And now the state of the rule must be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
        # And if we set db1 to WARNING too?
self.scheduler_loop(2, [[svc_bd1, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(1, svc_bd1.last_hard_state_id)
        # Must be WARNING (db1 is WARNING and the negated db2 no longer raises CRITICAL)
state = bp_rule.get_state()
self.assertEqual(1, state)
        # Now try to get OK in both places: should be bad :)
self.scheduler_loop(2, [[svc_bd1, 0, 'OK | value1=1 value2=2'], [svc_bd2, 0, 'OK | value1=1 value2=2']])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
self.assertEqual('OK', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(0, svc_bd2.last_hard_state_id)
        # Must be CRITICAL (OK AND NOT OK is not OK :))
state = bp_rule.get_state()
self.assertEqual(2, state)
    # We will try a multi-layered business rule
def test_multi_layers(self):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# THE RULE IS (test_host_0,db1| (test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2) ) ) & test_router_0
svc_lvs1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs1")
self.assertIsNot(svc_lvs1, None)
svc_lvs2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs2")
self.assertIsNot(svc_lvs2, None)
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assertEqual(False, svc_bd1.got_business_rule)
self.assertIs(None, svc_bd1.business_rule)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assertEqual(False, svc_bd2.got_business_rule)
self.assertIs(None, svc_bd2.business_rule)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Multi_levels")
self.assertEqual(True, svc_cor.got_business_rule)
self.assertIsNot(svc_cor.business_rule, None)
bp_rule = svc_cor.business_rule
self.assertEqual('&', bp_rule.operand)
        # We check for correct parent/child links:
        # svc_cor should be a child of svc_bd1, svc_bd2 and the router,
        # which should all be parents of svc_cor
self.assertIn(svc_cor, svc_bd1.child_dependencies)
self.assertIn(svc_cor, svc_bd2.child_dependencies)
self.assertIn(svc_cor, router.child_dependencies)
self.assertIn(svc_bd1, svc_cor.parent_dependencies)
self.assertIn(svc_bd2, svc_cor.parent_dependencies)
self.assertIn(router, svc_cor.parent_dependencies)
sons = bp_rule.sons
print("Sons,", sons)
        # We've got 2 sons: the OR sub-rule and the router host node
self.assertEqual(2, len(sons))
# Son0 is (test_host_0,db1| (test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2) ) )
son0 = sons[0]
self.assertEqual('|', son0.operand)
# Son1 is test_router_0
self.assertEqual('host', sons[1].operand)
self.assertEqual(router, sons[1].sons[0])
# Son0_0 is test_host_0,db1
# Son0_1 is test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2)
son0_0 = son0.sons[0]
son0_1 = son0.sons[1]
self.assertEqual('service', son0_0.operand)
self.assertEqual(svc_bd1, son0_0.sons[0])
self.assertEqual('&', son0_1.operand)
# Son0_1_0 is test_host_0,db2
# Son0_1_1 is test_host_0,lvs1|test_host_0,lvs2
son0_1_0 = son0_1.sons[0]
son0_1_1 = son0_1.sons[1]
self.assertEqual('service', son0_1_0.operand)
self.assertEqual(svc_bd2, son0_1_0.sons[0])
self.assertEqual('|', son0_1_1.operand)
# Son0_1_1_0 is test_host_0,lvs1
# Son0_1_1_1 is test_host_0,lvs2
son0_1_1_0 = son0_1_1.sons[0]
son0_1_1_1 = son0_1_1.sons[1]
self.assertEqual('service', son0_1_1_0.operand)
self.assertEqual(svc_lvs1, son0_1_1_0.sons[0])
self.assertEqual('service', son0_1_1_1.operand)
self.assertEqual(svc_lvs2, son0_1_1_1.sons[0])
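        # The parsed tree, as asserted above:
        #   & -+- | -+- db1
        #      |    +- & -+- db2
        #      |         +- | -+- lvs1
        #      |              +- lvs2
        #      +- test_router_0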
        # Now start working on the states
self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10'],
[svc_lvs1, 0, 'OK'], [svc_lvs2, 0, 'OK'], [router, 0, 'UP'] ])
self.assertEqual('OK', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual('OK', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
# All is green, the rule should be green too
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('SOFT', svc_bd1.state_type)
self.assertEqual(0, svc_bd1.last_hard_state_id)
# The business rule must still be 0
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd1.state)
self.assertEqual('HARD', svc_bd1.state_type)
self.assertEqual(2, svc_bd1.last_hard_state_id)
        # The rule must still be 0 (there is an OR inside)
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
self.assertEqual('CRITICAL', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(2, svc_bd2.last_hard_state_id)
# And now the state of the rule must be 2
state = bp_rule.get_state()
self.assertEqual(2, state)
        # And if we set one to WARNING?
self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
self.assertEqual('WARNING', svc_bd2.state)
self.assertEqual('HARD', svc_bd2.state_type)
self.assertEqual(1, svc_bd2.last_hard_state_id)
        # Must be WARNING (an OR keeps the best non-OK value)
state = bp_rule.get_state()
self.assertEqual(1, state)
        # We should now have svc_bd1 and svc_bd2 as root problems
print("Root problems")
for p in svc_cor.source_problems:
print(p.get_full_name())
self.assertIn(svc_bd1, svc_cor.source_problems)
self.assertIn(svc_bd2, svc_cor.source_problems)
        # What about now with the router DOWN?
self.scheduler_loop(5, [[router, 2, 'DOWN']])
self.assertEqual('DOWN', router.state)
self.assertEqual('HARD', router.state_type)
self.assertEqual(1, router.last_hard_state_id)
        # Must be CRITICAL (a DOWN host counts as CRITICAL in the rule)
state = bp_rule.get_state()
self.assertEqual(2, state)
        # Now the router is among our root problems
print("Root problems")
for p in svc_cor.source_problems:
print(p.get_full_name())
self.assertIn(router, svc_cor.source_problems)
    # We will try a strange rule where UP&UP -> OK and DOWN&DOWN -> OK
def test_darthelmet_rule(self):
        #
        # The configuration loaded for this test is valid; we only
        # exercise the business rule evaluation here.
        #
print("Get the hosts and services")
now = time.time()
host = self.sched.hosts.find_by_name("test_darthelmet")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
A = self.sched.hosts.find_by_name("test_darthelmet_A")
B = self.sched.hosts.find_by_name("test_darthelmet_B")
self.assertEqual(True, host.got_business_rule)
self.assertIsNot(host.business_rule, None)
bp_rule = host.business_rule
self.assertEqual('|', bp_rule.operand)
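        # Editor's note (hedged): the exact expression lives in the test
        # config; judging by the assertions below it behaves like "A | !B",
        # which is why A DOWN alone is bad while A DOWN plus B DOWN is OK.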
        # Now start working on the states
self.scheduler_loop(3, [[host, 0, 'UP'], [A, 0, 'UP'], [B, 0, 'UP'] ] )
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
self.assertEqual('UP', A.state)
self.assertEqual('HARD', A.state_type)
state = bp_rule.get_state()
print("WTF0", state)
self.assertEqual(0, state)
# Now we set the A as soft/DOWN
self.scheduler_loop(1, [[A, 2, 'DOWN']])
self.assertEqual('DOWN', A.state)
self.assertEqual('SOFT', A.state_type)
self.assertEqual(0, A.last_hard_state_id)
# The business rule must still be 0
state = bp_rule.get_state()
self.assertEqual(0, state)
# Now we get A DOWN/HARD
self.scheduler_loop(3, [[A, 2, 'DOWN']])
self.assertEqual('DOWN', A.state)
self.assertEqual('HARD', A.state_type)
self.assertEqual(1, A.last_hard_state_id)
        # The rule must now be 2 (A alone is DOWN)
        state = bp_rule.get_state()
        print("WTF", state)
self.assertEqual(2, state)
# Now we also set B as DOWN/HARD, should get back to 0!
self.scheduler_loop(3, [[B, 2, 'DOWN']])
self.assertEqual('DOWN', B.state)
self.assertEqual('HARD', B.state_type)
self.assertEqual(1, B.last_hard_state_id)
# And now the state of the rule must be 0 again! (strange rule isn't it?)
state = bp_rule.get_state()
self.assertEqual(0, state)
class TestConfigBroken(ShinkenTest):
"""A class with a broken configuration, where business rules reference unknown hosts/services"""
def setUp(self):
self.setup_with_file('etc/shinken_business_correlator_broken.cfg')
def test_conf_is_correct(self):
#
# Business rules use services which don't exist. We want
# the arbiter to output an error message and exit
# in a controlled manner.
#
print("conf_is_correct", self.conf.conf_is_correct)
self.assertFalse(self.conf.conf_is_correct)
# Get the arbiter's log broks
#[b.prepare() for b in self.broks]
logs = [b.data['log'] for b in self.broks if b.type == 'log']
# Info: Simple_1Of_1unk_svc: my business rule is invalid
# Info: Simple_1Of_1unk_svc: Business rule uses unknown service test_host_0/db3
# Error: [items] In Simple_1Of_1unk_svc is incorrect ; from etc/business_correlator_broken/services.cfg
self.assertEqual(3, len([log for log in logs if re.search('Simple_1Of_1unk_svc', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('service test_host_0/db3', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('Simple_1Of_1unk_svc.+from etc.+business_correlator_broken.+services.cfg', log)]) )
# Info: ERP_unk_svc: my business rule is invalid
# Info: ERP_unk_svc: Business rule uses unknown service test_host_0/web100
# Info: ERP_unk_svc: Business rule uses unknown service test_host_0/lvs100
# Error: [items] In ERP_unk_svc is incorrect ; from etc/business_correlator_broken/services.cfg
self.assertEqual(4, len([log for log in logs if re.search('ERP_unk_svc', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('service test_host_0/web100', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('service test_host_0/lvs100', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('ERP_unk_svc.+from etc.+business_correlator_broken.+services.cfg', log)]) )
# Info: Simple_1Of_1unk_host: my business rule is invalid
# Info: Simple_1Of_1unk_host: Business rule uses unknown host test_host_9
# Error: [items] In Simple_1Of_1unk_host is incorrect ; from etc/business_correlator_broken/services.cfg
self.assertEqual(3, len([log for log in logs if re.search('Simple_1Of_1unk_host', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('host test_host_9', log)]) )
self.assertEqual(1, len([log for log in logs if re.search('Simple_1Of_1unk_host.+from etc.+business_correlator_broken.+services.cfg', log)]) )
# Now the number of all failed business rules.
self.assertEqual(3, len([log for log in logs if re.search('my business rule is invalid', log)]) )
if __name__ == '__main__':
unittest.main()
| 64,710 | Python | .py | 1,299 | 40.588915 | 150 | 0.620414 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |

| 6,415 | test_exclude_services.py | shinken-solutions_shinken/test/test_exclude_services.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test object properties overriding.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import partial
import re
from shinken_test import unittest, ShinkenTest
class TestPropertyOverride(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/exclude_include_services.cfg')
def test_exclude_services(self):
hst1 = self.sched.hosts.find_by_name("test_host_01")
hst2 = self.sched.hosts.find_by_name("test_host_02")
self.assertEqual([], hst1.service_excludes)
self.assertEqual(["srv-svc11", "srv-svc21", "proc proc1"], hst2.service_excludes)
Find = self.sched.services.find_srv_by_name_and_hostname
# All services should exist for test_host_01
find = partial(Find, 'test_host_01')
for svc in (
'srv-svc11', 'srv-svc12',
'srv-svc21', 'srv-svc22',
'proc proc1', 'proc proc2',
):
self.assertIsNotNone(find(svc))
        # Only half of the services should exist for test_host_02
find = partial(Find, 'test_host_02')
for svc in ('srv-svc12', 'srv-svc22', 'proc proc2'):
self.assertIsNotNone(find(svc))
for svc in ('srv-svc11', 'srv-svc21', 'proc proc1'):
self.assertIsNone(find(svc))
        # The second pack (srv-svc21, srv-svc22, proc proc2) should not exist on test_host_04
find = partial(Find, 'test_host_04')
for svc in ('srv-svc11', 'srv-svc12', 'proc proc1'):
self.assertIsNotNone(find(svc))
for svc in ('srv-svc21', 'srv-svc22', 'proc proc2'):
self.assertIsNone(find(svc))
# no service should be defined on test_host_05
find = partial(Find, 'test_host_05')
for svc in ('srv-svc11', 'srv-svc12', 'proc proc1',
'srv-svc21', 'srv-svc22', 'proc proc2'):
self.assertIsNone(find(svc))
def test_service_includes(self):
Find = self.sched.services.find_srv_by_name_and_hostname
find = partial(Find, 'test_host_03')
for svc in ('srv-svc11', 'proc proc2', 'srv-svc22'):
self.assertIsNotNone(find(svc))
for svc in ('srv-svc12', 'srv-svc21', 'proc proc1'):
self.assertIsNone(find(svc))
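        # Editor's note (hedged): the cfg behind these tests presumably
        # attaches the packs with host directives along the lines of:
        #   define host {
        #       host_name         test_host_02
        #       service_excludes  srv-svc11, srv-svc21, proc proc1
        #   }
        # with service_includes acting as the allow-list counterpart.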
if __name__ == '__main__':
unittest.main()
| 3,123 | Python | .py | 69 | 38.608696 | 89 | 0.658971 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |

| 6,416 | module.py | shinken-solutions_shinken/test/test_module_as_package/modB/module.py |
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.basemodule import BaseModule
properties = {
'daemons': ['broker', 'scheduler'],
'type': 'modB',
'external': False,
'phases': ['running'],
}
def get_instance(plugin):
return ThisModule(plugin)
class ThisModule(BaseModule):
pass
from .helpers import X as helpers_X
expected_helpers_X = 'B'
| 413 | Python | .py | 14 | 26.428571 | 82 | 0.72335 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |

| 6,417 | helpers.py | shinken-solutions_shinken/test/test_module_as_package/modB/helpers.py |
from __future__ import absolute_import, division, print_function, unicode_literals
X = 'B'
| 93 | Python | .py | 2 | 44.5 | 82 | 0.764045 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |

| 6,418 | module.py | shinken-solutions_shinken/test/test_module_as_package/modA/module.py |
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.basemodule import BaseModule
properties = {
'daemons': ['broker', 'scheduler'],
'type': 'modA',
'external': False,
'phases': ['running'],
}
def get_instance(plugin):
return ThisModule(plugin)
class ThisModule(BaseModule):
pass
import sys
from .helpers import X as helpers_X
expected_helpers_X = 'A'
| 424 | Python | .py | 15 | 25.333333 | 82 | 0.727723 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |

| 6,419 | helpers.py | shinken-solutions_shinken/test/test_module_as_package/modA/helpers.py |
from __future__ import absolute_import, division, print_function, unicode_literals
X = 'A'
| 93 | Python | .py | 2 | 44.5 | 82 | 0.764045 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |

| 6,420 | module.py | shinken-solutions_shinken/test/module_missing_imported_from_module_property/dummy_arbiter/module.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is an example of an Arbiter module
# Here for the configuration phase AND running one
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken.basemodule import BaseModule
from shinken.external_command import ExternalCommand
from shinken.log import logger
properties = {
'daemons': ['arbiter'],
'type': 'dummy_arbiter',
'external': True,
}
# Called by the plugin manager to get an instance of the module
def get_instance(plugin):
logger.info("[Dummy Arbiter] Get a Dummy arbiter module for plugin %s", plugin.get_name())
instance = Dummy_arbiter(plugin)
return instance
# Just print some stuff
class Dummy_arbiter(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by Arbiter to say 'let's prepare yourself guy'
def init(self):
logger.info("[Dummy Arbiter] Initialization of the dummy arbiter module")
#self.return_queue = self.properties['from_queue']
# Ok, main function that is called in the CONFIGURATION phase
def get_objects(self):
logger.info("[Dummy Arbiter] Ask me for objects to return")
r = {'hosts': []}
h = {'name': 'dummy host from dummy arbiter module',
'register': '0',
}
r['hosts'].append(h)
r['hosts'].append({
'host_name': "dummyhost1",
'use': 'linux-server',
'address': 'localhost'
})
logger.info("[Dummy Arbiter] Returning to Arbiter the hosts: %s", str(r))
return r
def hook_late_configuration(self, conf):
logger.info("[Dummy Arbiter] Dummy in hook late config")
def do_loop_turn(self):
logger.info("[Dummy Arbiter] Raise a external command as example")
e = ExternalCommand('Viva la revolution')
self.from_q.put(e)
time.sleep(1)
| 2,886 | Python | .py | 69 | 36.014493 | 94 | 0.675241 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |

| 6,421 | shinken_python_crash_with_recursive_bp_rules.cfg | shinken-solutions_shinken/test/etc/shinken_python_crash_with_recursive_bp_rules.cfg |
accept_passive_host_checks=1
accept_passive_service_checks=1
additional_freshness_latency=15
admin_email=shinken@localhost
admin_pager=shinken@localhost
auto_reschedule_checks=0
auto_rescheduling_interval=30
auto_rescheduling_window=180
cached_host_check_horizon=15
cached_service_check_horizon=15
cfg_file=standard/hosts.cfg
cfg_file=standard/services.cfg
cfg_file=standard/contacts.cfg
cfg_file=python_crash_with_recursive_bp_rules/commands.cfg
cfg_file=python_crash_with_recursive_bp_rules/test_specific.cfg
cfg_file=standard/timeperiods.cfg
cfg_file=standard/hostgroups.cfg
cfg_file=standard/servicegroups.cfg
cfg_file=standard/shinken-specific.cfg
check_external_commands=1
check_for_orphaned_hosts=1
check_for_orphaned_services=1
check_host_freshness=0
check_result_path=var/spool/checkresults
check_result_reaper_frequency=10
check_service_freshness=1
command_check_interval=-1
command_file=var/shinken.cmd
daemon_dumps_core=0
date_format=iso8601
debug_file=var/shinken.debug
debug_level=112
debug_verbosity=1
enable_embedded_perl=0
enable_environment_macros=1
enable_event_handlers=1
enable_flap_detection=0
enable_notifications=1
enable_predictive_host_dependency_checks=1
enable_predictive_service_dependency_checks=1
event_broker_options=-1
event_handler_timeout=30
execute_host_checks=1
execute_service_checks=1
external_command_buffer_slots=4096
high_host_flap_threshold=20
high_service_flap_threshold=20
host_check_timeout=30
host_freshness_check_interval=60
host_inter_check_delay_method=s
illegal_macro_output_chars=`~\$&|'"<>
illegal_object_name_chars=`~!\$%^&*|'"<>?,()=
interval_length=60
lock_file=var/shinken.pid
log_archive_path=var/archives
log_event_handlers=1
log_external_commands=1
log_file=var/shinken.log
log_host_retries=1
log_initial_states=0
log_notifications=1
log_passive_checks=1
log_rotation_method=d
log_service_retries=1
low_host_flap_threshold=5
low_service_flap_threshold=5
max_check_result_file_age=3600
max_check_result_reaper_time=30
max_concurrent_checks=0
max_debug_file_size=1000000
max_host_check_spread=30
max_service_check_spread=30
shinken_group=shinken
shinken_user=shinken
notification_timeout=30
object_cache_file=var/objects.cache
obsess_over_hosts=0
obsess_over_services=0
ocsp_timeout=5
#p1_file=/tmp/test_shinken/plugins/p1.pl
p1_file=/usr/local/shinken/bin/p1.pl
passive_host_checks_are_soft=0
perfdata_timeout=5
precached_object_file=var/objects.precache
process_performance_data=1
resource_file=resource.cfg
retain_state_information=1
retained_contact_host_attribute_mask=0
retained_contact_service_attribute_mask=0
retained_host_attribute_mask=0
retained_process_host_attribute_mask=0
retained_process_service_attribute_mask=0
retained_service_attribute_mask=0
retention_update_interval=60
service_check_timeout=60
service_freshness_check_interval=60
service_inter_check_delay_method=s
service_interleave_factor=s
##shinken_group=shinken
##shinken_user=shinken
#shinken_group=shinken
#shinken_user=shinken
sleep_time=0.25
soft_state_dependencies=0
state_retention_file=var/retention.dat
status_file=var/status.dat
status_update_interval=5
temp_file=tmp/shinken.tmp
temp_path=var/tmp
translate_passive_host_checks=0
use_aggressive_host_checking=0
use_embedded_perl_implicitly=0
use_large_installation_tweaks=0
use_regexp_matching=0
use_retained_program_state=1
use_retained_scheduling_info=1
use_syslog=0
use_true_regexp_matching=0
enable_problem_impacts_states_change=1
no_event_handlers_during_downtimes=0
| 3,467
|
Python
|
.py
| 120
| 27.9
| 63
| 0.856033
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,422
|
docker-file-UNIT-TEST-python2.txt
|
shinken-solutions_shinken/test/docker-files/docker-file-UNIT-TEST-python2.txt
|
FROM debian:9
MAINTAINER Jean Gabes <naparuba@gmail.com>
RUN apt-get update && apt-get install -y python
# Set up the test env; the "standard" installation is tested with other tests ^^
RUN apt-get install -y python-pip
RUN pip install jinja2
RUN pip install leveldb
RUN pip install pyOpenSSL
RUN pip install pycrypto
RUN pip install requests
RUN pip install Crypto
RUN pip install pygments
RUN pip install coveralls
RUN pip install nose-cov
RUN apt-get install -y python-cherrypy3
RUN pip install rsa
# The internal yaml does not seem to be used, thanks to nose
RUN pip install ruamel.yaml==0.11.15
RUN apt-get install -y sysstat
RUN apt-get install -y curl
RUN apt-get install -y vim
RUN apt-get install -y procps
RUN apt-get install -y wget
RUN apt-get install -y net-tools
RUN apt-get install -y dnsutils
RUN apt-get install -y python-apt
RUN apt-get install -y strace
RUN apt-get install -y less
RUN apt-get install -y python-blessed
RUN apt-get install -y locales
RUN apt-get install -y python-setuptools
RUN apt-get install -y python-pycurl
RUN apt-get install -y dos2unix
RUN apt-get install -y pep8
ADD . /root/shinken-framework
WORKDIR /root/shinken-framework
#RUN python setup.py install
ENTRYPOINT cd test;./quick_tests.sh
# Specific test, manual launch
#ENTRYPOINT cd test; python test_raft_multiprocess.py TestRaftMultiProcess.test_raft_large_leader_election
#ENTRYPOINT cd test; python test_raft.py
#ENTRYPOINT cd test;python test_yaml.py
#ENTRYPOINT opsbro agent start
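# Illustrative build/run commands for this file (the image tag is an
# assumption; the -f path matches this file's location in the repo):
#   docker build -f test/docker-files/docker-file-UNIT-TEST-python2.txt -t shinken-unit-test-py2 .
#   docker run --rm shinken-unit-test-py2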
| 1,724
|
Python
|
.py
| 43
| 38.930233
| 111
| 0.691756
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,423
|
docker-file-DEV-debian9-test-python.txt
|
shinken-solutions_shinken/test/docker-files/docker-file-DEV-debian9-test-python.txt
|
FROM debian:9
MAINTAINER Jean Gabes <naparuba@gmail.com>
RUN apt-get update && apt-get install -y python
# Set up the test env; the "standard" installation is tested with other tests ^^
RUN apt-get install -y python-pip
RUN pip install jinja2
RUN pip install leveldb
RUN pip install pyOpenSSL
RUN pip install pycrypto
RUN pip install requests
RUN pip install Crypto
RUN pip install pygments
RUN pip install coveralls
RUN pip install nose-cov
RUN apt-get install -y python-cherrypy3
RUN pip install rsa
# The internal yaml does not seem to be used, thanks to nose
RUN pip install ruamel.yaml==0.11.15
ADD . /root/shinken-framework
WORKDIR /root/shinken-framework
RUN python setup.py install
ENTRYPOINT nosetests -xv --processes=1 --process-timeout=300 --process-restartworker --with-cov --cov=shinken --exe
# Specific test, manual launch
#ENTRYPOINT cd test; python test_raft_multiprocess.py TestRaftMultiProcess.test_raft_large_leader_election
#ENTRYPOINT cd test; python test_raft.py
#ENTRYPOINT cd test;python test_yaml.py
#ENTRYPOINT opsbro agent start
| 1,185
|
Python
|
.py
| 27
| 42.555556
| 118
| 0.719756
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,424
|
hot_dep_export.py
|
shinken-solutions_shinken/test/libexec/hot_dep_export.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
try:
import json
except ImportError:
# For old Python versions, fall back to the external
# simplejson module (it provides the same two functions we need)
try:
import simplejson as json
except ImportError:
print("Error: you need the json or simplejson module for this script")
sys.exit(1)
print("Argv", sys.argv)
# Case 1 means host0 is the father of host1
if sys.argv[1] == 'case1':
d = [[["host", "test_host_0"], ["host", "test_host_1"]]]
elif sys.argv[1] == 'case2':
d = [[["host", "test_host_2"], ["host", "test_host_1"]]]
else:
print("Error: unknown case", sys.argv[1])
sys.exit(1)
# json.dumps returns text, so open the output file in text mode
f = open(sys.argv[2], 'w')
f.write(json.dumps(d))
f.close()
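# Illustrative usage (the output path is an assumption):
#   hot_dep_export.py case1 /tmp/deps.json
# writes [[["host", "test_host_0"], ["host", "test_host_1"]]], i.e. a
# single dependency stating that test_host_0 is the father of test_host_1.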
| 741
|
Python
|
.py
| 23
| 28.73913
| 82
| 0.653576
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,425
|
nmap_wrapper.py
|
shinken-solutions_shinken/test/libexec/nmap_wrapper.py
|
#!/usr/bin/env python
s = """
srv1::os=windows
srv1::osversion=2003
srv1::macvendor=Hewlett Packard
srv1::openports=135,139,445,80
srv2::os=windows
srv2::osversion=7
srv2::macvendor=VMware
srv2::openports=80,135,139,445
"""
print(s)
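# Illustrative parsing sketch for the "host::key=value" lines above
# (hypothetical consumer code, not part of this wrapper):
# for line in s.strip().splitlines():
#     host, keyval = line.split('::', 1)
#     key, value = keyval.split('=', 1)
#     # e.g. "srv1::openports=135,139,445,80" -> ('srv1', 'openports', '135,139,445,80')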
| 234
|
Python
|
.py
| 12
| 18.416667
| 31
| 0.782805
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,426
|
shinken-poller.py
|
shinken-solutions_shinken/bin/shinken-poller.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
'''
This class is the application that launches checks
The poller listens to the Arbiter for the configuration sent through
the given port as first argument.
The configuration sent by the arbiter specifies from which schedulers the
poller will take its checks.
When the poller is already launched and has its own conf, it keeps on
listening to the arbiter (on a timeout).
In case the arbiter has a new conf to send, the poller forgets its old
schedulers (and the associated checks) and takes the new ones instead.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import optparse
try:
from shinken.bin import VERSION
import shinken
except ImportError:
# If importing shinken fails, try to load from current directory
# or parent directory to support running without installation.
# Submodules will then be loaded from there, too.
import imp
imp.load_module('shinken',
*imp.find_module('shinken',
[os.path.realpath("."),
os.path.realpath(".."),
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
"..")]))
import shinken
# Ok we should add the shinken root directory to our sys.path so our
# child processes will be able to use the shinken import without problems
shinken_root_path = os.path.dirname(os.path.dirname(shinken.__file__))
os.environ['PYTHONPATH'] = os.path.join(os.environ.get('PYTHONPATH', ''), shinken_root_path)
from shinken.daemons.pollerdaemon import Poller
from shinken.bin import VERSION
# Protect against Windows multiprocessing, which will RELAUNCH everything
def main():
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-c', '--config',
dest="config_file", metavar="INI-CONFIG-FILE",
help='Config file')
parser.add_option('-d', '--daemon', action='store_true',
dest="is_daemon",
help="Run in daemon mode")
parser.add_option('-r', '--replace', action='store_true',
dest="do_replace",
help="Replace previous running poller")
parser.add_option('--debugfile', dest='debug_file',
help=("Enable debug log and save it to a file. "
"Default: not used"))
parser.add_option("-p", "--profile",
dest="profile",
help="Dump a profile file. Need the python cProfile library")
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
daemon = Poller(debug=opts.debug_file is not None, **opts.__dict__)
if not opts.profile:
daemon.main()
else:
# For perf tuning:
import cProfile
cProfile.runctx('''daemon.main()''', globals(), locals(), opts.profile)
if __name__ == '__main__':
main()
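# Typical invocations (illustrative; the .ini paths are assumptions):
#   shinken-poller.py -c /etc/shinken/daemons/pollerd.ini -d
#   shinken-poller.py -c pollerd.ini -r --debugfile /tmp/poller.debug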
| 3,979
|
Python
|
.py
| 88
| 37.681818
| 97
| 0.653519
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,427
|
shinken-reactionner.py
|
shinken-solutions_shinken/bin/shinken-reactionner.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
'''
This class is an application that launches actions like
notifications or event handlers
The reactionner listens to the Arbiter for the configuration sent through
the given port as first argument.
The configuration sent by the arbiter specifies from which schedulers the
reactionner will take actions.
When the reactionner is already launched and has its own conf, it keeps
on listening to the arbiter (on a timeout).
In case the arbiter has a new conf to send, the reactionner forgets its old
schedulers (and the associated actions) and takes the new ones instead.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import optparse
# Try to see if we are on an Android device or not
is_android = True
try:
import android
# Add our main script dir
if os.path.exists('/sdcard/sl4a/scripts/'):
sys.path.append('/sdcard/sl4a/scripts/')
os.chdir('/sdcard/sl4a/scripts/')
except ImportError:
is_android = False
try:
from shinken.bin import VERSION
import shinken
except ImportError:
# If importing shinken fails, try to load from current directory
# or parent directory to support running without installation.
# Submodules will then be loaded from there, too.
import imp
imp.load_module('shinken',
*imp.find_module('shinken',
[os.path.realpath("."),
os.path.realpath(".."),
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
"..")]))
import shinken
# Ok we should add the shinken root directory to our sys.path so our
# child processes will be able to use the shinken import without problems
shinken_root_path = os.path.dirname(os.path.dirname(shinken.__file__))
os.environ['PYTHONPATH'] = os.path.join(os.environ.get('PYTHONPATH', ''), shinken_root_path)
from shinken.daemons.reactionnerdaemon import Reactionner
from shinken.bin import VERSION
# Protect against Windows multiprocessing, which will RELAUNCH everything
def main():
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-c', '--config',
dest="config_file", metavar="INI-CONFIG-FILE",
help='Config file')
parser.add_option('-d', '--daemon', action='store_true',
dest="is_daemon",
help="Run in daemon mode")
parser.add_option('-r', '--replace', action='store_true',
dest="do_replace",
help="Replace previous running reactionner")
parser.add_option('--debugfile', dest='debug_file',
help=("Enable debug log and save it to a file. "
"Default: not used"))
parser.add_option("-p", "--profile",
dest="profile",
help="Dump a profile file. Need the python cProfile library")
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
daemon = Reactionner(debug=opts.debug_file is not None, **opts.__dict__)
daemon.main()
if __name__ == '__main__':
main()
| 4,180
|
Python
|
.py
| 94
| 37.361702
| 97
| 0.66331
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,428
|
shinken-receiver.py
|
shinken-solutions_shinken/bin/shinken-receiver.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
'''
This class is an interface for the Receiver
The receiver listens to the Arbiter for the configuration sent through
the given port as first argument.
The configuration sent by the arbiter specifies from which schedulers
the receiver will take broks.
When the receiver is already launched and has its own conf, it keeps on
listening to the arbiter (on a timeout).
In case the arbiter has a new conf to send, the receiver forgets its old
schedulers (and their associated broks) and takes the new ones instead.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import optparse
try:
from shinken.bin import VERSION
import shinken
except ImportError:
# If importing shinken fails, try to load from current directory
# or parent directory to support running without installation.
# Submodules will then be loaded from there, too.
import imp
imp.load_module('shinken',
*imp.find_module('shinken',
[os.path.realpath("."),
os.path.realpath(".."),
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
"..")]))
import shinken
# Ok we should add the shinken root directory to our sys.path so our
# child processes will be able to use the shinken import without problems
shinken_root_path = os.path.dirname(os.path.dirname(shinken.__file__))
os.environ['PYTHONPATH'] = os.path.join(os.environ.get('PYTHONPATH', ''), shinken_root_path)
from shinken.daemons.receiverdaemon import Receiver
from shinken.bin import VERSION
# Protect against Windows multiprocessing, which will RELAUNCH everything
def main():
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-c', '--config',
dest="config_file", metavar="INI-CONFIG-FILE",
help='Config file')
parser.add_option('-d', '--daemon', action='store_true',
dest="is_daemon",
help="Run in daemon mode")
parser.add_option('-r', '--replace', action='store_true',
dest="do_replace",
help="Replace previous running receiver")
parser.add_option('--debugfile', dest='debug_file',
help=("Enable debug log and save it to a file. "
"Default: not used"))
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
daemon = Receiver(debug=opts.debug_file is not None, **opts.__dict__)
daemon.main()
if __name__ == '__main__':
main()
| 3,650
|
Python
|
.py
| 80
| 38.5
| 97
| 0.664232
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,429
|
shinken-broker.py
|
shinken-solutions_shinken/bin/shinken-broker.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
'''
This class is an interface for the Broker
The broker listens to the Arbiter for the configuration sent through
the given port as first argument.
The configuration sent by the arbiter specifies from which schedulers
the broker will take broks.
When the broker is already launched and has its own conf, it keeps on
listening to the arbiter (on a timeout).
In case the arbiter has a new conf to send, the broker forgets its old
schedulers (and their associated broks) and takes the new ones instead.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import optparse
try:
from shinken.bin import VERSION
import shinken
except ImportError:
# If importing shinken fails, try to load from current directory
# or parent directory to support running without installation.
# Submodules will then be loaded from there, too.
import imp
imp.load_module('shinken',
*imp.find_module('shinken',
[os.path.realpath("."),
os.path.realpath(".."),
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
"..")]))
import shinken
# Ok we should add the shinken root directory to our sys.path so our
# child processes will be able to use the shinken import without problems
shinken_root_path = os.path.dirname(os.path.dirname(shinken.__file__))
os.environ['PYTHONPATH'] = os.path.join(os.environ.get('PYTHONPATH', ''), shinken_root_path)
from shinken.daemons.brokerdaemon import Broker
from shinken.bin import VERSION
# Protect against Windows multiprocessing, which will RELAUNCH everything
def main():
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-c', '--config',
dest="config_file", metavar="INI-CONFIG-FILE",
help='Config file')
parser.add_option('-d', '--daemon', action='store_true',
dest="is_daemon",
help="Run in daemon mode")
parser.add_option('-r', '--replace', action='store_true',
dest="do_replace",
help="Replace previous running broker")
parser.add_option('--debugfile', dest='debug_file',
help=("Enable debug log and save it to a file. "
"Default: not used"))
parser.add_option("-p", "--profile",
dest="profile",
help="Dump a profile file. Need the python cProfile library")
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
daemon = Broker(debug=opts.debug_file is not None, **opts.__dict__)
if not opts.profile:
daemon.main()
else:
# For perf tuning:
import cProfile
cProfile.runctx('''daemon.main()''', globals(), locals(), opts.profile)
if __name__ == '__main__':
main()
| 3,965
|
Python
|
.py
| 88
| 37.534091
| 97
| 0.652613
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,430
|
shinken-scheduler.py
|
shinken-solutions_shinken/bin/shinken-scheduler.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# For the Shinken application, I try to respect
# The Zen of Python, by Tim Peters. It's just some
# very good ideas that make Python programming very fun
# and efficient. If it's good for Python, it must be good for
# Shinken. :)
#
#
#
# Beautiful is better than ugly.
# Explicit is better than implicit.
# Simple is better than complex.
# Complex is better than complicated.
# Flat is better than nested.
# Sparse is better than dense.
# Readability counts.
# Special cases aren't special enough to break the rules.
# Although practicality beats purity.
# Errors should never pass silently.
# Unless explicitly silenced.
# In the face of ambiguity, refuse the temptation to guess.
# There should be one-- and preferably only one --obvious way to do it.
# Although that way may not be obvious at first unless you're Dutch.
# Now is better than never.
# Although never is often better than *right* now.
# If the implementation is hard to explain, it's a bad idea.
# If the implementation is easy to explain, it may be a good idea.
# Namespaces are one honking great idea -- let's do more of those!
'''
This class is the application in charge of scheduling
The scheduler listens to the Arbiter for the configuration sent through
the given port as first argument.
The configuration sent by the arbiter specifies which checks and actions
the scheduler must schedule, and a list of reactionners and pollers
to execute them
When the scheduler is already launched and has its own conf, it keeps on
listening to the arbiter (on a timeout).
In case the arbiter has a new conf to send, the scheduler is stopped
and a new one is created.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import optparse
# We try to raise the recursion limit,
# but we don't have the resource module on Windows
if os.name != 'nt':
import resource
# All the pickling will ask for a lot of recursion, so we must make
# sure to set it at a high value. The maximum recursion depth depends
# on the Python version and the process limit "stack size".
# The factors used were acquired by testing a broad range of installations
stacksize_soft, stacksize_hard = resource.getrlimit(resource.RLIMIT_STACK)
if sys.version_info < (3,):
sys.setrecursionlimit(int(stacksize_soft * 1.9 + 3200))
else:
sys.setrecursionlimit(int(stacksize_soft * 2.4 + 3200))
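# Worked example (illustrative): with a common 8 MiB soft stack limit,
# stacksize_soft == 8388608, so the recursion limit becomes
# int(8388608 * 1.9 + 3200) == 15941555 on Python 2 and
# int(8388608 * 2.4 + 3200) == 20135859 on Python 3.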
try:
from shinken.bin import VERSION
import shinken
except ImportError:
# If importing shinken fails, try to load from current directory
# or parent directory to support running without installation.
# Submodules will then be loaded from there, too.
import imp
imp.load_module('shinken',
*imp.find_module('shinken',
[os.path.realpath("."),
os.path.realpath(".."),
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
"..")]))
import shinken
# Ok we should add the shinken root directory to our sys.path so our
# child processes will be able to use the shinken import without problems
shinken_root_path = os.path.dirname(os.path.dirname(shinken.__file__))
os.environ['PYTHONPATH'] = os.path.join(os.environ.get('PYTHONPATH', ''), shinken_root_path)
from shinken.daemons.schedulerdaemon import Shinken
from shinken.bin import VERSION
# Protect against Windows multiprocessing, which will RELAUNCH everything
def main():
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-c', '--config',
dest="config_file", metavar="INI-CONFIG-FILE",
help='Config file')
parser.add_option('-d', '--daemon', action='store_true',
dest="is_daemon",
help="Run in daemon mode")
parser.add_option('-r', '--replace', action='store_true',
dest="do_replace",
help="Replace previous running scheduler")
parser.add_option('--debugfile', dest='debug_file',
help=("Enable debug log and save it to a file. "
"Default: not used"))
parser.add_option("-p", "--profile",
dest="profile",
help="Dump a profile file. Need the python cProfile library")
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
daemon = Shinken(debug=opts.debug_file is not None, **opts.__dict__)
if not opts.profile:
daemon.main()
else:
# For perf tuning:
import cProfile
cProfile.runctx('''daemon.main()''', globals(), locals(), opts.profile)
if __name__ == '__main__':
main()
| 5,754
|
Python
|
.py
| 129
| 38.72093
| 97
| 0.683075
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,431
|
shinken-arbiter.py
|
shinken-solutions_shinken/bin/shinken-arbiter.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This is the class of the Arbiter. Its role is to read the configuration,
cut it up, and send it to other elements like schedulers, reactionners
or pollers. It is also responsible for the high availability feature.
For example, if a scheduler dies, it sends the late scheduler's conf
to another available scheduler.
It also reads orders from users (nagios.cmd) and sends them to schedulers.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import optparse
# We try to raise the recursion limit,
# but we don't have the resource module on Windows
if os.name != 'nt':
import resource
# All the pickling will ask for a lot of recursion, so we must make
# sure to set it at a high value. The maximum recursion depth depends
# on the Python version and the process limit "stack size".
# The factors used were acquired by testing a broad range of installations
stacksize_soft, stacksize_hard = resource.getrlimit(resource.RLIMIT_STACK)
if sys.version_info < (3,):
sys.setrecursionlimit(int(stacksize_soft * 1.9 + 3200))
else:
sys.setrecursionlimit(int(stacksize_soft * 2.4 + 3200))
try:
from shinken.bin import VERSION
import shinken
except ImportError:
# If importing shinken fails, try to load from current directory
# or parent directory to support running without installation.
# Submodules will then be loaded from there, too.
import imp
imp.load_module('shinken',
*imp.find_module('shinken',
[os.path.realpath("."),
os.path.realpath(".."),
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
"..")]))
import shinken
# Ok we should add the shinken root directory to our sys.path so our
# child processes will be able to use the shinken import without problems
shinken_root_path = os.path.dirname(os.path.dirname(shinken.__file__))
os.environ['PYTHONPATH'] = os.path.join(os.environ.get('PYTHONPATH', ''), shinken_root_path)
from shinken.bin import VERSION
from shinken.daemons.arbiterdaemon import Arbiter
def main():
parser = optparse.OptionParser(
"%prog [options] -c configfile [-c additional_config_file]",
version="%prog: " + VERSION)
parser.add_option('-c', '--config', action='append',
dest="config_files", metavar="CONFIG-FILE",
help=('Config file (your nagios.cfg). Multiple -c can be '
'used, as if all the files were just one'))
parser.add_option('-d', '--daemon', action='store_true',
dest="is_daemon",
help="Run in daemon mode")
parser.add_option('-r', '--replace', action='store_true',
dest="do_replace",
help="Replace previous running arbiter")
parser.add_option('--debugfile', dest='debug_file',
help=("Enable debug log and save it to a file. "
"Default: not used"))
parser.add_option("-v", "--verify-config",
dest="verify_only", action="store_true",
help="Verify config file and exit")
parser.add_option("-p", "--profile",
dest="profile",
help="Dump a profile file. Need the python cProfile library")
parser.add_option("-a", "--analyse",
dest="analyse",
help="Dump an analyse statistics file, for support")
parser.add_option("-m", "--migrate",
dest="migrate",
help="Migrate the raw configuration read from the arbiter to another "
"module. --> VERY EXPERIMENTAL!")
parser.add_option("-n", "--name",
dest="arb_name",
help="Give the arbiter name to use. Optionnal, will use the hostaddress "
"if not provide to find it.")
parser.add_option("--dump_conf",
dest="dump_config_file",
help="dump config as json to a file")
opts, args = parser.parse_args()
if not opts.config_files:
parser.error("Requires at least one config file (option -c/--config")
if args:
parser.error("Does not accept any argument. Use option -c/--config")
# Protect against Windows multiprocessing, which will RELAUNCH everything
daemon = Arbiter(debug=opts.debug_file is not None, **opts.__dict__)
if not opts.profile:
daemon.main()
else:
# For perf tuning:
import cProfile
cProfile.runctx('''daemon.main()''', globals(), locals(), opts.profile)
if __name__ == '__main__':
main()
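# Typical invocations (illustrative; paths are assumptions):
#   shinken-arbiter.py -c /etc/shinken/shinken.cfg -d
#   shinken-arbiter.py -v -c shinken.cfg    # only verify the config, then exit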
| 5,744
|
Python
|
.py
| 119
| 39.176471
| 97
| 0.627986
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,432
|
satellitelink.py
|
shinken-solutions_shinken/shinken/satellitelink.py
|
'''shinken.satellitelink is deprecated. Please use shinken.objects.satellitelink now.'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.old_daemon_link import deprecation, make_deprecated
deprecation(__doc__)
from shinken.objects.satellitelink import (
SatelliteLink,
SatelliteLinks,
)
SatelliteLink = make_deprecated(SatelliteLink)
SatelliteLinks = make_deprecated(SatelliteLinks)
| 444
|
Python
|
.py
| 10
| 42.1
| 88
| 0.820513
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,433
|
db.py
|
shinken-solutions_shinken/shinken/db.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
class DB(object):
"""DB is a generic class for SQL Database"""
def __init__(self, table_prefix=''):
self.table_prefix = table_prefix
def create_insert_query(self, table, data):
"""Create a INSERT query in table with all data of data (a dict)"""
query = "INSERT INTO %s " % (self.table_prefix + table)
props_str = ' ('
values_str = ' ('
i = 0  # for the ',' problem... looks like C here...
for prop in sorted(data.keys()):
i += 1
val = data[prop]
# Booleans must be caught, because we want 0 or 1, not True or False
if isinstance(val, bool):
if val:
val = 1
else:
val = 0
if i == 1:
props_str = props_str + "%s " % prop
values_str = values_str + "'%s' " % val
else:
props_str = props_str + ", %s " % prop
values_str = values_str + ", '%s' " % val
# Ok we've got data, let's finish the query
props_str = props_str + ' )'
values_str = values_str + ' )'
query = query + props_str + ' VALUES' + values_str
return query
def create_update_query(self, table, data, where_data):
"""Create a update query of table with data, and use where data for
the WHERE clause
"""
query = "UPDATE %s set " % (self.table_prefix + table)
# First data manage
query_follow = ''
i = 0 # for the , problem...
for prop in sorted(data.keys()):
# No need to update a property that is in the WHERE clause;
# it is even dangerous, and will raise a warning
if prop in where_data:
continue
i += 1
val = data[prop]
# Booleans must be caught, because we want 0 or 1, not True or False
if isinstance(val, bool):
if val:
val = 1
else:
val = 0
if i == 1:
query_follow += "%s='%s' " % (prop, val)
else:
query_follow += ", %s='%s' " % (prop, val)
# Ok for data, now WHERE, same things
where_clause = " WHERE "
i = 0 # For the 'and' problem
for prop in sorted(where_data.keys()):
i += 1
val = where_data[prop]
# Booleans must be caught, because we want 0 or 1, not True or False
if isinstance(val, bool):
if val:
val = 1
else:
val = 0
if i == 1:
where_clause += "%s='%s' " % (prop, val)
else:
where_clause += "and %s='%s' " % (prop, val)
query = query + query_follow + where_clause
return query
def fetchone(self):
"""Just get an entry"""
return self.db_cursor.fetchone()
def fetchall(self):
"""Get all entry"""
return self.db_cursor.fetchall()
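# A minimal usage sketch (illustrative, not part of the original module):
# queries are built from plain dicts, keys are emitted in sorted order
# and booleans are flattened to 0/1.
#   db = DB(table_prefix='shinken_')
#   db.create_insert_query('hosts', {'host_name': 'srv1', 'active': True})
#   -> "INSERT INTO shinken_hosts (active, host_name) VALUES ('1', 'srv1')" (modulo spacing)
#   db.create_update_query('hosts', {'active': False}, {'host_name': 'srv1'})
#   -> "UPDATE shinken_hosts set active='0' WHERE host_name='srv1'" (modulo spacing)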
| 4,098
|
Python
|
.py
| 103
| 30.116505
| 82
| 0.54726
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,434
|
daterange.py
|
shinken-solutions_shinken/shinken/daterange.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import calendar
import re
from shinken.util import get_sec_from_morning, get_day, get_start_of_day, get_end_of_day
from shinken.log import logger
# Get the day number (like 27 for Tuesday, July 27 2010) for the call:
# 2010, july, tuesday, -1 (the last Tuesday of July 2010)
def find_day_by_weekday_offset(year, month, weekday, offset):
# get the id of the weekday (1 for Tuesday)
weekday_id = Daterange.get_weekday_id(weekday)
if weekday_id is None:
return None
# same for month
month_id = Daterange.get_month_id(month)
if month_id is None:
return None
# thanks calendar :)
cal = calendar.monthcalendar(year, month_id)
# If we ask for a -1 day, just reverse cal
if offset < 0:
offset = abs(offset)
cal.reverse()
# ok go for it
nb_found = 0
try:
for i in range(0, offset + 1):
# in cal, 0 means "there is no day here :)"
if cal[i][weekday_id] != 0:
nb_found += 1
if nb_found == offset:
return cal[i][weekday_id]
return None
except Exception:
return None
def find_day_by_offset(year, month, offset):
month_id = Daterange.get_month_id(month)
if month_id is None:
return None
(tmp, days_in_month) = calendar.monthrange(year, month_id)
if offset >= 0:
return min(offset, days_in_month)
else:
return max(1, days_in_month + offset + 1)
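# Illustrative examples for the two helpers above (values checked
# against a 2010 calendar):
#   find_day_by_weekday_offset(2010, 'july', 'tuesday', -1) -> 27
#     (the last Tuesday of July 2010)
#   find_day_by_offset(2010, 'july', -1) -> 31
#     (the last day of July, a 31-day month)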
class Timerange(object):
# entry is like 00:00-24:00
def __init__(self, entry):
pattern = r'(\d\d):(\d\d)-(\d\d):(\d\d)'
m = re.match(pattern, entry)
self.is_valid = m is not None
if self.is_valid:
self.hstart, self.mstart, self.hend, self.mend = map(int, m.groups())
def __str__(self):
return str(self.__dict__)
def get_sec_from_morning(self):
return self.hstart * 3600 + self.mstart * 60
def get_first_sec_out_from_morning(self):
# If start at 0:0, the min out is the end
if self.hstart == 0 and self.mstart == 0:
return self.hend * 3600 + self.mend * 60
return 0
def is_time_valid(self, t):
sec_from_morning = get_sec_from_morning(t)
return (self.is_valid and
self.hstart * 3600 + self.mstart * 60 <=
sec_from_morning <=
self.hend * 3600 + self.mend * 60)
def is_correct(self):
return self.is_valid
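# Illustrative sketch of how a Timerange behaves:
#   tr = Timerange('09:00-17:00')
#   tr.is_valid                  -> True
#   tr.get_sec_from_morning()    -> 32400 (9 * 3600)
#   tr.is_time_valid(t)          -> True when the local time of epoch t
#                                   falls between 09:00 and 17:00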
""" TODO: Add some comment about this class for the doc"""
class Daterange(object):
weekdays = { # NB : 0 based : 0 == monday
'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,
'friday': 4, 'saturday': 5, 'sunday': 6
}
months = { # NB : 1 based : 1 == january..
'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5,
'june': 6, 'july': 7, 'august': 8, 'september': 9,
'october': 10, 'november': 11, 'december': 12
}
rev_weekdays = dict((v, k) for k, v in weekdays.items())
rev_months = dict((v, k) for k, v in months.items())
def __init__(self, syear, smon, smday, swday, swday_offset,
eyear, emon, emday, ewday, ewday_offset, skip_interval, other):
self.syear = int(syear)
self.smon = smon
self.smday = int(smday)
self.swday = swday
self.swday_offset = int(swday_offset)
self.eyear = int(eyear)
self.emon = emon
self.emday = int(emday)
self.ewday = ewday
self.ewday_offset = int(ewday_offset)
self.skip_interval = int(skip_interval)
self.other = other
self.timeranges = []
for timeinterval in other.split(','):
self.timeranges.append(Timerange(timeinterval.strip()))
def __str__(self):
return '' # str(self.__dict__)
def is_correct(self):
for tr in self.timeranges:
if not tr.is_correct():
return False
return True
@classmethod
def get_month_id(cls, month):
return Daterange.months[month]
@classmethod
def get_month_by_id(cls, month_id):
return Daterange.rev_months[month_id]
@classmethod
def get_weekday_id(cls, weekday):
return Daterange.weekdays[weekday]
@classmethod
def get_weekday_by_id(cls, weekday_id):
return Daterange.rev_weekdays[weekday_id]
def get_start_and_end_time(self, ref=None):
logger.warning("Calling function get_start_and_end_time which is not implemented")
raise NotImplementedError()
def is_time_valid(self, t):
# print("****Look for time valid for", time.asctime(time.localtime(t)))
if self.is_time_day_valid(t):
# print("is time day valid")
for tr in self.timeranges:
# print(tr, "is valid?", tr.is_time_valid(t))
if tr.is_time_valid(t):
# print("return True")
return True
return False
def get_min_sec_from_morning(self):
mins = []
for tr in self.timeranges:
mins.append(tr.get_sec_from_morning())
return min(mins)
def get_min_sec_out_from_morning(self):
mins = []
for tr in self.timeranges:
mins.append(tr.get_first_sec_out_from_morning())
return min(mins)
def get_min_from_t(self, t):
if self.is_time_valid(t):
return t
t_day_epoch = get_day(t)
tr_mins = self.get_min_sec_from_morning()
return t_day_epoch + tr_mins
def is_time_day_valid(self, t):
(start_time, end_time) = self.get_start_and_end_time(t)
if start_time <= t <= end_time:
return True
else:
return False
def is_time_day_invalid(self, t):
(start_time, end_time) = self.get_start_and_end_time(t)
if start_time <= t <= end_time:
return False
else:
return True
def get_next_future_timerange_valid(self, t):
# print("Look for get_next_future_timerange_valid for t", t, time.asctime(time.localtime(t)))
sec_from_morning = get_sec_from_morning(t)
starts = []
for tr in self.timeranges:
tr_start = tr.hstart * 3600 + tr.mstart * 60
if tr_start >= sec_from_morning:
starts.append(tr_start)
if starts != []:
return min(starts)
else:
return None
def get_next_future_timerange_invalid(self, t):
# print('Call for get_next_future_timerange_invalid from ', time.asctime(time.localtime(t)))
sec_from_morning = get_sec_from_morning(t)
# print('sec from morning', sec_from_morning)
ends = []
for tr in self.timeranges:
tr_start = tr.hstart * 3600 + tr.mstart * 60
if tr_start >= sec_from_morning:
ends.append(tr_start)
tr_end = tr.hend * 3600 + tr.mend * 60
if tr_end >= sec_from_morning:
ends.append(tr_end)
# print("Ends:", ends)
# Remove the last second of the day for 00->24h
if 86400 in ends:
ends.remove(86400)
if ends != []:
return min(ends)
else:
return None
def get_next_valid_day(self, t):
if self.get_next_future_timerange_valid(t) is None:
# this day is finished, we check the next period
(start_time, end_time) = self.get_start_and_end_time(get_day(t) + 86400)
else:
(start_time, end_time) = self.get_start_and_end_time(t)
if t <= start_time:
return get_day(start_time)
if self.is_time_day_valid(t):
return get_day(t)
return None
def get_next_valid_time_from_t(self, t):
# print("\tDR Get next valid from:", time.asctime(time.localtime(t)))
# print("DR Get next valid from:", t)
if self.is_time_valid(t):
return t
# print("DR Get next valid from:", time.asctime(time.localtime(t)))
# First we search for the day of t
t_day = self.get_next_valid_day(t)
# print("DR: T next valid day", time.asctime(time.localtime(t_day)))
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if t_day and t < t_day:
sec_from_morning = self.get_next_future_timerange_valid(t_day)
else: # t is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_valid(t)
# print("DR: sec from morning", sec_from_morning)
if sec_from_morning is not None:
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning
# Then we search for the next day of t
# The sec will be the min of the day
t = get_day(t) + 86400
t_day2 = self.get_next_valid_day(t)
sec_from_morning = self.get_next_future_timerange_valid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning
else:
# We did not find any valid time
return None
def get_next_invalid_day(self, t):
# print("Look in", self.__dict__)
# print('DR: get_next_invalid_day for', time.asctime(time.localtime(t)))
if self.is_time_day_invalid(t):
# print("EARLY RETURN")
return t
next_future_timerange_invalid = self.get_next_future_timerange_invalid(t)
# print("next_future_timerange_invalid:", next_future_timerange_invalid)
# If today there is no more unavailable timerange, search the next day
if next_future_timerange_invalid is None:
# print('DR: get_next_future_timerange_invalid is None')
# this day is finished, we check the next period
(start_time, end_time) = self.get_start_and_end_time(get_day(t))
else:
# print('DR: get_next_future_timerange_invalid is',)
# print(time.asctime(time.localtime(next_future_timerange_invalid)))
(start_time, end_time) = self.get_start_and_end_time(t)
# (start_time, end_time) = self.get_start_and_end_time(t)
# print("START", time.asctime(time.localtime(start_time)),)
# print("END", time.asctime(time.localtime(end_time)))
# The next invalid day can be t's day if there is a possible
# invalid time range (timerange is not 00->24)
if next_future_timerange_invalid is not None:
if start_time <= t <= end_time:
# print("Early Return next invalid day:", time.asctime(time.localtime(get_day(t))))
return get_day(t)
if start_time >= t:
# print("start_time >= t:", time.asctime(time.localtime(get_day(start_time))))
return get_day(start_time)
else:
# Else, there is no possibility that our start_time<->end_time contains
# any invalid time (full period out). So it's end_time+1 sec (the day after end_time)
return get_day(end_time + 1)
return None
def get_next_invalid_time_from_t(self, t):
if not self.is_time_valid(t):
return t
# First we search for the day of t
t_day = self.get_next_invalid_day(t)
# print("FUCK NEXT DAY", time.asctime(time.localtime(t_day)))
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if t < t_day:
sec_from_morning = self.get_next_future_timerange_invalid(t_day)
else: # t is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_invalid(t)
# print("DR: sec from morning", sec_from_morning)
# tr can't be valid, or it would have been returned at the beginning
# sec_from_morning = self.get_next_future_timerange_invalid(t)
# Ok we've got a next invalid day and an invalid possibility in
# timerange, so the next invalid is this day + sec_from_morning
# print("T_day", t_day, "Sec from morning", sec_from_morning)
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning + 1
# We've got a day but no sec_from_morning: the timerange is full (0->24h)
# so the next invalid is this day at the day_start
if t_day is not None and sec_from_morning is None:
return t_day
# Then we search for the next day of t
# The sec will be the min of the day
t = get_day(t) + 86400
t_day2 = self.get_next_invalid_day(t)
sec_from_morning = self.get_next_future_timerange_invalid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning + 1
if t_day2 is not None and sec_from_morning is None:
return t_day2
else:
# We did not find any valid time
return None
""" TODO: Add some comment about this class for the doc"""
class CalendarDaterange(Daterange):
def get_start_and_end_time(self, ref=None):
start_time = get_start_of_day(self.syear, int(self.smon), self.smday)
end_time = get_end_of_day(self.eyear, int(self.emon), self.emday)
return (start_time, end_time)
""" TODO: Add some comment about this class for the doc"""
class StandardDaterange(Daterange):
def __init__(self, day, other):
self.other = other
self.timeranges = []
for timeinterval in other.split(','):
self.timeranges.append(Timerange(timeinterval.strip()))
self.day = day
# It's correct only if the weekday (Sunday, etc) is a valid one
def is_correct(self):
b = self.day in Daterange.weekdays
if not b:
logger.error("Error: %s is not a valid day", self.day)
# Check also if Daterange is correct.
b &= Daterange.is_correct(self)
return b
def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
self.syear = now.tm_year
self.month = now.tm_mon
# month_start_id = now.tm_mon
# month_start = Daterange.get_month_by_id(month_start_id)
self.wday = now.tm_wday
day_id = Daterange.get_weekday_id(self.day)
today_morning = get_start_of_day(now.tm_year, now.tm_mon, now.tm_mday)
tonight = get_end_of_day(now.tm_year, now.tm_mon, now.tm_mday)
day_diff = (day_id - now.tm_wday) % 7
return (today_morning + day_diff * 86400, tonight + day_diff * 86400)
""" TODO: Add some comment about this class for the doc"""
class MonthWeekDayDaterange(Daterange):
# It's correct only if the weekday (Sunday, etc) is a valid one
def is_correct(self):
b = True
b &= self.swday in Daterange.weekdays
if not b:
logger.error("Error: %s is not a valid day", self.swday)
b &= self.ewday in Daterange.weekdays
if not b:
logger.error("Error: %s is not a valid day", self.ewday)
return b
def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_id = Daterange.get_month_id(self.smon)
day_start = find_day_by_weekday_offset(self.syear, self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = Daterange.get_month_id(self.emon)
day_end = find_day_by_weekday_offset(self.eyear, self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:  # the period spans two years
if now_epoch > end_time: # check for next year
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, month_end_id, day_end)
else:
# it's just that the start was last year
day_start = find_day_by_weekday_offset(self.syear - 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear - 1, month_id, day_start)
else:
if now_epoch > end_time:
# just have to check for next year if necessary
day_start = find_day_by_weekday_offset(self.syear + 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear + 1, month_id, day_start)
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, month_end_id, day_end)
return (start_time, end_time)
""" TODO: Add some comment about this class for the doc"""
class MonthDateDaterange(Daterange):
def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = Daterange.get_month_id(self.smon)
day_start = find_day_by_offset(self.syear, self.smon, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = Daterange.get_month_id(self.emon)
day_end = find_day_by_offset(self.eyear, self.emon, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:  # the period spans two years
if now_epoch > end_time:
# check for next year
day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
end_time = get_end_of_day(self.eyear + 1, month_end_id, day_end)
else:
# it's just that the start was last year
day_start = find_day_by_offset(self.syear - 1, self.smon, self.smday)
start_time = get_start_of_day(self.syear - 1, month_start_id, day_start)
else:
if now_epoch > end_time:
# just have to check for next year if necessary
day_start = find_day_by_offset(self.syear + 1, self.smon, self.smday)
start_time = get_start_of_day(self.syear + 1, month_start_id, day_start)
day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
end_time = get_end_of_day(self.eyear + 1, month_end_id, day_end)
return (start_time, end_time)
""" TODO: Add some comment about this class for the doc"""
class WeekDayDaterange(Daterange):
def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
# If no year, it's our year
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
month_start = Daterange.get_month_by_id(month_start_id)
day_start = find_day_by_weekday_offset(self.syear,
month_start, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Same for end year
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
month_end = Daterange.get_month_by_id(month_end_id)
day_end = find_day_by_weekday_offset(self.eyear, month_end, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
# Maybe end_time is before start. So look for the
# next month
if start_time > end_time:
month_end_id = month_end_id + 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
month_end = Daterange.get_month_by_id(month_end_id)
day_end = find_day_by_weekday_offset(self.eyear,
month_end, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
# But maybe we did not look far enough. We should add a month
if end_time < now_epoch:
month_end_id = month_end_id + 1
month_start_id = month_start_id + 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# First start
month_start = Daterange.get_month_by_id(month_start_id)
day_start = find_day_by_weekday_offset(self.syear,
month_start, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Then end
month_end = Daterange.get_month_by_id(month_end_id)
day_end = find_day_by_weekday_offset(self.eyear,
month_end, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time)
""" TODO: Add some comment about this class for the doc"""
class MonthDayDaterange(Daterange):
def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
month_start = Daterange.get_month_by_id(month_start_id)
day_start = find_day_by_offset(self.syear, month_start, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
month_end = Daterange.get_month_by_id(month_end_id)
day_end = find_day_by_offset(self.eyear, month_end, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:
month_end_id = month_end_id + 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
# month_end must follow the new month_end_id
month_end = Daterange.get_month_by_id(month_end_id)
day_end = find_day_by_offset(self.eyear, month_end, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
if end_time < now_epoch:
month_end_id = month_end_id + 1
month_start_id = month_start_id + 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# For the start
month_start = Daterange.get_month_by_id(month_start_id)
day_start = find_day_by_offset(self.syear, month_start, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# For the end
month_end = Daterange.get_month_by_id(month_end_id)
day_end = find_day_by_offset(self.eyear, month_end, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time)
| 24,786
|
Python
|
.py
| 528
| 36.651515
| 101
| 0.591065
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,435
|
reactionnerlink.py
|
shinken-solutions_shinken/shinken/reactionnerlink.py
|
'''shinken.reactionnerlink is deprecated. Please use shinken.objects.reactionnerlink now.'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.old_daemon_link import make_deprecated_daemon_link
from shinken.objects import reactionnerlink
make_deprecated_daemon_link(reactionnerlink)
| 333
|
Python
|
.py
| 5
| 64.8
| 92
| 0.842593
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,436
|
serializer.py
|
shinken-solutions_shinken/shinken/serializer.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import six
import traceback
import zlib
if six.PY2:
import cPickle as pickle
else:
import pickle
from shinken.safepickle import SafeUnpickler
from shinken.log import logger
class SerializeError(Exception):
pass
def serialize(obj):
"""
Serializes an object to be sent through an HTTP request, for instance
:param mixed obj: The object to serialize
:rtype: bytes
:terun: The serialized object
"""
try:
return zlib.compress(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL))
except pickle.PickleError as e:
logger.error("Failed to serialize object: %s", e)
logger.error(traceback.format_exc())
raise SerializeError(e)
def deserialize(payload):
"""
Deserializes an object got from an HTTP request, for instance
:param bytes obj: The payload to deserialize
:rtype: bytes
:terun: The serialized object
"""
try:
if hasattr(payload, 'read'):
raw = zlib.decompress(payload.read())
else:
raw = zlib.decompress(payload)
return SafeUnpickler(io.BytesIO(raw)).load()
except pickle.PickleError as e:
logger.error("Failed to serialize payload: %s", e)
logger.error(traceback.format_exc())
raise SerializeError(e)
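# --- Hedged usage sketch (not part of the original module) ---
# Round-trip through serialize()/deserialize(); deserialize() accepts raw
# bytes or a file-like object exposing .read(). Assumes plain builtins pass
# the SafeUnpickler whitelist.
if __name__ == '__main__':
    payload = serialize({'host': 'srv1', 'state_id': 0})
    assert deserialize(payload) == {'host': 'srv1', 'state_id': 0}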
| 2,247
|
Python
|
.py
| 63
| 31.634921
| 82
| 0.720663
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,437
|
old_daemon_link.py
|
shinken-solutions_shinken/shinken/old_daemon_link.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import inspect
import warnings
def deprecation(msg, stacklevel=4):
warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
def make_deprecated_daemon_link(new_module):
stack = inspect.stack()
full_mod_name = stack[1][0].f_locals['__name__']
mod_name = full_mod_name.split('.')[-1]
deprecation(
"{fullname} is deprecated module path ; "
"{name} must now be imported from shinken.objects.{name}"
" ; please update your code accordingly".format(name=mod_name, fullname=full_mod_name)
)
sys.modules[full_mod_name] = new_module
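# --- Hedged usage sketch (not part of the original module) ---
# Importing a legacy path wired through make_deprecated_daemon_link() emits a
# DeprecationWarning and aliases the module to its new location (assumes the
# legacy module has not been imported yet).
if __name__ == '__main__':
    import warnings
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        import shinken.reactionnerlink  # legacy path, see reactionnerlink.py
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)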
| 680
|
Python
|
.py
| 16
| 37.9375
| 94
| 0.713202
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,438
|
http_daemon.py
|
shinken-solutions_shinken/shinken/http_daemon.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import sys
import errno
import time
import socket
import select
import inspect
import json
import zlib
import base64
import threading
import traceback
import io
from bottle import abort
try:
import ssl
except ImportError:
ssl = None
SSLAdapter = None
try:
from OpenSSL import SSL
except ImportError:
SSL = None
SSLAdapter = None
try:
from cheroot.wsgi import Server as CherryPyWSGIServer
if SSL is not None:
from cheroot.ssl.pyopenssl import pyOpenSSLAdapter as SSLAdapter
except ImportError:
try:
from cherrypy.wsgiserver import CherryPyWSGIServer
if SSL is not None:
if six.PY2:
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter as SSLAdapter
else:
# A bug in CherryPy prevents from using pyOpenSSLAdapter
# with python3: https://github.com/cherrypy/cherrypy/issues/1399
# This has been fixed in cherrypy >= 9.0.0
# If performance is an issue, please consider upgrading cherrypy
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter as SSLAdapter
except ImportError:
CherryPyWSGIServer = None
if CherryPyWSGIServer and SSLAdapter:
# Create 'safe' SSL adapter by disabling SSLv2/SSLv3 connections
class SafeSSLAdapter(SSLAdapter):
def get_context(self):
c = SSLAdapter.get_context(self)
c.set_options(SSL.OP_NO_SSLv2 |
SSL.OP_NO_SSLv3)
return c
else:
SafeSSLAdapter = None
from wsgiref import simple_server
# load global helper objects for logs and stats computation
from shinken.log import logger
from shinken.stats import statsmgr
from shinken.serializer import deserialize
# Let's load bottle! :)
import bottle
bottle.debug(True)
class InvalidWorkDir(Exception):
pass
class PortNotFree(Exception):
pass
# CherryPy allows us to have an HTTP/1.1 server, and thus keep-alive connections
class CherryPyServer(bottle.ServerAdapter):
run_callback = None
def run(self, handler): # pragma: no cover
daemon_thread_pool_size = self.options['daemon_thread_pool_size']
server = CherryPyWSGIServer(
(self.host, self.port),
handler,
numthreads=daemon_thread_pool_size,
shutdown_timeout=1
)
logger.info('Initializing a CherryPy backend with %d threads', daemon_thread_pool_size)
use_ssl = self.options['use_ssl']
ca_cert = self.options['ca_cert']
ssl_cert = self.options['ssl_cert']
ssl_key = self.options['ssl_key']
if SafeSSLAdapter and use_ssl:
server.ssl_adapter = SafeSSLAdapter(ssl_cert, ssl_key, ca_cert)
if use_ssl:
server.ssl_certificate = ssl_cert
server.ssl_private_key = ssl_key
if CherryPyServer.run_callback is not None:
CherryPyServer.run_callback(server)
return server
class CherryPyBackend(object):
def __init__(self, host, port, use_ssl, ca_cert, ssl_key,
ssl_cert, hard_ssl_name_check, daemon_thread_pool_size):
self.port = port
self.use_ssl = use_ssl
self.srv = None
try:
def register_server(server):
self.srv = server
CherryPyServer.run_callback = staticmethod(register_server)
bottle.run(
host=host,
port=port,
server=CherryPyServer,
quiet=False,
use_ssl=use_ssl,
ca_cert=ca_cert,
ssl_key=ssl_key,
ssl_cert=ssl_cert,
daemon_thread_pool_size=daemon_thread_pool_size
)
except socket.error as exp:
msg = "Error: Sorry, the port %d is not free: %s" % (self.port, exp)
raise PortNotFree(msg)
except Exception as e:
# must be a problem with http workdir:
raise InvalidWorkDir(e)
    # When called, it does not have a socket yet
def get_sockets(self):
return []
# We stop our processing, but also try to hard close our socket as cherrypy is not doing it...
def stop(self):
# TODO: find why, but in ssl mode the stop() is locking, so bailout before
if self.use_ssl:
return
try:
self.srv.stop()
except Exception as exp:
logger.warning('Cannot stop the CherryPy backend : %s', exp)
# Will run and LOCK
def run(self):
try:
self.srv.start()
except socket.error as exp:
msg = "Error: Sorry, the port %d is not free: %s" % (self.port, exp)
# from None stops the processing of `exp`: prevents exception in
# exception error
# PY23COMPAT: raise from not supported in python2
#raise PortNotFree(msg) from None
six.raise_from(exp, None)
finally:
try:
self.srv.stop()
except Exception:
pass
# WSGIRef is the default HTTP server. It CAN manage HTTPS, but at a huge cost
# for the client, because it's only HTTP/1.0:
# no keep-alive, and with HTTPS it's just a nightmare
class WSGIREFAdapter(bottle.ServerAdapter):
run_callback = None
def run(self, handler):
daemon_thread_pool_size = self.options['daemon_thread_pool_size']
from wsgiref.simple_server import WSGIRequestHandler
LoggerHandler = WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
LoggerHandler = QuietHandler
srv = simple_server.make_server(
self.host,
self.port,
handler,
handler_class=LoggerHandler
)
logger.info('Initializing a wsgiref backend with %d threads', daemon_thread_pool_size)
use_ssl = self.options['use_ssl']
ca_cert = self.options['ca_cert']
ssl_cert = self.options['ssl_cert']
ssl_key = self.options['ssl_key']
if use_ssl:
if not ssl:
logger.error("Missing python-openssl library,"
"please install it to open a https backend")
raise Exception("Missing python-openssl library, "
"please install it to open a https backend")
srv.socket = ssl.wrap_socket(
srv.socket,
keyfile=ssl_key,
certfile=ssl_cert,
server_side=True
)
if WSGIREFAdapter.run_callback is not None:
WSGIREFAdapter.run_callback(srv)
return srv
class WSGIREFBackend(object):
def __init__(self, host, port, use_ssl, ca_cert, ssl_key,
ssl_cert, hard_ssl_name_check, daemon_thread_pool_size):
self.daemon_thread_pool_size = daemon_thread_pool_size
self.srv = None
try:
def register_server(server):
self.srv = server
WSGIREFAdapter.run_callback = staticmethod(register_server)
bottle.run(
host=host,
port=port,
server=WSGIREFAdapter,
quiet=True,
use_ssl=use_ssl,
ca_cert=ca_cert,
ssl_key=ssl_key, ssl_cert=ssl_cert,
daemon_thread_pool_size=daemon_thread_pool_size
)
except socket.error as exp:
msg = "Error: Sorry, the port %d is not free: %s" % (port, exp)
raise PortNotFree(msg)
def get_sockets(self):
if self.srv.socket:
return [self.srv.socket]
else:
return []
def get_socks_activity(self, socks, timeout):
try:
ins, _, _ = select.select(socks, [], [], timeout)
except select.error as e:
if six.PY2:
err, _ = e
else:
err = e.errno
if err == errno.EINTR:
return []
elif err == errno.EBADF:
logger.error('Failed to handle request: %s', e)
return []
raise
return ins
    # We are asked to stop, so we close our sockets
def stop(self):
for s in self.get_sockets():
try:
s.close()
except Exception:
pass
self.srv.socket = None
# Manually manage the number of threads
def run(self):
# Ok create the thread
nb_threads = self.daemon_thread_pool_size
# Keep a list of our running threads
threads = []
logger.info('Using a %d http pool size', nb_threads)
while True:
            # We must not run too many threads, so we loop until
            # we get at least one free slot available
free_slots = 0
while free_slots <= 0:
to_del = [t for t in threads if not t.is_alive()]
for t in to_del:
t.join()
threads.remove(t)
free_slots = nb_threads - len(threads)
if free_slots <= 0:
time.sleep(0.01)
socks = self.get_sockets()
# Blocking for 0.1 s max here
ins = self.get_socks_activity(socks, 0.1)
if len(ins) == 0: # trivial case: no fd activity:
continue
# If we got activity, Go for a new thread!
for sock in socks:
if sock in ins:
# GO!
t = threading.Thread(
None,
target=self.handle_one_request_thread,
name='http-request',
args=(sock,)
)
# We don't want to hang the master thread just because this one is still alive
t.daemon = True
t.start()
threads.append(t)
def handle_one_request_thread(self, sock):
self.srv.handle_request()
class HTTPDaemon(object):
def __init__(self, host, port, http_backend, use_ssl, ca_cert,
ssl_key, ssl_cert, hard_ssl_name_check,
daemon_thread_pool_size):
self.port = port
self.host = host
self.srv = None
# Port = 0 means "I don't want HTTP server"
if self.port == 0:
return
self.use_ssl = use_ssl
self.registered_fun = {}
self.registered_fun_names = []
self.registered_fun_defaults = {}
protocol = 'http'
if use_ssl:
protocol = 'https'
self.uri = '%s://%s:%s' % (protocol, self.host, self.port)
logger.info("Opening HTTP socket at %s", self.uri)
        if http_backend == 'cherrypy' or (http_backend == 'auto' and CherryPyWSGIServer):
self.srv = CherryPyBackend(
host,
port,
use_ssl,
ca_cert,
ssl_key,
ssl_cert,
hard_ssl_name_check,
daemon_thread_pool_size
)
else:
            logger.warning('Loading the old WSGI Backend. CherryPy >= 3 is recommended instead')
self.srv = WSGIREFBackend(
host,
port,
use_ssl,
ca_cert,
ssl_key,
ssl_cert,
hard_ssl_name_check,
daemon_thread_pool_size
)
self.lock = threading.RLock()
# Get the server socket but not if disabled or closed
def get_sockets(self):
if self.port == 0 or self.srv is None:
return []
return self.srv.get_sockets()
def run(self):
self.srv.run()
def _parse_request_params(self, cbname, method, request, args=[]):
"""
        Parses the incoming request and processes the callback parameters
:param list args: The callback parameters
:rtype: mixed
:return: The callback parameters
"""
if method in ('get', 'post'):
parms = {}
for arg in args:
val = None
if method == 'post':
val = request.forms.get(arg, None)
elif method == 'get':
val = request.GET.get(arg, None)
if val:
parms[arg] = val
else:
# Checks if the missing arg has a default value
default_args = self.registered_fun_defaults.get(cbname, {})
if arg not in default_args:
                        abort(400, 'Missing argument %s. request=%s' % (arg, request))
return parms
elif method == 'put':
content = request.body
return deserialize(content)
else:
abort(400, 'Unmanaged HTTP method: %s' % method)
def register(self, obj):
methods = inspect.getmembers(obj, predicate=inspect.ismethod)
merge = [
cbname for (cbname, callback) in methods
if cbname in self.registered_fun_names
]
if merge != []:
methods_in = [
m.__name__ for m in obj.__class__.__dict__.values()
if inspect.isfunction(m)
]
methods = [m for m in methods if m[0] in methods_in]
for (cbname, callback) in methods:
if cbname.startswith('_'):
continue
# Get the args of the function to catch them in the queries
if six.PY2:
argspec = inspect.getargspec(callback)
keywords = argspec.keywords
else:
argspec = inspect.getfullargspec(callback)
keywords = argspec.varkw
args = argspec.args
varargs = argspec.varargs
defaults = argspec.defaults
            # If we got some defaults, save arg=value so we can look
            # them up afterwards
if defaults:
default_args = zip(
argspec.args[-len(argspec.defaults):],
argspec.defaults
)
_d = {}
for (argname, defavalue) in default_args:
_d[argname] = defavalue
self.registered_fun_defaults[cbname] = _d
            # remove useless self in args, because we already got a bound method callback
if 'self' in args:
args.remove('self')
#print("Registering", cbname, args, obj)
self.registered_fun_names.append(cbname)
self.registered_fun[cbname] = (callback)
            # WARNING : we MUST use a 2-level function here, or the f_wrapper
            # would be unique and so would keep linking to the last function
            # again and again
def register_callback(cbname, args, callback, obj, lock):
def f_wrapper():
try:
t0 = time.time()
args_time = aqu_lock_time = calling_time = json_time = 0
need_lock = getattr(callback, 'need_lock', True)
# Warning : put the bottle.response set inside the wrapper
# because outside it will break bottle
method = getattr(callback, 'method', 'get').lower()
params = self._parse_request_params(
cbname,
method,
bottle.request,
args
)
t1 = time.time()
args_time = t1 - t0
if need_lock:
logger.debug("HTTP: calling lock for %s", cbname)
lock.acquire()
t2 = time.time()
aqu_lock_time = t2 - t1
try:
if method in ('get', 'post'):
cbres = callback(**params)
else:
cbres = callback(params)
# Always call the lock release if need
finally:
# Ok now we can release the lock
if need_lock:
lock.release()
t3 = time.time()
calling_time = t3 - t2
global_time = t3 - t0
logger.debug(
"Debug perf: %s [args:%s] [aqu_lock:%s] "
"[calling:%s] [global:%s]",
cbname,
args_time,
aqu_lock_time,
calling_time,
global_time
)
lst = [
('args', args_time),
('aqulock', aqu_lock_time),
('calling', calling_time),
('global', global_time)
]
# increase the stats timers
for (k, _t) in lst:
statsmgr.timing('http.%s.%s' % (cbname, k), _t, 'perf')
return cbres
except Exception as e:
logger.error("HTTP Request error: %s", e)
logger.error("function: %s", cbname)
logger.error("object: %s", obj)
logger.error(traceback.format_exc())
abort(500, "Internal Server Error: %s\n. Please check server logs for more details" % e)
# Ok now really put the route in place
bottle.route('/' + cbname, callback=f_wrapper,
method=getattr(callback, 'method', 'get').upper())
            # and the name with - instead of _ if needed
cbname_dash = cbname.replace('_', '-')
if cbname_dash != cbname:
bottle.route('/' + cbname_dash, callback=f_wrapper,
method=getattr(callback, 'method', 'get').upper())
register_callback(cbname, args, callback, obj, self.lock)
# Add a simple / page
def slash():
return "OK"
bottle.route('/', callback=slash)
def unregister(self, obj):
return
def handleRequests(self, s):
self.srv.handle_request()
# Close all sockets and delete the server object to be sure
# no one is still alive
def shutdown(self):
if self.srv is not None:
self.srv.stop()
self.srv = None
def get_socks_activity(self, timeout):
try:
ins, _, _ = select.select(self.get_sockets(), [], [], timeout)
except select.error as e:
errnum, _ = e
if errnum == errno.EINTR:
return []
raise
return ins
# TODO: clean this hack:
# see usage within basemodule & http_daemon.
daemon_inst = None
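# --- Hedged usage sketch (not part of the original module) ---
# How register() exposes an object's public methods as bottle routes; the
# Api class and port below are hypothetical.
# class Api(object):
#     def get_status(self):            # routed as /get_status and /get-status
#         return "OK"
#     get_status.method = 'get'        # optional attribute read by register()
# daemon = HTTPDaemon('127.0.0.1', 7768, 'auto', False, None, None, None, False, 16)
# daemon.register(Api())
# daemon.run()                         # blocking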
| 20,498
|
Python
|
.py
| 514
| 26.894942
| 112
| 0.532549
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,439
|
trigger_functions.py
|
shinken-solutions_shinken/shinken/trigger_functions.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import time
import re
from shinken.misc.perfdata import PerfDatas
from shinken.log import logger
objs = {'hosts': [], 'services': []}
trigger_functions = {}
class declared(object):
""" Decorator to add function in trigger environnement
"""
def __init__(self, f):
self.f = f
n = f.__name__
# logger.debug("Initializing function %s %s" % (n, f))
trigger_functions[n] = f
def __call__(self, *args):
logger.debug("Calling %s with arguments %s", self.f.__name__, args)
return self.f(*args)
@declared
def up(obj, output):
""" Set a host in UP state
"""
set_value(obj, output, None, 0)
@declared
def down(obj, output):
""" Set a host in DOWN state
"""
set_value(obj, output, None, 1)
@declared
def ok(obj, output):
""" Set a service in OK state
"""
set_value(obj, output, None, 0)
@declared
def warning(obj, output):
""" Set a service in WARNING state
"""
set_value(obj, output, None, 1)
@declared
def critical(obj, output):
""" Set a service in CRITICAL state
"""
set_value(obj, output, None, 2)
@declared
def unknown(obj, output):
""" Set a service in UNKNOWN state
"""
set_value(obj, output, None, 3)
@declared
def set_value(obj_ref, output=None, perfdata=None, return_code=None):
""" Set output, state and perfdata to a service or host
"""
obj = get_object(obj_ref)
if not obj:
return
output = output or obj.output
perfdata = perfdata or obj.perf_data
if return_code is None:
return_code = obj.state_id
logger.debug("[trigger] Setting %s %s %s for object %s",
output,
perfdata,
return_code,
obj.get_full_name())
if perfdata:
output = output + ' | ' + perfdata
now = time.time()
cls = obj.__class__
    i = obj.launch_check(now, force=True)
    # Guard: without a match below, 'c' would be unbound (NameError)
    c = None
    for chk in obj.checks_in_progress:
        if chk.id == i:
            logger.debug("[trigger] I found the check I want to change")
            c = chk
    if c is None:
        return
# Now we 'transform the check into a result'
    # So exit_status, output and status are eaten by the host
c.exit_status = return_code
c.get_outputs(output, obj.max_plugins_output_length)
c.status = 'waitconsume'
c.check_time = now
# IMPORTANT: tag this check as from a trigger, so we will not
# loop in an infinite way for triggers checks!
c.from_trigger = True
# Ok now this result will be read by scheduler the next loop
@declared
def perf(obj_ref, metric_name):
""" Get perf data from a service
"""
obj = get_object(obj_ref)
p = PerfDatas(obj.perf_data)
if metric_name in p:
logger.debug("[trigger] I found the perfdata")
return p[metric_name].value
logger.debug("[trigger] I am in perf command")
return None
@declared
def get_custom(obj_ref, cname, default=None):
""" Get custom varialbe from a service or a host
"""
    obj = get_object(obj_ref)  # a single object is expected here
if not obj:
return default
cname = cname.upper().strip()
if not cname.startswith('_'):
cname = '_' + cname
return obj.customs.get(cname, default)
@declared
def perfs(objs_ref, metric_name):
""" TODO: check this description
Get perfdatas from multiple services/hosts
"""
objs = get_objects(objs_ref)
r = []
for o in objs:
v = perf(o, metric_name)
r.append(v)
return r
@declared
def allperfs(obj_ref):
""" Get all perfdatas from a service or a host
"""
obj = get_object(obj_ref)
p = PerfDatas(obj.perf_data)
logger.debug("[trigger] I get all perfdatas")
return dict([(metric.name, p[metric.name]) for metric in p])
@declared
def get_object(ref):
""" Retrive object (service/host) from name
"""
# Maybe it's already a real object, if so, return it :)
if not isinstance(ref, six.string_types):
return ref
# Ok it's a string
name = ref
if '/' not in name:
return objs['hosts'].find_by_name(name)
else:
elts = name.split('/', 1)
return objs['services'].find_srv_by_name_and_hostname(elts[0], elts[1])
@declared
def get_objects(ref):
""" TODO: check this description
    Retrieve objects (services/hosts) from names
"""
# Maybe it's already a real object, if so, return it :)
if not isinstance(ref, six.string_types):
return ref
name = ref
# Maybe there is no '*'? if so, it's one element
if '*' not in name:
return get_object(name)
    # Ok we look for splitting the host or service thing
hname = ''
sdesc = ''
if '/' not in name:
hname = name
else:
elts = name.split('/', 1)
hname = elts[0]
sdesc = elts[1]
logger.debug("[trigger get_objects] Look for %s %s", hname, sdesc)
res = []
hosts = []
services = []
# Look for host, and if need, look for service
if '*' not in hname:
h = objs['hosts'].find_by_name(hname)
if h:
hosts.append(h)
else:
hname = hname.replace('*', '.*')
p = re.compile(hname)
for h in objs['hosts']:
logger.debug("[trigger] Compare %s with %s", hname, h.get_name())
if p.search(h.get_name()):
hosts.append(h)
    # Maybe the user asks for just hosts :)
if not sdesc:
return hosts
for h in hosts:
if '*' not in sdesc:
s = h.find_service_by_name(sdesc)
if s:
services.append(s)
else:
sdesc = sdesc.replace('*', '.*')
p = re.compile(sdesc)
for s in h.services:
logger.debug("[trigger] Compare %s with %s", s.service_description, sdesc)
if p.search(s.service_description):
services.append(s)
logger.debug("Found the following services: %s", services)
return services
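# --- Hedged trigger sketch (not part of the original module) ---
# Inside a Shinken trigger these functions are available directly; 'self'
# (the host/service the trigger is bound to) is an assumption about the
# evaluation context, and 'load1' a hypothetical metric name.
# load = perf(self, 'load1')
# if load is not None and load > 10:
#     critical(self, 'load1 is too high: %s' % load)
# else:
#     ok(self, 'load1 is fine: %s' % load)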
| 7,067
|
Python
|
.py
| 215
| 26.772093
| 90
| 0.618684
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,440
|
db_mysql.py
|
shinken-solutions_shinken/shinken/db_mysql.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import MySQLdb
from MySQLdb import IntegrityError
from MySQLdb import ProgrammingError
from shinken.db import DB
from shinken.log import logger
class DBMysql(DB):
"""DBMysql is a MySQL access database class"""
def __init__(self, host, user, password, database, character_set,
table_prefix='', port=3306):
self.host = host
self.user = user
self.password = password
self.database = database
self.character_set = character_set
self.table_prefix = table_prefix
self.port = port
def connect_database(self):
"""Create the database connection
TODO: finish (begin :) ) error catch and conf parameters...
Import to catch exception
"""
# self.db = MySQLdb.connect (host = "localhost", user = "root",
# passwd = "root", db = "merlin")
self.db = MySQLdb.connect(host=self.host, user=self.user,
passwd=self.password, db=self.database,
port=self.port)
self.db.set_character_set(self.character_set)
self.db_cursor = self.db.cursor()
self.db_cursor.execute('SET NAMES %s;' % self.character_set)
self.db_cursor.execute('SET CHARACTER SET %s;' % self.character_set)
self.db_cursor.execute('SET character_set_connection=%s;' %
self.character_set)
# Thanks:
# http://www.dasprids.de/blog/2007/12/17/python-mysqldb-and-utf-8
# for utf8 code :)
def execute_query(self, query, do_debug=False):
"""Just run the query
TODO: finish catch
"""
if do_debug:
logger.debug("[MysqlDB]I run query %s", query)
try:
self.db_cursor.execute(query)
self.db.commit()
return True
except IntegrityError as exp:
logger.warning("[MysqlDB] A query raised an integrity error: %s, %s", query, exp)
return False
except ProgrammingError as exp:
logger.warning("[MysqlDB] A query raised a programming error: %s, %s", query, exp)
return False
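# --- Hedged usage sketch (not part of the original module) ---
# Connection parameters below are hypothetical.
# db = DBMysql('localhost', 'shinken', 'secret', 'shinken_db', 'utf8')
# db.connect_database()
# ok = db.execute_query("UPDATE %shost SET state = 0" % db.table_prefix)
# 'ok' is True on success, False on integrity/programming errors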
| 3,199
|
Python
|
.py
| 74
| 35.864865
| 94
| 0.644509
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,441
|
borg.py
|
shinken-solutions_shinken/shinken/borg.py
|
#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
class Borg(object):
""" Here is the new-style Borg
    (not much more complex than the "old-style")
"""
__shared_state = {}
def __init__(self):
# print("Init Borg", self.__dict__, self.__class__.__shared_state)
self.__dict__ = self.__class__.__shared_state
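# --- Hedged usage sketch (not part of the original module) ---
# Every Borg instance shares one __dict__, so state written through one
# instance is visible through all others.
if __name__ == '__main__':
    a = Borg()
    b = Borg()
    a.answer = 42
    assert b.answer == 42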
| 1,301
|
Python
|
.py
| 33
| 37.212121
| 82
| 0.718354
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,442
|
easter.py
|
shinken-solutions_shinken/shinken/easter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.log import logger
def episode_iv():
hst = 'towel.blinkenlights.nl'
from telnetlib import Telnet
t = Telnet(hst)
while True:
        buf = t.read_until(b'mesfesses', 0.1)
logger.info(buf)
def perdu():
import requests
r = requests.get("http://www.perdu.com")
logger.info(r.content)
def myip():
import requests
r = requests.get("http://whatismyip.org/")
logger.info(r.content)
def naheulbeuk():
import os
import sys
import requests
from PIL import Image
    import aalib
    import io
if os.getenv('TERM') == 'linux':
screen = aalib.LinuxScreen
else:
screen = aalib.AnsiScreen
screen = screen(width=128, height=128)
r = requests.get(
'http://www.penofchaos.com/warham/bd/images/NBK-win7portrait-Nain02.JPG'
)
    # PIL needs a file-like object, not the raw bytes from requests
    image = Image.open(io.BytesIO(r.content)).convert('L').resize(screen.virtual_size)
screen.put_image((0, 0), image)
logger.info(screen.render())
def what_it_make_me_think(subject):
import hashlib
    if hashlib.md5(subject.lower().encode('utf-8')).hexdigest() == '6376e9755f8047391621b577ae03966a':
print("Thanks to %s now I feel like this: https://youtu.be/efTZslkr5Fs?t=60" % subject)
def dark():
r"""
.-.
|_:_|
/(_Y_)\
( \/M\/ )
'. _.'-/'-'\-'._
': _/.--'[[[[]'--.\_
': /_' : |::"| : '.\
': // ./ |oUU| \.' :\
': _:'..' \_|___|_/ : :|
':. .' |_[___]_| :.':\
[::\ | : | | : ; : \
'-' \/'.| |.' \ .;.' |
|\_ \ '-' : |
| \ \ .: : | |
| \ | '. : \ |
/ \ :. .; |
/ | | :__/ : \\
| | | \: | \ | ||
/ \ : : |: / |__| /|
snd | : : :_/_| /'._\ '--|_\
/___.-/_|-' \ \
'-'
"""
logger.info(dark.__doc__)
def get_coffee():
r"""
(
) (
___...(-------)-....___
.-"" ) ( ""-.
.-'``'|-._ ) _.-|
/ .--.| `""---...........---""` |
/ / | |
| | | |
\ \ | |
`\ `\ | |
`\ `| |
_/ /\ /
(__/ \ /
_..---""` \ /`""---.._
.-' \ / '-.
: `-.__ __.-' :
: ) ""---...---"" ( :
'._ `"--...___...--"` _.'
jgs \""--..__ __..--""/
'._ "'"----.....______.....----"'" _.'
`""--..,,_____ _____,,..--""`
`"'"----"'"`
"""
logger.info(get_coffee.__doc__)
| 4,342
|
Python
|
.py
| 112
| 30.178571
| 96
| 0.380024
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,443
|
message.py
|
shinken-solutions_shinken/shinken/message.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
class Message(object):
"""This is a simple message class for communications between actionners and
workers
"""
my_type = 'message'
_type = None
_data = None
_from = None
def __init__(self, id, type, data=None, source=None):
self._type = type
self._data = data
self._from = id
self.source = source
def get_type(self):
return self._type
def get_data(self):
return self._data
def get_from(self):
return self._from
def str(self):
return "Message from %d (%s), Type: %s Data: %s" % (
self._from, self.source, self._type, self._data)
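# --- Hedged usage sketch (not part of the original module) ---
if __name__ == '__main__':
    msg = Message(id=1, type='Done', data={'return_code': 0}, source='worker-1')
    assert msg.get_type() == 'Done'
    print(msg.str())  # Message from 1 (worker-1), Type: Done Data: {...}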
| 1,671
|
Python
|
.py
| 45
| 33.133333
| 82
| 0.689783
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,444
|
db_oracle.py
|
shinken-solutions_shinken/shinken/db_oracle.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
# A failed import will be caught by __init__.py
from cx_Oracle import connect as connect_function
from cx_Oracle import IntegrityError as IntegrityError_exp
from cx_Oracle import ProgrammingError as ProgrammingError_exp
from cx_Oracle import DatabaseError as DatabaseError_exp
from cx_Oracle import InternalError as InternalError_exp
from cx_Oracle import DataError as DataError_exp
from cx_Oracle import OperationalError as OperationalError_exp
from shinken.db import DB
from shinken.log import logger
class DBOracle(DB):
"""Manage connection and query execution against Oracle databases."""
def __init__(self, user, password, database, table_prefix=''):
self.user = user
self.password = password
self.database = database
self.table_prefix = table_prefix
def connect_database(self):
"""Create the database connection
TODO: finish (begin :) ) error catch and conf parameters...
"""
connstr = '%s/%s@%s' % (self.user, self.password, self.database)
self.db = connect_function(connstr)
self.db_cursor = self.db.cursor()
self.db_cursor.arraysize = 50
def execute_query(self, query):
""" Execute a query against an Oracle database.
"""
logger.debug("[DBOracle] Execute Oracle query %s\n", query)
try:
self.db_cursor.execute(query)
self.db.commit()
        except IntegrityError_exp as exp:
            logger.warning("[DBOracle] Warning: a query raised an integrity error: %s, %s",
                           query, exp)
        except ProgrammingError_exp as exp:
            logger.warning("[DBOracle] Warning: a query raised a programming error: %s, %s",
                           query, exp)
        except DatabaseError_exp as exp:
            logger.warning("[DBOracle] Warning: a query raised a database error: %s, %s",
                           query, exp)
        except InternalError_exp as exp:
            logger.warning("[DBOracle] Warning: a query raised an internal error: %s, %s",
                           query, exp)
        except DataError_exp as exp:
            logger.warning("[DBOracle] Warning: a query raised a data error: %s, %s",
                           query, exp)
        except OperationalError_exp as exp:
            logger.warning("[DBOracle] Warning: a query raised an operational error: %s, %s",
                           query, exp)
        except Exception as exp:
            logger.warning("[DBOracle] Warning: a query raised an unknown error: %s, %s",
                           query, exp)
            logger.warning(exp.__dict__)
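# --- Hedged usage sketch (not part of the original module) ---
# Credentials are hypothetical; note that execute_query() logs and swallows
# the cx_Oracle error families instead of raising them.
# db = DBOracle('shinken', 'secret', 'ORCL')
# db.connect_database()
# db.execute_query("UPDATE %shost SET state = 0" % db.table_prefix)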
| 3,826
|
Python
|
.py
| 84
| 38.428571
| 92
| 0.671762
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,445
|
dependencynode.py
|
shinken-solutions_shinken/shinken/dependencynode.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from shinken.util import filter_any, filter_none
from shinken.util import filter_host_by_name, filter_host_by_regex, filter_host_by_group,\
filter_host_by_tag
from shinken.util import filter_service_by_name
from shinken.util import filter_service_by_regex_name
from shinken.util import filter_service_by_regex_host_name
from shinken.util import filter_service_by_host_name
from shinken.util import filter_service_by_bp_rule_label
from shinken.util import filter_service_by_hostgroup_name
from shinken.util import filter_service_by_host_tag_name
from shinken.util import filter_service_by_servicegroup_name
from shinken.util import filter_host_by_bp_rule_label
from shinken.util import filter_service_by_host_bp_rule_label
"""
Here is a node class for dependency_node(s) and a factory to create them
"""
class DependencyNode(object):
def __init__(self):
self.operand = None
self.sons = []
# Of: values are a triple OK,WARN,CRIT
self.of_values = ('0', '0', '0')
self.is_of_mul = False
self.configuration_errors = []
self.not_value = False
def __str__(self):
return "Op:'%s' Val:'%s' Sons:'[%s]' IsNot:'%s'" % (
self.operand,
self.of_values,
','.join([str(s) for s in self.sons]),
self.not_value
)
def get_reverse_state(self, state):
# Warning is still warning
if state == 1:
return 1
if state == 0:
return 2
if state == 2:
return 0
# should not go here...
return state
    # We will get the state of this node, by looking at the state of
    # our sons, and applying our operand
def get_state(self):
# print("Ask state of me", self)
        # If we are a host or a service, we just got the host/service
# hard state
if self.operand in ['host', 'service']:
return self.get_simple_node_state()
else:
return self.get_complex_node_state()
# Returns a simple node direct state (such as a host or a service). No
# calculation is needed
def get_simple_node_state(self):
state = self.sons[0].last_hard_state_id
# print("Get the hard state (%s) for the object %s" % (state, self.sons[0].get_name()))
# Make DOWN look as CRITICAL (2 instead of 1)
if self.operand == 'host' and state == 1:
state = 2
# Maybe we are a NOT node, so manage this
if self.not_value:
# We inverse our states
if self.operand == 'host' and state == 1:
return 0
if self.operand == 'host' and state == 0:
return 1
# Critical -> OK
if self.operand == 'service' and state == 2:
return 0
# OK -> CRITICAL (warning is untouched)
if self.operand == 'service' and state == 0:
return 2
return state
# Calculates a complex node state based on its sons state, and its operator
def get_complex_node_state(self):
if self.operand == '|':
return self.get_complex_or_node_state()
elif self.operand == '&':
return self.get_complex_and_node_state()
# It's an Xof rule
else:
return self.get_complex_xof_node_state()
# Calculates a complex node state with an | operand
def get_complex_or_node_state(self):
# First we get the state of all our sons
states = [s.get_state() for s in self.sons]
# Next we calculate the best state
best_state = min(states)
# Then we handle eventual not value
if self.not_value:
return self.get_reverse_state(best_state)
return best_state
# Calculates a complex node state with an & operand
def get_complex_and_node_state(self):
# First we get the state of all our sons
states = [s.get_state() for s in self.sons]
# Next we calculate the worst state
if 2 in states:
worst_state = 2
else:
worst_state = max(states)
# Then we handle eventual not value
if self.not_value:
return self.get_reverse_state(worst_state)
return worst_state
# Calculates a complex node state with an Xof operand
def get_complex_xof_node_state(self):
# First we get the state of all our sons
states = [s.get_state() for s in self.sons]
        # We search for OK, WARN or CRIT applications
        # and we will choose between them
nb_search_ok = self.of_values[0]
nb_search_warn = self.of_values[1]
nb_search_crit = self.of_values[2]
# We look for each application
nb_sons = len(states)
nb_ok = len([s for s in states if s == 0])
nb_warn = len([s for s in states if s == 1])
nb_crit = len([s for s in states if s == 2])
# print("NB:", nb_ok, nb_warn, nb_crit)
# Ok and Crit apply with their own values
# Warn can apply with warn or crit values
# so a W C can raise a Warning, but not enough for
# a critical
def get_state_for(nb_tot, nb_real, nb_search):
if nb_search.endswith('%'):
nb_search = int(nb_search[:-1])
if nb_search < 0:
# nb_search is negative, so +
nb_search = max(100 + nb_search, 0)
apply_for = float(nb_real) / nb_tot * 100 >= nb_search
else:
nb_search = int(nb_search)
if nb_search < 0:
# nb_search is negative, so +
nb_search = max(nb_tot + nb_search, 0)
apply_for = nb_real >= nb_search
return apply_for
ok_apply = get_state_for(nb_sons, nb_ok, nb_search_ok)
warn_apply = get_state_for(nb_sons, nb_warn + nb_crit, nb_search_warn)
crit_apply = get_state_for(nb_sons, nb_crit, nb_search_crit)
# print("What apply?", ok_apply, warn_apply, crit_apply)
# return the worst state that apply
if crit_apply:
if self.not_value:
return self.get_reverse_state(2)
return 2
if warn_apply:
if self.not_value:
return self.get_reverse_state(1)
return 1
if ok_apply:
if self.not_value:
return self.get_reverse_state(0)
return 0
        # Maybe even OK is not possible; if so, it depends whether the admin
        # asked for a simple Xof: form or a multiple A,B,Cof: one:
        # the simple form should give OK, the multiple one the worst state
if self.is_of_mul:
# print("Is mul, send 0")
if self.not_value:
return self.get_reverse_state(0)
return 0
else:
# print("not mul, return worst", worse_state)
if 2 in states:
worst_state = 2
else:
worst_state = max(states)
if self.not_value:
return self.get_reverse_state(worst_state)
return worst_state
# return a list of all host/service in our node and below
def list_all_elements(self):
r = []
# We are a host/service
if self.operand in ['host', 'service']:
return [self.sons[0]]
for s in self.sons:
r.extend(s.list_all_elements())
# and uniq the result
return list(set(r))
    # If we are an of: rule, we can get some 0 in of_values;
    # if so, replace them with the number of sons instead
def switch_zeros_of_values(self):
nb_sons = len(self.sons)
# Need a list for assignment
self.of_values = list(self.of_values)
for i in [0, 1, 2]:
if self.of_values[i] == '0':
self.of_values[i] = str(nb_sons)
self.of_values = tuple(self.of_values)
# Check for empty (= not found) leaf nodes
def is_valid(self):
valid = True
if not self.sons:
valid = False
else:
for s in self.sons:
if isinstance(s, DependencyNode) and not s.is_valid():
self.configuration_errors.extend(s.configuration_errors)
valid = False
return valid
""" TODO: Add some comment about this class for the doc"""
class DependencyNodeFactory(object):
host_flags = "grlt"
service_flags = "grl"
def __init__(self, bound_item):
self.bound_item = bound_item
    # the () will be evaluated in a recursive way, only one level of ()
def eval_cor_pattern(self, pattern, hosts, services, running=False):
pattern = pattern.strip()
# print("***** EVAL ", pattern)
complex_node = False
# Look if it's a complex pattern (with rule) or
        # if it's a leaf of it, like a host/service
for m in '()&|':
if m in pattern:
complex_node = True
# If it's a simple node, evaluate it directly
if complex_node is False:
return self.eval_simple_cor_pattern(pattern, hosts, services, running)
else:
return self.eval_complex_cor_pattern(pattern, hosts, services, running)
# Checks if an expression is an Xof pattern, and parses its components if
    # so. In such a case, once parsed, returns the cleaned pattern.
def eval_xof_pattern(self, node, pattern):
p = r"^(-?\d+%?),*(-?\d*%?),*(-?\d*%?) *of: *(.+)"
r = re.compile(p)
m = r.search(pattern)
if m is not None:
# print("Match the of: thing N=", m.groups())
node.operand = 'of:'
g = m.groups()
# We can have a Aof: rule, or a multiple A,B,Cof: rule.
mul_of = (g[1] != '' and g[2] != '')
# If multi got (A,B,C)
if mul_of:
node.is_of_mul = True
node.of_values = (g[0], g[1], g[2])
else: # if not, use A,0,0, we will change 0 after to put MAX
node.of_values = (g[0], '0', '0')
pattern = m.groups()[3]
return pattern
# Evaluate a complex correlation expression, such as an &, |, nested
# expressions in par, and so on.
def eval_complex_cor_pattern(self, pattern, hosts, services, running=False):
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
in_par = False
tmp = ''
        son_is_not = False  # Tracks whether the next son will be negated
stacked_par = 0
for c in pattern:
if c == '(':
stacked_par += 1
# print("INCREASING STACK TO", stacked_par)
in_par = True
tmp = tmp.strip()
# Maybe we just start a par, but we got some things in tmp
# that should not be good in fact !
if stacked_par == 1 and tmp != '':
# TODO : real error
print("ERROR : bad expression near", tmp)
continue
# If we are already in a par, add this (
# but not if it's the first one so
if stacked_par > 1:
tmp += c
elif c == ')':
# print("Need closeing a sub expression?", tmp)
stacked_par -= 1
if stacked_par < 0:
# TODO : real error
print("Error : bad expression near", tmp, "too much ')'")
continue
if stacked_par == 0:
# print("THIS is closing a sub compress expression", tmp)
tmp = tmp.strip()
o = self.eval_cor_pattern(tmp, hosts, services, running)
# Maybe our son was notted
if son_is_not:
o.not_value = True
son_is_not = False
node.sons.append(o)
in_par = False
# OK now clean the tmp so we start clean
tmp = ''
continue
                # ok here we are still in a big par, we just closed an inner one
tmp += c
# Expressions in par will be parsed in a sub node after. So just
# stack pattern
elif in_par:
tmp += c
# Until here, we're not in par
# Manage the NOT for an expression. Only allow ! at the beginning
# of a host or a host,service expression.
elif c == '!':
tmp = tmp.strip()
if tmp and tmp[0] != '!':
print("Error : bad expression near", tmp, "wrong position for '!'")
continue
# Flags next node not state
son_is_not = True
# DO NOT keep the c in tmp, we consumed it
# print("MATCHING", c, pattern)
elif c == '&' or c == '|':
# Oh we got a real cut in an expression, if so, cut it
# print("REAL & for cutting")
tmp = tmp.strip()
# Look at the rule viability
if node.operand is not None and node.operand != 'of:' and c != node.operand:
# Should be logged as a warning / info? :)
return None
if node.operand != 'of:':
node.operand = c
if tmp != '':
# print("Will analyse the current str", tmp)
o = self.eval_cor_pattern(tmp, hosts, services, running)
# Maybe our son was notted
if son_is_not:
o.not_value = True
son_is_not = False
node.sons.append(o)
tmp = ''
# Maybe it's a classic character or we're in par, if so, continue
else:
tmp += c
        # Be sure to manage the trailing part when the line is done
        tmp = tmp.strip()
        if tmp != '':
            # print("Managing trailing part", tmp)
o = self.eval_cor_pattern(tmp, hosts, services, running)
# Maybe our son was notted
if son_is_not:
o.not_value = True
son_is_not = False
# print("4end I've %s got new sons" % pattern , o)
node.sons.append(o)
# We got our nodes, so we can update 0 values of of_values
# with the number of sons
node.switch_zeros_of_values()
return node
# Evaluate a simple correlation expression, such as a host, a host + a
# service, or expand a host or service expression.
def eval_simple_cor_pattern(self, pattern, hosts, services, running=False):
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
# print("Try to find?", pattern)
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
# Is the pattern an expression to be expanded?
if re.search(r"^([%s]+|\*):" % self.host_flags, pattern) or \
re.search(r",\s*([%s]+:.*|\*)$" % self.service_flags, pattern):
            # o is only used to extract its attributes, then trashed.
o = self.expand_expression(pattern, hosts, services, running)
if node.operand != 'of:':
node.operand = '&'
node.sons.extend(o.sons)
node.configuration_errors.extend(o.configuration_errors)
node.switch_zeros_of_values()
else:
node.operand = 'object'
obj, error = self.find_object(pattern, hosts, services)
if obj is not None:
# Set host or service
node.operand = obj.__class__.my_type
node.sons.append(obj)
else:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node
# We've got an object, like h1,db1 that mean the
# db1 service of the host db1, or just h1, that mean
# the host h1.
def find_object(self, pattern, hosts, services):
# print("Finding object", pattern)
obj = None
error = None
is_service = False
# h_name, service_desc are , separated
elts = pattern.split(',')
host_name = elts[0].strip()
# If host_name is empty, use the host_name the business rule is bound to
if not host_name:
host_name = self.bound_item.host_name
# Look if we have a service
if len(elts) > 1:
is_service = True
service_description = elts[1].strip()
if is_service:
obj = services.find_srv_by_name_and_hostname(host_name, service_description)
if not obj:
error = "Business rule uses unknown service %s/%s"\
% (host_name, service_description)
else:
obj = hosts.find_by_name(host_name)
if not obj:
error = "Business rule uses unknown host %s" % (host_name,)
return obj, error
# Tries to expand a host or service expression into a dependency node tree
# using (host|service)group membership, regex, or labels as item selector.
def expand_expression(self, pattern, hosts, services, running=False):
error = None
node = DependencyNode()
node.operand = '&'
elts = [e.strip() for e in pattern.split(',')]
# If host_name is empty, use the host_name the business rule is bound to
if not elts[0]:
elts[0] = self.bound_item.host_name
filters = []
# Looks for hosts/services using appropriate filters
try:
if len(elts) > 1:
# We got a service expression
host_expr, service_expr = elts
filters.extend(self.get_srv_host_filters(host_expr))
filters.extend(self.get_srv_service_filters(service_expr))
items = services.find_by_filter(filters)
else:
# We got a host expression
host_expr = elts[0]
filters.extend(self.get_host_filters(host_expr))
items = hosts.find_by_filter(filters)
except re.error as e:
error = "Business rule uses invalid regex %s: %s" % (pattern, e)
else:
if not items:
error = "Business rule got an empty result for pattern %s" % pattern
# Checks if we got result
if error:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node
# Creates dependency node subtree
for item in items:
# Creates a host/service node
son = DependencyNode()
son.operand = item.__class__.my_type
son.sons.append(item)
# Appends it to wrapping node
node.sons.append(son)
node.switch_zeros_of_values()
return node
# Generates filter list on a hosts host_name
def get_host_filters(self, expr):
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
if match is None:
return [filter_host_by_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_host_by_group(expr)]
elif "r" in flags:
return [filter_host_by_regex(expr)]
elif "l" in flags:
return [filter_host_by_bp_rule_label(expr)]
elif "t" in flags:
return [filter_host_by_tag(expr)]
else:
return [filter_none]
# Generates filter list on services host_name
def get_srv_host_filters(self, expr):
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
if match is None:
return [filter_service_by_host_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_service_by_hostgroup_name(expr)]
elif "r" in flags:
return [filter_service_by_regex_host_name(expr)]
elif "l" in flags:
return [filter_service_by_host_bp_rule_label(expr)]
elif "t" in flags:
return [filter_service_by_host_tag_name(expr)]
else:
return [filter_none]
# Generates filter list on services service_description
def get_srv_service_filters(self, expr):
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.service_flags, expr)
if match is None:
return [filter_service_by_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_service_by_servicegroup_name(expr)]
elif "r" in flags:
return [filter_service_by_regex_name(expr)]
elif "l" in flags:
return [filter_service_by_bp_rule_label(expr)]
else:
return [filter_none]
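# --- Hedged usage sketch (not part of the original module) ---
# A "2 of:" business rule over three hypothetical hosts; zeros in of_values
# are later replaced by the number of sons (switch_zeros_of_values), and
# bound_item/hosts/services are assumed to come from a loaded configuration.
# factory = DependencyNodeFactory(bound_item)
# node = factory.eval_cor_pattern("2 of: h1 & h2 & h3", hosts, services)
# state = node.get_state()   # 0 (OK), 1 (WARNING) or 2 (CRITICAL)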
| 23,172
|
Python
|
.py
| 539
| 31.608534
| 95
| 0.556408
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,446
|
util.py
|
shinken-solutions_shinken/shinken/util.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import time
import re
import copy
import sys
import os
import json
import platform
import traceback
try:
from ClusterShell.NodeSet import NodeSet, NodeSetParseRangeError
except ImportError:
NodeSet = None
try:
import resource
except ImportError:
resource = None
from shinken.macroresolver import MacroResolver
from shinken.log import logger
try:
stdout_encoding = sys.stdout.encoding
safe_stdout = (stdout_encoding == 'UTF-8')
except Exception as exp:
logger.error('Encoding detection error= %s', exp)
safe_stdout = False
# ########## Strings #############
# Try to print strings, but if there is a utf8 error, fall back to plain ascii
# (e.g. if the terminal does not have en_US.UTF8 as LANG)
def safe_print(*args):
    # 'lst' was previously undefined (bug): build it from the arguments
    lst = ['%s' % a for a in args]
    print(' '.join(lst))
def split_semicolon(line, maxsplit=None):
"""Split a line on semicolons characters but not on the escaped semicolons
"""
# Split on ';' character
splitted_line = line.split(';')
splitted_line_size = len(splitted_line)
    # if maxsplit is not specified, we set it to the number of parts
if maxsplit is None or 0 > maxsplit:
maxsplit = splitted_line_size
# Join parts to the next one, if ends with a '\'
# because we mustn't split if the semicolon is escaped
i = 0
while i < splitted_line_size - 1:
# for each part, check if its ends with a '\'
ends = splitted_line[i].endswith('\\')
if ends:
# remove the last character '\'
splitted_line[i] = splitted_line[i][:-1]
# append the next part to the current if it is not the last and the current
# ends with '\' or if there is more than maxsplit parts
if (ends or i >= maxsplit) and i < splitted_line_size - 1:
splitted_line[i] = ";".join([splitted_line[i], splitted_line[i + 1]])
# delete the next part
del splitted_line[i + 1]
splitted_line_size -= 1
# increase i only if we don't have append because after append the new
# string can end with '\'
else:
i += 1
return splitted_line
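# Hedged example (not part of the original module): an escaped semicolon
# ('\;') survives the split.
# split_semicolon(r'cmd;arg1\;still_arg1;arg2')
#     -> ['cmd', 'arg1;still_arg1', 'arg2']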
# Json-ify the objects
def jsonify_r(obj):
res = {}
cls = obj.__class__
if not hasattr(cls, 'properties'):
try:
json.dumps(obj)
return obj
except Exception as exp:
return None
properties = list(cls.properties.keys())
if hasattr(cls, 'running_properties'):
properties.extend(list(cls.running_properties.keys()))
for prop in properties:
if not hasattr(obj, prop):
continue
v = getattr(obj, prop)
# Maybe the property is not jsonable
try:
if isinstance(v, set):
v = list(v)
if isinstance(v, list):
v = sorted(v)
json.dumps(v)
res[prop] = v
except Exception as exp:
if isinstance(v, list):
lst = []
for _t in v:
t = getattr(_t.__class__, 'my_type', '')
if t == 'CommandCall':
try:
lst.append(_t.call)
except Exception:
pass
continue
if t and hasattr(_t, t + '_name'):
lst.append(getattr(_t, t + '_name'))
else:
pass
# print("CANNOT MANAGE OBJECT", _t, type(_t), t)
res[prop] = lst
else:
t = getattr(v.__class__, 'my_type', '')
if t == 'CommandCall':
try:
res[prop] = v.call
except Exception:
pass
continue
if t and hasattr(v, t + '_name'):
res[prop] = getattr(v, t + '_name')
# else:
# print("CANNOT MANAGE OBJECT", v, type(v), t)
return res
# ################################## TIME ##################################
# @memoized
def get_end_of_day(year, month_id, day):
end_time = (year, month_id, day, 23, 59, 59, 0, 0, -1)
end_time_epoch = time.mktime(end_time)
return end_time_epoch
# @memoized
def print_date(t):
return time.asctime(time.localtime(t))
# @memoized
def get_day(t):
return int(t - get_sec_from_morning(t))
# Same but for week day
def get_wday(t):
t_lt = time.localtime(t)
return t_lt.tm_wday
# @memoized
def get_sec_from_morning(t):
t_lt = time.localtime(t)
h = t_lt.tm_hour
m = t_lt.tm_min
s = t_lt.tm_sec
return h * 3600 + m * 60 + s
# @memoized
def get_start_of_day(year, month_id, day):
start_time = (year, month_id, day, 0, 0, 0, 0, 0, -1)
try:
start_time_epoch = time.mktime(start_time)
except OverflowError:
# Windows mktime sometimes crashes on (1970, 1, 1, ...)
start_time_epoch = 0.0
return start_time_epoch
# change a time in seconds like 3600 into a format: 0d 1h 0m 0s
def format_t_into_dhms_format(t):
s = t
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
return '%sd %sh %sm %ss' % (d, h, m, s)
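# For example (illustrative only): format_t_into_dhms_format(3661) -> '0d 1h 1m 1s'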
# ################################ Pythonization ###########################
# First convert to float, so we can manage for example '25.0' as 25
def to_int(val):
return int(float(val))
def to_float(val):
return float(val)
def to_char(val):
return val[0]
def to_split(val, split_on_coma=True):
if isinstance(val, list):
return val
if not split_on_coma:
return [val]
val = val.split(',')
if val == ['']:
val = []
return val
def list_split(val, split_on_coma=True):
if not split_on_coma:
return val
new_val = []
for x in val:
new_val.extend(x.split(','))
return new_val
def to_best_int_float(val):
i = int(float(val))
f = float(val)
# If the f is a .0 value,
# best match is int
if i == f:
return i
return f
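# For example (illustrative only): to_best_int_float('25.0') -> 25
# while to_best_int_float('25.5') -> 25.5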
# bool('0') = true, so...
def to_bool(val):
if val == '1' or val == 'on' or val == 'true' or val == 'True':
return True
else:
return False
def from_bool_to_string(b):
if b:
return '1'
else:
return '0'
def from_bool_to_int(b):
if b:
return 1
else:
return 0
def from_list_to_split(val):
val = ','.join(['%s' % v for v in val])
return val
def from_float_to_int(val):
val = int(val)
return val
# Functions for brok_transformations
# They take 2 parameters: ref, and a value
# ref is the item like a service, and value
# is the value to preprocess
# Just a string list of all names, with ,
def to_list_string_of_names(ref, tab):
return ",".join([e.get_name() for e in tab])
# Just a list of names
def to_list_of_names(ref, tab):
return [e.get_name() for e in tab]
# This will give a string if the value exists
# or '' if not
def to_name_if_possible(ref, value):
if value:
return value.get_name()
return ''
# take a list of hosts and return a list
# of all host_names
def to_hostnames_list(ref, tab):
r = []
for h in tab:
if hasattr(h, 'host_name'):
r.append(h.host_name)
return r
# Will create a dict with 2 lists:
# *services: all services of the tab
# *hosts: all hosts of the tab
def to_svc_hst_distinct_lists(ref, tab):
r = {'hosts': [], 'services': []}
for e in tab:
cls = e.__class__
if cls.my_type == 'service':
name = e.get_dbg_name()
r['services'].append(name)
else:
name = e.get_dbg_name()
r['hosts'].append(name)
return r
# Will expand the value with macros from the
# host/service ref before broking it
def expand_with_macros(ref, value):
return MacroResolver().resolve_simple_macros_in_string(value, ref.get_data_for_checks())
# Just get the string name of the object
# (like for realm)
def get_obj_name(obj):
# Maybe we do not have a real object but already a string. If so
# return the string
if isinstance(obj, six.string_types):
return obj
return obj.get_name()
# Same as before, but call with object,prop instead of just value
# But if we got an attribute error, return ''
def get_obj_name_two_args_and_void(obj, value):
try:
return value.get_name()
except AttributeError:
return ''
# Get the full name if there is one
def get_obj_full_name(obj):
try:
return obj.get_full_name()
except Exception:
return obj.get_name()
# return the list of keys of the custom dict
# but without the _ before
def get_customs_keys(d):
return [k[1:] for k in d.keys()]
# return the values of the dict
def get_customs_values(d):
return d.values()
# Checks that a parameter has a unique value. If it's a list, the last
# value set wins.
def unique_value(val):
if isinstance(val, list):
if val:
return val[-1]
else:
return ''
else:
return val
# ##################### Sorting ################
def scheduler_no_spare_first(x, y):
    # Sort comparator: non-spare schedulers first. Two schedulers with the
    # same spare flag compare as equal, so the comparator stays consistent.
    if x.spare == y.spare:
        return 0
    if x.spare:
        return 1
    return -1
def alive_then_spare_then_deads(satellites):
dead = []
alive = []
spare = []
for s in satellites:
if not s.alive:
dead.append(s)
elif s.spare:
spare.append(s)
else:
alive.append(s)
sorted_satellites = []
sorted_satellites.extend(alive)
sorted_satellites.extend(spare)
sorted_satellites.extend(dead)
return sorted_satellites
# -1 is x first, 0 equal, 1 is y first
def sort_by_ids(x, y):
if x.id < y.id:
return -1
if x.id > y.id:
return 1
# So is equal
return 0
# From a list of values, get the avg, min and max,
# but leave out the lowest and the highest values,
# which are too far away from the bulk of the data
def nighty_five_percent(t):
t2 = copy.copy(t)
t2.sort()
l_t = len(t)
    # Empty list: there is nothing we can compute
if l_t == 0:
return (None, None, None)
t_reduce = t2
    # only trim if we got more than 100 elements,
    # otherwise it makes no sense
if l_t > 100:
offset = int(l_t * 0.05)
t_reduce = t_reduce[offset:-offset]
reduce_len = len(t_reduce)
reduce_sum = sum(t_reduce)
reduce_avg = float(reduce_sum) / reduce_len
reduce_max = max(t_reduce)
reduce_min = min(t_reduce)
return (reduce_avg, reduce_min, reduce_max)
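# Illustrative only: with 200 samples, the 10 lowest and the 10 highest
# values are dropped before computing (avg, min, max); with 100 samples
# or fewer, nothing is dropped.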
# #################### Cleaning ##############
def strip_and_uniq(tab):
new_tab = set()
for elt in tab:
val = elt.strip()
if (val != ''):
new_tab.add(val)
return list(new_tab)
# ################### Pattern change application (mainly for host) #######
def expand_xy_pattern(pattern):
ns = NodeSet(str(pattern))
if len(ns) > 1:
for elem in ns:
for a in expand_xy_pattern(elem):
yield a
else:
yield pattern
# This function generates all pattern changes as a
# recursive list.
# For example, for a [(1,3),(1,4),(1,5)] xy_couples,
# it will generate a 60 item list with:
# Rule: [1, '[1-5]', [1, '[1-4]', [1, '[1-3]', []]]]
# Rule: [1, '[1-5]', [1, '[1-4]', [2, '[1-3]', []]]]
# ...
def got_generation_rule_pattern_change(xy_couples):
res = []
xy_cpl = xy_couples
if xy_couples == []:
return []
(x, y) = xy_cpl[0]
for i in range(x, y + 1):
n = got_generation_rule_pattern_change(xy_cpl[1:])
if n != []:
for e in n:
res.append([i, '[%d-%d]' % (x, y), e])
else:
res.append([i, '[%d-%d]' % (x, y), []])
return res
# This function applies a recursive pattern change
# generated by the got_generation_rule_pattern_change
# function.
# It takes one entry of this list, and applies
# the change recursively to s, like:
# s = "Unit [1-3] Port [1-4] Admin [1-5]"
# rule = [1, '[1-5]', [2, '[1-4]', [3, '[1-3]', []]]]
# output = Unit 3 Port 2 Admin 1
def apply_change_recursive_pattern_change(s, rule):
# print("Try to change %s" % s, 'with', rule)
# new_s = s
(i, m, t) = rule
# print("replace %s by %s" % (r'%s' % m, str(i)), 'in', s)
s = s.replace(r'%s' % m, "%s" % i)
# print("And got", s)
if t == []:
return s
return apply_change_recursive_pattern_change(s, t)
# For service generators, get a dict from a _custom property
# like _disks C$(80%!90%)$,D$(80%!90%)$,E$(80%!90%)$
# return {'C': '80%!90%', 'D': '80%!90%', 'E': '80%!90%'}
# And if we have a key that look like [X-Y] we will expand it
# into Y-X+1 keys
GET_KEY_VALUE_SEQUENCE_ERROR_NOERROR = 0
GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX = 1
GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT = 2
GET_KEY_VALUE_SEQUENCE_ERROR_NODE = 3
def get_key_value_sequence(entry, default_value=None):
array1 = []
array2 = []
conf_entry = entry
# match a key$(value1..n)$
keyval_pattern_txt = r"""
\s*(?P<key>[^,]+?)(?P<values>(\s*\$\(.*?\)\$\s*)*)(?:[,]|$)
"""
keyval_pattern = re.compile('(?x)' + keyval_pattern_txt)
# match a whole sequence of key$(value1..n)$
all_keyval_pattern = re.compile('(?x)^(' + keyval_pattern_txt + ')+$')
# match a single value
    value_pattern = re.compile(r'(?:\s*\$\((?P<val>.*?)\)\$\s*)')
    # match a sequence of values
    all_value_pattern = re.compile(r'^(?:\s*\$\(.*?\)\$\s*)+$')
if all_keyval_pattern.match(conf_entry):
for mat in re.finditer(keyval_pattern, conf_entry):
r = {'KEY': mat.group('key')}
# The key is in mat.group('key')
# If there are also value(s)...
if mat.group('values'):
if all_value_pattern.match(mat.group('values')):
# If there are multiple values, loop over them
valnum = 1
for val in re.finditer(value_pattern, mat.group('values')):
r['VALUE%s' % valnum] = val.group('val')
valnum += 1
else:
# Value syntax error
return (None, GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX)
else:
r['VALUE1'] = None
array1.append(r)
else:
# Something is wrong with the values. (Maybe unbalanced '$(')
# TODO: count opening and closing brackets in the pattern
return (None, GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX)
# now fill the empty values with the default value
for r in array1:
if r['VALUE1'] is None:
if default_value is None:
return (None, GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT)
else:
r['VALUE1'] = default_value
r['VALUE'] = r['VALUE1']
    # Now create new ones, but for the [X-Y] matches
# array1 holds the original entries. Some of the keys may contain wildcards
# array2 is filled with originals and inflated wildcards
if NodeSet is None:
# The pattern that will say if we have a [X-Y] key.
        pat = re.compile(r'\[(\d*)-(\d*)\]')
for r in array1:
key = r['KEY']
orig_key = r['KEY']
# We have no choice, we cannot use NodeSet, so we use the
# simple regexp
if NodeSet is None:
m = pat.search(key)
got_xy = (m is not None)
else: # Try to look with a nodeset check directly
try:
ns = NodeSet(str(key))
# If we have more than 1 element, we have a xy thing
got_xy = (len(ns) != 1)
            except NodeSetParseRangeError:
                return (None, GET_KEY_VALUE_SEQUENCE_ERROR_NODE)
        # Now we know if the key holds X-Y couples. If it does,
        # we are dealing with a "key generator"
if got_xy:
            # Ok 2 cases: we have the NodeSet lib or not.
            # If not, we use the dumb algo (quick, but it handles fewer
            # cases, like /N or , in patterns)
            if NodeSet is None:  # use the old algo
still_loop = True
xy_couples = [] # will get all X-Y couples
while still_loop:
m = pat.search(key)
                        if m is not None:  # we've found one X-Y
(x, y) = m.groups()
(x, y) = (int(x), int(y))
xy_couples.append((x, y))
                            # We must check if we've got other X-Y couples,
                            # so we mask this one, and loop
key = key.replace('[%d-%d]' % (x, y), 'Z' * 10)
else: # no more X-Y in it
still_loop = False
# Now we have our xy_couples, we can manage them
# We search all pattern change rules
rules = got_generation_rule_pattern_change(xy_couples)
                # Then we apply them all to get our final keys
for rule in rules:
res = apply_change_recursive_pattern_change(orig_key, rule)
new_r = {}
for key in r:
new_r[key] = r[key]
new_r['KEY'] = res
array2.append(new_r)
else:
# The key was just a generator, we can remove it
# keys_to_del.append(orig_key)
# We search all pattern change rules
# rules = got_generation_rule_pattern_change(xy_couples)
nodes_set = expand_xy_pattern(orig_key)
new_keys = list(nodes_set)
                # Then we apply them all to get our final keys
for new_key in new_keys:
# res = apply_change_recursive_pattern_change(orig_key, rule)
new_r = {}
for key in r:
new_r[key] = r[key]
new_r['KEY'] = new_key
array2.append(new_r)
else:
# There were no wildcards
array2.append(r)
# t1 = time.time()
# print("***********Diff", t1 -t0)
return (array2, GET_KEY_VALUE_SEQUENCE_ERROR_NOERROR)
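# Illustrative only (entry syntax taken from the comment above):
#   get_key_value_sequence('C$(80%!90%)$,D$(80%!90%)$')
#   -> ([{'KEY': 'C', 'VALUE': '80%!90%', 'VALUE1': '80%!90%'},
#        {'KEY': 'D', 'VALUE': '80%!90%', 'VALUE1': '80%!90%'}],
#       GET_KEY_VALUE_SEQUENCE_ERROR_NOERROR)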
# ############################## Files management #######################
# We got a file like /tmp/toto/toto2/bob.png and we want to be sure the dir
# /tmp/toto/toto2/ really exists, so we can copy into it. Try to create it
# if needed, and return True/False for success
def expect_file_dirs(root, path):
dirs = os.path.normpath(path).split('/')
dirs = [d for d in dirs if d != '']
# We will create all directory until the last one
# so we are doing a mkdir -p .....
# TODO: and windows????
tmp_dir = root
for d in dirs:
_d = os.path.join(tmp_dir, d)
        logger.info('Verify the existence of the directory %s', _d)
if not os.path.exists(_d):
try:
os.mkdir(_d)
except Exception:
return False
tmp_dir = _d
return True
# ####################### Services/hosts search filters #######################
# Filters used in services or hosts find_by_filter method
# Return callback functions which are passed host or service instances, and
# should return a boolean value that indicates if the instance matched the
# filter
def filter_any(name):
def inner_filter(host):
return True
return inner_filter
def filter_none(name):
def inner_filter(host):
return False
return inner_filter
def filter_host_by_name(name):
def inner_filter(host):
if host is None:
return False
return host.host_name == name
return inner_filter
def filter_host_by_regex(regex):
host_re = re.compile(regex)
def inner_filter(host):
if host is None:
return False
return host_re.match(host.host_name) is not None
return inner_filter
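# Illustrative usage (the host names are assumptions):
#   match_web = filter_host_by_regex(r'^web-\d+$')
#   match_web(host)  # -> True for a host named 'web-01'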
def filter_host_by_group(group):
def inner_filter(host):
if host is None:
return False
return group in [g.hostgroup_name for g in host.hostgroups]
return inner_filter
def filter_host_by_tag(tpl):
def inner_filter(host):
if host is None:
return False
return tpl in [t.strip() for t in host.tags]
return inner_filter
def filter_service_by_name(name):
def inner_filter(service):
if service is None:
return False
return service.service_description == name
return inner_filter
def filter_service_by_regex_name(regex):
host_re = re.compile(regex)
def inner_filter(service):
if service is None:
return False
return host_re.match(service.service_description) is not None
return inner_filter
def filter_service_by_host_name(host_name):
def inner_filter(service):
if service is None:
return False
return service.host_name == host_name
return inner_filter
def filter_service_by_regex_host_name(regex):
host_re = re.compile(regex)
def inner_filter(service):
if service is None:
return False
return host_re.match(service.host_name) is not None
return inner_filter
def filter_service_by_hostgroup_name(group):
def inner_filter(service):
if service is None or service.host is None:
return False
return group in [g.hostgroup_name for g in service.host.hostgroups]
return inner_filter
def filter_service_by_host_tag_name(tpl):
def inner_filter(service):
if service is None or service.host is None:
return False
return tpl in [t.strip() for t in service.host.tags]
return inner_filter
def filter_service_by_servicegroup_name(group):
def inner_filter(service):
if service is None:
return False
return group in [g.servicegroup_name for g in service.servicegroups]
return inner_filter
def filter_host_by_bp_rule_label(label):
def inner_filter(host):
if host is None:
return False
return label in host.labels
return inner_filter
def filter_service_by_host_bp_rule_label(label):
def inner_filter(service):
if service is None or service.host is None:
return False
return label in service.host.labels
return inner_filter
def filter_service_by_bp_rule_label(label):
def inner_filter(service):
if service is None:
return False
return label in service.labels
return inner_filter
def is_complex_expr(expr):
for m in '()&|!*':
if m in expr:
return True
return False
def get_exclude_match_expr(pattern):
if pattern == "*":
return lambda d: True
elif pattern.startswith("r:"):
reg = re.compile(pattern[2:])
return reg.match
else:
return lambda d: d == pattern
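# Illustrative only:
#   get_exclude_match_expr('*')('anything')    # -> True
#   get_exclude_match_expr('r:^db')('db-01')   # -> truthy (regex match)
#   get_exclude_match_expr('web')('web')       # -> True (exact match)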
# ##################### system related utility functions #####################
def get_memory(who="self"):
if resource is None:
return 0
if who == "self":
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024
elif who == "children":
return resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss * 1024
elif who == "both":
return resource.getrusage(resource.RUSAGE_BOTH).ru_maxrss * 1024
else:
return 0
def parse_memory_expr(expr):
if expr is None:
return None
try:
if expr.endswith("K"):
value = float(expr[:-1]) * 1024
if expr.endswith("M"):
value = float(expr[:-1]) * 1048576
elif expr.endswith("G"):
value = float(expr[:-1]) * 1073741824
else:
value = float(expr)
return value
except ValueError:
logger.error("Invalid memory threshold expression: %s" % expr)
return None
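# Illustrative only: parse_memory_expr('512M') -> 536870912.0,
# parse_memory_expr('2G') -> 2147483648.0 and parse_memory_expr(None) -> None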
def free_memory():
"""
Under Linux, when a new configuration is loaded, the old config memory
is not really freed.
This function forces memory to be freed.
"""
try:
if platform.system() == "Linux":
logger.debug("Forcing memory free")
import ctypes
libc6 = ctypes.CDLL('libc.so.6')
libc6.malloc_trim(0)
except Exception:
logger.error("Failed to free memory")
logger.debug(traceback.format_exc())
| 25,577 | Python | .py | 726 | 27.555096 | 92 | 0.578751 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,447 | notification.py | shinken-solutions_shinken/shinken/notification.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import six
from shinken.action import Action
from shinken.brok import Brok
from shinken.property import BoolProp, IntegerProp, StringProp, FloatProp
from shinken.autoslots import AutoSlots
class Notification(six.with_metaclass(AutoSlots, Action)):
"""Please Add a Docstring to describe the class here"""
my_type = 'notification'
properties = {
'is_a': StringProp(default='notification'),
'type': StringProp(default=''),
'notification_type': IntegerProp(default=0, fill_brok=['full_status']),
'start_time': StringProp(default=0, fill_brok=['full_status']),
'end_time': StringProp(default=0, fill_brok=['full_status']),
'contact_name': StringProp(default='', fill_brok=['full_status']),
'host_name': StringProp(default='', fill_brok=['full_status']),
'service_description': StringProp(default='', fill_brok=['full_status']),
'reason_type': StringProp(default=0, fill_brok=['full_status']),
'state': StringProp(default=0, fill_brok=['full_status']),
'output': StringProp(default='', fill_brok=['full_status']),
'ack_author': StringProp(default='', fill_brok=['full_status']),
'ack_data': StringProp(default='', fill_brok=['full_status']),
'escalated': BoolProp(default=False, fill_brok=['full_status']),
'contacts_notified': StringProp(default=0, fill_brok=['full_status']),
'env': StringProp(default={}),
'exit_status': IntegerProp(default=3),
'command_call': StringProp(default=None),
'execution_time': FloatProp(default=0),
'u_time': FloatProp(default=0.0),
's_time': FloatProp(default=0.0),
'contact': StringProp(default=None),
'_in_timeout': BoolProp(default=False),
'notif_nb': IntegerProp(default=0),
'status': StringProp(default='scheduled'),
't_to_go': IntegerProp(default=0),
'command': StringProp(default=''),
'sched_id': IntegerProp(default=0),
'timeout': IntegerProp(default=10),
'check_time': IntegerProp(default=0),
'module_type': StringProp(default='fork', fill_brok=['full_status']),
'worker': StringProp(default='none'),
'reactionner_tag': StringProp(default='None'),
'creation_time': IntegerProp(default=0),
'enable_environment_macros': BoolProp(default=False),
# Keep a list of currently active escalations
'already_start_escalations': StringProp(default=set()),
'priority': IntegerProp(default=100),
}
macros = {
'NOTIFICATIONTYPE': 'type',
'NOTIFICATIONRECIPIENTS': 'recipients',
'NOTIFICATIONISESCALATED': 'escalated',
'NOTIFICATIONAUTHOR': 'author',
'NOTIFICATIONAUTHORNAME': 'author_name',
'NOTIFICATIONAUTHORALIAS': 'author_alias',
'NOTIFICATIONCOMMENT': 'comment',
'HOSTNOTIFICATIONNUMBER': 'notif_nb',
'HOSTNOTIFICATIONID': 'id',
'SERVICENOTIFICATIONNUMBER': 'notif_nb',
'SERVICENOTIFICATIONID': 'id'
}
def __init__(self, type='PROBLEM', status='scheduled', command='UNSET',
command_call=None, ref=None, contact=None, t_to_go=0,
contact_name='', host_name='', service_description='',
reason_type=1, state=0, ack_author='', ack_data='',
escalated=False, contacts_notified=0,
start_time=0, end_time=0, notification_type=0, id=None,
notif_nb=1, timeout=10, env={}, module_type='fork',
reactionner_tag='None', enable_environment_macros=0,
priority=100):
self.is_a = 'notification'
self.type = type
if id is None: # id != None is for copy call only
self.id = Action.id
Action.id += 1
self._in_timeout = False
self.timeout = timeout
self.status = status
self.exit_status = 3
self.command = command
self.command_call = command_call
self.output = None
self.execution_time = 0
        self.u_time = 0  # user execution time
        self.s_time = 0  # system execution time
self.ref = ref
# Set host_name and description from the ref
try:
self.host_name = self.ref.host_name
except Exception:
self.host_name = host_name
try:
self.service_description = self.ref.service_description
except Exception:
self.service_description = service_description
self.env = env
self.module_type = module_type
self.t_to_go = t_to_go
self.notif_nb = notif_nb
self.contact = contact
# For brok part
self.contact_name = contact_name
self.reason_type = reason_type
self.state = state
self.ack_author = ack_author
self.ack_data = ack_data
self.escalated = escalated
self.contacts_notified = contacts_notified
self.start_time = start_time
self.end_time = end_time
self.notification_type = notification_type
self.creation_time = time.time()
self.worker = 'none'
self.reactionner_tag = reactionner_tag
self.already_start_escalations = set()
self.enable_environment_macros = enable_environment_macros
self.priority = priority
    # Return a copy of the notification, but only with what is important
    # for execution. So we remove the ref and such links
def copy_shell(self):
# We create a dummy check with nothing in it, just defaults values
return self.copy_shell__(Notification('', '', '', '', '', '', '', id=self.id))
def is_launchable(self, t):
return t >= self.t_to_go
def is_administrative(self):
if self.type in ('PROBLEM', 'RECOVERY'):
return False
else:
return True
def __str__(self):
return "Notification %d status:%s command:%s ref:%s t_to_go:%s" % \
(self.id, self.status, self.command, getattr(self, 'ref', 'unknown'),
time.asctime(time.localtime(self.t_to_go)))
def get_id(self):
return self.id
def get_return_from(self, n):
self.exit_status = n.exit_status
self.execution_time = n.execution_time
# Fill data with info of item by looking at brok_type
# in props of properties or running_properties
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
if brok_type in entry.fill_brok:
data[prop] = getattr(self, prop)
# Get a brok with initial status
def get_initial_status_brok(self):
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
b = Brok('notification_raise', data)
return b
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverted function of getstate
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
        # Hook for loading 0.4 notifications: there was no
        # creation time, so we must add one
if not hasattr(self, 'creation_time'):
self.creation_time = time.time()
if not hasattr(self, 'reactionner_tag'):
self.reactionner_tag = 'None'
if not hasattr(self, 'worker'):
self.worker = 'none'
if not getattr(self, 'module_type', None):
self.module_type = 'fork'
if not hasattr(self, 'already_start_escalations'):
self.already_start_escalations = set()
if not hasattr(self, 'execution_time'):
self.execution_time = 0
# s_time and u_time are added between 1.2 and 1.4
if not hasattr(self, 'u_time'):
self.u_time = 0
self.s_time = 0
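# Hypothetical usage sketch (values below are illustrative, not API defaults):
#   n = Notification(type='PROBLEM', command='notify-by-email', t_to_go=time.time())
#   n.is_launchable(time.time())   # -> True once t_to_go is reached
#   n.is_administrative()          # -> False for PROBLEM/RECOVERY notifications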
| 9,595 | Python | .py | 209 | 37.416268 | 86 | 0.598481 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,448 | basemodule.py | shinken-solutions_shinken/shinken/basemodule.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This python module contains the class BaseModule
that shinken modules will subclass
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import signal
import time
import traceback
from re import compile
from multiprocessing import Queue, Process
import shinken.http_daemon
from shinken.log import logger
from shinken.misc.common import setproctitle
# TODO: use a class for defining the module "properties" instead of
# plain dict?? Like:
'''
class ModuleProperties(object):
def __init__(self, type, phases, external=False)
self.type = type
self.phases = phases
self.external = external
'''
# and have the new modules instantiate it as follows:
'''
properties = ModuleProperties('the_module_type', the_module_phases, is_mod_ext)
'''
# The `properties` dict defines what the module can do and
# whether it's an external module or not.
properties = {
# name of the module type ; to distinguish between them:
'type': None,
# is the module "external" (external means here a daemon module)?
'external': True,
# Possible configuration phases where the module is involved:
'phases': ['configuration', 'late_configuration', 'running', 'retention'],
}
class ModulePhases(object):
"""TODO: Add some comment about this class for the doc"""
# TODO: why not use simply integers instead of string
# to represent the different phases??
CONFIGURATION = 1
LATE_CONFIGURATION = 2
RUNNING = 4
RETENTION = 8
class BaseModule(object):
"""This is the base class for the shinken modules.
Modules can be used by the different shinken daemons/services
for different tasks.
Example of task that a shinken module can do:
- load additional configuration objects.
- recurrently save hosts/services status/perfdata
informations in different format.
- ...
"""
def __init__(self, mod_conf):
"""Instanciate a new module.
There can be many instance of the same type.
'mod_conf' is module configuration object
for this new module instance.
"""
self.myconf = mod_conf
self.name = mod_conf.get_name()
# We can have sub modules
self.modules = getattr(mod_conf, 'modules', [])
self.props = mod_conf.properties.copy()
# TODO: choose between 'props' or 'properties'..
self.interrupted = False
self.properties = self.props
self.is_external = self.props.get('external', False)
        # though a module defined with no phase is quite useless.
self.phases = self.props.get('phases', [])
self.phases.append(None)
# the queue the module will receive data to manage
self.to_q = None
# the queue the module will put its result data
self.from_q = None
self.process = None
self.illegal_char = compile(r'[^\w-]')
self.init_try = 0
        # We want to know where we are loaded from (broker, scheduler, etc.)
self.loaded_into = 'unknown'
def init(self):
"""Handle this module "post" init ; just before it'll be started.
Like just open necessaries file(s), database(s),
or whatever the module will need.
"""
pass
def set_loaded_into(self, daemon_name):
self.loaded_into = daemon_name
def create_queues(self, manager=None):
"""The manager is None on android, but a true Manager() elsewhere
Create the shared queues that will be used by shinken daemon
process and this module process.
But clear queues if they were already set before recreating new one.
"""
self.clear_queues(manager)
# If no Manager() object, go with classic Queue()
if not manager:
self.from_q = Queue()
self.to_q = Queue()
else:
self.from_q = manager.Queue()
self.to_q = manager.Queue()
def clear_queues(self, manager):
"""Release the resources associated to the queues of this instance"""
for q in (self.to_q, self.from_q):
if q is None:
continue
            # If we got no manager, we directly call the cleanup
if not manager:
q.close()
q.join_thread()
# else:
# q._callmethod('close')
# q._callmethod('join_thread')
self.to_q = self.from_q = None
def start_module(self):
try:
self._main()
except Exception as e:
logger.error('[%s] %s', self.name, traceback.format_exc())
raise e
    # Start this module's process if it's external; if not, do nothing
def start(self, http_daemon=None):
if not self.is_external:
return
self.stop_process()
logger.info("Starting external process for instance %s", self.name)
p = Process(target=self.start_module, args=())
        # Under windows we should not call start() on an object that has
        # its process as an attribute, so we remove it and set it back
        # after the start
try:
del self.properties['process']
except KeyError:
pass
p.start()
# We save the process data AFTER the fork()
self.process = p
self.properties['process'] = p # TODO: temporary
logger.info("%s is now started ; pid=%d", self.name, p.pid)
def __kill(self):
"""Sometime terminate() is not enough, we must "help"
external modules to die...
"""
if os.name == 'nt':
self.process.terminate()
else:
            # Ok, give it 1 second before we really KILL it
os.kill(self.process.pid, signal.SIGTERM)
time.sleep(1)
            # It leaves us no other choice...
if self.process.is_alive():
os.kill(self.process.pid, signal.SIGKILL)
def stop_process(self):
"""Request the module process to stop and release it"""
if self.process:
logger.info("I'm stopping module %r (pid=%s)",
self.get_name(), self.process.pid)
self.process.terminate()
self.process.join(timeout=1)
if self.process.is_alive():
logger.warning("%r is still alive normal kill, I help it to die",
self.get_name())
self.__kill()
self.process.join(1)
if self.process.is_alive():
logger.error("%r still alive after brutal kill, I leave it.",
self.get_name())
self.process = None
# TODO: are these 2 methods really needed?
def get_name(self):
return self.name
def has(self, prop):
"""The classic has: do we have a prop or not?"""
return hasattr(self, prop)
# For in scheduler modules, we will not send all broks to external
# modules, only what they really want
def want_brok(self, b):
return True
def manage_brok(self, brok):
"""Request the module to manage the given brok.
        There are a lot of different possible broks to manage.
"""
manage = getattr(self, 'manage_' + brok.type + '_brok', None)
if manage:
# Be sure the brok is prepared before call it
#brok.prepare()
return manage(brok)
def manage_signal(self, sig, frame):
self.interrupted = True
def set_signal_handler(self, sigs=None):
if sigs is None:
sigs = (signal.SIGINT, signal.SIGTERM)
for sig in sigs:
signal.signal(sig, self.manage_signal)
set_exit_handler = set_signal_handler
def do_stop(self):
"""Called just before the module will exit
Put in this method all you need to cleanly
release all open resources used by your module
"""
pass
def do_loop_turn(self):
"""For external modules only:
        implement in this method the body of your main loop
"""
raise NotImplementedError()
def set_proctitle(self, name):
setproctitle("shinken-%s module: %s" % (self.loaded_into, name))
def _main(self):
"""module "main" method. Only used by external modules."""
self.set_proctitle(self.name)
# TODO: fix this hack:
if shinken.http_daemon.daemon_inst:
shinken.http_daemon.daemon_inst.shutdown()
self.set_signal_handler()
logger.info("[%s[%d]]: Now running..", self.name, os.getpid())
# Will block here!
self.main()
self.do_stop()
logger.info("[%s]: exiting now..", self.name)
    # TODO: apparently some modules use "work" as the main method??
work = _main
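# A minimal external-module sketch (the class name and the loop body are
# assumptions, not part of this file): subclass BaseModule and provide the
# main() method that _main() calls.
class DummyExampleModule(BaseModule):
    def main(self):
        self.set_signal_handler()
        while not self.interrupted:
            time.sleep(1)  # the module's real work would go here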
| 9,788 | Python | .py | 247 | 31.728745 | 82 | 0.627267 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,449 | http_client.py | shinken-solutions_shinken/shinken/http_client.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import zlib
import base64
import json
import sys
import io
if six.PY2:
from urllib import urlencode
else:
from urllib.parse import urlencode
# Pycurl part
import pycurl
pycurl.global_init(pycurl.GLOBAL_ALL)
PYCURL_VERSION = pycurl.version_info()[1]
from shinken.bin import VERSION
from shinken.log import logger
class HTTPException(Exception):
pass
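# NOTE: the `serialize` helper used by post() below is not part of this
# extract. This stand-in is an assumption (json + zlib + base64), kept only
# so the module stays importable; the real wire format may differ.
def serialize(data):
    # json-encode, compress, then base64 so the payload is a safe POST field
    return base64.b64encode(zlib.compress(json.dumps(data).encode('utf-8'), 2))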
class FileReader(object):
def __init__(self, fp):
self.fp = fp
def read_callback(self, size):
return self.fp.read(size)
class HTTPClient(object):
def __init__(self, address='', port=0, use_ssl=False, timeout=3,
data_timeout=120, uri='', strong_ssl=False, proxy=''):
self.address = address
self.port = port
self.timeout = timeout
self.data_timeout = data_timeout
if not uri:
if use_ssl:
self.uri = "https://%s:%s/" % (self.address, self.port)
else:
self.uri = "http://%s:%s/" % (self.address, self.port)
else:
self.uri = uri
self.get_con = self.__create_con(proxy, strong_ssl)
self.post_con = self.__create_con(proxy, strong_ssl)
self.put_con = self.__create_con(proxy, strong_ssl)
def __create_con(self, proxy, strong_ssl):
con = pycurl.Curl()
con.setopt(con.VERBOSE, 0)
        # Remove the Expect: 100-Continue default behavior of pycurl, because
        # wsgiref does not manage it
headers = ['Expect:', 'Keep-Alive: 300', 'Connection: Keep-Alive']
user_agent = 'shinken:%s pycurl:%s' % (VERSION, PYCURL_VERSION)
# Manages bug described in https://github.com/pycurl/pycurl/issues/124
try:
con.setopt(pycurl.HTTPHEADER, headers)
con.setopt(pycurl.USERAGENT, user_agent)
except TypeError:
headers = [h.encode("utf-8") for h in headers]
user_agent = user_agent.encode("utf-8")
con.setopt(pycurl.HTTPHEADER, headers)
con.setopt(pycurl.USERAGENT, user_agent)
con.setopt(pycurl.FOLLOWLOCATION, 1)
con.setopt(pycurl.FAILONERROR, True)
con.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
con.setopt(pycurl.HTTP_VERSION, pycurl.CURL_HTTP_VERSION_1_1)
if proxy:
# Manages bug described in https://github.com/pycurl/pycurl/issues/124
try:
con.setopt(pycurl.PROXY, proxy)
except TypeError:
proxy = proxy.encode("utf-8")
con.setopt(pycurl.PROXY, proxy)
        # Also set the SSL options so we do not check the certificates too
        # strictly, unless the admin asked for it
if strong_ssl:
con.setopt(pycurl.SSL_VERIFYPEER, 1)
con.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
con.setopt(pycurl.SSL_VERIFYPEER, 0)
con.setopt(pycurl.SSL_VERIFYHOST, 0)
return con
def set_proxy(self, proxy):
if proxy:
logger.debug('PROXY SETTING PROXY %s', proxy)
self.get_con.setopt(pycurl.PROXY, proxy)
self.post_con.setopt(pycurl.PROXY, proxy)
self.put_con.setopt(pycurl.PROXY, proxy)
# Try to get an URI path
def get(self, path, args={}, wait='short'):
c = self.get_con
c.setopt(c.POST, 0)
c.setopt(pycurl.HTTPGET, 1)
        # For the TIMEOUT, it depends on whether we are waiting for a long query or not
# long:data_timeout, like for huge broks receptions
# short:timeout, like for just "ok" connection
if wait == 'short':
c.setopt(c.TIMEOUT, self.timeout)
else:
c.setopt(c.TIMEOUT, self.data_timeout)
c.setopt(c.URL, self.uri + path + '?' + urlencode(args))
# Ok now manage the response
response = io.BytesIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
c.setopt(c.VERBOSE, 0)
try:
c.perform()
except pycurl.error as error:
errno, errstr = error.args
raise HTTPException('Connection error to %s : %s' % (self.uri, errstr))
r = c.getinfo(pycurl.HTTP_CODE)
# Do NOT close the connection, we want a keep alive
# c.close()
if r != 200:
err = response.getvalue().decode("utf-8")
logger.error("There was a critical error : %s", err)
raise HTTPException('Connection error to %s : %s' % (self.uri, r))
        # pycurl gives us raw bytes back; callers decode/deserialize as needed
        return response.getvalue()
    # Try to POST to an URI path
def post(self, path, args, wait='short'):
size = 0
for (k, v) in args.items():
args[k] = serialize(v)
size += len(args[k])
# Ok go for it!
c = self.post_con
c.setopt(pycurl.HTTPGET, 0)
c.setopt(c.POST, 1)
        # For the TIMEOUT, it depends on whether we are waiting for a long query or not
# long:data_timeout, like for huge broks receptions
# short:timeout, like for just "ok" connection
if wait == 'short':
c.setopt(c.TIMEOUT, self.timeout)
else:
c.setopt(c.TIMEOUT, self.data_timeout)
# if proxy:
# c.setopt(c.PROXY, proxy)
        # Pycurl wants a list of tuples as args
c.setopt(c.HTTPPOST, list(args.items()))
c.setopt(c.URL, self.uri + path)
# Ok now manage the response
response = io.BytesIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
c.setopt(c.VERBOSE, 0)
try:
c.perform()
except pycurl.error as error:
errno, errstr = error.args
raise HTTPException('Connection error to %s : %s' % (self.uri, errstr))
r = c.getinfo(pycurl.HTTP_CODE)
# Do NOT close the connection, we want a keep alive
# c.close()
if r != 200:
err = response.getvalue().decode("utf-8")
logger.error("There was a critical error : %s", err)
raise HTTPException('Connection error to %s : %s' % (self.uri, r))
        # pycurl gives us raw bytes back; callers decode/deserialize as needed
        return response.getvalue()
    # Try to PUT to an URI path
def put(self, path, content, wait='short'):
c = self.put_con
filesize = len(content)
payload = io.BytesIO(content)
c.setopt(pycurl.INFILESIZE, filesize)
c.setopt(pycurl.PUT, 1)
c.setopt(pycurl.READFUNCTION, FileReader(payload).read_callback)
        # For the TIMEOUT, it depends on whether we are waiting for a long query or not
# long:data_timeout, like for huge broks receptions
# short:timeout, like for just "ok" connection
if wait == 'short':
c.setopt(c.TIMEOUT, self.timeout)
else:
c.setopt(c.TIMEOUT, self.data_timeout)
# if proxy:
# c.setopt(c.PROXY, proxy)
c.setopt(c.URL, self.uri + path)
c.setopt(c.VERBOSE, 0)
# Ok now manage the response
response = io.BytesIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
# c.setopt(c.VERBOSE, 1)
try:
c.perform()
except pycurl.error as error:
errno, errstr = error.args
raise HTTPException('Connection error to %s : %s' % (self.uri, errstr))
r = c.getinfo(pycurl.HTTP_CODE)
# Do NOT close the connection, we want a keep alive
# c.close()
if r != 200:
err = response.getvalue().decode("utf-8")
logger.error("There was a critical error : %s", err)
raise HTTPException('Connection error to %s : %s' % (self.uri, r))
return response.getvalue()
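# Hypothetical usage sketch (address, port and path are illustrative only):
#   client = HTTPClient(address='127.0.0.1', port=7768)
#   raw = client.get('ping')  # raw response bytes, or raises HTTPException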
| 8,996 | Python | .py | 218 | 32.756881 | 93 | 0.612789 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,450 | macroresolver.py | shinken-solutions_shinken/shinken/macroresolver.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This class resolves macros in commands by looking at the macros list
# in the class of each element. A macro maps to a property that may or
# may not be callable. If it is not callable, it's a simple property and
# we replace the macro with its value. If it is callable, the method is
# called to get the value: for example, to get the number of services on
# a host, we call a method that returns len(host.services)
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import time
from shinken.borg import Borg
class MacroResolver(Borg):
"""Please Add a Docstring to describe the class here"""
my_type = 'macroresolver'
# Global macros
macros = {
'TOTALHOSTSUP': '_get_total_hosts_up',
'TOTALHOSTSDOWN': '_get_total_hosts_down',
'TOTALHOSTSUNREACHABLE': '_get_total_hosts_unreachable',
'TOTALHOSTSDOWNUNHANDLED': '_get_total_hosts_unhandled',
'TOTALHOSTSUNREACHABLEUNHANDLED': '_get_total_hosts_unreachable_unhandled',
'TOTALHOSTPROBLEMS': '_get_total_host_problems',
'TOTALHOSTPROBLEMSUNHANDLED': '_get_total_host_problems_unhandled',
'TOTALSERVICESOK': '_get_total_service_ok',
'TOTALSERVICESWARNING': '_get_total_services_warning',
'TOTALSERVICESCRITICAL': '_get_total_services_critical',
'TOTALSERVICESUNKNOWN': '_get_total_services_unknown',
'TOTALSERVICESWARNINGUNHANDLED': '_get_total_services_warning_unhandled',
'TOTALSERVICESCRITICALUNHANDLED': '_get_total_services_critical_unhandled',
'TOTALSERVICESUNKNOWNUNHANDLED': '_get_total_services_unknown_unhandled',
'TOTALSERVICEPROBLEMS': '_get_total_service_problems',
'TOTALSERVICEPROBLEMSUNHANDLED': '_get_total_service_problems_unhandled',
'LONGDATETIME': '_get_long_date_time',
'SHORTDATETIME': '_get_short_date_time',
'DATE': '_get_date',
'TIME': '_get_time',
'TIMET': '_get_timet',
'PROCESSSTARTTIME': '_get_process_start_time',
'EVENTSTARTTIME': '_get_events_start_time',
}
output_macros = [
'HOSTOUTPUT',
'HOSTPERFDATA',
'HOSTACKAUTHOR',
'HOSTACKCOMMENT',
'SERVICEOUTPUT',
'SERVICEPERFDATA',
'SERVICEACKAUTHOR',
'SERVICEACKCOMMENT'
]
    # This must be called ONCE. It just puts links to the elements
    # loaded by the scheduler
def init(self, conf):
# For searching class and elements for ondemand
# we need link to types
self.conf = conf
self.lists_on_demand = []
self.hosts = conf.hosts
# For special void host_name handling...
self.host_class = self.hosts.inner_class
self.lists_on_demand.append(self.hosts)
self.services = conf.services
self.contacts = conf.contacts
self.lists_on_demand.append(self.contacts)
self.hostgroups = conf.hostgroups
self.lists_on_demand.append(self.hostgroups)
self.commands = conf.commands
self.servicegroups = conf.servicegroups
self.lists_on_demand.append(self.servicegroups)
self.contactgroups = conf.contactgroups
self.lists_on_demand.append(self.contactgroups)
self.illegal_macro_output_chars = conf.illegal_macro_output_chars
# Try cache :)
# self.cache = {}
    # Return all macros of a string (cutting off the $ delimiters)
    # and create a dict for them:
    # val: the value, not set here
    # type: the type of macro, like a class one, or an ARGN one
def _get_macros(self, s):
# if s in self.cache:
# return self.cache[s]
p = re.compile(r'(\$)')
elts = p.split(s)
macros = {}
in_macro = False
for elt in elts:
if elt == '$':
in_macro = not in_macro
elif in_macro:
macros[elt] = {'val': '', 'type': 'unknown'}
# self.cache[s] = macros
if '' in macros:
del macros['']
return macros
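    # Illustrative only:
    #   _get_macros('$HOSTNAME$ is $HOSTSTATE$')
    #   -> {'HOSTNAME': {'val': '', 'type': 'unknown'},
    #       'HOSTSTATE': {'val': '', 'type': 'unknown'}}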
    # Get a value from a property of an element.
    # Prop can be a function or a property,
    # so we call it... or not
def _get_value_from_element(self, elt, prop):
try:
value = getattr(elt, prop)
if callable(value):
return str(value())
else:
return str(value)
        except AttributeError:
            # Return no value
            return ''
        except UnicodeError:
            if isinstance(value, bytes):
                # Permissively decode raw bytes we could not handle as-is
                return value.decode('utf8', 'ignore')
            else:
                return ''
# For some macros, we need to delete unwanted characters
def _delete_unwanted_caracters(self, s):
for c in self.illegal_macro_output_chars:
s = s.replace(c, '')
return s
    # Return a dict with all the environment variables built from
    # the macros of the data objects
def get_env_macros(self, data):
env = {}
for o in data:
cls = o.__class__
macros = cls.macros
for macro in macros:
if macro.startswith("USER"):
break
prop = macros[macro]
value = self._get_value_from_element(o, prop)
env['NAGIOS_%s' % macro] = value
if hasattr(o, 'customs'):
# make NAGIOS__HOSTMACADDR from _MACADDR
for cmacro in o.customs:
new_env_name = 'NAGIOS__' + o.__class__.__name__.upper() + cmacro[1:].upper()
env[new_env_name] = o.customs[cmacro]
return env
# This function will look at elements in data (and args if it filled)
# to replace the macros in c_line with real value.
def resolve_simple_macros_in_string(self, c_line, data, args=None):
# Now we prepare the classes for looking at the class.macros
data.append(self) # For getting global MACROS
if hasattr(self, 'conf'):
data.append(self.conf) # For USERN macros
clss = [d.__class__ for d in data]
# we should do some loops for nested macros
# like $USER1$ hiding like a ninja in a $ARG2$ Macro. And if
# $USER1$ is pointing to $USER34$ etc etc, we should loop
        # until we reach the bottom. So the last loop is when there
        # are no macros left :)
still_got_macros = True
nb_loop = 0
while still_got_macros:
nb_loop += 1
# Ok, we want the macros in the command line
macros = self._get_macros(c_line)
# We can get out if we do not have macros this loop
still_got_macros = (len(macros) != 0)
# print("Still go macros:", still_got_macros)
# Put in the macros the type of macro for all macros
self._get_type_of_macro(macros, clss)
# Now we get values from elements
for macro in macros:
# If type ARGN, look at ARGN cutting
if macros[macro]['type'] == 'ARGN' and args is not None:
macros[macro]['val'] = self._resolve_argn(macro, args)
macros[macro]['type'] = 'resolved'
# If class, get value from properties
if macros[macro]['type'] == 'class':
cls = macros[macro]['class']
for elt in data:
if elt is not None and elt.__class__ == cls:
prop = cls.macros[macro]
macros[macro]['val'] = self._get_value_from_element(elt, prop)
# Now check if we do not have a 'output' macro. If so, we must
# delete all special characters that can be dangerous
if macro in self.output_macros:
macros[macro]['val'] = \
self._delete_unwanted_caracters(macros[macro]['val'])
if macros[macro]['type'] == 'CUSTOM':
cls_type = macros[macro]['class']
                    # Beware: only cut the first '_' + cls_type prefix, since
                    # the macro name itself may still contain that substring
                    macro_name = re.split('_' + cls_type, macro, 1)[1].upper()
# Ok, we've got the macro like MAC_ADDRESS for _HOSTMAC_ADDRESS
# Now we get the element in data that have the type HOST
# and we check if it got the custom value
for elt in data:
if elt is not None and elt.__class__.my_type.upper() == cls_type:
if '_' + macro_name in elt.customs:
macros[macro]['val'] = elt.customs['_' + macro_name]
                            # Then look at the macromodulations, in reverse
                            # order, so the last one set will be the first
                            # one used (yes, we don't want to play
                            # with break and such things, sorry...)
mms = getattr(elt, 'macromodulations', [])
for mm in mms[::-1]:
# Look if the modulation got the value,
# but also if it's currently active
if '_' + macro_name in mm.customs and mm.is_active():
macros[macro]['val'] = mm.customs['_' + macro_name]
if macros[macro]['type'] == 'ONDEMAND':
macros[macro]['val'] = self._resolve_ondemand(macro, data)
# We resolved all we can, now replace the macro in the command call
for macro in macros:
c_line = c_line.replace('$' + macro + '$', macros[macro]['val'])
# A $$ means we want a $, it's not a macro!
# We replace $$ by a big dirty thing to be sure to not misinterpret it
c_line = c_line.replace("$$", "DOUBLEDOLLAR")
if nb_loop > 32: # too much loop, we exit
still_got_macros = False
# We now replace the big dirty token we made by only a simple $
c_line = c_line.replace("DOUBLEDOLLAR", "$")
# print("Retuning c_line", c_line.strip())
return c_line.strip()
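    # Illustrative only (assuming a host object with host_name 'srv-1' in data):
    #   resolve_simple_macros_in_string('ping $HOSTNAME$', [host])
    #   -> 'ping srv-1'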
# Resolve a command with macro by looking at data classes.macros
# And get macro from item properties.
def resolve_command(self, com, data):
c_line = com.command.command_line
return self.resolve_simple_macros_in_string(c_line, data, args=com.args)
# For all Macros in macros, set the type by looking at the
# MACRO name (ARGN? -> argn_type,
# HOSTBLABLA -> class one and set Host in class)
# _HOSTTOTO -> HOST CUSTOM MACRO TOTO
# $SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of
# the service Load of host srv-1
def _get_type_of_macro(self, macros, clss):
for macro in macros:
# ARGN Macros
            if re.match(r'ARG\d', macro):
macros[macro]['type'] = 'ARGN'
continue
# USERN macros
# are managed in the Config class, so no
# need to look that here
            elif re.match(r'_HOST\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'HOST'
continue
            elif re.match(r'_SERVICE\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'SERVICE'
# value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1]
continue
            elif re.match(r'_CONTACT\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'CONTACT'
continue
# On demand macro
elif len(macro.split(':')) > 1:
macros[macro]['type'] = 'ONDEMAND'
continue
# OK, classical macro...
for cls in clss:
if macro in cls.macros:
macros[macro]['type'] = 'class'
macros[macro]['class'] = cls
continue
# Resolve MACROS for the ARGN
def _resolve_argn(self, macro, args):
        # first, get the arg number
        id = None
        r = re.search(r'ARG(?P<id>\d+)', macro)
if r is not None:
id = int(r.group('id')) - 1
try:
return args[id]
except IndexError:
return ''
# Resolve on-demand macro, quite hard in fact
def _resolve_ondemand(self, macro, data):
# print("\nResolving macro", macro)
elts = macro.split(':')
nb_parts = len(elts)
macro_name = elts[0]
        # Len 3 == service, 2 == all other types...
if nb_parts == 3:
val = ''
# print("Got a Service on demand asking...", elts)
(host_name, service_description) = (elts[1], elts[2])
# host_name can be void, so it's the host in data
# that is important. We use our self.host_class to
# find the host in the data :)
if host_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
host_name = elt.host_name
# Ok now we get service
s = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if s is not None:
cls = s.__class__
prop = cls.macros[macro_name]
val = self._get_value_from_element(s, prop)
# print("Got val:", val)
return val
# Ok, service was easy, now hard part
else:
val = ''
elt_name = elts[1]
            # Special case: elt_name can be void,
            # so it's the host it applies to
if elt_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
elt_name = elt.host_name
            for lst in self.lists_on_demand:
                cls = lst.inner_class
                # We search our type by looking at the macro
                if macro_name in cls.macros:
                    prop = cls.macros[macro_name]
                    i = lst.find_by_name(elt_name)
if i is not None:
val = self._get_value_from_element(i, prop)
# Ok we got our value :)
break
return val
return ''
# Get Fri 15 May 11:42:39 CEST 2009
def _get_long_date_time(self):
return time.strftime("%a %d %b %H:%M:%S %Z %Y")
# Get 10-13-2000 00:30:28
def _get_short_date_time(self):
return time.strftime("%d-%m-%Y %H:%M:%S")
# Get 10-13-2000
def _get_date(self):
return time.strftime("%d-%m-%Y")
# Get 00:30:28
def _get_time(self):
return time.strftime("%H:%M:%S")
# Get epoch time
def _get_timet(self):
return str(int(time.time()))
def _get_total_hosts_up(self):
return len([h for h in self.hosts if h.state == 'UP'])
def _get_total_hosts_down(self):
return len([h for h in self.hosts if h.state == 'DOWN'])
def _get_total_hosts_unreachable(self):
return len([h for h in self.hosts if h.state == 'UNREACHABLE'])
# TODO
def _get_total_hosts_unreachable_unhandled(self):
return 0
    def _get_total_host_problems(self):
        # Name matches the '_get_total_host_problems' entry in the macros dict
        return len([h for h in self.hosts if h.is_problem])
    def _get_total_host_problems_unhandled(self):
        return 0
def _get_total_service_ok(self):
return len([s for s in self.services if s.state == 'OK'])
def _get_total_services_warning(self):
return len([s for s in self.services if s.state == 'WARNING'])
def _get_total_services_critical(self):
return len([s for s in self.services if s.state == 'CRITICAL'])
def _get_total_services_unknown(self):
return len([s for s in self.services if s.state == 'UNKNOWN'])
# TODO
def _get_total_services_warning_unhandled(self):
return 0
def _get_total_services_critical_unhandled(self):
return 0
def _get_total_services_unknown_unhandled(self):
return 0
def _get_total_service_problems(self):
return len([s for s in self.services if s.is_problem])
def _get_total_service_problems_unhandled(self):
return 0
def _get_process_start_time(self):
return 0
def _get_events_start_time(self):
return 0
| 17,681 | Python | .py | 386 | 34.38342 | 100 | 0.56279 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,451 | schedulerlink.py | shinken-solutions_shinken/shinken/schedulerlink.py |
'''shinken.schedulerlink is deprecated. Please use shinken.objects.schedulerlink now.'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.old_daemon_link import make_deprecated_daemon_link
from shinken.objects import schedulerlink
make_deprecated_daemon_link(schedulerlink)
| 326 | Python | .py | 5 | 63.2 | 88 | 0.838608 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,452 | worker.py | shinken-solutions_shinken/shinken/worker.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# On android, we should use threads, not processes
from __future__ import absolute_import, division, print_function, unicode_literals
try:
import android
is_android = True
except ImportError:
is_android = False
if is_android:
from threading import Thread as Process
else:
from multiprocessing import Process
import six
import os
import io
import time
import sys
import signal
import traceback
import multiprocessing
if six.PY2:
from Queue import Queue, Empty
else:
from queue import Queue, Empty
from shinken.log import logger, BrokHandler
from shinken.misc.common import setproctitle
class Worker(object):
"""This class is used for poller and reactionner to work.
The worker is a process launch by theses process and read Message in a Queue
(self.s) (slave)
They launch the Check and then send the result in the Queue self.m (master)
they can die if they do not do anything (param timeout)
"""
id = 0 # None
_process = None
_mortal = None
_idletime = None
_timeout = None
_c = None
def __init__(self, id, s, returns_queue, processes_by_worker, mortal=True, timeout=300,
max_plugins_output_length=8192, target=None, loaded_into='unknown',
http_daemon=None):
self.id = self.__class__.id
self.__class__.id += 1
self._mortal = mortal
self._idletime = 0
self._timeout = timeout
self.s = None
self.processes_by_worker = processes_by_worker
if is_android:
self._c = Queue() # Private Control queue for the Worker
else:
self._c = multiprocessing.Queue() # Private Control queue for the Worker
# By default, take our own code
if target is None:
target = self.work
self._process = Process(target=self._prework, args=(target, s, returns_queue, self._c))
self.returns_queue = returns_queue
self.max_plugins_output_length = max_plugins_output_length
self.i_am_dying = False
# Keep a trace where the worker is launch from (poller or reactionner?)
self.loaded_into = loaded_into
if os.name != 'nt':
self.http_daemon = http_daemon
else: # windows forker do not like serialize http/lock
self.http_daemon = None
def _prework(self, real_work, *args):
for handler in list(logger.handlers):
if isinstance(handler, BrokHandler):
logger.info("Cleaning BrokHandler %r from logger.handlers..", handler)
logger.removeHandler(handler)
real_work(*args)
def is_mortal(self):
return self._mortal
def start(self):
self._process.start()
# Kill the background process
# AND close correctly the queues (input and output)
# each queue got a thread, so close it too....
def terminate(self):
# We can just terminate process, not threads
if not is_android:
self._process.terminate()
# Is we are with a Manager() way
# there should be not such functions
if hasattr(self._c, 'close'):
self._c.close()
self._c.join_thread()
if hasattr(self.s, 'close'):
self.s.close()
self.s.join_thread()
def join(self, timeout=None):
self._process.join(timeout)
def is_alive(self):
return self._process.is_alive()
def is_killable(self):
return self._mortal and self._idletime > self._timeout
def add_idletime(self, time):
self._idletime = self._idletime + time
def reset_idle(self):
self._idletime = 0
def send_message(self, msg):
self._c.put(msg)
    # A zombie is immortal, so it cannot be killed anymore
def set_zombie(self):
self._mortal = False
# Get new checks if less than nb_checks_max
# If no new checks got and no check in queue,
# sleep for 1 sec
# REF: doc/shinken-action-queues.png (3)
def get_new_checks(self):
try:
            while len(self.checks) < self.processes_by_worker:
# print("I", self.id, "wait for a message")
msg = self.s.get(block=False)
if msg is not None:
self.checks.append(msg.get_data())
# print("I", self.id, "I've got a message!")
except Empty as exp:
if len(self.checks) == 0:
self._idletime = self._idletime + 1
time.sleep(1)
# Maybe the Queue() is not available, if so, just return
# get back to work :)
except IOError as exp:
return
    # Launch the checks that are in status 'queue'
    # REF: doc/shinken-action-queues.png (4)
    def launch_new_checks(self):
for chk in self.checks:
if chk.status == 'queue':
self._idletime = 0
r = chk.execute()
                # Maybe we hit a really big problem while launching the action
                if r == 'toomanyopenfiles':
                    # We should die as soon as we have returned all checks
                    logger.error("[%d] I am dying: too many open files! (%s)", self.id, chk)
self.i_am_dying = True
# Check the status of checks
# if done, return message finished :)
# REF: doc/shinken-action-queues.png (5)
def manage_finished_checks(self):
to_del = []
wait_time = 1
now = time.time()
for action in self.checks:
if action.status == 'launched' and action.last_poll < now - action.wait_time:
action.check_finished(self.max_plugins_output_length)
wait_time = min(wait_time, action.wait_time)
# If action done, we can launch a new one
if action.status in ('done', 'timeout'):
to_del.append(action)
# We answer to the master
# msg = Message(id=self.id, type='Result', data=action)
try:
self.returns_queue.put(action)
except IOError as exp:
logger.error("[%d] Exiting: %s", self.id, exp)
sys.exit(2)
        # Keep the smallest wait time for the next loop
self.wait_time = wait_time
for chk in to_del:
self.checks.remove(chk)
# Little sleep
time.sleep(wait_time)
    # Check if the system time has changed. If so, return the difference
def check_for_system_time_change(self):
now = time.time()
difference = now - self.t_each_loop
# Now set the new value for the tick loop
self.t_each_loop = now
        # Return the diff if it is significant, or just 0
if abs(difference) > 900:
return difference
else:
return 0
# Wrapper function for work in order to catch the exception
# to see the real work, look at do_work
def work(self, s, returns_queue, c):
try:
self.do_work(s, returns_queue, c)
        # Catch any exception, try to print it and exit anyway
except Exception as exp:
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Worker '%d' exit with an unmanaged exception : %s",
self.id, output.getvalue())
output.close()
# Ok I die now
raise
    # id = id of the worker
    # s = global queue master->slave
    # returns_queue = queue slave->master, managed by the manager
    # c = control queue for the worker
def do_work(self, s, returns_queue, c):
# restore default signal handler for the workers:
# but on android, we are a thread, so don't do it
if not is_android:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.set_proctitle()
print("I STOP THE http_daemon", self.http_daemon)
if self.http_daemon:
self.http_daemon.shutdown()
timeout = 1.0
self.checks = []
self.returns_queue = returns_queue
self.s = s
self.t_each_loop = time.time()
while True:
begin = time.time()
msg = None
cmsg = None
            # If we are dying (big problem!) we do not
            # take new jobs, we just finish the current ones
if not self.i_am_dying:
# REF: doc/shinken-action-queues.png (3)
self.get_new_checks()
# REF: doc/shinken-action-queues.png (4)
self.launch_new_checks()
# REF: doc/shinken-action-queues.png (5)
self.manage_finished_checks()
# Now get order from master
try:
cmsg = c.get(block=False)
if cmsg.get_type() == 'Die':
logger.debug("[%d] Dad say we are dying...", self.id)
break
except Exception:
pass
# Look if we are dying, and if we finish all current checks
# if so, we really die, our master poller will launch a new
# worker because we were too weak to manage our job :(
if len(self.checks) == 0 and self.i_am_dying:
logger.warning("[%d] I DIE because I cannot do my job as I should"
"(too many open files?)... forgot me please.", self.id)
break
            # Manage a possible time change (our 'begin' will be shifted by the diff)
diff = self.check_for_system_time_change()
begin += diff
timeout -= time.time() - begin
if timeout < 0:
timeout = 1.0
def set_proctitle(self):
setproctitle("shinken-%s worker" % self.loaded_into)
| 10,727 | Python | .py | 268 | 30.664179 | 95 | 0.595696 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,453 | contactdowntime.py | shinken-solutions_shinken/shinken/contactdowntime.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken.log import logger
""" TODO: Add some comment about this class for the doc"""
class ContactDowntime(object):
id = 1
    # List the properties we will send via pickle
    # to the other daemons: everything but NOT the ref
properties = {
# 'activate_me': None,
# 'entry_time': None,
# 'fixed': None,
'start_time': None,
# 'duration': None,
# 'trigger_id': None,
'end_time': None,
# 'real_end_time': None,
'author': None,
'comment': None,
'is_in_effect': None,
# 'has_been_triggered': None,
'can_be_deleted': None,
}
    # Schedule a contact downtime. It is far simpler than a host/service one
    # because we only need a beginning and an end at run time.
    # We also keep an author and a comment for logging purposes.
def __init__(self, ref, start_time, end_time, author, comment):
self.id = self.__class__.id
self.__class__.id += 1
        self.ref = ref  # pointer to the contact we apply to
self.start_time = start_time
self.end_time = end_time
self.author = author
self.comment = comment
self.is_in_effect = False
self.can_be_deleted = False
# self.add_automatic_comment()
    # Check whether this downtime becomes active or expires now
def check_activation(self):
now = time.time()
was_is_in_effect = self.is_in_effect
self.is_in_effect = (self.start_time <= now <= self.end_time)
logger.debug("CHECK ACTIVATION:%s", self.is_in_effect)
# Raise a log entry when we get in the downtime
if not was_is_in_effect and self.is_in_effect:
self.enter()
# Same for exit purpose
if was_is_in_effect and not self.is_in_effect:
self.exit()
def in_scheduled_downtime(self):
return self.is_in_effect
    # The referenced contact now enters a (or another) scheduled
    # downtime. Write a log message only if it was not already in one
def enter(self):
self.ref.raise_enter_downtime_log_entry()
# The end of the downtime was reached.
def exit(self):
self.ref.raise_exit_downtime_log_entry()
self.can_be_deleted = True
# A scheduled downtime was prematurely canceled
def cancel(self):
self.is_in_effect = False
self.ref.raise_cancel_downtime_log_entry()
self.can_be_deleted = True
def __getstate__(self):
# print("Asking a getstate for a downtime on", self.ref.get_dbg_name())
cls = self.__class__
# id is not in *_properties
res = [self.id]
for prop in cls.properties:
res.append(getattr(self, prop))
        # We reverse because __setstate__ pops the values back
        # while walking the properties in the same order
res.reverse()
return res
# Inverted function of getstate
def __setstate__(self, state):
cls = self.__class__
self.id = state.pop()
for prop in cls.properties:
val = state.pop()
setattr(self, prop, val)
if self.id >= cls.id:
cls.id = self.id + 1
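# A minimal, illustrative sketch of the downtime life cycle (not part of the
# original module). 'DummyContact' is a hypothetical stand-in providing only
# the three log-entry hooks that ContactDowntime calls back into.
if __name__ == '__main__':
    class DummyContact(object):
        def raise_enter_downtime_log_entry(self):
            print("CONTACT DOWNTIME STARTED")
        def raise_exit_downtime_log_entry(self):
            print("CONTACT DOWNTIME STOPPED")
        def raise_cancel_downtime_log_entry(self):
            print("CONTACT DOWNTIME CANCELLED")
    now = time.time()
    dt = ContactDowntime(DummyContact(), now - 1, now + 600, 'admin', 'maintenance')
    dt.check_activation()               # start_time <= now <= end_time, so enter() fires
    assert dt.in_scheduled_downtime()
    dt.cancel()                         # premature cancellation
    assert dt.can_be_deleted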
| 4,248 | Python | .py | 106 | 33.556604 | 82 | 0.63903 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,454 | check.py | shinken-solutions_shinken/shinken/check.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.action import Action
from shinken.property import BoolProp, IntegerProp, FloatProp
from shinken.property import StringProp
class Check(Action):
""" ODO: Add some comment about this class for the doc"""
# AutoSlots create the __slots__ with properties and
# running_properties names
# FIXME : reenable AutoSlots if possible
# __metaclass__ = AutoSlots
my_type = 'check'
properties = {
'is_a': StringProp(default='check'),
'type': StringProp(default=''),
'in_timeout': BoolProp(default=False),
'status': StringProp(default=''),
'exit_status': IntegerProp(default=3),
'state': IntegerProp(default=0),
'output': StringProp(default=''),
'long_output': StringProp(default=''),
'ref': IntegerProp(default=-1),
't_to_go': IntegerProp(default=0),
'depend_on': StringProp(default=[]),
'dep_check': StringProp(default=[]),
'check_time': IntegerProp(default=0),
'execution_time': FloatProp(default=0.0),
'u_time': FloatProp(default=0.0),
's_time': FloatProp(default=0.0),
'perf_data': StringProp(default=''),
'check_type': IntegerProp(default=0),
'poller_tag': StringProp(default='None'),
'reactionner_tag': StringProp(default='None'),
'env': StringProp(default={}),
'internal': BoolProp(default=False),
'module_type': StringProp(default='fork'),
'worker': StringProp(default='none'),
'from_trigger': BoolProp(default=False),
'check_variant': StringProp(default='state'),
'priority': IntegerProp(default=100),
}
def __init__(self, status, command, ref, t_to_go, dep_check=None, id=None,
timeout=10, poller_tag='None', reactionner_tag='None',
env={}, module_type='fork', from_trigger=False,
dependency_check=False, check_variant='state', priority=100):
self.is_a = 'check'
self.type = ''
if id is None: # id != None is for copy call only
self.id = Action.id
Action.id += 1
self.in_timeout = False
self.timeout = timeout
self.status = status
self.exit_status = 3
self.command = command
self.output = ''
self.long_output = ''
self.ref = ref
# self.ref_type = ref_type
self.t_to_go = t_to_go
self.depend_on = []
if dep_check is None:
self.depend_on_me = []
else:
self.depend_on_me = [dep_check]
self.check_time = 0
self.execution_time = 0
        self.u_time = 0  # user execution time
self.s_time = 0 # system execution time
self.perf_data = ''
self.check_type = 0 # which kind of check result? 0=active 1=passive
self.poller_tag = poller_tag
self.reactionner_tag = reactionner_tag
self.module_type = module_type
self.env = env
# we keep the reference of the poller that will take us
self.worker = 'none'
        # If it's a business rule or an internal command, manage it as a special check
        if (ref and ref.got_business_rule) or command.startswith('_internal'):
self.internal = True
else:
self.internal = False
self.from_trigger = from_trigger
self.dependency_check = dependency_check
self.check_variant = check_variant
self.priority = priority
def copy_shell(self):
"""return a copy of the check but just what is important for execution
So we remove the ref and all
"""
# We create a dummy check with nothing in it, just defaults values
return self.copy_shell__(Check('', '', '', '', '', id=self.id))
def get_return_from(self, c):
self.exit_status = c.exit_status
self.output = c.output
self.long_output = c.long_output
self.check_time = c.check_time
self.execution_time = c.execution_time
self.perf_data = c.perf_data
self.u_time = c.u_time
self.s_time = c.s_time
if c.status == "timeout":
self.in_timeout = True
else:
self.in_timeout = False
def is_launchable(self, t):
return t > self.t_to_go
def __str__(self):
return "Check %d status:%s command:%s ref:%s" % \
(self.id, self.status, self.command, self.ref)
def get_id(self):
return self.id
def set_type_active(self):
self.check_type = 0
def set_type_passive(self):
self.check_type = 1
def is_dependent(self):
return self.dependency_check
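# A minimal, illustrative sketch (not part of the original module): the
# scheduler normally builds Check objects, but a bare one can be created with
# a None ref; the command line below is a hypothetical example.
if __name__ == '__main__':
    chk = Check('scheduled', 'check_ping -H localhost', None, 0)
    assert chk.is_launchable(1)        # 1 > t_to_go (0)
    assert not chk.internal            # neither a business rule nor an _internal command
    chk.set_type_passive()             # flag it as a passive result (check_type = 1)
    print(chk)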
| 5,880 | Python | .py | 139 | 34.71223 | 82 | 0.602553 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,455 | sorteddict.py | shinken-solutions_shinken/shinken/sorteddict.py |
#!/usr/bin/env python
#
# sorteddict.py
# Sorted dictionary (implementation for Python 2.x)
#
# Copyright (c) 2010 Jan Kaliszewski (zuo)
#
# The MIT License:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
from bisect import bisect_left, insort
from itertools import izip, repeat
def dictdoc(method):
"A decorator making reuse of the ordinary dict's docstrings more concise."
dict_method = getattr(dict, method.__name__)
if hasattr(dict_method, '__doc__'):
method.__doc__ = dict_method.__doc__
return method
class SortedDict(dict):
'''Dictionary with sorted keys.
The interface is similar to the ordinary dict's one, but:
* methods: __repr__(), __str__(), __iter__(), iterkeys(), itervalues(),
iteritems(), keys(), values(), items() and popitem() -- return results
taking into consideration sorted keys order;
* new methods: largest_key(), largest_item(), smallest_key(),
smallest_item() added.
'''
def __init__(self, *args, **kwargs):
'''Like with the ordinary dict: from a mapping, from an iterable
of (key, value) pairs, or from keyword arguments.'''
dict.__init__(self, *args, **kwargs)
self._sorted_keys = sorted(dict.iterkeys(self))
@dictdoc
def __repr__(self):
return 'SortedDict({%s})' % ', '.join('%r: %r' % item
for item in self.iteritems())
@dictdoc
def __str__(self):
return repr(self)
@dictdoc
def __setitem__(self, key, value):
key_is_new = key not in self
dict.__setitem__(self, key, value)
if key_is_new:
insort(self._sorted_keys, key)
@dictdoc
def __delitem__(self, key):
dict.__delitem__(self, key)
del self._sorted_keys[bisect_left(self._sorted_keys, key)]
def __iter__(self, reverse=False):
'''D.__iter__() <==> iter(D) <==> D.iterkeys() -> an iterator over
sorted keys (add reverse=True for reverse ordering).'''
if reverse:
return reversed(self._sorted_keys)
else:
return iter(self._sorted_keys)
iterkeys = __iter__
def itervalues(self, reverse=False):
'''D.itervalues() -> an iterator over values sorted by keys
(add reverse=True for reverse ordering).'''
return (self[key] for key in self.iterkeys(reverse))
def iteritems(self, reverse=False):
'''D.iteritems() -> an iterator over (key, value) pairs sorted by keys
(add reverse=True for reverse ordering).'''
return ((key, self[key]) for key in self.iterkeys(reverse))
def keys(self, reverse=False):
'''D.keys() -> a sorted list of keys
(add reverse=True for reverse ordering).'''
return list(self.iterkeys(reverse))
def values(self, reverse=False):
'''D.values() -> a list of values sorted by keys
(add reverse=True for reverse ordering).'''
return list(self.itervalues(reverse))
def items(self, reverse=False):
'''D.items() -> a list of (key, value) pairs sorted by keys
(add reverse=True for reverse ordering).'''
return list(self.iteritems(reverse))
@dictdoc
def clear(self):
dict.clear(self)
del self._sorted_keys[:]
def copy(self):
'''D.copy() -> a shallow copy of D (still as a SortedDict).'''
return self.__class__(self)
@classmethod
@dictdoc
def fromkeys(cls, seq, value=None):
return cls(izip(seq, repeat(value)))
@dictdoc
def pop(self, key, *args, **kwargs):
if key in self:
del self._sorted_keys[bisect_left(self._sorted_keys, key)]
return dict.pop(self, key, *args, **kwargs)
def popitem(self):
'''D.popitem() -> (k, v). Remove and return a (key, value) pair with
the largest key; raise KeyError if D is empty.'''
try:
key = self._sorted_keys.pop()
except IndexError:
raise KeyError('popitem(): dictionary is empty')
else:
return key, dict.pop(self, key)
@dictdoc
def setdefault(self, key, default=None):
if key not in self:
insort(self._sorted_keys, key)
return dict.setdefault(self, key, default)
@dictdoc
def update(self, other=()):
if hasattr(other, 'keys') and hasattr(other, 'values'):
# mapping
newkeys = [key for key in other if key not in self]
else:
# iterator/sequence of pairs
other = list(other)
newkeys = [key for key, _ in other if key not in self]
dict.update(self, other)
for key in newkeys:
insort(self._sorted_keys, key)
def largest_key(self):
'''D.largest_key() -> the largest key; raise KeyError if D is empty.'''
try:
return self._sorted_keys[-1]
except IndexError:
raise KeyError('largest_key(): dictionary is empty')
def largest_item(self):
'''D.largest_item() -> a (key, value) pair with the largest key;
raise KeyError if D is empty.'''
key = self.largest_key()
return key, self[key]
def smallest_key(self):
'''D.smallest_key() -> the smallest key; raise KeyError if D is empty.'''
try:
return self._sorted_keys[0]
except IndexError:
raise KeyError('smallest_key(): dictionary is empty')
def smallest_item(self):
'''D.smallest_item() -> a (key, value) pair with the smallest key;
raise KeyError if D is empty.'''
key = self.smallest_key()
return key, self[key]
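# Illustrative usage (Python 2 only, matching the module header above):
if __name__ == '__main__':
    d = SortedDict({'b': 2, 'a': 1})
    d['c'] = 3
    assert d.keys() == ['a', 'b', 'c']          # keys always come back sorted
    assert d.smallest_item() == ('a', 1)
    assert d.popitem() == ('c', 3)              # pops the pair with the largest key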
| 6,750 | Python | .py | 158 | 35.493671 | 82 | 0.63247 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,456 | pollerlink.py | shinken-solutions_shinken/shinken/pollerlink.py |
'''shinken.pollerlink is deprecated. Please use shinken.objects.pollerlink now.'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.old_daemon_link import make_deprecated_daemon_link
from shinken.objects import pollerlink
make_deprecated_daemon_link(pollerlink)
| 313 | Python | .py | 5 | 60.8 | 82 | 0.832237 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,457 | profilermgr.py | shinken-solutions_shinken/shinken/profilermgr.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import signal
import time
class Sampler(object):
"""
A simple stack sampler for low-overhead CPU profiling: samples the call
stack every `interval` seconds and keeps track of counts by frame. Because
this uses signals, it only works on the main thread.
"""
def __init__(self, interval=0.005):
self.interval = interval
self._started = None
self._stack_counts = collections.defaultdict(int)
self.nb_sig = 0
def start(self):
self._started = time.time()
try:
signal.signal(signal.SIGVTALRM, self._sample)
except ValueError:
raise ValueError('Can only sample on the main thread')
signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, self.interval)
def _sample(self, signum, frame):
stack = []
while frame is not None:
stack.append(self._format_frame(frame))
frame = frame.f_back
self.nb_sig += 1
stack = ';'.join(reversed(stack))
self._stack_counts[stack] += 1
def _format_frame(self, frame):
return '{}({})'.format(frame.f_code.co_name,
frame.f_globals.get('__name__'))
    def output_stats(self):
        if self._started is None:
            return []  # keep the return type consistent: always a list of lines
elapsed = time.time() - self._started
lines = ['elapsed {}'.format(elapsed),
'granularity {}'.format(self.interval)]
ordered_stacks = sorted(self._stack_counts.items(),
key=lambda kv: kv[1], reverse=True)
lines.extend(['{} {}'.format(frame, count)
for frame, count in ordered_stacks])
return lines
def reset(self):
self._started = time.time()
self._stack_counts = collections.defaultdict(int)
profiler = Sampler()
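# Illustrative usage (POSIX only, and only from the main thread, as the
# docstring above notes):
if __name__ == '__main__':
    sampler = Sampler(interval=0.01)
    sampler.start()
    total = sum(i * i for i in range(500000))   # burn some CPU so ITIMER_VIRTUAL fires
    for line in sampler.output_stats():
        print(line)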
| 1,940 | Python | .py | 48 | 31.166667 | 82 | 0.604899 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,458 | db_sqlite.py | shinken-solutions_shinken/shinken/db_sqlite.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.db import DB  # the implicit relative 'from db import DB' breaks under absolute_import
from shinken.log import logger
import sqlite3
class DBSqlite(DB):
"""DBSqlite is a sqlite access database class"""
def __init__(self, db_path, table_prefix=''):
self.table_prefix = table_prefix
self.db_path = db_path
def connect_database(self):
"""Create the database connection"""
self.db = sqlite3.connect(self.db_path)
self.db_cursor = self.db.cursor()
def execute_query(self, query):
"""Just run the query"""
logger.debug("[SqliteDB] Info: I run query '%s'", query)
self.db_cursor.execute(query)
self.db.commit()
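# Illustrative usage with an in-memory database (':memory:' is standard
# sqlite3; the table and values below are hypothetical):
if __name__ == '__main__':
    db = DBSqlite(':memory:')
    db.connect_database()
    db.execute_query("CREATE TABLE hosts (name TEXT)")
    db.execute_query("INSERT INTO hosts VALUES ('srv-01')")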
| 1,640 | Python | .py | 40 | 37.575 | 82 | 0.715273 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,459 | comment.py | shinken-solutions_shinken/shinken/comment.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken.log import logger
""" TODO: Add some comment about this class for the doc"""
class Comment(object):
id = 1
properties = {
'entry_time': None,
'persistent': None,
'author': None,
'comment': None,
'comment_type': None,
'entry_type': None,
'source': None,
'expires': None,
'expire_time': None,
'can_be_deleted': None,
# TODO: find a very good way to handle the downtime "ref".
# ref must effectively not be in properties because it points
# onto a real object.
# 'ref': None
}
    # Adds a comment to a particular host or service. If the "persistent"
    # field is set to zero (0), the comment will be deleted the next time
    # Shinken is restarted. Otherwise, the comment will persist
    # across program restarts until it is deleted manually.
def __init__(self, ref, persistent, author, comment, comment_type, entry_type, source, expires,
expire_time):
self.id = self.__class__.id
self.__class__.id += 1
        self.ref = ref  # pointer to the srv or host we apply to
self.entry_time = int(time.time())
self.persistent = persistent
self.author = author
self.comment = comment
# Now the hidden attributes
# HOST_COMMENT=1,SERVICE_COMMENT=2
self.comment_type = comment_type
# USER_COMMENT=1,DOWNTIME_COMMENT=2,FLAPPING_COMMENT=3,ACKNOWLEDGEMENT_COMMENT=4
self.entry_type = entry_type
# COMMENTSOURCE_INTERNAL=0,COMMENTSOURCE_EXTERNAL=1
self.source = source
self.expires = expires
self.expire_time = expire_time
self.can_be_deleted = False
def __str__(self):
return "Comment id=%d %s" % (self.id, self.comment)
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverted function of getstate
def __setstate__(self, state):
cls = self.__class__
# Maybe it's not a dict but a list like in the old 0.4 format
# so we should call the 0.4 function for it
if isinstance(state, list):
self.__setstate_deprecated__(state)
return
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
# to prevent from duplicating id in comments:
if self.id >= cls.id:
cls.id = self.id + 1
# This function is DEPRECATED and will be removed in a future version of
# Shinken. It should not be useful any more after a first load/save pass.
# Inverted function of getstate
def __setstate_deprecated__(self, state):
cls = self.__class__
        # Check that the length of this state matches the expected one,
        # otherwise we would raise errors!
        # -1 because of the 'id' prop
        if len(cls.properties) != (len(state) - 1):
            logger.debug("Skipping comment with an unexpected state length")
return
self.id = state.pop()
for prop in cls.properties:
val = state.pop()
setattr(self, prop, val)
if self.id >= cls.id:
cls.id = self.id + 1
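# A minimal, illustrative sketch of the __getstate__/__setstate__ round-trip
# (not part of the original module; the None ref is a placeholder, real
# callers pass a host or service object):
if __name__ == '__main__':
    c = Comment(ref=None, persistent=1, author='admin', comment='checking disk',
                comment_type=1, entry_type=1, source=0, expires=False,
                expire_time=0)
    state = c.__getstate__()
    c2 = Comment.__new__(Comment)
    c2.__setstate__(state)
    assert c2.comment == 'checking disk' and c2.id == c.id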
| 4,392 | Python | .py | 108 | 33.435185 | 99 | 0.627226 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,460 | modulesctx.py | shinken-solutions_shinken/shinken/modulesctx.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from shinken.modulesmanager import ModulesManager
class ModulesContext(object):
def __init__(self):
self.modules_dir = None
def set_modulesdir(self, modulesdir):
self.modules_dir = modulesdir
def get_modulesdir(self):
return self.modules_dir
# Useful for a module to load another one, and get a handler to it
def get_module(self, mod_name):
if self.modules_dir and self.modules_dir not in sys.path:
sys.path.append(self.modules_dir)
if self.modules_dir:
mod_dir = os.path.join(self.modules_dir, mod_name)
else:
mod_dir = None
# to keep it back-compatible with previous Shinken module way,
# we first try with "import `mod_name`.module" and if we succeed
# then that's the one to actually use:
mod = ModulesManager.try_best_load('.module', mod_name)
if mod:
return mod
# otherwise simply try new and old style:
return ModulesManager.try_load(mod_name, mod_dir)
modulesctx = ModulesContext()
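# Illustrative usage (commented out: module loading touches the filesystem;
# the directory and module name below are hypothetical):
#
#   from shinken.modulesctx import modulesctx
#   modulesctx.set_modulesdir('/var/lib/shinken/modules')
#   livestatus = modulesctx.get_module('livestatus')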
| 2,104 | Python | .py | 50 | 37.38 | 82 | 0.708476 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,461 | graph.py | shinken-solutions_shinken/shinken/graph.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
class Graph(object):
"""Graph is a class to make graph things like DFS checks or accessibility
Why use an atomic bomb when a little hammer is enough?
"""
def __init__(self):
self.nodes = {}
# Do not call twice...
def add_node(self, node):
self.nodes[node] = []
# Just loop over nodes
def add_nodes(self, nodes):
for node in nodes:
self.add_node(node)
# Add an edge to the graph from->to
def add_edge(self, from_node, to_node):
# Maybe to_node is unknown
if to_node not in self.nodes:
self.add_node(to_node)
try:
self.nodes[from_node].append(to_node)
# If from_node does not exist, add it with its son
except KeyError as exp:
self.nodes[from_node] = [to_node]
# Return all nodes that are in a loop. So if return [], no loop
def loop_check(self):
in_loop = []
# Add the tag for dfs check
for node in self.nodes:
node.dfs_loop_status = 'DFS_UNCHECKED'
# Now do the job
for node in self.nodes:
            # Run the dfs only if the node has not already been checked
if node.dfs_loop_status == 'DFS_UNCHECKED':
self.dfs_loop_search(node)
# If LOOP_INSIDE, must be returned
if node.dfs_loop_status == 'DFS_LOOP_INSIDE':
in_loop.append(node)
# Remove the tag
for node in self.nodes:
del node.dfs_loop_status
return in_loop
# DFS_UNCHECKED default value
# DFS_TEMPORARY_CHECKED check just one time
# DFS_OK no problem for node and its children
# DFS_NEAR_LOOP has trouble sons
# DFS_LOOP_INSIDE is a part of a loop!
def dfs_loop_search(self, root):
# Make the root temporary checked
root.dfs_loop_status = 'DFS_TEMPORARY_CHECKED'
# We are scanning the sons
for child in self.nodes[root]:
child_status = child.dfs_loop_status
# If a child is not checked, check it
if child_status == 'DFS_UNCHECKED':
self.dfs_loop_search(child)
child_status = child.dfs_loop_status
            # If a child has already been temporarily checked, it's a problem:
            # we are inside a loop, so tag both statuses accordingly
if child_status == 'DFS_TEMPORARY_CHECKED':
child.dfs_loop_status = 'DFS_LOOP_INSIDE'
root.dfs_loop_status = 'DFS_LOOP_INSIDE'
# If a child has already been temporary checked, it's a problem, loop inside
if child_status in ('DFS_NEAR_LOOP', 'DFS_LOOP_INSIDE'):
# if a node is known to be part of a loop, do not let it be less
if root.dfs_loop_status != 'DFS_LOOP_INSIDE':
root.dfs_loop_status = 'DFS_NEAR_LOOP'
# We've already seen this child, it's a problem
child.dfs_loop_status = 'DFS_LOOP_INSIDE'
        # If root has been modified above, do not set it OK.
        # A node is OK if and only if all of its children are OK;
        # if it has no children, it is OK
if root.dfs_loop_status == 'DFS_TEMPORARY_CHECKED':
root.dfs_loop_status = 'DFS_OK'
# Get accessibility packs of the graph: in one pack,
# element are related in a way. Between packs, there is no relation
# at all.
# TODO: Make it work for directional graph too
# Because for now, edge must be father->son AND son->father
def get_accessibility_packs(self):
packs = []
# Add the tag for dfs check
for node in self.nodes:
node.dfs_loop_status = 'DFS_UNCHECKED'
for node in self.nodes:
            # Run the dfs only if the node is not already done
if node.dfs_loop_status == 'DFS_UNCHECKED':
packs.append(self.dfs_get_all_childs(node))
# Remove the tag
for node in self.nodes:
del node.dfs_loop_status
return packs
# Return all my children, and all my grandchildren
def dfs_get_all_childs(self, root):
root.dfs_loop_status = 'DFS_CHECKED'
ret = set()
# Me
ret.add(root)
# And my sons
ret.update(self.nodes[root])
for child in self.nodes[root]:
            # I just don't care about already checked children
if child.dfs_loop_status == 'DFS_UNCHECKED':
ret.update(self.dfs_get_all_childs(child))
return list(ret)
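# A minimal, illustrative sketch (not part of the original module); nodes
# only need to tolerate the temporary 'dfs_loop_status' attribute, so any
# plain object will do:
if __name__ == '__main__':
    class Node(object):
        def __init__(self, name):
            self.name = name
    a, b, c = Node('a'), Node('b'), Node('c')
    g = Graph()
    g.add_nodes([a, b, c])
    g.add_edge(a, b)
    g.add_edge(b, c)
    assert g.loop_check() == []                  # still acyclic
    g.add_edge(c, a)                             # close the cycle a -> b -> c -> a
    assert sorted(n.name for n in g.loop_check()) == ['a', 'b', 'c']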
| 5,542 | Python | .py | 127 | 35.110236 | 88 | 0.623909 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,462 | daemon.py | shinken-solutions_shinken/shinken/daemon.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import errno
import stat
import sys
import time
import signal
import select
import random
import threading
import traceback
import logging
import inspect
import subprocess
import socket
import io
if six.PY2:
import ConfigParser as configparser
from Queue import Empty
else:
from queue import Empty
import configparser
# Try to see if we are in an android device or not
try:
import android
is_android = True
except ImportError:
is_android = False
from multiprocessing.managers import SyncManager
import shinken.http_daemon
from shinken.http_daemon import HTTPDaemon, InvalidWorkDir
from shinken.log import logger
from shinken.stats import statsmgr
from shinken.modulesctx import modulesctx
from shinken.modulesmanager import ModulesManager
from shinken.property import StringProp, BoolProp, PathProp, ConfigPathProp, IntegerProp,\
LogLevelProp
from shinken.misc.common import setproctitle
from shinken.profilermgr import profiler
from shinken.util import get_memory
from shinken.serializer import serialize, deserialize
try:
import pwd
import grp
from pwd import getpwnam
from grp import getgrnam, getgrall
def get_cur_user():
return pwd.getpwuid(os.getuid()).pw_name
def get_cur_group():
return grp.getgrgid(os.getgid()).gr_name
def get_all_groups():
return getgrall()
except ImportError as exp: # Like in nt system or Android
# temporary workaround:
def get_cur_user():
return "shinken"
def get_cur_group():
return "shinken"
def get_all_groups():
return []
# ######################### DAEMON PART ###############################
# The standard I/O file descriptors are redirected to /dev/null by default.
REDIRECT_TO = getattr(os, "devnull", "/dev/null")
UMASK = 0o27
from shinken.bin import VERSION
""" TODO: Add some comment about this class for the doc"""
class InvalidPidFile(Exception):
pass
""" Interface for Inter satellites communications """
class Interface(object):
# 'app' is to be set to the owner of this interface.
def __init__(self, app):
self.app = app
self.start_time = int(time.time())
self.running_id = "%d.%d" % (
self.start_time, random.randint(0, 100000000)
)
doc = 'Test the connection to the daemon. Returns: pong'
def ping(self):
return "pong"
ping.need_lock = False
ping.doc = doc
doc = 'Profiling data'
def profiling_data(self):
return profiler.output_stats()
profiling_data.need_lock = False
profiling_data.doc = doc
doc = 'Get the start time of the daemon'
def get_start_time(self):
return self.start_time
doc = 'Get the current running id of the daemon (scheduler)'
def get_running_id(self):
return self.running_id
get_running_id.need_lock = False
get_running_id.doc = doc
doc = 'Send a new configuration to the daemon (internal)'
def put_conf(self, conf):
self.app.new_conf = conf
put_conf.method = 'PUT'
put_conf.doc = doc
doc = 'Ask the daemon to wait a new conf'
def wait_new_conf(self):
self.app.cur_conf = None
wait_new_conf.need_lock = False
wait_new_conf.doc = doc
doc = 'Does the daemon got an active configuration'
def have_conf(self):
return self.app.cur_conf is not None
have_conf.need_lock = False
have_conf.doc = doc
doc = 'Set the current log level in [NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL, UNKNOWN]'
def set_log_level(self, loglevel):
return logger.setLevel(loglevel)
set_log_level.doc = doc
doc = 'Get the current log level in [NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL, UNKNOWN]'
def get_log_level(self):
return {logging.NOTSET: 'NOTSET',
logging.DEBUG: 'DEBUG',
logging.INFO: 'INFO',
logging.WARNING: 'WARNING',
logging.ERROR: 'ERROR',
logging.CRITICAL: 'CRITICAL'}.get(logger._level, 'UNKNOWN')
get_log_level.doc = doc
doc = 'List the methods available on the daemon'
def api(self):
return self.app.http_daemon.registered_fun_names
api.doc = doc
doc = 'List the api methods and their parameters'
def api_full(self):
res = {}
for (fname, f) in self.app.http_daemon.registered_fun.items():
fclean = fname.replace('_', '-')
argspec = inspect.getargspec(f)
args = [a for a in argspec.args if a != 'self']
defaults = self.app.http_daemon.registered_fun_defaults.get(fname, {})
e = {}
# Get a string about the args and co
_s_nondef_args = ', '.join([a for a in args if a not in defaults])
_s_def_args = ', '.join(['%s=%s' % (k, v) for (k, v) in defaults.items()])
_s_args = ''
if _s_nondef_args:
_s_args += _s_nondef_args
if _s_def_args:
_s_args += ', ' + _s_def_args
e['proto'] = '%s(%s)' % (fclean, _s_args)
e['need_lock'] = getattr(f, 'need_lock', True)
e['method'] = getattr(f, 'method', 'GET').upper()
e['encode'] = getattr(f, 'encode', 'json')
doc = getattr(f, 'doc', '')
if doc:
e['doc'] = doc
res[fclean] = e
return res
    api_full.doc = doc
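    # Illustrative shape of one api_full() entry (the values are hypothetical):
    #   {'set-log-level': {'proto': 'set-log-level(loglevel)',
    #                      'need_lock': True, 'method': 'GET',
    #                      'encode': 'json', 'doc': '...'}}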
# If we are under android, we can't give parameters
if is_android:
DEFAULT_WORK_DIR = '/sdcard/sl4a/scripts/'
DEFAULT_LIB_DIR = DEFAULT_WORK_DIR
else:
DEFAULT_WORK_DIR = '/var/run/shinken/'
DEFAULT_LIB_DIR = '/var/lib/shinken/'
class Daemon(object):
properties = {
# workdir is relative to $(dirname "$0"/..)
# where "$0" is the path of the file being executed,
# in python normally known as:
#
# os.path.join( os.getcwd(), sys.argv[0] )
#
# as returned once the daemon is started.
'workdir': PathProp(default=DEFAULT_WORK_DIR),
'modules_dir': PathProp(default=os.path.join(DEFAULT_LIB_DIR, 'modules')),
'host': StringProp(default='0.0.0.0'),
'user': StringProp(default=get_cur_user()),
'group': StringProp(default=get_cur_group()),
'use_ssl': BoolProp(default=False),
'server_key': StringProp(default='etc/certs/server.key'),
'ca_cert': StringProp(default='etc/certs/ca.pem'),
'server_cert': StringProp(default='etc/certs/server.cert'),
'use_local_log': BoolProp(default=True),
'log_level': LogLevelProp(default='WARNING'),
'hard_ssl_name_check': BoolProp(default=False),
'idontcareaboutsecurity': BoolProp(default=False),
'daemon_enabled': BoolProp(default=True),
'graceful_enabled': BoolProp(default=False),
'aggressive_memory_management': BoolProp(default=False),
'spare': BoolProp(default=False),
'max_queue_size': IntegerProp(default=0),
'daemon_thread_pool_size': IntegerProp(default=16),
'http_backend': StringProp(default='auto'),
'graceful_timeout': IntegerProp(default=60),
}
def __init__(self, name, config_file, is_daemon, do_replace, debug, debug_file):
self.check_shm()
self.name = name
self.config_file = config_file
self.is_daemon = is_daemon
self.do_replace = do_replace
self.debug = debug
self.debug_file = debug_file
self.interrupted = False
# Track time
now = time.time()
self.program_start = now
self.t_each_loop = now # used to track system time change
self.sleep_time = 0.0 # used to track the time we wait
self.http_daemon = None
# Log init
# self.log = logger
# self.log.load_obj(self)
logger.load_obj(self)
self.new_conf = None # used by controller to push conf
self.cur_conf = None
self.raw_conf = None
# Flag to know if we need to dump memory or not
self.need_dump_memory = False
# Flag to dump objects or not
self.need_objects_dump = False
# Keep a trace of the local_log file desc if needed
self.local_log_fd = None
# Put in queue some debug output we will raise
# when we will be in daemon
self.debug_output = []
# We will initialize the Manager() when we load modules
# and be really forked()
self.manager = None
os.umask(UMASK)
self.set_exit_handler()
    # At last, close the local log file if needed
def do_stop(self):
# Maybe the modules manager is not even created!
if getattr(self, 'modules_manager', None):
# We save what we can but NOT for the scheduler
# because the current sched object is a dummy one
# and the old one has already done it!
if not hasattr(self, 'sched'):
self.hook_point('save_retention')
# And we quit
print('Stopping all modules')
self.modules_manager.stop_all()
print('Stopping inter-process message')
if self.manager:
try:
self.manager.shutdown()
except Exception as e:
logger.error("failed to stop sync manager: %s", e)
if self.http_daemon:
# Release the lock so the daemon can shutdown without problem
try:
self.http_daemon.lock.release()
except Exception:
pass
self.http_daemon.shutdown()
def request_stop(self):
self.unlink()
self.do_stop()
        # Brok facilities are no longer available; simply print the message to STDOUT
msg = "Stopping daemon. Exiting"
logger.info(msg)
print(msg)
sys.exit(0)
# Maybe this daemon is configured to NOT run, if so, bailout
def look_for_early_exit(self):
if not self.daemon_enabled:
logger.info('This daemon is disabled in configuration. Bailing out')
self.request_stop()
def do_loop_turn(self):
raise NotImplementedError()
# Respawn daemon and send it received configuration
def switch_process(self):
logger.info("Gracefully reloading daemon: %s", " ".join(sys.argv))
env = os.environ.copy()
try:
p = subprocess.Popen(sys.argv,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=os.setsid,
env=env)
        except Exception as e:
            # 'p' does not exist if Popen itself failed, so only log the error
            logger.error("Failed to spawn child [err=%s]", e)
            return False
logger.info("Reloading daemon [pid=%s]", p.pid)
try:
raw_conf = serialize(self.new_conf)
p.stdin.write(raw_conf)
p.stdin.close()
except Exception as e:
stdout = p.stdout.read()
logger.error("Failed to send configuration to spawned child "
"[pid=%s] [retcode=%s] [err=%s] stdout=[%s]",
p.pid, p.returncode, e, stdout)
return False
if p.poll() is not None:
stdout = p.stdout.read()
p.wait()
logger.error("Failed to spawn child [pid=%s] [retcode=%s] "
"[stdout=%s]" % p.pid, p.returncode, stdout)
logger.info("Resuming normal operations without switching process")
return False
self.request_stop()
return True
def is_switched_process(self):
mode = os.fstat(0).st_mode
data_available = stat.S_ISFIFO(mode) or stat.S_ISREG(mode)
return self.graceful_enabled and self.is_daemon and data_available
def wait_parent_exit(self):
logger.info("Waiting for parent to stop")
now = time.time()
if self.host == "0.0.0.0":
host = "127.0.0.1"
else:
host = self.host
timeout = self.graceful_timeout
while True:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
result = sock.connect_ex((host, self.port))
logger.info("waiting parent: %s" % result)
if result in (0, 110):
sock.close()
time.sleep(0.5)
else:
break
if timeout and time.time() - now > timeout:
break
# Loads configuration sent by parent process only if available
def load_parent_config(self):
if self.is_switched_process():
logger.info("Loading configuration from parent")
raw_config = sys.stdin.read()
new_conf = deserialize(raw_config)
logger.info("Successfully loaded configuration from parent")
logger.info("Waiting for parent to stop")
self.wait_parent_exit()
self.new_conf = new_conf
# Main loop for nearly all daemon
# the scheduler is not managed by it :'(
def do_mainloop(self):
while True:
self.do_loop_turn()
# If ask us to dump memory, do it
if self.need_dump_memory:
self.dump_memory()
self.need_dump_memory = False
if self.need_objects_dump:
logger.debug('Dumping objects')
self.need_objects_dump = False
# Maybe we ask us to die, if so, do it :)
if self.interrupted:
break
self.request_stop()
def do_load_modules(self):
self.modules_manager.load_and_init()
logger.info("I correctly loaded the modules: [%s]",
','.join([inst.get_name() for inst in self.modules_manager.instances]))
# Dummy method for adding broker to this daemon
def add(self, elt):
pass
def dump_memory(self):
logger.info("I dump my memory, it can take a minute")
try:
from guppy import hpy
hp = hpy()
logger.info(hp.heap())
except ImportError:
logger.warning('I do not have the module guppy for memory dump, please install it')
def load_config_file(self):
self.parse_config_file()
if self.config_file is not None:
# Some paths can be relatives. We must have a full path by taking
# the config file by reference
self.relative_paths_to_full(os.path.dirname(self.config_file))
def load_modules_manager(self):
if not modulesctx.get_modulesdir():
modulesctx.set_modulesdir(self.find_modules_path())
self.modules_manager = ModulesManager(self.name, self.find_modules_path(), [])
# Set the modules watchdogs
# TOFIX: Beware, the arbiter do not have the max_queue_size property
self.modules_manager.set_max_queue_size(getattr(self, 'max_queue_size', 0))
# And make the module manager load the sub-process Queue() manager
self.modules_manager.load_manager(self.manager)
# create a dir and set to my user
def __create_directory(self, d):
if not os.path.exists(d):
os.mkdir(d)
# And set the user as shinken so the sub-fork can
# reopen the pid when no more root
if os.name != 'nt':
uid = self.find_uid_from_name()
gid = self.find_gid_from_name()
os.chown(d, uid, gid)
def change_to_workdir(self):
self.workdir = os.path.abspath(self.workdir)
try:
# If the directory is missing, try to create it for me
if not os.path.exists(self.workdir):
self.__create_directory(self.workdir)
os.chdir(self.workdir)
except Exception as e:
raise InvalidWorkDir(e)
self.debug_output.append("Successfully changed to workdir: %s" % (self.workdir))
def unlink(self):
logger.debug("Unlinking %s", self.pidfile)
try:
os.unlink(self.pidfile)
except Exception as e:
logger.error("Got an error unlinking our pidfile: %s", e)
# Look if we need a local log or not
def register_local_log(self):
# The arbiter doesn't have such attribute
if hasattr(self, 'use_local_log') and self.use_local_log:
try:
# self.local_log_fd = self.log.register_local_log(self.local_log)
self.local_log_fd = logger.register_local_log(self.local_log)
except IOError as exp:
logger.error("Opening the log file '%s' failed with '%s'", self.local_log, exp)
sys.exit(2)
logger.info("Using the local log file '%s'", self.local_log)
# Only on linux: Check for /dev/shm write access
def check_shm(self):
shm_path = '/dev/shm'
if os.name == 'posix' and os.path.exists(shm_path):
# We get the access rights, and we check them
mode = stat.S_IMODE(os.lstat(shm_path)[stat.ST_MODE])
if not mode & stat.S_IWUSR or not mode & stat.S_IRUSR:
logger.critical("The directory %s is not writable or readable."
"Please make it read writable: %s", shm_path, shm_path)
sys.exit(2)
def __open_pidfile(self, write=False):
# if problem on opening or creating file it'll be raised to the caller:
try:
p = os.path.abspath(self.pidfile)
# Look if the pid directory is existing or not
# (some systems are cleaning /var/run directories, yes I look
# at you debian 8)
p_dir = os.path.dirname(p)
if not os.path.exists(p_dir):
self.__create_directory(p_dir)
self.debug_output.append("Opening pid file: %s" % p)
# Windows do not manage the rw+ mode,
# so we must open in read mode first, then reopen it write mode...
if not write and os.path.exists(p):
self.fpid = open(p, 'r+')
else: # If it doesn't exist too, we create it as void
self.fpid = open(p, 'w+')
except Exception as err:
raise InvalidPidFile(err)
# Check (in pidfile) if there isn't already a daemon running. If yes and
# do_replace: kill it.
# Keep in self.fpid the File object to the pidfile. Will be used by writepid.
def check_parallel_run(self):
# TODO: other daemon run on nt
if os.name == 'nt':
logger.warning("The parallel daemon check is not available on nt")
self.__open_pidfile(write=True)
return
# First open the pid file in open mode
self.__open_pidfile()
# If graceful restart is enabled, do not try to check/kill parent
        # daemon unless explicitly asked for, by giving the parent pid as pid
# parameter.
if self.graceful_enabled and self.new_conf is not None:
logger.info("Graceful restart required, delaying "
"check_parallel_run.")
return
try:
pid = int(self.fpid.readline().strip(' \r\n'))
except Exception as err:
logger.info("Stale pidfile exists at %s (%s). Reusing it.",
err, self.pidfile)
return
try:
os.kill(pid, 0)
except Exception as err: # consider any exception as a stale pidfile.
# this includes :
# * PermissionError when a process with same pid exists but is executed by another user
# * ProcessLookupError: [Errno 3] No such process
logger.info("Stale pidfile exists (%s), Reusing it.", err)
return
if not self.do_replace:
raise SystemExit("valid pidfile exists (pid=%s) and not forced "
"to replace. Exiting." % pid)
self.debug_output.append("Replacing previous instance %d" % pid)
try:
pgid = os.getpgid(pid)
os.killpg(pgid, signal.SIGQUIT)
except os.error as err:
if err.errno != errno.ESRCH:
raise
self.fpid.close()
# TODO: give some time to wait that previous instance finishes?
time.sleep(1)
# we must also reopen the pid file in write mode
# because the previous instance should have deleted it!!
self.__open_pidfile(write=True)
def write_pid(self, pid=None):
if pid is None:
pid = os.getpid()
self.fpid.seek(0)
self.fpid.truncate()
self.fpid.write("%d" % (pid))
self.fpid.close()
del self.fpid # no longer needed
# Close all the process file descriptors. Skip the descriptors
# present in the skip_close_fds list
def close_fds(self, skip_close_fds):
# First we manage the file descriptor, because debug file can be
# relative to pwd
import resource
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = 1024
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
if fd in skip_close_fds:
continue
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
# Go in "daemon" mode: close unused fds, redirect stdout/err,
# chdir, umask, fork-setsid-fork-writepid
def daemonize(self, skip_close_fds=None):
if skip_close_fds is None:
skip_close_fds = tuple()
self.debug_output.append("Redirecting stdout and stderr as necessary..")
if self.debug:
fdtemp = os.open(self.debug_file, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)
else:
fdtemp = os.open(REDIRECT_TO, os.O_RDWR)
# We close all fd but what we need:
self.close_fds(skip_close_fds + (self.fpid.fileno(), fdtemp))
os.dup2(fdtemp, 1) # standard output (1)
os.dup2(fdtemp, 2) # standard error (2)
# Now the fork/setsid/fork..
try:
pid = os.fork()
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
if pid != 0:
# In the father: we check if our child exit correctly
# it has to write the pid of our future little child..
def do_exit(sig, frame):
logger.error("Timeout waiting child while it should have "
"quickly returned ;something weird happened")
os.kill(pid, 9)
sys.exit(1)
# wait the child process to check its return status:
signal.signal(signal.SIGALRM, do_exit)
signal.alarm(3) # forking & writing a pid in a file should be rather quick..
# if it's not then something wrong can already be on the way so let's wait max
# 3 secs here.
pid, status = os.waitpid(pid, 0)
if status != 0:
logger.error("Something weird happened with/during second "
"fork: status= %s", status)
os._exit(status != 0)
# halfway to daemonize..
os.setsid()
try:
pid = os.fork()
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
if pid != 0:
# we are the last step and the real daemon is actually correctly created at least.
# we have still the last responsibility to write the pid of the daemon itself.
self.write_pid(pid)
os._exit(0)
self.fpid.close()
del self.fpid
self.pid = os.getpid()
self.debug_output.append("We are now fully daemonized :) pid=%d" % self.pid)
# We can now output some previously silenced debug output
logger.info("Printing stored debug messages prior to our daemonization")
for s in self.debug_output:
logger.info(s)
del self.debug_output
self.set_proctitle()
if is_android:
def _create_manager(self):
pass
else:
# The Manager is a sub-process, so we must be sure it won't have
# a socket of your http server alive
def _create_manager(self):
manager = SyncManager(('127.0.0.1', 0))
def close_http_daemon(daemon):
try:
# Be sure to release the lock so there won't be lock in shutdown phase
daemon.lock.release()
except Exception as exp:
pass
daemon.shutdown()
            # Some multiprocessing lib versions have a start() that cannot take
            # args, so we must inspect it before calling
if six.PY2:
startargs = inspect.getargspec(manager.start)
else:
startargs = inspect.getfullargspec(manager.start)
# startargs[0] will be ['self'] if old multiprocessing lib
# and ['self', 'initializer', 'initargs'] in newer ones
# note: Windows does not like pickling the http_daemon...
if os.name != 'nt' and len(startargs[0]) > 1:
manager.start(close_http_daemon, initargs=(self.http_daemon,))
else:
manager.start()
return manager
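# Illustrative note: SyncManager.start() forks a server process and runs the
# initializer inside that child, which is why close_http_daemon is passed via
# initargs above. A minimal equivalent sketch (hypothetical `handle`):
#
#   from multiprocessing.managers import SyncManager
#   def _cleanup(handle):
#       handle.shutdown()          # runs in the manager child only
#   m = SyncManager(('127.0.0.1', 0))
#   m.start(_cleanup, initargs=(handle,))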
# Main "go daemon" mode. Will launch the double fork(), close old file descriptor
# and such things to have a true DAEMON :D
# bind_port= open the TCP port for communication
# fake= use for test to do not launch runonly feature, like the stats reaper thread
def do_daemon_init_and_start(self, bind_port=True, fake=False):
self.change_to_workdir()
self.change_to_user_group()
self.check_parallel_run()
if bind_port:
self.setup_daemon()
# Setting log level
logger.setLevel(self.log_level)
# Force the debug level if the daemon is said to start with such level
if self.debug:
logger.setLevel('DEBUG')
# Then start logging to the local file if asked to
self.register_local_log()
if self.is_daemon:
socket_fds = [sock.fileno() for sock in self.http_daemon.get_sockets()]
# Do not close the local_log file too if it's open
if self.local_log_fd:
socket_fds.append(self.local_log_fd)
socket_fds = tuple(socket_fds)
self.daemonize(skip_close_fds=socket_fds)
else:
self.write_pid()
# Now we can start our Manager for the
# interprocess things. It's important!
self.manager = self._create_manager()
# We can start our stats thread, but only after the double fork() call and not in
# a test launch (time.time() is hooked there and would cause BIG problems)
if not fake:
statsmgr.launch_reaper_thread()
statsmgr.launch_harvester_thread()
# Now start the http_daemon thread
self.http_thread = None
if bind_port:
# Directly acquire it, so the http_thread will wait for us
self.http_daemon.lock.acquire()
self.http_thread = threading.Thread(None, self.http_daemon_thread, 'http_thread')
# Don't lock the main thread just because of the http thread
self.http_thread.daemon = True
self.http_thread.start()
# profiler.start()
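# Typical call sequence for a daemon subclass (illustrative; `MyDaemon` and
# its constructor arguments are hypothetical):
#
#   d = MyDaemon(config_file='/etc/shinken/schedulerd.ini')
#   d.do_daemon_init_and_start(bind_port=True)   # chdir, setuid, daemonize,
#                                                # then start the HTTP thread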
def setup_daemon(self):
if hasattr(self, 'use_ssl'): # "common" daemon
ssl_conf = self
else:
ssl_conf = self.conf # arbiter daemon..
use_ssl = ssl_conf.use_ssl
ca_cert = ssl_cert = ssl_key = ''
http_backend = self.http_backend
# The SSL part
if use_ssl:
ssl_cert = os.path.abspath(ssl_conf.server_cert)
if not os.path.exists(ssl_cert):
logger.error('Error: the SSL certificate %s is missing (server_cert). '
'Please fix it in your configuration', ssl_cert)
sys.exit(2)
ca_cert = os.path.abspath(ssl_conf.ca_cert)
logger.info("Using ssl ca cert file: %s", ca_cert)
ssl_key = os.path.abspath(ssl_conf.server_key)
if not os.path.exists(ssl_key):
logger.error('Error: the SSL key %s is missing (server_key). '
'Please fix it in your configuration', ssl_key)
sys.exit(2)
logger.info("Using ssl server cert/key files: %s/%s", ssl_cert, ssl_key)
if ssl_conf.hard_ssl_name_check:
logger.info("Enabling hard SSL server name verification")
# Let's create the HTTPDaemon; it will be run later
self.http_daemon = HTTPDaemon(self.host, self.port, http_backend,
use_ssl, ca_cert, ssl_key,
ssl_cert, ssl_conf.hard_ssl_name_check,
self.daemon_thread_pool_size)
# TODO: fix this "hack" :
shinken.http_daemon.daemon_inst = self.http_daemon
# Global loop part
def get_socks_activity(self, socks, timeout):
# some OSes do not manage an empty socks list, so catch this
# and just do a simple sleep instead
if socks == []:
time.sleep(timeout)
return []
try:
ins, _, _ = select.select(socks, [], [], timeout)
except select.error as e:
errnum, _ = e
if errnum == errno.EINTR:
return []
raise
return ins
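# Illustrative use of the select() wrapper above, with a single hypothetical
# listening socket:
#
#   ready = self.get_socks_activity([listener], timeout=1.0)
#   for sock in ready:
#       conn, addr = sock.accept()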
# Find the absolute path of the shinken modules directory and return it.
# If the directory does not exist, we must exit!
def find_modules_path(self):
modules_dir = getattr(self, 'modules_dir', None)
if not modules_dir:
modules_dir = modulesctx.get_modulesdir()
if not modules_dir:
logger.error("Your configuration is missing the path to the modules (modules_dir). "
"I set it by default to /var/lib/shinken/modules. Please configure it")
modules_dir = '/var/lib/shinken/modules'
modulesctx.set_modulesdir(modules_dir)
self.modules_dir = modules_dir
logger.info("Modules directory: %s", modules_dir)
if not os.path.exists(modules_dir):
raise RuntimeError("The modules directory '%s' is missing! Bailing out. "
"Please fix your configuration" % (modules_dir,))
return modules_dir
# modules can have processes, and those can die
def check_and_del_zombie_modules(self):
# Active children make a join with every one, useful :)
self.modules_manager.check_alive_instances()
# and try to restart previous dead :)
self.modules_manager.try_to_restart_deads()
# Just give the uid of a user by looking at its name
def find_uid_from_name(self):
try:
return getpwnam(self.user)[2]
except KeyError as exp:
logger.error("The user %s is unknown", self.user)
return None
# Just give the gid of a group by looking at its name
def find_gid_from_name(self):
try:
return getgrnam(self.group)[2]
except KeyError as exp:
logger.error("The group %s is unknown", self.group)
return None
# Change the user of the running program. Refuse a root run unless the admin
# explicitly overrides it. If the change fails we sys.exit(2)
def change_to_user_group(self, insane=None):
if insane is None:
insane = not self.idontcareaboutsecurity
if is_android:
logger.warning("You can't change user on this system")
return
# TODO: change user on nt
if os.name == 'nt':
logger.warning("You can't change user on this system")
return
if (self.user == 'root' or self.group == 'root') and not insane:
logger.error("You want the application run under the root account?")
logger.error("I do not agree with it. If you really want it, put:")
logger.error("idontcareaboutsecurity=yes")
logger.error("in the config file")
logger.error("Exiting")
sys.exit(2)
uid = self.find_uid_from_name()
gid = self.find_gid_from_name()
if os.getuid() == uid and os.getgid() == gid:
logger.debug("No need to setuid, already good.")
return
if uid is None or gid is None:
logger.error("uid or gid is none. Exiting")
sys.exit(2)
# Maybe the os module has the initgroups function. If so, try to call it.
# Do this when we are still root
if hasattr(os, 'initgroups'):
logger.info('Trying to initialize additional groups for the daemon')
try:
os.initgroups(self.user, gid)
except OSError as e:
logger.warning('Cannot call the additional groups setting '
'with initgroups (%s)', e.strerror)
elif hasattr(os, 'setgroups'):
groups = [gid] + \
[group.gr_gid for group in get_all_groups() if self.user in group.gr_mem]
try:
os.setgroups(groups)
except OSError as e:
logger.warning('Cannot call the additional groups setting '
'with setgroups (%s)', e.strerror)
try:
# First group, then user :)
os.setregid(gid, gid)
os.setreuid(uid, uid)
except OSError as e:
logger.error("cannot change user/group to %s/%s (%s [%d]). Exiting",
self.user, self.group, e.strerror, e.errno)
sys.exit(2)
# Parse self.config_file and get all properties in it.
# If some properties need a pythonization, we do it.
# Also put default values in the properties if some are missing in the config_file
def parse_config_file(self):
properties = self.__class__.properties
if self.config_file is not None:
config = configparser.ConfigParser()
config.read(self.config_file)
if config._sections == {}:
logger.error("Bad or missing config file: %s ", self.config_file)
sys.exit(2)
try:
for (key, value) in config.items('daemon'):
if key in properties:
value = properties[key].pythonize(value)
setattr(self, key, value)
except configparser.InterpolationMissingOptionError as e:
e = str(e)
wrong_variable = e.split('\n')[3].split(':')[1].strip()
logger.error("Incorrect or missing variable '%s' in config file : %s",
wrong_variable, self.config_file)
sys.exit(2)
else:
logger.warning("No config file specified, use defaults parameters")
# Now fill all defaults where missing parameters
for prop, entry in properties.items():
if not hasattr(self, prop):
value = entry.pythonize(entry.default)
setattr(self, prop, value)
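# A minimal [daemon] section that this parser would accept (illustrative
# values; the available keys come from each daemon's `properties` dict):
#
#   [daemon]
#   workdir = /var/run/shinken
#   pidfile = %(workdir)s/schedulerd.pid
#   user    = shinken
#   group   = shinken
#
# Note that %(workdir)s-style interpolation is what can raise the
# InterpolationMissingOptionError handled above.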
# Some paths can be relative. We must make them absolute, using
# the config file location as the reference
def relative_paths_to_full(self, reference_path):
# print("Create relative paths with", reference_path)
properties = self.__class__.properties
for prop, entry in properties.items():
if isinstance(entry, ConfigPathProp):
path = getattr(self, prop)
if not os.path.isabs(path):
new_path = os.path.join(reference_path, path)
# print("DBG: changing", entry, "from", path, "to", new_path)
path = new_path
setattr(self, prop, path)
# print("Setting %s for %s" % (path, prop))
def manage_signal(self, sig, frame):
logger.debug("I'm process %d and I received signal %s", os.getpid(), sig)
if sig == signal.SIGUSR1: # if USR1, ask a memory dump
self.need_dump_memory = True
elif sig == signal.SIGUSR2: # if USR2, ask objects dump
self.need_objects_dump = True
else: # Ok, really ask us to die :)
self.interrupted = True
def set_exit_handler(self):
func = self.manage_signal
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(func, True)
except ImportError:
version = ".".join(map(str, sys.version_info[:2]))
raise Exception("pywin32 not installed for Python " + version)
else:
for sig in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1, signal.SIGUSR2):
signal.signal(sig, func)
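# Illustrative: the handler above only sets flags, so another process can ask
# for a memory dump without stopping the daemon (`daemon_pid` hypothetical):
#
#   import os, signal
#   os.kill(daemon_pid, signal.SIGUSR1)   # -> need_dump_memory = True
#   os.kill(daemon_pid, signal.SIGTERM)   # -> interrupted = True, clean stop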
def set_proctitle(self):
setproctitle("shinken-%s" % self.name)
def get_header(self):
return ["Shinken %s" % VERSION,
"Copyright (c) 2009-2014:",
"Gabes Jean (naparuba@gmail.com)",
"Gerhard Lausser, Gerhard.Lausser@consol.de",
"Gregory Starck, g.starck@gmail.com",
"Hartmut Goebel, h.goebel@goebel-consult.de",
"License: AGPL"]
def print_header(self):
for line in self.get_header():
logger.info(line)
# Main function of the http daemon thread; it will loop forever unless we stop the root daemon
def http_daemon_thread(self):
logger.info("Starting HTTP daemon")
# The main thing is to have a pool of X concurrent requests for the http_daemon,
# so "no_lock" calls can always be answered directly, without waiting for a
# "locked" one to finish
try:
self.http_daemon.run()
except Exception as exp:
logger.error('The HTTP daemon failed with the error %s, exiting', exp)
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Back trace of this error: %s", output.getvalue())
output.close()
self.do_stop()
# Hard mode exit from a thread
os._exit(2)
# Wait up to timeout to handle the http daemon requests.
# If suppl_socks is given it also looks for activity on that list of fd.
# Returns a 3-tuple:
# If timeout: first arg is 0, second is [], third is possible system time change value
# If not timeout (== some fd got activity):
# - first arg is elapsed time since wait,
# - second arg is sublist of suppl_socks that got activity.
# - third arg is possible system time change value, or 0 if no change.
def handleRequests(self, timeout, suppl_socks=None):
if suppl_socks is None:
suppl_socks = []
before = time.time()
socks = []
if suppl_socks:
socks.extend(suppl_socks)
# Release the lock so the http_thread can manage requests while we are waiting
if self.http_daemon:
self.http_daemon.lock.release()
# Ok, give me the socks that moved during the timeout at most
ins = self.get_socks_activity(socks, timeout)
# Ok now get back the global lock!
if self.http_daemon:
self.http_daemon.lock.acquire()
tcdiff = self.check_for_system_time_change()
before += tcdiff
# Increase our sleep time by the time spent in select
self.sleep_time += time.time() - before
if len(ins) == 0: # trivial case: no fd activity:
return 0, [], tcdiff
# HERE WAS THE HTTP, but now it's managed in an other thread
# for sock in socks:
# if sock in ins and sock not in suppl_socks:
# ins.remove(sock)
# Track in elapsed the WHOLE time, even with handling requests
elapsed = time.time() - before
if elapsed == 0: # we have done a few instructions in 0 second exactly!? quantum computer?
elapsed = 0.01 # but we absolutely need to return != 0 to indicate that we got activity
return elapsed, ins, tcdiff
# Check for a possible system time change and act correspondingly.
# If such a change is detected then we return the number of seconds that changed.
# 0 if no time change was detected.
# Time change can be positive or negative:
# positive when we have been sent in the future and negative if we have been sent in the past.
def check_for_system_time_change(self):
now = time.time()
difference = now - self.t_each_loop
# If we have more than 15 min time change, we need to compensate it
if abs(difference) > 900:
self.compensate_system_time_change(difference)
else:
difference = 0
self.t_each_loop = now
return difference
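# Worked example: if the wall clock jumps ahead one hour between two loop
# turns, difference is ~3600 > 900, so compensate_system_time_change(3600)
# is called and 3600 is returned; a 10-second drift is absorbed and 0 is
# returned instead.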
# Default action for a system time change: just log it
def compensate_system_time_change(self, difference):
logger.warning('A system time change of %s has been detected. Compensating...', difference)
# Used to wait for the initial conf from the arbiter.
# The arbiter sends it to us through our http_daemon and sets the
# new_conf property when it sends something
# (it can also just do a ping)
def wait_for_initial_conf(self, timeout=1.0):
logger.info("Waiting for initial configuration")
cur_timeout = timeout
# The arbiter has not yet set our new_conf attribute
while not self.new_conf and not self.interrupted:
elapsed, _, _ = self.handleRequests(cur_timeout)
if elapsed:
cur_timeout -= elapsed
if cur_timeout > 0:
continue
cur_timeout = timeout
sys.stdout.write(".")
sys.stdout.flush()
# We call the hook function of every module instance
# that defines it
def hook_point(self, hook_name):
_t = time.time()
for inst in self.modules_manager.instances:
full_hook_name = 'hook_' + hook_name
if hasattr(inst, full_hook_name):
f = getattr(inst, full_hook_name)
try:
f(self)
except Exception as exp:
logger.warning('The instance %s raised an exception %s. I disabled it, '
'and set it to restart later', inst.get_name(), exp)
self.modules_manager.set_to_restart(inst)
statsmgr.timing('hook.%s' % hook_name, time.time() - _t, 'perf')
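# Illustrative sketch of a module instance that hook_point('tick') would pick
# up (hypothetical class):
#
#   class MyModule(object):
#       def get_name(self):
#           return 'my_module'
#       def hook_tick(self, daemon):
#           pass   # called once per daemon loop turn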
# Dummy function for daemons: get all retention data
# so a module can save it
def get_retention_data(self):
return []
# Restore, to get back all saved data
def restore_retention_data(self, data):
pass
# Dummy function for having the stats main structure before sending somewhere
def get_stats_struct(self):
r = {'metrics': [], 'version': VERSION, 'name': '', 'type': '', 'modules':
{'internal': {}, 'external': {}}}
modules = r['modules']
# first get data for all internal modules
for mod in self.modules_manager.get_internal_instances():
mname = mod.get_name()
state = {True: 'ok', False: 'stopped'}[(mod not in self.modules_manager.to_restart)]
e = {'name': mname, 'state': state}
modules['internal'][mname] = e
# Same but for external ones
for mod in self.modules_manager.get_external_instances():
mname = mod.get_name()
state = {True: 'ok', False: 'stopped'}[(mod not in self.modules_manager.to_restart)]
e = {'name': mname, 'state': state}
modules['external'][mname] = e
return r
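# Shape of the returned structure (illustrative values):
#
#   {'metrics': [], 'version': '2.4', 'name': '', 'type': '',
#    'modules': {'internal': {'livestatus': {'name': 'livestatus',
#                                            'state': 'ok'}},
#                'external': {}}}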
@staticmethod
def print_unrecoverable(trace):
logger.critical("I got an unrecoverable error. I have to exit.")
logger.critical("You can get help at https://github.com/naparuba/shinken")
logger.critical("If you think this is a bug, create a new ticket including"
"details mentioned in the README")
logger.critical("Back trace of the error: %s", trace)
def get_objects_from_from_queues(self):
''' Get objects from "from" queues and add them.
:return: True if we got some objects, False otherwise.
'''
had_some_objects = False
for queue in self.modules_manager.get_external_from_queues():
while True:
try:
o = queue.get(block=False)
except (Empty, IOError, EOFError) as err:
if not isinstance(err, Empty):
logger.error("An external module queue got a problem '%s'", exp)
break
else:
had_some_objects = True
self.add(o)
return had_some_objects
# Checks memory consumption, and gracefully restarts if we're out of bounds
def check_memory_usage(self):
if self.graceful_enabled is False or self.harakiri_threshold is None:
return
if self.new_conf is None and get_memory() >= self.harakiri_threshold:
logger.info("Harakiri threshold reached, restarting the service")
self.new_conf = self.raw_conf
# ---- File: shinken-solutions_shinken/shinken/external_command.py ----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
import re
from shinken.util import to_int, to_bool, split_semicolon
from shinken.downtime import Downtime
from shinken.contactdowntime import ContactDowntime
from shinken.comment import Comment
from shinken.commandcall import CommandCall
from shinken.log import logger, naglog_result
from shinken.objects.pollerlink import PollerLink
from shinken.eventhandler import EventHandler
from shinken.brok import Brok
from shinken.misc.common import DICT_MODATTR
""" TODO: Add some comment about this class for the doc"""
class ExternalCommand(object):
my_type = 'externalcommand'
def __init__(self, cmd_line):
self.cmd_line = cmd_line
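# Illustrative: a raw line read from the command pipe is simply wrapped for
# transport between daemons:
#
#   excmd = ExternalCommand('[1409063631] DISABLE_NOTIFICATIONS')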
""" TODO: Add some comment about this class for the doc"""
class ExternalCommandManager(object):
commands = {
'CHANGE_CONTACT_MODSATTR':
{'global': True, 'args': ['contact', None]},
'CHANGE_CONTACT_MODHATTR':
{'global': True, 'args': ['contact', None]},
'CHANGE_CONTACT_MODATTR':
{'global': True, 'args': ['contact', None]},
'CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD':
{'global': True, 'args': ['contact', 'time_period']},
'ADD_SVC_COMMENT':
{'global': False, 'args': ['service', 'to_bool', 'author', None]},
'ADD_HOST_COMMENT':
{'global': False, 'args': ['host', 'to_bool', 'author', None]},
'ACKNOWLEDGE_SVC_PROBLEM':
{'global': False, 'args': ['service', 'to_int', 'to_bool', 'to_bool', 'author', None]},
'ACKNOWLEDGE_HOST_PROBLEM':
{'global': False, 'args': ['host', 'to_int', 'to_bool', 'to_bool', 'author', None]},
'ACKNOWLEDGE_SVC_PROBLEM_EXPIRE':
{'global': False, 'args': ['service', 'to_int', 'to_bool',
'to_bool', 'to_int', 'author', None]},
'ACKNOWLEDGE_HOST_PROBLEM_EXPIRE':
{'global': False,
'args': ['host', 'to_int', 'to_bool', 'to_bool', 'to_int', 'author', None]},
'CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD':
{'global': True, 'args': ['contact', 'time_period']},
'CHANGE_CUSTOM_CONTACT_VAR':
{'global': True, 'args': ['contact', None, None]},
'CHANGE_CUSTOM_HOST_VAR':
{'global': False, 'args': ['host', None, None]},
'CHANGE_CUSTOM_SVC_VAR':
{'global': False, 'args': ['service', None, None]},
'CHANGE_GLOBAL_HOST_EVENT_HANDLER':
{'global': True, 'args': ['command']},
'CHANGE_GLOBAL_SVC_EVENT_HANDLER':
{'global': True, 'args': ['command']},
'CHANGE_HOST_CHECK_COMMAND':
{'global': False, 'args': ['host', 'command']},
'CHANGE_HOST_CHECK_TIMEPERIOD':
{'global': False, 'args': ['host', 'time_period']},
'CHANGE_HOST_EVENT_HANDLER':
{'global': False, 'args': ['host', 'command']},
'CHANGE_HOST_MODATTR':
{'global': False, 'args': ['host', 'to_int']},
'CHANGE_MAX_HOST_CHECK_ATTEMPTS':
{'global': False, 'args': ['host', 'to_int']},
'CHANGE_MAX_SVC_CHECK_ATTEMPTS':
{'global': False, 'args': ['service', 'to_int']},
'CHANGE_NORMAL_HOST_CHECK_INTERVAL':
{'global': False, 'args': ['host', 'to_int']},
'CHANGE_NORMAL_SVC_CHECK_INTERVAL':
{'global': False, 'args': ['service', 'to_int']},
'CHANGE_RETRY_HOST_CHECK_INTERVAL':
{'global': False, 'args': ['host', 'to_int']},
'CHANGE_RETRY_SVC_CHECK_INTERVAL':
{'global': False, 'args': ['service', 'to_int']},
'CHANGE_SVC_CHECK_COMMAND':
{'global': False, 'args': ['service', 'command']},
'CHANGE_SVC_CHECK_TIMEPERIOD':
{'global': False, 'args': ['service', 'time_period']},
'CHANGE_SVC_EVENT_HANDLER':
{'global': False, 'args': ['service', 'command']},
'CHANGE_SVC_MODATTR':
{'global': False, 'args': ['service', 'to_int']},
'CHANGE_SVC_NOTIFICATION_TIMEPERIOD':
{'global': False, 'args': ['service', 'time_period']},
'DELAY_HOST_NOTIFICATION':
{'global': False, 'args': ['host', 'to_int']},
'DELAY_SVC_NOTIFICATION':
{'global': False, 'args': ['service', 'to_int']},
'DEL_ALL_HOST_COMMENTS':
{'global': False, 'args': ['host']},
'DEL_ALL_HOST_DOWNTIMES':
{'global': False, 'args': ['host']},
'DEL_ALL_SVC_COMMENTS':
{'global': False, 'args': ['service']},
'DEL_ALL_SVC_DOWNTIMES':
{'global': False, 'args': ['service']},
'DEL_CONTACT_DOWNTIME':
{'global': True, 'args': ['to_int']},
'DEL_HOST_COMMENT':
{'global': True, 'args': ['to_int']},
'DEL_HOST_DOWNTIME':
{'global': True, 'args': ['to_int']},
'DEL_SVC_COMMENT':
{'global': True, 'args': ['to_int']},
'DEL_SVC_DOWNTIME':
{'global': True, 'args': ['to_int']},
'DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST':
{'global': False, 'args': ['host']},
'DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS':
{'global': True, 'args': ['contact_group']},
'DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS':
{'global': True, 'args': ['contact_group']},
'DISABLE_CONTACT_HOST_NOTIFICATIONS':
{'global': True, 'args': ['contact']},
'DISABLE_CONTACT_SVC_NOTIFICATIONS':
{'global': True, 'args': ['contact']},
'DISABLE_EVENT_HANDLERS':
{'global': True, 'args': []},
'DISABLE_FAILURE_PREDICTION':
{'global': True, 'args': []},
'DISABLE_FLAP_DETECTION':
{'global': True, 'args': []},
'DISABLE_HOSTGROUP_HOST_CHECKS':
{'global': True, 'args': ['host_group']},
'DISABLE_HOSTGROUP_HOST_NOTIFICATIONS':
{'global': True, 'args': ['host_group']},
'DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS':
{'global': True, 'args': ['host_group']},
'DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS':
{'global': True, 'args': ['host_group']},
'DISABLE_HOSTGROUP_SVC_CHECKS':
{'global': True, 'args': ['host_group']},
'DISABLE_HOSTGROUP_SVC_NOTIFICATIONS':
{'global': True, 'args': ['host_group']},
'DISABLE_HOST_AND_CHILD_NOTIFICATIONS':
{'global': False, 'args': ['host']},
'DISABLE_HOST_CHECK':
{'global': False, 'args': ['host']},
'DISABLE_HOST_EVENT_HANDLER':
{'global': False, 'args': ['host']},
'DISABLE_HOST_FLAP_DETECTION':
{'global': False, 'args': ['host']},
'DISABLE_HOST_FRESHNESS_CHECKS':
{'global': True, 'args': []},
'DISABLE_HOST_NOTIFICATIONS':
{'global': False, 'args': ['host']},
'DISABLE_HOST_SVC_CHECKS':
{'global': False, 'args': ['host']},
'DISABLE_HOST_SVC_NOTIFICATIONS':
{'global': False, 'args': ['host']},
'DISABLE_NOTIFICATIONS':
{'global': True, 'args': []},
'DISABLE_PASSIVE_HOST_CHECKS':
{'global': False, 'args': ['host']},
'DISABLE_PASSIVE_SVC_CHECKS':
{'global': False, 'args': ['service']},
'DISABLE_PERFORMANCE_DATA':
{'global': True, 'args': []},
'DISABLE_SERVICEGROUP_HOST_CHECKS':
{'global': True, 'args': ['service_group']},
'DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS':
{'global': True, 'args': ['service_group']},
'DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS':
{'global': True, 'args': ['service_group']},
'DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS':
{'global': True, 'args': ['service_group']},
'DISABLE_SERVICEGROUP_SVC_CHECKS':
{'global': True, 'args': ['service_group']},
'DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS':
{'global': True, 'args': ['service_group']},
'DISABLE_SERVICE_FLAP_DETECTION':
{'global': False, 'args': ['service']},
'DISABLE_SERVICE_FRESHNESS_CHECKS':
{'global': True, 'args': []},
'DISABLE_SVC_CHECK':
{'global': False, 'args': ['service']},
'DISABLE_SVC_EVENT_HANDLER':
{'global': False, 'args': ['service']},
'DISABLE_SVC_FLAP_DETECTION':
{'global': False, 'args': ['service']},
'DISABLE_SVC_NOTIFICATIONS':
{'global': False, 'args': ['service']},
'ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST':
{'global': False, 'args': ['host']},
'ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS':
{'global': True, 'args': ['contact_group']},
'ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS':
{'global': True, 'args': ['contact_group']},
'ENABLE_CONTACT_HOST_NOTIFICATIONS':
{'global': True, 'args': ['contact']},
'ENABLE_CONTACT_SVC_NOTIFICATIONS':
{'global': True, 'args': ['contact']},
'ENABLE_EVENT_HANDLERS':
{'global': True, 'args': []},
'ENABLE_FAILURE_PREDICTION':
{'global': True, 'args': []},
'ENABLE_FLAP_DETECTION':
{'global': True, 'args': []},
'ENABLE_HOSTGROUP_HOST_CHECKS':
{'global': True, 'args': ['host_group']},
'ENABLE_HOSTGROUP_HOST_NOTIFICATIONS':
{'global': True, 'args': ['host_group']},
'ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS':
{'global': True, 'args': ['host_group']},
'ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS':
{'global': True, 'args': ['host_group']},
'ENABLE_HOSTGROUP_SVC_CHECKS':
{'global': True, 'args': ['host_group']},
'ENABLE_HOSTGROUP_SVC_NOTIFICATIONS':
{'global': True, 'args': ['host_group']},
'ENABLE_HOST_AND_CHILD_NOTIFICATIONS':
{'global': False, 'args': ['host']},
'ENABLE_HOST_CHECK':
{'global': False, 'args': ['host']},
'ENABLE_HOST_EVENT_HANDLER':
{'global': False, 'args': ['host']},
'ENABLE_HOST_FLAP_DETECTION':
{'global': False, 'args': ['host']},
'ENABLE_HOST_FRESHNESS_CHECKS':
{'global': True, 'args': []},
'ENABLE_HOST_NOTIFICATIONS':
{'global': False, 'args': ['host']},
'ENABLE_HOST_SVC_CHECKS':
{'global': False, 'args': ['host']},
'ENABLE_HOST_SVC_NOTIFICATIONS':
{'global': False, 'args': ['host']},
'ENABLE_NOTIFICATIONS':
{'global': True, 'args': []},
'ENABLE_PASSIVE_HOST_CHECKS':
{'global': False, 'args': ['host']},
'ENABLE_PASSIVE_SVC_CHECKS':
{'global': False, 'args': ['service']},
'ENABLE_PERFORMANCE_DATA':
{'global': True, 'args': []},
'ENABLE_SERVICEGROUP_HOST_CHECKS':
{'global': True, 'args': ['service_group']},
'ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS':
{'global': True, 'args': ['service_group']},
'ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS':
{'global': True, 'args': ['service_group']},
'ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS':
{'global': True, 'args': ['service_group']},
'ENABLE_SERVICEGROUP_SVC_CHECKS':
{'global': True, 'args': ['service_group']},
'ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS':
{'global': True, 'args': ['service_group']},
'ENABLE_SERVICE_FRESHNESS_CHECKS':
{'global': True, 'args': []},
'ENABLE_SVC_CHECK':
{'global': False, 'args': ['service']},
'ENABLE_SVC_EVENT_HANDLER':
{'global': False, 'args': ['service']},
'ENABLE_SVC_FLAP_DETECTION':
{'global': False, 'args': ['service']},
'ENABLE_SVC_NOTIFICATIONS':
{'global': False, 'args': ['service']},
'PROCESS_FILE':
{'global': True, 'args': [None, 'to_bool']},
'PROCESS_HOST_CHECK_RESULT':
{'global': False, 'args': ['host', 'to_int', None]},
'PROCESS_HOST_OUTPUT':
{'global': False, 'args': ['host', None]},
'PROCESS_SERVICE_CHECK_RESULT':
{'global': False, 'args': ['service', 'to_int', None]},
'PROCESS_SERVICE_OUTPUT':
{'global': False, 'args': ['service', None]},
'READ_STATE_INFORMATION':
{'global': True, 'args': []},
'REMOVE_HOST_ACKNOWLEDGEMENT':
{'global': False, 'args': ['host']},
'REMOVE_SVC_ACKNOWLEDGEMENT':
{'global': False, 'args': ['service']},
'RESTART_PROGRAM':
{'global': True, 'internal': True, 'args': []},
'RELOAD_CONFIG':
{'global': True, 'internal': True, 'args': []},
'SAVE_STATE_INFORMATION':
{'global': True, 'args': []},
'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME':
{'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool',
'to_int', 'to_int', 'author', None]},
'SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME':
{'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool',
'to_int', 'to_int', 'author', None]},
'SCHEDULE_CONTACT_DOWNTIME':
{'global': True, 'args': ['contact', 'to_int', 'to_int', 'author', None]},
'SCHEDULE_FORCED_HOST_CHECK':
{'global': False, 'args': ['host', 'to_int']},
'SCHEDULE_FORCED_HOST_SVC_CHECKS':
{'global': False, 'args': ['host', 'to_int']},
'SCHEDULE_FORCED_SVC_CHECK':
{'global': False, 'args': ['service', 'to_int']},
'SCHEDULE_HOSTGROUP_HOST_DOWNTIME':
{'global': True, 'args': ['host_group', 'to_int', 'to_int',
'to_bool', 'to_int', 'to_int', 'author', None]},
'SCHEDULE_HOSTGROUP_SVC_DOWNTIME':
{'global': True, 'args': ['host_group', 'to_int', 'to_int', 'to_bool',
'to_int', 'to_int', 'author', None]},
'SCHEDULE_HOST_CHECK':
{'global': False, 'args': ['host', 'to_int']},
'SCHEDULE_HOST_DOWNTIME':
{'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool',
'to_int', 'to_int', 'author', None]},
'SCHEDULE_HOST_SVC_CHECKS':
{'global': False, 'args': ['host', 'to_int']},
'SCHEDULE_HOST_SVC_DOWNTIME':
{'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool',
'to_int', 'to_int', 'author', None]},
'SCHEDULE_SERVICEGROUP_HOST_DOWNTIME':
{'global': True, 'args': ['service_group', 'to_int', 'to_int', 'to_bool',
'to_int', 'to_int', 'author', None]},
'SCHEDULE_SERVICEGROUP_SVC_DOWNTIME':
{'global': True, 'args': ['service_group', 'to_int', 'to_int', 'to_bool',
'to_int', 'to_int', 'author', None]},
'SCHEDULE_SVC_CHECK':
{'global': False, 'args': ['service', 'to_int']},
'SCHEDULE_SVC_DOWNTIME': {'global': False, 'args': ['service', 'to_int', 'to_int',
'to_bool', 'to_int', 'to_int',
'author', None]},
'SEND_CUSTOM_HOST_NOTIFICATION':
{'global': False, 'args': ['host', 'to_int', 'author', None]},
'SEND_CUSTOM_SVC_NOTIFICATION':
{'global': False, 'args': ['service', 'to_int', 'author', None]},
'SET_HOST_NOTIFICATION_NUMBER':
{'global': False, 'args': ['host', 'to_int']},
'SET_SVC_NOTIFICATION_NUMBER':
{'global': False, 'args': ['service', 'to_int']},
'SHUTDOWN_PROGRAM':
{'global': True, 'args': []},
'START_ACCEPTING_PASSIVE_HOST_CHECKS':
{'global': True, 'args': []},
'START_ACCEPTING_PASSIVE_SVC_CHECKS':
{'global': True, 'args': []},
'START_EXECUTING_HOST_CHECKS':
{'global': True, 'args': []},
'START_EXECUTING_SVC_CHECKS':
{'global': True, 'args': []},
'START_OBSESSING_OVER_HOST':
{'global': False, 'args': ['host']},
'START_OBSESSING_OVER_HOST_CHECKS':
{'global': True, 'args': []},
'START_OBSESSING_OVER_SVC':
{'global': False, 'args': ['service']},
'START_OBSESSING_OVER_SVC_CHECKS':
{'global': True, 'args': []},
'STOP_ACCEPTING_PASSIVE_HOST_CHECKS':
{'global': True, 'args': []},
'STOP_ACCEPTING_PASSIVE_SVC_CHECKS':
{'global': True, 'args': []},
'STOP_EXECUTING_HOST_CHECKS':
{'global': True, 'args': []},
'STOP_EXECUTING_SVC_CHECKS':
{'global': True, 'args': []},
'STOP_OBSESSING_OVER_HOST':
{'global': False, 'args': ['host']},
'STOP_OBSESSING_OVER_HOST_CHECKS':
{'global': True, 'args': []},
'STOP_OBSESSING_OVER_SVC':
{'global': False, 'args': ['service']},
'STOP_OBSESSING_OVER_SVC_CHECKS':
{'global': True, 'args': []},
'LAUNCH_SVC_EVENT_HANDLER':
{'global': False, 'args': ['service']},
'LAUNCH_HOST_EVENT_HANDLER':
{'global': False, 'args': ['host']},
# Now internal calls
'ADD_SIMPLE_HOST_DEPENDENCY':
{'global': False, 'args': ['host', 'host']},
'DEL_HOST_DEPENDENCY':
{'global': False, 'args': ['host', 'host']},
'ADD_SIMPLE_POLLER':
{'global': True, 'internal': True, 'args': [None, None, None, None]},
}
def __init__(self, conf, mode):
self.mode = mode
if conf:
self.conf = conf
self.hosts = conf.hosts
self.services = conf.services
self.contacts = conf.contacts
self.hostgroups = conf.hostgroups
self.commands = conf.commands
self.servicegroups = conf.servicegroups
self.contactgroups = conf.contactgroups
self.timeperiods = conf.timeperiods
self.pipe_path = conf.command_file
self.fifo = None
self.cmd_fragments = ''
if self.mode == 'dispatcher':
self.confs = conf.confs
# Will change for each command read, so if a command needs it,
# it can get it
self.current_timestamp = 0
def load_scheduler(self, scheduler):
self.sched = scheduler
def load_arbiter(self, arbiter):
self.arbiter = arbiter
def load_receiver(self, receiver):
self.receiver = receiver
def open(self):
# At the first open, delete and recreate the fifo
if self.fifo is None:
if os.path.exists(self.pipe_path):
os.unlink(self.pipe_path)
if not os.path.exists(self.pipe_path):
os.umask(0)
try:
os.mkfifo(self.pipe_path, 0o660)
open(self.pipe_path, 'w+', os.O_NONBLOCK)
except OSError as exp:
self.error("Pipe creation failed (%s): %s" % (self.pipe_path, exp))
return None
self.fifo = os.open(self.pipe_path, os.O_NONBLOCK)
return self.fifo
def get(self):
buf = os.read(self.fifo, 8096)
r = []
fullbuf = len(buf) == 8096
# If the buffer ended with a fragment last time, prepend it here
buf = self.cmd_fragments + buf
buflen = len(buf)
self.cmd_fragments = ''
if fullbuf and buf[-1] != '\n':
# The buffer was full but ends with a command fragment
r.extend([ExternalCommand(s) for s in (buf.split('\n'))[:-1] if s])
self.cmd_fragments = (buf.split('\n'))[-1]
elif buflen:
# The buffer is either half-filled or full with a '\n' at the end.
r.extend([ExternalCommand(s) for s in buf.split('\n') if s])
else:
# The buffer is empty. We "reset" the fifo here. It will be
# re-opened in the main loop.
os.close(self.fifo)
return r
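# Illustrative: if a read returns exactly 8096 bytes and the buffer ends with
# a partial command such as '[1409063631] DISAB', that fragment is stored in
# self.cmd_fragments and prepended to the next read, so commands split across
# reads are reassembled.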
def resolve_command(self, excmd):
# Maybe the command is invalid. Bailout
try:
command = excmd.cmd_line
except AttributeError as exp:
logger.debug("resolve_command:: error with command %s: %s", excmd, exp)
return
# Strip and get utf8 only strings
command = command.strip()
# Only log if we are in the Arbiter
if self.mode == 'dispatcher' and self.conf.log_external_commands:
# Fix #1263
# logger.info('EXTERNAL COMMAND: ' + command.rstrip())
naglog_result('info', 'EXTERNAL COMMAND: ' + command.rstrip())
r = self.get_command_and_args(command, excmd)
# If we are a receiver, bail out here
if self.mode == 'receiver':
return
if r is not None:
is_global = r['global']
if not is_global:
c_name = r['c_name']
args = r['args']
logger.debug("Got commands %s %s", c_name, args)
f = getattr(self, c_name)
f(*args)
else:
command = r['cmd']
self.dispatch_global_command(command)
# Ok, the command is not for everyone, so we search, by the
# hostname, which scheduler has the host. Then we send it
# the command
def search_host_and_dispatch(self, host_name, command, extcmd):
logger.debug("Calling search_host_and_dispatch for %s", host_name)
host_found = False
# If we are a receiver, just look in the receiver
if self.mode == 'receiver':
logger.info("Receiver looking a scheduler for the external command %s %s",
host_name, command)
sched = self.receiver.get_sched_from_hname(host_name)
if sched:
host_found = True
logger.debug("Receiver found a scheduler: %s", sched)
logger.info("Receiver pushing external command to scheduler %s", sched)
sched['external_commands'].append(extcmd)
else:
for cfg in self.confs.values():
if cfg.hosts.find_by_name(host_name) is not None:
logger.debug("Host %s found in a configuration", host_name)
if cfg.is_assigned:
host_found = True
sched = cfg.assigned_to
logger.debug("Sending command to the scheduler %s", sched.get_name())
# sched.run_external_command(command)
sched.external_commands.append(command)
break
else:
logger.warning("Problem: a configuration is found, but is not assigned!")
if not host_found:
if getattr(self, 'receiver',
getattr(self, 'arbiter', None)).accept_passive_unknown_check_results:
b = self.get_unknown_check_result_brok(command)
getattr(self, 'receiver', getattr(self, 'arbiter', None)).add(b)
else:
logger.warning("Passive check result was received for host '%s', "
"but the host could not be found!", host_name)
# Takes a PROCESS_SERVICE_CHECK_RESULT
# external command line and returns an unknown_[type]_check_result brok
@staticmethod
def get_unknown_check_result_brok(cmd_line):
match = re.match(
r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'
r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
if not match:
match = re.match(
r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;'
r'([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
if not match:
return None
data = {
'time_stamp': int(match.group(1)),
'host_name': match.group(3),
}
if match.group(2) == 'SERVICE':
data['service_description'] = match.group(4)
data['return_code'] = match.group(5)
data['output'] = match.group(6)
data['perf_data'] = match.group(7)
else:
data['return_code'] = match.group(4)
data['output'] = match.group(5)
data['perf_data'] = match.group(6)
b = Brok('unknown_%s_check_result' % match.group(2).lower(), data)
return b
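# Worked example of the extraction above (values illustrative):
#
#   cmd = '[1409063631] PROCESS_HOST_CHECK_RESULT;srv1;0;OK|rta=0.5ms'
#   b = ExternalCommandManager.get_unknown_check_result_brok(cmd)
#   # -> Brok of type 'unknown_host_check_result' with data:
#   #    time_stamp=1409063631, host_name='srv1', return_code='0',
#   #    output='OK', perf_data='rta=0.5ms'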
# The command is global, so send it to every scheduler
def dispatch_global_command(self, command):
for sched in self.conf.schedulers:
logger.debug("Sending a command '%s' to scheduler %s", command, sched)
if sched.alive:
# sched.run_external_command(command)
sched.external_commands.append(command)
# We need to get the first part, the command name, and the reference ext command object
def get_command_and_args(self, command, extcmd=None):
# safe_print("Trying to resolve", command)
command = command.rstrip()
elts = split_semicolon(command) # danger!!! passive checkresults with perfdata
part1 = elts[0]
elts2 = part1.split(' ')
# print("Elts2:", elts2)
if len(elts2) != 2:
logger.debug("Malformed command '%s'", command)
return None
ts = elts2[0]
# Now we will get the timestamp as [123456]
if not ts.startswith('[') or not ts.endswith(']'):
logger.debug("Malformed command '%s'", command)
return None
# Ok we remove the [ ]
ts = ts[1:-1]
try: # is an int or not?
self.current_timestamp = to_int(ts)
except ValueError:
logger.debug("Malformed command '%s'", command)
return None
# Now get the command
c_name = elts2[1]
# safe_print("Get command name", c_name)
if c_name not in ExternalCommandManager.commands:
logger.debug("Command '%s' is not recognized, sorry", c_name)
return None
# Split again based on the number of args we expect. We cannot split
# on every ; because this character may appear in the perfdata of
# passive check results.
entry = ExternalCommandManager.commands[c_name]
# Look if the command is purely internal or not
internal = False
if 'internal' in entry and entry['internal']:
internal = True
numargs = len(entry['args'])
if numargs and 'service' in entry['args']:
numargs += 1
elts = split_semicolon(command, numargs)
logger.debug("mode= %s, global= %s", self.mode, entry['global'])
if self.mode == 'dispatcher' and entry['global']:
if not internal:
logger.debug("Command '%s' is a global one, we resent it to all schedulers", c_name)
return {'global': True, 'cmd': command}
# print("Is global?", c_name, entry['global'])
# print("Mode:", self.mode)
# print("This command have arguments:", entry['args'], len(entry['args']))
args = []
i = 1
in_service = False
tmp_host = ''
try:
for elt in elts[1:]:
logger.debug("Searching for a new arg: %s (%d)", elt, i)
val = elt.strip()
if val.endswith('\n'):
val = val[:-1]
logger.debug("For command arg: %s", val)
if not in_service:
type_searched = entry['args'][i - 1]
# safe_print("Search for a arg", type_searched)
if type_searched == 'host':
if self.mode == 'dispatcher' or self.mode == 'receiver':
self.search_host_and_dispatch(val, command, extcmd)
return None
h = self.hosts.find_by_name(val)
if h is not None:
args.append(h)
elif self.conf.accept_passive_unknown_check_results:
b = self.get_unknown_check_result_brok(command)
self.sched.add_Brok(b)
elif type_searched == 'contact':
c = self.contacts.find_by_name(val)
if c is not None:
args.append(c)
elif type_searched == 'time_period':
t = self.timeperiods.find_by_name(val)
if t is not None:
args.append(t)
elif type_searched == 'to_bool':
args.append(to_bool(val))
elif type_searched == 'to_int':
args.append(to_int(val))
elif type_searched in ('author', None):
args.append(val)
elif type_searched == 'command':
c = self.commands.find_by_name(val)
if c is not None:
# the find will be redone by
# the commandCall creation, but != None
# is useful so a bad command will be caught
args.append(val)
elif type_searched == 'host_group':
hg = self.hostgroups.find_by_name(val)
if hg is not None:
args.append(hg)
elif type_searched == 'service_group':
sg = self.servicegroups.find_by_name(val)
if sg is not None:
args.append(sg)
elif type_searched == 'contact_group':
cg = self.contactgroups.find_by_name(val)
if cg is not None:
args.append(cg)
# special case: services take TWO args host;service, so one more loop
# to get the two parts
elif type_searched == 'service':
in_service = True
tmp_host = elt.strip()
# safe_print("TMP HOST", tmp_host)
if tmp_host[-1] == '\n':
tmp_host = tmp_host[:-1]
if self.mode == 'dispatcher':
self.search_host_and_dispatch(tmp_host, command, extcmd)
return None
i += 1
else:
in_service = False
srv_name = elt
if srv_name[-1] == '\n':
srv_name = srv_name[:-1]
# If we are in a receiver, bailout now.
if self.mode == 'receiver':
self.search_host_and_dispatch(tmp_host, command, extcmd)
return None
# safe_print("Got service full", tmp_host, srv_name)
s = self.services.find_srv_by_name_and_hostname(tmp_host, srv_name)
if s is not None:
args.append(s)
elif self.conf.accept_passive_unknown_check_results:
b = self.get_unknown_check_result_brok(command)
self.sched.add_Brok(b)
else:
logger.warning(
"A command was received for service '%s' on host '%s', "
"but the service could not be found!", srv_name, tmp_host)
except IndexError:
logger.debug("Sorry, the arguments are not corrects")
return None
# safe_print('Finally got ARGS:', args)
if len(args) == len(entry['args']):
# safe_print("OK, we can call the command", c_name, "with", args)
return {'global': False, 'c_name': c_name, 'args': args}
# f = getattr(self, c_name)
# apply(f, args)
else:
logger.debug("Sorry, the arguments are not corrects (%s)", args)
return None
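# Worked example: for '[1409063631] DEL_HOST_DOWNTIME;42', an arbiter in
# 'dispatcher' mode returns {'global': True, 'cmd': ...} and forwards the raw
# line, while a scheduler parses the args ('to_int' -> 42) and returns
# {'global': False, 'c_name': 'DEL_HOST_DOWNTIME', 'args': [42]}.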
# CHANGE_CONTACT_MODSATTR;<contact_name>;<value>
def CHANGE_CONTACT_MODSATTR(self, contact, value): # TODO
contact.modified_service_attributes = int(value)
# CHANGE_CONTACT_MODHATTR;<contact_name>;<value>
def CHANGE_CONTACT_MODHATTR(self, contact, value): # TODO
contact.modified_host_attributes = int(value)
# CHANGE_CONTACT_MODATTR;<contact_name>;<value>
def CHANGE_CONTACT_MODATTR(self, contact, value):
contact.modified_attributes = int(value)
# CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod>
def CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD(self, contact, notification_timeperiod):
contact.modified_host_attributes |= DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value
contact.host_notification_period = notification_timeperiod
self.sched.get_and_register_status_brok(contact)
# ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent>;<author>;<comment>
def ADD_SVC_COMMENT(self, service, persistent, author, comment):
c = Comment(service, persistent, author, comment, 2, 1, 1, False, 0)
service.add_comment(c)
self.sched.add(c)
# ADD_HOST_COMMENT;<host_name>;<persistent>;<author>;<comment>
def ADD_HOST_COMMENT(self, host, persistent, author, comment):
c = Comment(host, persistent, author, comment, 1, 1, 1, False, 0)
host.add_comment(c)
self.sched.add(c)
# ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
# <sticky>;<notify>;<persistent>;<author>;<comment>
def ACKNOWLEDGE_SVC_PROBLEM(self, service, sticky, notify, persistent, author, comment):
service.acknowledge_problem(sticky, notify, persistent, author, comment)
# ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent>;<author>;<comment>
# TODO: add a better ACK management
def ACKNOWLEDGE_HOST_PROBLEM(self, host, sticky, notify, persistent, author, comment):
host.acknowledge_problem(sticky, notify, persistent, author, comment)
# ACKNOWLEDGE_SVC_PROBLEM_EXPIRE;<host_name>;<service_description>;
# <sticky>;<notify>;<persistent>;<end_time>;<author>;<comment>
def ACKNOWLEDGE_SVC_PROBLEM_EXPIRE(self, service, sticky, notify,
persistent, end_time, author, comment):
service.acknowledge_problem(sticky, notify, persistent, author, comment, end_time=end_time)
# ACKNOWLEDGE_HOST_PROBLEM_EXPIRE;<host_name>;<sticky>;
# <notify>;<persistent>;<end_time>;<author>;<comment>
# TODO: add a better ACK management
def ACKNOWLEDGE_HOST_PROBLEM_EXPIRE(self, host, sticky, notify,
persistent, end_time, author, comment):
host.acknowledge_problem(sticky, notify, persistent, author, comment, end_time=end_time)
# CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod>
def CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD(self, contact, notification_timeperiod):
contact.modified_service_attributes |= \
DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value
contact.service_notification_period = notification_timeperiod
self.sched.get_and_register_status_brok(contact)
# CHANGE_CUSTOM_CONTACT_VAR;<contact_name>;<varname>;<varvalue>
def CHANGE_CUSTOM_CONTACT_VAR(self, contact, varname, varvalue):
contact.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value
contact.customs[varname.upper()] = varvalue
# CHANGE_CUSTOM_HOST_VAR;<host_name>;<varname>;<varvalue>
def CHANGE_CUSTOM_HOST_VAR(self, host, varname, varvalue):
host.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value
host.customs[varname.upper()] = varvalue
# CHANGE_CUSTOM_SVC_VAR;<host_name>;<service_description>;<varname>;<varvalue>
def CHANGE_CUSTOM_SVC_VAR(self, service, varname, varvalue):
service.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value
service.customs[varname.upper()] = varvalue
# CHANGE_GLOBAL_HOST_EVENT_HANDLER;<event_handler_command>
def CHANGE_GLOBAL_HOST_EVENT_HANDLER(self, event_handler_command):
# TODO: DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value
pass
# CHANGE_GLOBAL_SVC_EVENT_HANDLER;<event_handler_command> # TODO
def CHANGE_GLOBAL_SVC_EVENT_HANDLER(self, event_handler_command):
# TODO: DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value
pass
# CHANGE_HOST_CHECK_COMMAND;<host_name>;<check_command>
def CHANGE_HOST_CHECK_COMMAND(self, host, check_command):
host.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_COMMAND"].value
host.check_command = CommandCall(self.commands, check_command, poller_tag=host.poller_tag)
self.sched.get_and_register_status_brok(host)
# CHANGE_HOST_CHECK_TIMEPERIOD;<host_name>;<timeperiod>
def CHANGE_HOST_CHECK_TIMEPERIOD(self, host, timeperiod):
# TODO is timeperiod a string or a Timeperiod object?
host.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_TIMEPERIOD"].value
host.check_period = timeperiod
self.sched.get_and_register_status_brok(host)
# CHANGE_HOST_EVENT_HANDLER;<host_name>;<event_handler_command>
def CHANGE_HOST_EVENT_HANDLER(self, host, event_handler_command):
host.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value
host.event_handler = CommandCall(self.commands, event_handler_command)
self.sched.get_and_register_status_brok(host)
# CHANGE_HOST_MODATTR;<host_name>;<value>
def CHANGE_HOST_MODATTR(self, host, value):
host.modified_attributes = int(value)
# CHANGE_MAX_HOST_CHECK_ATTEMPTS;<host_name>;<check_attempts>
def CHANGE_MAX_HOST_CHECK_ATTEMPTS(self, host, check_attempts):
host.modified_attributes |= DICT_MODATTR["MODATTR_MAX_CHECK_ATTEMPTS"].value
host.max_check_attempts = check_attempts
if host.state_type == 'HARD' and host.state == 'UP' and host.attempt > 1:
host.attempt = host.max_check_attempts
self.sched.get_and_register_status_brok(host)
# CHANGE_MAX_SVC_CHECK_ATTEMPTS;<host_name>;<service_description>;<check_attempts>
def CHANGE_MAX_SVC_CHECK_ATTEMPTS(self, service, check_attempts):
service.modified_attributes |= DICT_MODATTR["MODATTR_MAX_CHECK_ATTEMPTS"].value
service.max_check_attempts = check_attempts
if service.state_type == 'HARD' and service.state == 'OK' and service.attempt > 1:
service.attempt = service.max_check_attempts
self.sched.get_and_register_status_brok(service)
# CHANGE_NORMAL_HOST_CHECK_INTERVAL;<host_name>;<check_interval>
def CHANGE_NORMAL_HOST_CHECK_INTERVAL(self, host, check_interval):
host.modified_attributes |= DICT_MODATTR["MODATTR_NORMAL_CHECK_INTERVAL"].value
old_interval = host.check_interval
host.check_interval = check_interval
# If there were no regular checks (interval=0), then schedule
# a check immediately.
if old_interval == 0 and host.checks_enabled:
host.schedule(force=False, force_time=int(time.time()))
self.sched.get_and_register_status_brok(host)
# CHANGE_NORMAL_SVC_CHECK_INTERVAL;<host_name>;<service_description>;<check_interval>
def CHANGE_NORMAL_SVC_CHECK_INTERVAL(self, service, check_interval):
service.modified_attributes |= DICT_MODATTR["MODATTR_NORMAL_CHECK_INTERVAL"].value
old_interval = service.check_interval
service.check_interval = check_interval
# If there were no regular checks (interval=0), then schedule
# a check immediately.
if old_interval == 0 and service.checks_enabled:
service.schedule(force=False, force_time=int(time.time()))
self.sched.get_and_register_status_brok(service)
# CHANGE_RETRY_HOST_CHECK_INTERVAL;<host_name>;<check_interval>
def CHANGE_RETRY_HOST_CHECK_INTERVAL(self, host, check_interval):
host.modified_attributes |= DICT_MODATTR["MODATTR_RETRY_CHECK_INTERVAL"].value
host.retry_interval = check_interval
self.sched.get_and_register_status_brok(host)
# CHANGE_RETRY_SVC_CHECK_INTERVAL;<host_name>;<service_description>;<check_interval>
def CHANGE_RETRY_SVC_CHECK_INTERVAL(self, service, check_interval):
service.modified_attributes |= DICT_MODATTR["MODATTR_RETRY_CHECK_INTERVAL"].value
service.retry_interval = check_interval
self.sched.get_and_register_status_brok(service)
# CHANGE_SVC_CHECK_COMMAND;<host_name>;<service_description>;<check_command>
def CHANGE_SVC_CHECK_COMMAND(self, service, check_command):
service.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_COMMAND"].value
service.check_command = CommandCall(self.commands, check_command,
poller_tag=service.poller_tag)
self.sched.get_and_register_status_brok(service)
# CHANGE_SVC_CHECK_TIMEPERIOD;<host_name>;<service_description>;<check_timeperiod>
def CHANGE_SVC_CHECK_TIMEPERIOD(self, service, check_timeperiod):
service.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_TIMEPERIOD"].value
service.check_period = check_timeperiod
self.sched.get_and_register_status_brok(service)
# CHANGE_SVC_EVENT_HANDLER;<host_name>;<service_description>;<event_handler_command>
def CHANGE_SVC_EVENT_HANDLER(self, service, event_handler_command):
service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value
service.event_handler = CommandCall(self.commands, event_handler_command)
self.sched.get_and_register_status_brok(service)
# CHANGE_SVC_MODATTR;<host_name>;<service_description>;<value>
def CHANGE_SVC_MODATTR(self, service, value):
# This is not enough.
# We need to also change each of the needed attributes.
previous_value = service.modified_attributes
future_value = int(value)
changes = future_value ^ previous_value
for modattr in [
"MODATTR_NOTIFICATIONS_ENABLED", "MODATTR_ACTIVE_CHECKS_ENABLED",
"MODATTR_PASSIVE_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_ENABLED",
"MODATTR_FLAP_DETECTION_ENABLED", "MODATTR_PERFORMANCE_DATA_ENABLED",
"MODATTR_OBSESSIVE_HANDLER_ENABLED", "MODATTR_FRESHNESS_CHECKS_ENABLED"]:
if changes & DICT_MODATTR[modattr].value:
logger.info("[CHANGE_SVC_MODATTR] Reset %s", modattr)
setattr(service, DICT_MODATTR[modattr].attribute, not
getattr(service, DICT_MODATTR[modattr].attribute))
# TODO : Handle not boolean attributes.
# ["MODATTR_EVENT_HANDLER_COMMAND",
# "MODATTR_CHECK_COMMAND", "MODATTR_NORMAL_CHECK_INTERVAL",
# "MODATTR_RETRY_CHECK_INTERVAL",
# "MODATTR_MAX_CHECK_ATTEMPTS", "MODATTR_FRESHNESS_CHECKS_ENABLED",
# "MODATTR_CHECK_TIMEPERIOD", "MODATTR_CUSTOM_VARIABLE", "MODATTR_NOTIFICATION_TIMEPERIOD"]
service.modified_attributes = future_value
# And we need to push the information to the scheduler.
self.sched.get_and_register_status_brok(service)
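# Worked example of the XOR above, assuming the standard Nagios MODATTR bit
# values (NOTIFICATIONS=1, ACTIVE_CHECKS=2, PASSIVE_CHECKS=4): with
# previous_value=0 and value='6', changes = 6 ^ 0 = 6, so the active-checks
# and passive-checks flags are toggled and modified_attributes becomes 6.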
# CHANGE_SVC_NOTIFICATION_TIMEPERIOD;<host_name>;
# <service_description>;<notification_timeperiod>
def CHANGE_SVC_NOTIFICATION_TIMEPERIOD(self, service, notification_timeperiod):
service.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value
service.notification_period = notification_timeperiod
self.sched.get_and_register_status_brok(service)
# DELAY_HOST_NOTIFICATION;<host_name>;<notification_time>
def DELAY_HOST_NOTIFICATION(self, host, notification_time):
host.first_notification_delay = notification_time
self.sched.get_and_register_status_brok(host)
# DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time>
def DELAY_SVC_NOTIFICATION(self, service, notification_time):
service.first_notification_delay = notification_time
self.sched.get_and_register_status_brok(service)
# DEL_ALL_HOST_COMMENTS;<host_name>
def DEL_ALL_HOST_COMMENTS(self, host):
for c in host.comments:
self.DEL_HOST_COMMENT(c.id)
# DEL_ALL_HOST_DOWNTIMES;<host_name>
def DEL_ALL_HOST_DOWNTIMES(self, host):
for dt in host.downtimes:
self.DEL_HOST_DOWNTIME(dt.id)
# DEL_ALL_SVC_COMMENTS;<host_name>;<service_description>
def DEL_ALL_SVC_COMMENTS(self, service):
for c in service.comments:
self.DEL_SVC_COMMENT(c.id)
# DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>
def DEL_ALL_SVC_DOWNTIMES(self, service):
for dt in service.downtimes:
self.DEL_SVC_DOWNTIME(dt.id)
# DEL_CONTACT_DOWNTIME;<downtime_id>
def DEL_CONTACT_DOWNTIME(self, downtime_id):
if downtime_id in self.sched.contact_downtimes:
self.sched.contact_downtimes[downtime_id].cancel()
# DEL_HOST_COMMENT;<comment_id>
def DEL_HOST_COMMENT(self, comment_id):
if comment_id in self.sched.comments:
self.sched.comments[comment_id].can_be_deleted = True
# DEL_HOST_DOWNTIME;<downtime_id>
def DEL_HOST_DOWNTIME(self, downtime_id):
if downtime_id in self.sched.downtimes:
self.sched.downtimes[downtime_id].cancel()
# DEL_SVC_COMMENT;<comment_id>
def DEL_SVC_COMMENT(self, comment_id):
if comment_id in self.sched.comments:
self.sched.comments[comment_id].can_be_deleted = True
# DEL_SVC_DOWNTIME;<downtime_id>
def DEL_SVC_DOWNTIME(self, downtime_id):
if downtime_id in self.sched.downtimes:
self.sched.downtimes[downtime_id].cancel()
# DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST;<host_name>
def DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST(self, host):
pass
# DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name>
def DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS(self, contactgroup):
for contact in contactgroup:
self.DISABLE_CONTACT_HOST_NOTIFICATIONS(contact)
# DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
def DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS(self, contactgroup):
for contact in contactgroup:
self.DISABLE_CONTACT_SVC_NOTIFICATIONS(contact)
# DISABLE_CONTACT_HOST_NOTIFICATIONS;<contact_name>
def DISABLE_CONTACT_HOST_NOTIFICATIONS(self, contact):
if contact.host_notifications_enabled:
contact.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
contact.host_notifications_enabled = False
self.sched.get_and_register_status_brok(contact)
# DISABLE_CONTACT_SVC_NOTIFICATIONS;<contact_name>
def DISABLE_CONTACT_SVC_NOTIFICATIONS(self, contact):
if contact.service_notifications_enabled:
contact.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
contact.service_notifications_enabled = False
self.sched.get_and_register_status_brok(contact)
# DISABLE_EVENT_HANDLERS
def DISABLE_EVENT_HANDLERS(self):
if self.conf.enable_event_handlers:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
self.conf.enable_event_handlers = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# DISABLE_FAILURE_PREDICTION
def DISABLE_FAILURE_PREDICTION(self):
if self.conf.enable_failure_prediction:
self.conf.modified_attributes |= \
DICT_MODATTR["MODATTR_FAILURE_PREDICTION_ENABLED"].value
self.conf.enable_failure_prediction = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# DISABLE_FLAP_DETECTION
def DISABLE_FLAP_DETECTION(self):
if self.conf.enable_flap_detection:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
self.conf.enable_flap_detection = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# If needed, disable the flapping state for hosts and services
for service in self.conf.services:
if service.is_flapping:
service.is_flapping = False
service.flapping_changes = []
self.sched.get_and_register_status_brok(service)
for host in self.conf.hosts:
if host.is_flapping:
host.is_flapping = False
host.flapping_changes = []
self.sched.get_and_register_status_brok(host)
# DISABLE_HOSTGROUP_HOST_CHECKS;<hostgroup_name>
def DISABLE_HOSTGROUP_HOST_CHECKS(self, hostgroup):
for host in hostgroup:
self.DISABLE_HOST_CHECK(host)
# DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
def DISABLE_HOSTGROUP_HOST_NOTIFICATIONS(self, hostgroup):
for host in hostgroup:
self.DISABLE_HOST_NOTIFICATIONS(host)
# DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;<hostgroup_name>
def DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS(self, hostgroup):
for host in hostgroup:
self.DISABLE_PASSIVE_HOST_CHECKS(host)
# DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name>
def DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS(self, hostgroup):
for host in hostgroup:
for service in host.services:
self.DISABLE_PASSIVE_SVC_CHECKS(service)
# DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name>
def DISABLE_HOSTGROUP_SVC_CHECKS(self, hostgroup):
for host in hostgroup:
for service in host.services:
self.DISABLE_SVC_CHECK(service)
# DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
def DISABLE_HOSTGROUP_SVC_NOTIFICATIONS(self, hostgroup):
for host in hostgroup:
for service in host.services:
self.DISABLE_SVC_NOTIFICATIONS(service)
# DISABLE_HOST_AND_CHILD_NOTIFICATIONS;<host_name>
def DISABLE_HOST_AND_CHILD_NOTIFICATIONS(self, host):
pass
# DISABLE_HOST_CHECK;<host_name>
def DISABLE_HOST_CHECK(self, host):
if host.active_checks_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
host.disable_active_checks()
self.sched.get_and_register_status_brok(host)
# DISABLE_HOST_EVENT_HANDLER;<host_name>
def DISABLE_HOST_EVENT_HANDLER(self, host):
if host.event_handler_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
host.event_handler_enabled = False
self.sched.get_and_register_status_brok(host)
# DISABLE_HOST_FLAP_DETECTION;<host_name>
def DISABLE_HOST_FLAP_DETECTION(self, host):
if host.flap_detection_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
host.flap_detection_enabled = False
# Maybe the host was flapping, if so, stop flapping
if host.is_flapping:
host.is_flapping = False
host.flapping_changes = []
self.sched.get_and_register_status_brok(host)
# DISABLE_HOST_FRESHNESS_CHECKS
def DISABLE_HOST_FRESHNESS_CHECKS(self):
if self.conf.check_host_freshness:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
self.conf.check_host_freshness = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# DISABLE_HOST_NOTIFICATIONS;<host_name>
def DISABLE_HOST_NOTIFICATIONS(self, host):
if host.notifications_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
host.notifications_enabled = False
self.sched.get_and_register_status_brok(host)
# DISABLE_HOST_SVC_CHECKS;<host_name>
def DISABLE_HOST_SVC_CHECKS(self, host):
for s in host.services:
self.DISABLE_SVC_CHECK(s)
# DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
def DISABLE_HOST_SVC_NOTIFICATIONS(self, host):
for s in host.services:
self.DISABLE_SVC_NOTIFICATIONS(s)
self.sched.get_and_register_status_brok(s)
# DISABLE_NOTIFICATIONS
def DISABLE_NOTIFICATIONS(self):
if self.conf.enable_notifications:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
self.conf.enable_notifications = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# DISABLE_PASSIVE_HOST_CHECKS;<host_name>
def DISABLE_PASSIVE_HOST_CHECKS(self, host):
if host.passive_checks_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
host.passive_checks_enabled = False
self.sched.get_and_register_status_brok(host)
# DISABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description>
def DISABLE_PASSIVE_SVC_CHECKS(self, service):
if service.passive_checks_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
service.passive_checks_enabled = False
self.sched.get_and_register_status_brok(service)
# DISABLE_PERFORMANCE_DATA
def DISABLE_PERFORMANCE_DATA(self):
if self.conf.process_performance_data:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value
self.conf.process_performance_data = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# DISABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name>
def DISABLE_SERVICEGROUP_HOST_CHECKS(self, servicegroup):
for service in servicegroup:
self.DISABLE_HOST_CHECK(service.host)
# DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
def DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS(self, servicegroup):
for service in servicegroup:
self.DISABLE_HOST_NOTIFICATIONS(service.host)
# DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name>
def DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS(self, servicegroup):
for service in servicegroup:
self.DISABLE_PASSIVE_HOST_CHECKS(service.host)
# DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;<servicegroup_name>
def DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS(self, servicegroup):
for service in servicegroup:
self.DISABLE_PASSIVE_SVC_CHECKS(service)
# DISABLE_SERVICEGROUP_SVC_CHECKS;<servicegroup_name>
def DISABLE_SERVICEGROUP_SVC_CHECKS(self, servicegroup):
for service in servicegroup:
self.DISABLE_SVC_CHECK(service)
# DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
def DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS(self, servicegroup):
for service in servicegroup:
self.DISABLE_SVC_NOTIFICATIONS(service)
# DISABLE_SERVICE_FLAP_DETECTION;<host_name>;<service_description>
def DISABLE_SERVICE_FLAP_DETECTION(self, service):
if service.flap_detection_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
service.flap_detection_enabled = False
# Maybe the service was flapping, if so, stop flapping
if service.is_flapping:
service.is_flapping = False
service.flapping_changes = []
self.sched.get_and_register_status_brok(service)
# DISABLE_SERVICE_FRESHNESS_CHECKS
def DISABLE_SERVICE_FRESHNESS_CHECKS(self):
if self.conf.check_service_freshness:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
self.conf.check_service_freshness = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# DISABLE_SVC_CHECK;<host_name>;<service_description>
def DISABLE_SVC_CHECK(self, service):
if service.active_checks_enabled:
service.disable_active_checks()
service.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.sched.get_and_register_status_brok(service)
# DISABLE_SVC_EVENT_HANDLER;<host_name>;<service_description>
def DISABLE_SVC_EVENT_HANDLER(self, service):
if service.event_handler_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
service.event_handler_enabled = False
self.sched.get_and_register_status_brok(service)
# DISABLE_SVC_FLAP_DETECTION;<host_name>;<service_description>
def DISABLE_SVC_FLAP_DETECTION(self, service):
self.DISABLE_SERVICE_FLAP_DETECTION(service)
# DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
def DISABLE_SVC_NOTIFICATIONS(self, service):
if service.notifications_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
service.notifications_enabled = False
self.sched.get_and_register_status_brok(service)
# ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST;<host_name>
def ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST(self, host):
pass
# ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name>
def ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS(self, contactgroup):
for contact in contactgroup:
self.ENABLE_CONTACT_HOST_NOTIFICATIONS(contact)
# ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
def ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS(self, contactgroup):
for contact in contactgroup:
self.ENABLE_CONTACT_SVC_NOTIFICATIONS(contact)
# ENABLE_CONTACT_HOST_NOTIFICATIONS;<contact_name>
def ENABLE_CONTACT_HOST_NOTIFICATIONS(self, contact):
if not contact.host_notifications_enabled:
contact.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
contact.host_notifications_enabled = True
self.sched.get_and_register_status_brok(contact)
# ENABLE_CONTACT_SVC_NOTIFICATIONS;<contact_name>
def ENABLE_CONTACT_SVC_NOTIFICATIONS(self, contact):
if not contact.service_notifications_enabled:
contact.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
contact.service_notifications_enabled = True
self.sched.get_and_register_status_brok(contact)
# ENABLE_EVENT_HANDLERS
def ENABLE_EVENT_HANDLERS(self):
if not self.conf.enable_event_handlers:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
self.conf.enable_event_handlers = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# ENABLE_FAILURE_PREDICTION
def ENABLE_FAILURE_PREDICTION(self):
if not self.conf.enable_failure_prediction:
self.conf.modified_attributes |= \
DICT_MODATTR["MODATTR_FAILURE_PREDICTION_ENABLED"].value
self.conf.enable_failure_prediction = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# ENABLE_FLAP_DETECTION
def ENABLE_FLAP_DETECTION(self):
if not self.conf.enable_flap_detection:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
self.conf.enable_flap_detection = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# ENABLE_HOSTGROUP_HOST_CHECKS;<hostgroup_name>
def ENABLE_HOSTGROUP_HOST_CHECKS(self, hostgroup):
for host in hostgroup:
self.ENABLE_HOST_CHECK(host)
# ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
def ENABLE_HOSTGROUP_HOST_NOTIFICATIONS(self, hostgroup):
for host in hostgroup:
self.ENABLE_HOST_NOTIFICATIONS(host)
# ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;<hostgroup_name>
def ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS(self, hostgroup):
for host in hostgroup:
self.ENABLE_PASSIVE_HOST_CHECKS(host)
# ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name>
def ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS(self, hostgroup):
for host in hostgroup:
for service in host.services:
self.ENABLE_PASSIVE_SVC_CHECKS(service)
# ENABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name>
def ENABLE_HOSTGROUP_SVC_CHECKS(self, hostgroup):
for host in hostgroup:
for service in host.services:
self.ENABLE_SVC_CHECK(service)
# ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
def ENABLE_HOSTGROUP_SVC_NOTIFICATIONS(self, hostgroup):
for host in hostgroup:
for service in host.services:
self.ENABLE_SVC_NOTIFICATIONS(service)
# ENABLE_HOST_AND_CHILD_NOTIFICATIONS;<host_name>
def ENABLE_HOST_AND_CHILD_NOTIFICATIONS(self, host):
pass
# ENABLE_HOST_CHECK;<host_name>
def ENABLE_HOST_CHECK(self, host):
if not host.active_checks_enabled:
host.active_checks_enabled = True
host.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.sched.get_and_register_status_brok(host)
# ENABLE_HOST_EVENT_HANDLER;<host_name>
def ENABLE_HOST_EVENT_HANDLER(self, host):
if not host.event_handler_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
host.event_handler_enabled = True
self.sched.get_and_register_status_brok(host)
# ENABLE_HOST_FLAP_DETECTION;<host_name>
def ENABLE_HOST_FLAP_DETECTION(self, host):
if not host.flap_detection_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
host.flap_detection_enabled = True
self.sched.get_and_register_status_brok(host)
# ENABLE_HOST_FRESHNESS_CHECKS
def ENABLE_HOST_FRESHNESS_CHECKS(self):
if not self.conf.check_host_freshness:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
self.conf.check_host_freshness = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# ENABLE_HOST_NOTIFICATIONS;<host_name>
def ENABLE_HOST_NOTIFICATIONS(self, host):
if not host.notifications_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
host.notifications_enabled = True
self.sched.get_and_register_status_brok(host)
# ENABLE_HOST_SVC_CHECKS;<host_name>
def ENABLE_HOST_SVC_CHECKS(self, host):
for s in host.services:
self.ENABLE_SVC_CHECK(s)
# ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
def ENABLE_HOST_SVC_NOTIFICATIONS(self, host):
for s in host.services:
self.ENABLE_SVC_NOTIFICATIONS(s)
self.sched.get_and_register_status_brok(s)
# ENABLE_NOTIFICATIONS
def ENABLE_NOTIFICATIONS(self):
if not self.conf.enable_notifications:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
self.conf.enable_notifications = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# ENABLE_PASSIVE_HOST_CHECKS;<host_name>
def ENABLE_PASSIVE_HOST_CHECKS(self, host):
if not host.passive_checks_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
host.passive_checks_enabled = True
self.sched.get_and_register_status_brok(host)
# ENABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description>
def ENABLE_PASSIVE_SVC_CHECKS(self, service):
if not service.passive_checks_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
service.passive_checks_enabled = True
self.sched.get_and_register_status_brok(service)
# ENABLE_PERFORMANCE_DATA
def ENABLE_PERFORMANCE_DATA(self):
if not self.conf.process_performance_data:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value
self.conf.process_performance_data = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# ENABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name>
def ENABLE_SERVICEGROUP_HOST_CHECKS(self, servicegroup):
for service in servicegroup:
self.ENABLE_HOST_CHECK(service.host)
# ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
def ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS(self, servicegroup):
for service in servicegroup:
self.ENABLE_HOST_NOTIFICATIONS(service.host)
# ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name>
def ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS(self, servicegroup):
for service in servicegroup:
self.ENABLE_PASSIVE_HOST_CHECKS(service.host)
# ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;<servicegroup_name>
def ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS(self, servicegroup):
for service in servicegroup:
self.ENABLE_PASSIVE_SVC_CHECKS(service)
# ENABLE_SERVICEGROUP_SVC_CHECKS;<servicegroup_name>
def ENABLE_SERVICEGROUP_SVC_CHECKS(self, servicegroup):
for service in servicegroup:
self.ENABLE_SVC_CHECK(service)
# ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
def ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS(self, servicegroup):
for service in servicegroup:
self.ENABLE_SVC_NOTIFICATIONS(service)
# ENABLE_SERVICE_FRESHNESS_CHECKS
def ENABLE_SERVICE_FRESHNESS_CHECKS(self):
if not self.conf.check_service_freshness:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
self.conf.check_service_freshness = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# ENABLE_SVC_CHECK;<host_name>;<service_description>
def ENABLE_SVC_CHECK(self, service):
if not service.active_checks_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
service.active_checks_enabled = True
self.sched.get_and_register_status_brok(service)
# ENABLE_SVC_EVENT_HANDLER;<host_name>;<service_description>
def ENABLE_SVC_EVENT_HANDLER(self, service):
if not service.event_handler_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
service.event_handler_enabled = True
self.sched.get_and_register_status_brok(service)
# ENABLE_SVC_FLAP_DETECTION;<host_name>;<service_description>
def ENABLE_SVC_FLAP_DETECTION(self, service):
if not service.flap_detection_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
service.flap_detection_enabled = True
self.sched.get_and_register_status_brok(service)
# ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
def ENABLE_SVC_NOTIFICATIONS(self, service):
if not service.notifications_enabled:
service.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
service.notifications_enabled = True
self.sched.get_and_register_status_brok(service)
# PROCESS_FILE;<file_name>;<delete>
def PROCESS_FILE(self, file_name, delete):
pass
# TODO: say that check is PASSIVE
# PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>
def PROCESS_HOST_CHECK_RESULT(self, host, status_code, plugin_output):
        # Log the PASSIVE check only if configured to do so
if self.conf.log_passive_checks:
naglog_result('info', 'PASSIVE HOST CHECK: %s;%d;%s' % (
host.get_name(), status_code, plugin_output
))
now = time.time()
cls = host.__class__
        # If passive checks are disabled globally OR locally, do not process it
if cls.accept_passive_checks and host.passive_checks_enabled:
# Maybe the check is just too old, if so, bail out!
if self.current_timestamp < host.last_chk:
return
i = host.launch_check(now, force=True)
c = None
for chk in host.get_checks_in_progress():
if chk.id == i:
c = chk
# Should not be possible to not find the check, but if so, don't crash
if not c:
logger.error('Passive host check failed. Cannot find the check id %s', i)
return
            # Now we 'transform the check into a result':
            # exit_status, output and status are consumed by the host
c.exit_status = status_code
c.get_outputs(plugin_output, host.max_plugins_output_length)
c.status = 'waitconsume'
c.check_time = self.current_timestamp # we are using the external command timestamps
# Set the corresponding host's check_type to passive=1
c.set_type_passive()
self.sched.nb_check_received += 1
            # Ok, now this result will be read by the scheduler on the next loop
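            # Illustrative raw command served by this handler (hypothetical
            # host name and output, epoch timestamp in brackets):
            #   [1409840134] PROCESS_HOST_CHECK_RESULT;myhost;0;OK - ping responded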
# PROCESS_HOST_OUTPUT;<host_name>;<plugin_output>
def PROCESS_HOST_OUTPUT(self, host, plugin_output):
self.PROCESS_HOST_CHECK_RESULT(host, host.state_id, plugin_output)
# PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>
def PROCESS_SERVICE_CHECK_RESULT(self, service, return_code, plugin_output):
        # Log the PASSIVE check only if configured to do so
if self.conf.log_passive_checks:
naglog_result('info', 'PASSIVE SERVICE CHECK: %s;%s;%d;%s' % (
service.host.get_name(), service.get_name(), return_code,
plugin_output
))
now = time.time()
cls = service.__class__
        # If passive checks are disabled globally OR locally, do not process it
if cls.accept_passive_checks and service.passive_checks_enabled:
# Maybe the check is just too old, if so, bail out!
if self.current_timestamp < service.last_chk:
return
c = None
i = service.launch_check(now, force=True)
for chk in service.get_checks_in_progress():
if chk.id == i:
c = chk
# Should not be possible to not find the check, but if so, don't crash
if not c:
logger.error('Passive service check failed. Cannot find the check id %s', i)
return
            # Now we 'transform the check into a result':
            # exit_status, output and status are consumed by the service
c.exit_status = return_code
c.get_outputs(plugin_output, service.max_plugins_output_length)
c.status = 'waitconsume'
c.check_time = self.current_timestamp # we are using the external command timestamps
# Set the corresponding service's check_type to passive=1
c.set_type_passive()
self.sched.nb_check_received += 1
            # Ok, now this result will be reaped by the scheduler on the next loop
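            # Illustrative raw command served by this handler (hypothetical
            # host and service names):
            #   [1409840134] PROCESS_SERVICE_CHECK_RESULT;myhost;myservice;2;CRITICAL - no answer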
    # PROCESS_SERVICE_OUTPUT;<host_name>;<service_description>;<plugin_output>
def PROCESS_SERVICE_OUTPUT(self, service, plugin_output):
self.PROCESS_SERVICE_CHECK_RESULT(service, service.state_id, plugin_output)
# READ_STATE_INFORMATION
def READ_STATE_INFORMATION(self):
pass
# REMOVE_HOST_ACKNOWLEDGEMENT;<host_name>
def REMOVE_HOST_ACKNOWLEDGEMENT(self, host):
host.unacknowledge_problem()
# REMOVE_SVC_ACKNOWLEDGEMENT;<host_name>;<service_description>
def REMOVE_SVC_ACKNOWLEDGEMENT(self, service):
service.unacknowledge_problem()
# RESTART_PROGRAM
def RESTART_PROGRAM(self):
restart_cmd = self.commands.find_by_name('restart-shinken')
if not restart_cmd:
logger.error("Cannot restart Shinken : missing command named"
" 'restart-shinken'. Please add one")
return
restart_cmd_line = restart_cmd.command_line
logger.warning("RESTART command : %s", restart_cmd_line)
# Ok get an event handler command that will run in 15min max
e = EventHandler(restart_cmd_line, timeout=900)
# Ok now run it
e.execute()
# And wait for the command to finish
while e.status not in ('done', 'timeout'):
e.check_finished(64000)
if e.status == 'timeout' or e.exit_status != 0:
logger.error("Cannot restart Shinken : the 'restart-shinken' command failed with"
" the error code '%d' and the text '%s'.", e.exit_status, e.output)
return
        # Ok, the command succeeded; we can now wait for our own shutdown
naglog_result('info', e.output)
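    # A matching command definition could look like this (a sketch, assuming a
    # sysvinit-style init script; adapt the command_line to your installation):
    #   define command {
    #       command_name    restart-shinken
    #       command_line    sudo /etc/init.d/shinken restart
    #   }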
# RELOAD_CONFIG
def RELOAD_CONFIG(self):
reload_cmd = self.commands.find_by_name('reload-shinken')
if not reload_cmd:
logger.error("Cannot restart Shinken : missing command"
" named 'reload-shinken'. Please add one")
return
reload_cmd_line = reload_cmd.command_line
logger.warning("RELOAD command : %s", reload_cmd_line)
# Ok get an event handler command that will run in 15min max
e = EventHandler(reload_cmd_line, timeout=900)
# Ok now run it
e.execute()
# And wait for the command to finish
while e.status not in ('done', 'timeout'):
e.check_finished(64000)
if e.status == 'timeout' or e.exit_status != 0:
logger.error("Cannot reload Shinken configuration: the 'reload-shinken' command failed"
" with the error code '%d' and the text '%s'." % (e.exit_status, e.output))
return
        # Ok, the command succeeded; we can now wait for our own shutdown
naglog_result('info', e.output)
# SAVE_STATE_INFORMATION
def SAVE_STATE_INFORMATION(self):
pass
# SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
# <fixed>;<trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME(self, host, start_time, end_time,
fixed, trigger_id, duration, author, comment):
pass
# SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>;
# <trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME(self, host, start_time, end_time, fixed,
trigger_id, duration, author, comment):
pass
# SCHEDULE_CONTACT_DOWNTIME;<contact_name>;<start_time>;<end_time>;<author>;<comment>
def SCHEDULE_CONTACT_DOWNTIME(self, contact, start_time, end_time, author, comment):
dt = ContactDowntime(contact, start_time, end_time, author, comment)
contact.add_downtime(dt)
self.sched.add(dt)
self.sched.get_and_register_status_brok(contact)
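    # Illustrative raw command matching the header above (hypothetical contact
    # name and epoch times):
    #   [1409840134] SCHEDULE_CONTACT_DOWNTIME;bob;1409840134;1409843734;admin;maintenance window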
# SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
def SCHEDULE_FORCED_HOST_CHECK(self, host, check_time):
host.schedule(force=True, force_time=check_time)
self.sched.get_and_register_status_brok(host)
# SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
def SCHEDULE_FORCED_HOST_SVC_CHECKS(self, host, check_time):
for s in host.services:
self.SCHEDULE_FORCED_SVC_CHECK(s, check_time)
self.sched.get_and_register_status_brok(s)
# SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
def SCHEDULE_FORCED_SVC_CHECK(self, service, check_time):
service.schedule(force=True, force_time=check_time)
self.sched.get_and_register_status_brok(service)
# SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>;
# <fixed>;<trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_HOSTGROUP_HOST_DOWNTIME(self, hostgroup, start_time, end_time, fixed,
trigger_id, duration, author, comment):
for host in hostgroup:
self.SCHEDULE_HOST_DOWNTIME(host, start_time, end_time, fixed,
trigger_id, duration, author, comment)
# SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>;<fixed>;
# <trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_HOSTGROUP_SVC_DOWNTIME(self, hostgroup, start_time, end_time, fixed,
trigger_id, duration, author, comment):
for host in hostgroup:
for s in host.services:
self.SCHEDULE_SVC_DOWNTIME(s, start_time, end_time, fixed,
trigger_id, duration, author, comment)
# SCHEDULE_HOST_CHECK;<host_name>;<check_time>
def SCHEDULE_HOST_CHECK(self, host, check_time):
host.schedule(force=False, force_time=check_time)
self.sched.get_and_register_status_brok(host)
# SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>;
# <trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_HOST_DOWNTIME(self, host, start_time, end_time, fixed,
trigger_id, duration, author, comment):
dt = Downtime(host, start_time, end_time, fixed, trigger_id, duration, author, comment)
host.add_downtime(dt)
self.sched.add(dt)
self.sched.get_and_register_status_brok(host)
if trigger_id != 0 and trigger_id in self.sched.downtimes:
self.sched.downtimes[trigger_id].trigger_me(dt)
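        # Note: a non-zero trigger_id chains this downtime to an existing one
        # (triggered-downtime semantics): the referenced downtime is told about
        # this one via trigger_me(), so it can propagate its own lifecycle.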
data = {
'host_name': host.get_name(),
'start_time': start_time,
'end_time': end_time,
'fixed': fixed,
'trigger_id': trigger_id,
'duration': duration,
'author': author,
'comment': comment
}
self.sched.add_Brok(Brok('schedule_host_downtime', data))
# SCHEDULE_HOST_SVC_CHECKS;<host_name>;<check_time>
def SCHEDULE_HOST_SVC_CHECKS(self, host, check_time):
for s in host.services:
self.SCHEDULE_SVC_CHECK(s, check_time)
self.sched.get_and_register_status_brok(s)
# SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
# <fixed>;<trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_HOST_SVC_DOWNTIME(self, host, start_time, end_time, fixed,
trigger_id, duration, author, comment):
for s in host.services:
self.SCHEDULE_SVC_DOWNTIME(s, start_time, end_time, fixed,
trigger_id, duration, author, comment)
# SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;<fixed>;
# <trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_SERVICEGROUP_HOST_DOWNTIME(self, servicegroup, start_time, end_time,
fixed, trigger_id, duration, author, comment):
for h in [s.host for s in servicegroup.get_services()]:
self.SCHEDULE_HOST_DOWNTIME(h, start_time, end_time, fixed,
trigger_id, duration, author, comment)
# SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;
# <fixed>;<trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_SERVICEGROUP_SVC_DOWNTIME(self, servicegroup, start_time, end_time,
fixed, trigger_id, duration, author, comment):
for s in servicegroup.get_services():
self.SCHEDULE_SVC_DOWNTIME(s, start_time, end_time, fixed,
trigger_id, duration, author, comment)
# SCHEDULE_SVC_CHECK;<host_name>;<service_description>;<check_time>
def SCHEDULE_SVC_CHECK(self, service, check_time):
service.schedule(force=False, force_time=check_time)
self.sched.get_and_register_status_brok(service)
    # SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>;<start_time>;<end_time>;
# <fixed>;<trigger_id>;<duration>;<author>;<comment>
def SCHEDULE_SVC_DOWNTIME(self, service, start_time, end_time, fixed,
trigger_id, duration, author, comment):
dt = Downtime(service, start_time, end_time, fixed, trigger_id, duration, author, comment)
service.add_downtime(dt)
self.sched.add(dt)
self.sched.get_and_register_status_brok(service)
if trigger_id != 0 and trigger_id in self.sched.downtimes:
self.sched.downtimes[trigger_id].trigger_me(dt)
data = {
'host_name': service.host_name,
'service_description': service.service_description,
'start_time': start_time,
'end_time': end_time,
'fixed': fixed,
'trigger_id': trigger_id,
'duration': duration,
'author': author,
'comment': comment
}
self.sched.add_Brok(Brok('schedule_service_downtime', data))
# SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment>
def SEND_CUSTOM_HOST_NOTIFICATION(self, host, options, author, comment):
pass
# SEND_CUSTOM_SVC_NOTIFICATION;<host_name>;<service_description>;<options>;<author>;<comment>
def SEND_CUSTOM_SVC_NOTIFICATION(self, service, options, author, comment):
pass
# SET_HOST_NOTIFICATION_NUMBER;<host_name>;<notification_number>
def SET_HOST_NOTIFICATION_NUMBER(self, host, notification_number):
pass
# SET_SVC_NOTIFICATION_NUMBER;<host_name>;<service_description>;<notification_number>
def SET_SVC_NOTIFICATION_NUMBER(self, service, notification_number):
pass
# SHUTDOWN_PROGRAM
def SHUTDOWN_PROGRAM(self):
pass
# START_ACCEPTING_PASSIVE_HOST_CHECKS
def START_ACCEPTING_PASSIVE_HOST_CHECKS(self):
if not self.conf.accept_passive_host_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
self.conf.accept_passive_host_checks = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# START_ACCEPTING_PASSIVE_SVC_CHECKS
def START_ACCEPTING_PASSIVE_SVC_CHECKS(self):
if not self.conf.accept_passive_service_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
self.conf.accept_passive_service_checks = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# START_EXECUTING_HOST_CHECKS
def START_EXECUTING_HOST_CHECKS(self):
if not self.conf.execute_host_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.conf.execute_host_checks = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# START_EXECUTING_SVC_CHECKS
def START_EXECUTING_SVC_CHECKS(self):
if not self.conf.execute_service_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.conf.execute_service_checks = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# START_OBSESSING_OVER_HOST;<host_name>
def START_OBSESSING_OVER_HOST(self, host):
if not host.obsess_over_host:
host.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
host.obsess_over_host = True
self.sched.get_and_register_status_brok(host)
# START_OBSESSING_OVER_HOST_CHECKS
def START_OBSESSING_OVER_HOST_CHECKS(self):
if not self.conf.obsess_over_hosts:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
self.conf.obsess_over_hosts = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# START_OBSESSING_OVER_SVC;<host_name>;<service_description>
def START_OBSESSING_OVER_SVC(self, service):
if not service.obsess_over_service:
service.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
service.obsess_over_service = True
self.sched.get_and_register_status_brok(service)
# START_OBSESSING_OVER_SVC_CHECKS
def START_OBSESSING_OVER_SVC_CHECKS(self):
if not self.conf.obsess_over_services:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
self.conf.obsess_over_services = True
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# STOP_ACCEPTING_PASSIVE_HOST_CHECKS
def STOP_ACCEPTING_PASSIVE_HOST_CHECKS(self):
if self.conf.accept_passive_host_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
self.conf.accept_passive_host_checks = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# STOP_ACCEPTING_PASSIVE_SVC_CHECKS
def STOP_ACCEPTING_PASSIVE_SVC_CHECKS(self):
if self.conf.accept_passive_service_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
self.conf.accept_passive_service_checks = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# STOP_EXECUTING_HOST_CHECKS
def STOP_EXECUTING_HOST_CHECKS(self):
if self.conf.execute_host_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.conf.execute_host_checks = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# STOP_EXECUTING_SVC_CHECKS
def STOP_EXECUTING_SVC_CHECKS(self):
if self.conf.execute_service_checks:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.conf.execute_service_checks = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# STOP_OBSESSING_OVER_HOST;<host_name>
def STOP_OBSESSING_OVER_HOST(self, host):
if host.obsess_over_host:
host.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
host.obsess_over_host = False
self.sched.get_and_register_status_brok(host)
# STOP_OBSESSING_OVER_HOST_CHECKS
def STOP_OBSESSING_OVER_HOST_CHECKS(self):
if self.conf.obsess_over_hosts:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
self.conf.obsess_over_hosts = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
# STOP_OBSESSING_OVER_SVC;<host_name>;<service_description>
def STOP_OBSESSING_OVER_SVC(self, service):
if service.obsess_over_service:
service.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
service.obsess_over_service = False
self.sched.get_and_register_status_brok(service)
# STOP_OBSESSING_OVER_SVC_CHECKS
def STOP_OBSESSING_OVER_SVC_CHECKS(self):
if self.conf.obsess_over_services:
self.conf.modified_attributes |= DICT_MODATTR["MODATTR_OBSESSIVE_HANDLER_ENABLED"].value
self.conf.obsess_over_services = False
self.conf.explode_global_conf()
self.sched.get_and_register_update_program_status_brok()
    # Now the Shinken-specific ones
# LAUNCH_SVC_EVENT_HANDLER;<host_name>;<service_description>
def LAUNCH_SVC_EVENT_HANDLER(self, service):
service.get_event_handlers(externalcmd=True)
    # LAUNCH_HOST_EVENT_HANDLER;<host_name>
def LAUNCH_HOST_EVENT_HANDLER(self, host):
host.get_event_handlers(externalcmd=True)
# ADD_SIMPLE_HOST_DEPENDENCY;<host_name>;<host_name>
def ADD_SIMPLE_HOST_DEPENDENCY(self, son, father):
if not son.is_linked_with_host(father):
logger.debug("Doing simple link between %s and %s", son.get_name(), father.get_name())
# Flag them so the modules will know that a topology change
# happened
son.topology_change = True
father.topology_change = True
# Now do the work
# Add a dep link between the son and the father
son.add_host_act_dependency(father, ['w', 'u', 'd'], None, True)
self.sched.get_and_register_status_brok(son)
self.sched.get_and_register_status_brok(father)
    # DEL_HOST_DEPENDENCY;<host_name>;<host_name>
def DEL_HOST_DEPENDENCY(self, son, father):
if son.is_linked_with_host(father):
logger.debug("Removing simple link between %s and %s",
son.get_name(), father.get_name())
# Flag them so the modules will know that a topology change
# happened
son.topology_change = True
father.topology_change = True
# Now do the work
son.del_host_act_dependency(father)
self.sched.get_and_register_status_brok(son)
self.sched.get_and_register_status_brok(father)
# ADD_SIMPLE_POLLER;realm_name;poller_name;address;port
def ADD_SIMPLE_POLLER(self, realm_name, poller_name, address, port):
logger.debug("I need to add the poller (%s, %s, %s, %s)",
realm_name, poller_name, address, port)
# First we look for the realm
r = self.conf.realms.find_by_name(realm_name)
if r is None:
logger.debug("Sorry, the realm %s is unknown", realm_name)
return
logger.debug("We found the realm: %s", r)
# TODO: backport this in the config class?
# We create the PollerLink object
t = {'poller_name': poller_name, 'address': address, 'port': port}
p = PollerLink(t)
p.fill_default()
p.prepare_for_conf()
parameters = {'max_plugins_output_length': self.conf.max_plugins_output_length}
p.add_global_conf_parameters(parameters)
self.arbiter.conf.pollers[p.id] = p
self.arbiter.dispatcher.elements.append(p)
self.arbiter.dispatcher.satellites.append(p)
r.pollers.append(p)
r.count_pollers()
r.fill_potential_satellites_by_type('pollers')
logger.debug("Poller %s added", poller_name)
logger.debug("Potential %s", r.get_potential_satellites_by_type('poller'))
if __name__ == '__main__':
FIFO_PATH = '/tmp/my_fifo'
if os.path.exists(FIFO_PATH):
os.unlink(FIFO_PATH)
if not os.path.exists(FIFO_PATH):
os.umask(0)
os.mkfifo(FIFO_PATH, 0o660)
my_fifo = open(FIFO_PATH, 'w+')
logger.debug("my_fifo: %s", my_fifo)
logger.debug(open(FIFO_PATH, 'r').readline())
# --- shinken-solutions_shinken/shinken/autoslots.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""The AutoSlots Class is a MetaClass: it manages how other classes
are created (Classes, not instances of theses classes).
Here it's role is to create the __slots__ list of the class with
all properties of Class.properties and Class.running_properties
so we do not have to add manually all properties to the __slots__
list when we add a new entry"""
from __future__ import absolute_import, division, print_function, unicode_literals
class AutoSlots(type):
    # __new__ is called when we create a new Class
    # that has metaclass = AutoSlots
    # cls is AutoSlots
    # name is the name of the Class as a string (like Service)
    # bases are the Classes from which the Class inherits (like SchedulingItem)
    # dct is the new Class dict (like all the methods of Service)
    # Some property names are not allowed in __slots__, like 2d_coords of
    # Host, so we must tag them in properties with no_slots
def __new__(cls, name, bases, dct):
# Thanks to Bertrand Mathieu to the set idea
slots = set(dct.get('__slots__', set()))
# Now get properties from properties and running_properties
if 'properties' in dct:
props = dct['properties']
slots.update((p for p in props
if not props[p].no_slots))
if 'running_properties' in dct:
props = dct['running_properties']
slots.update((p for p in props
if not props[p].no_slots))
dct['__slots__'] = tuple(slots)
return type.__new__(cls, name, bases, dct)
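# Illustrative use (a sketch, not part of the original module): any class
# built with this metaclass gets __slots__ filled from its declared
# properties, e.g.
#   class MyItem(six.with_metaclass(AutoSlots, object)):
#       properties = {'host_name': StringProp()}
# would end up with 'host_name' in MyItem.__slots__ (assuming the property
# does not set no_slots).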
# --- shinken-solutions_shinken/shinken/eventhandler.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import time
#from .action import Action
from shinken.action import Action
from shinken.property import IntegerProp, StringProp, FloatProp, BoolProp
from shinken.autoslots import AutoSlots
""" TODO: Add some comment about this class for the doc"""
class EventHandler(six.with_metaclass(AutoSlots, Action)):
my_type = 'eventhandler'
properties = {
'is_a': StringProp(default='eventhandler'),
'type': StringProp(default=''),
'_in_timeout': StringProp(default=False),
'status': StringProp(default=''),
'exit_status': StringProp(default=3),
'output': StringProp(default=''),
'long_output': StringProp(default=''),
't_to_go': StringProp(default=0),
'execution_time': FloatProp(default=0),
'u_time': FloatProp(default=0.0),
's_time': FloatProp(default=0.0),
'env': StringProp(default={}),
'perf_data': StringProp(default=''),
'sched_id': IntegerProp(default=0),
'timeout': IntegerProp(default=10),
'check_time': IntegerProp(default=0),
'command': StringProp(default=''),
'module_type': StringProp(default='fork'),
'worker': StringProp(default='none'),
'reactionner_tag': StringProp(default='None'),
'is_snapshot': BoolProp(default=False),
'priority': IntegerProp(default=100),
}
# id = 0 #Is common to Actions
def __init__(self, command, id=None, ref=None, timeout=10, env={},
module_type='fork', reactionner_tag='None',
is_snapshot=False, priority=100):
self.is_a = 'eventhandler'
self.type = ''
self.status = 'scheduled'
if id is None: # id != None is for copy call only
self.id = Action.id
Action.id += 1
self.ref = ref
self._in_timeout = False
self.timeout = timeout
self.exit_status = 3
self.command = command
self.output = ''
self.long_output = ''
self.t_to_go = time.time()
self.check_time = 0
self.execution_time = 0
self.u_time = 0
self.s_time = 0
self.perf_data = ''
self.env = {}
self.module_type = module_type
self.worker = 'none'
self.reactionner_tag = reactionner_tag
self.is_snapshot = is_snapshot
self.priority = priority
    # Return a copy of the check with just what is important for execution,
    # so we drop the ref and the like
def copy_shell(self):
# We create a dummy check with nothing in it, just defaults values
return self.copy_shell__(EventHandler('', id=self.id, is_snapshot=self.is_snapshot))
def get_return_from(self, e):
self.exit_status = e.exit_status
self.output = e.output
self.long_output = getattr(e, 'long_output', '')
self.check_time = e.check_time
self.execution_time = getattr(e, 'execution_time', 0.0)
self.perf_data = getattr(e, 'perf_data', '')
def get_outputs(self, out, max_plugins_output_length):
self.output = out
def is_launchable(self, t):
return t >= self.t_to_go
def __str__(self):
return "Check %d status:%s command:%s" % (self.id, self.status, self.command)
def get_id(self):
return self.id
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
    # Inverse function of __getstate__
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
if not hasattr(self, 'worker'):
self.worker = 'none'
if not getattr(self, 'module_type', None):
self.module_type = 'fork'
# s_time and u_time are added between 1.2 and 1.4
if not hasattr(self, 'u_time'):
self.u_time = 0
self.s_time = 0
# --- shinken-solutions_shinken/shinken/complexexpression.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.util import strip_and_uniq
"""
Here is a node class for complex_expression(s) and a factory to create them
"""
class ComplexExpressionNode(object):
def __init__(self):
self.operand = None
self.sons = []
self.configuration_errors = []
self.not_value = False
# If leaf, the content will be the hostgroup or hosts
# that are selected with this node
self.leaf = False
self.content = None
def __str__(self):
if not self.leaf:
return "Op:'%s' Leaf:%s Sons:'[%s] IsNot:%s'" % \
(self.operand, self.leaf, ','.join([str(s) for s in self.sons]), self.not_value)
else:
return 'IS LEAF %s' % self.content
def resolve_elements(self):
# If it's a leaf, we just need to dump a set with the content of the node
if self.leaf:
# print("Is a leaf", self.content)
if not self.content:
return set()
return set(self.content)
        # First put the NOT ones in a list, and the others in another list
not_nodes = [s for s in self.sons if s.not_value]
positiv_nodes = [s for s in self.sons if not s.not_value] # ok a not not is hard to read..
# print("Not nodes", not_nodes)
# print("Positiv nodes", positiv_nodes)
# By default we are using a OR rule
if not self.operand:
self.operand = '|'
res = set()
# print("Will now merge all of this", self.operand)
# The operand will change the positiv loop only
i = 0
for n in positiv_nodes:
node_members = n.resolve_elements()
if self.operand == '|':
# print("OR rule", node_members)
res = res.union(node_members)
elif self.operand == '&':
# print("AND RULE", node_members)
                # The first element of an AND rule is used as the base set
if i == 0:
res = node_members
else:
res = res.intersection(node_members)
i += 1
# And we finally remove all NOT elements from the result
for n in not_nodes:
node_members = n.resolve_elements()
res = res.difference(node_members)
return res
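    # Worked example (hypothetical groups): for "(linux|windows)&prod&!qualif",
    # the positive sons give union(linux, windows) intersected with prod,
    # and the qualif set is then subtracted from the result.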
# Check for empty (= not found) leaf nodes
def is_valid(self):
valid = True
if not self.sons:
valid = False
else:
for s in self.sons:
                if isinstance(s, ComplexExpressionNode) and not s.is_valid():
self.configuration_errors.extend(s.configuration_errors)
valid = False
return valid
""" TODO: Add some comment about this class for the doc"""
class ComplexExpressionFactory(object):
def __init__(self, ctx='hostgroups', grps=None, all_elements=None):
self.ctx = ctx
self.grps = grps
self.all_elements = all_elements
    # the () will be evaluated recursively, only one level of () at a time
def eval_cor_pattern(self, pattern):
pattern = pattern.strip()
# print("eval_cor_pattern::", pattern)
complex_node = False
        # Look if it's a complex pattern (with a rule) or
        # if it's a leaf of it, like a host/service
for m in '()+&|,':
if m in pattern:
complex_node = True
node = ComplexExpressionNode()
# print("Is so complex?", complex_node, pattern, node)
# if it's a single expression like !linux or production
# we will get the objects from it and return a leaf node
if not complex_node:
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
node.operand = self.ctx
node.leaf = True
obj, error = self.find_object(pattern)
if obj is not None:
node.content = obj
else:
node.configuration_errors.append(error)
return node
in_par = False
tmp = ''
stacked_par = 0
for c in pattern:
# print("MATCHING", c)
if c == ',' or c == '|':
# Maybe we are in a par, if so, just stack it
if in_par:
# print(", in a par, just staking it")
tmp += c
else:
# Oh we got a real cut in an expression, if so, cut it
# print("REAL , for cutting")
tmp = tmp.strip()
node.operand = '|'
if tmp != '':
# print("Will analyse the current str", tmp)
o = self.eval_cor_pattern(tmp)
node.sons.append(o)
tmp = ''
elif c == '&' or c == '+':
# Maybe we are in a par, if so, just stack it
if in_par:
# print(" & in a par, just staking it")
tmp += c
else:
# Oh we got a real cut in an expression, if so, cut it
# print("REAL & for cutting")
tmp = tmp.strip()
node.operand = '&'
if tmp != '':
# print("Will analyse the current str", tmp)
o = self.eval_cor_pattern(tmp)
node.sons.append(o)
tmp = ''
elif c == '(':
stacked_par += 1
# print("INCREASING STACK TO", stacked_par)
in_par = True
tmp = tmp.strip()
                # Maybe we just started a par, but we already have something
                # in tmp, which should not be there in fact!
if stacked_par == 1 and tmp != '':
# TODO : real error
print("ERROR : bad expression near", tmp)
continue
# If we are already in a par, add this (
# but not if it's the first one so
if stacked_par > 1:
tmp += c
# o = self.eval_cor_pattern(tmp)
# print("1( I've %s got new sons" % pattern , o)
# node.sons.append(o)
elif c == ')':
# print("Need closeing a sub expression?", tmp)
stacked_par -= 1
if stacked_par < 0:
# TODO : real error
print("Error : bad expression near", tmp, "too much ')'")
continue
if stacked_par == 0:
# print("THIS is closing a sub compress expression", tmp)
tmp = tmp.strip()
o = self.eval_cor_pattern(tmp)
node.sons.append(o)
in_par = False
# OK now clean the tmp so we start clean
tmp = ''
continue
# ok here we are still in a huge par, we just close one sub one
tmp += c
# Maybe it's a classic character, if so, continue
else:
tmp += c
        # Be sure to manage the trailing part when the line is done
        tmp = tmp.strip()
        if tmp != '':
            # print("Managing trailing part", tmp)
o = self.eval_cor_pattern(tmp)
# print("4end I've %s got new sons" % pattern , o)
node.sons.append(o)
# print("End, tmp", tmp)
# print("R %s:" % pattern, node)
return node
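    # Illustrative parse (hypothetical pattern): "(linux|windows)&prod" yields
    # an '&' node with two sons: a '|' node whose leaves are linux and
    # windows, and a leaf node for prod.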
    # We've got an object, like super-grp, so we should link the group here
def find_object(self, pattern):
obj = None
error = None
pattern = pattern.strip()
if pattern == '*':
obj = [h.host_name for h in self.all_elements.items.values()
if getattr(h, 'host_name', '') != '' and not h.is_tpl()]
return obj, error
# Ok a more classic way
# print("GRPS", self.grps)
if self.ctx == 'hostgroups':
# Ok try to find this hostgroup
hg = self.grps.find_by_name(pattern)
            # Maybe it's an unknown one?
if not hg:
error = "Error : cannot find the %s of the expression '%s'" % (self.ctx, pattern)
return hg, error
# Ok the group is found, get the elements!
elts = hg.get_hosts()
elts = strip_and_uniq(elts)
            # Maybe the hostgroup members contain '*'; if so, expand with all hosts
if '*' in elts:
elts.extend([h.host_name for h in self.all_elements.items.values()
if getattr(h, 'host_name', '') != '' and not h.is_tpl()])
# And remove this strange hostname too :)
elts.remove('*')
return elts, error
else: # templates
obj = self.grps.find_hosts_that_use_template(pattern)
return obj, error
# --- shinken-solutions_shinken/shinken/__init__.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# --- shinken-solutions_shinken/shinken/acknowledge.py ---
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
class Acknowledge(object):
"""
Allows you to acknowledge the current problem for the specified service.
By acknowledging the current problem, future notifications (for the same
    service state) are disabled.
"""
id = 1
# Just to list the properties we will send as serialized object
# so to others daemons, all but NOT REF
properties = {
'id': None,
'sticky': None,
'notify': None,
'end_time': None,
'author': None,
'comment': None,
}
# If the "sticky" option is set to one (1), the acknowledgement
# will remain until the service returns to an OK state. Otherwise
# the acknowledgement will automatically be removed when the
# service changes state. In this case Web interfaces set a value
# of (2).
#
# If the "notify" option is set to one (1), a notification will be
# sent out to contacts indicating that the current service problem
# has been acknowledged.
#
# <WTF??>
# If the "persistent" option is set to one (1), the comment
# associated with the acknowledgement will survive across restarts
# of the Shinken process. If not, the comment will be deleted the
# next time Shinken restarts. "persistent" not only means "survive
# restarts", but also
#
# => End of comment Missing!!
# </WTF??>
def __init__(self, ref, sticky, notify, persistent,
author, comment, end_time=0):
self.id = self.__class__.id
self.__class__.id += 1
self.ref = ref # pointer to srv or host we are applied
self.sticky = sticky
self.notify = notify
self.end_time = end_time
self.author = author
self.comment = comment
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
    # Inverse function of __getstate__
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
        # If loading an old ack, set end_time to 0, which means it never expires
if not hasattr(self, 'end_time'):
self.end_time = 0
# --- shinken-solutions_shinken/shinken/dispatcher.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This is the class of the dispatcher. Its role is to dispatch
configurations to other elements like schedulers, reactionner,
pollers, receivers and brokers. It is responsible for high availability part. If an
element dies and the element type has a spare, it sends the config of the
dead one to the spare
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import random
from shinken.util import alive_then_spare_then_deads
from shinken.log import logger
# Always initialize random :)
random.seed()
# Dispatcher Class
class Dispatcher(object):
# Load all elements, set them as not assigned
# and add them to elements, so loop will be easier :)
def __init__(self, conf, arbiter):
self.arbiter = arbiter
# Pointer to the whole conf
self.conf = conf
self.realms = conf.realms
# Direct pointer to important elements for us
for sat_type in ('arbiters', 'schedulers', 'reactionners',
'brokers', 'receivers', 'pollers'):
setattr(self, sat_type, getattr(self.conf, sat_type))
        # for each satellite, we look if the current arbiter has a specific
# satellitemap value set for this satellite.
# if so, we give this map to the satellite (used to build satellite URI later)
if arbiter is None:
continue
key = sat_type[:-1] + '_name' # i.e: schedulers -> scheduler_name
for satellite in getattr(self, sat_type):
sat_name = getattr(satellite, key)
satellite.set_arbiter_satellitemap(arbiter.satellitemap.get(sat_name, {}))
self.dispatch_queue = {'schedulers': [], 'reactionners': [],
'brokers': [], 'pollers': [], 'receivers': []}
self.elements = [] # all elements, sched and satellites
self.satellites = [] # only satellites not schedulers
for cfg in self.conf.confs.values():
cfg.is_assigned = False
cfg.assigned_to = None
# We try to remember each "push", so we
# can know with configuration ids+flavor
# if a satellite already got it or not :)
cfg.push_flavor = 0
# Add satellites in the good lists
self.elements.extend(self.schedulers)
# Others are in 2 lists
self.elements.extend(self.reactionners)
self.satellites.extend(self.reactionners)
self.elements.extend(self.pollers)
self.satellites.extend(self.pollers)
self.elements.extend(self.brokers)
self.satellites.extend(self.brokers)
self.elements.extend(self.receivers)
self.satellites.extend(self.receivers)
# Some flag about dispatch need or not
self.dispatch_ok = False
self.first_dispatch_done = False
# Prepare the satellites confs
for satellite in self.satellites:
satellite.prepare_for_conf()
# Some properties must be given to satellites from global
# configuration, like the max_plugins_output_length to pollers
parameters = {'max_plugins_output_length': self.conf.max_plugins_output_length}
for poller in self.pollers:
poller.add_global_conf_parameters(parameters)
# Reset need_conf for all schedulers.
for s in self.schedulers:
s.need_conf = True
# Same for receivers
for rec in self.receivers:
rec.need_conf = True
# checks alive elements
def check_alive(self):
for elt in self.elements:
# print("Updating elements", elt.get_name(), elt.__dict__)
elt.update_infos()
            # Elements that are not alive need a new conf,
            # and spares too if they do not already have one
# REF: doc/shinken-scheduler-lost.png (1)
if not elt.alive or hasattr(elt, 'conf') and elt.conf is None:
elt.need_conf = True
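            # Note: 'and' binds tighter than 'or', so the test above reads
            #   (not elt.alive) or (hasattr(elt, 'conf') and elt.conf is None)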
for arb in self.arbiters:
            # If it's not me and it's a spare (not the master)
if arb != self.arbiter and arb.spare:
arb.update_infos()
# print("Arb", arb.get_name(), "alive?", arb.alive, arb.__dict__)
# Check if all active items are still alive
# the result goes into self.dispatch_ok
# TODO: finish need conf
def check_dispatch(self):
# Check if the other arbiter has a conf, but only if I am a master
for arb in self.arbiters:
# If not me and I'm a master
if arb != self.arbiter and self.arbiter and not self.arbiter.spare:
if not arb.have_conf(self.conf.magic_hash):
if not hasattr(self.conf, 'whole_conf_pack'):
                        logger.error('CRITICAL: the arbiter tried to send a configuration but '
                                     'it is not a MASTER one?? Look at your configuration.')
continue
arb.put_conf(self.conf.whole_conf_pack)
# Remind it that WE are the master here!
arb.do_not_run()
else:
# Ok, it already has the conf. I remember that
# it does not have to run, I'm still alive!
arb.do_not_run()
# We check for confs to be dispatched on alive scheds. If not dispatched, need dispatch :)
        # and if dispatched on a failed node, remove the association and ask for a new dispatch
for r in self.realms:
for cfg_id in r.confs:
push_flavor = r.confs[cfg_id].push_flavor
sched = r.confs[cfg_id].assigned_to
if sched is None:
if self.first_dispatch_done:
logger.info("Scheduler configuration %d is unmanaged!!", cfg_id)
self.dispatch_ok = False
else:
if not sched.alive:
self.dispatch_ok = False # so we ask a new dispatching
logger.warning("Scheduler %s had the configuration %d but is dead, "
"I am not happy.", sched.get_name(), cfg_id)
sched.conf.assigned_to = None
sched.conf.is_assigned = False
sched.conf.push_flavor = 0
sched.push_flavor = 0
sched.conf = None
# Maybe the scheduler restarts, so is alive but without
# the conf we think it was managing so ask it what it is
# really managing, and if not, put the conf unassigned
if not sched.do_i_manage(cfg_id, push_flavor):
self.dispatch_ok = False # so we ask a new dispatching
logger.warning("Scheduler %s did not managed its configuration %d, "
"I am not happy.", sched.get_name(), cfg_id)
if sched.conf:
sched.conf.assigned_to = None
sched.conf.is_assigned = False
sched.conf.push_flavor = 0
sched.push_flavor = 0
sched.need_conf = True
sched.conf = None
# Else: ok the conf is managed by a living scheduler
# Maybe satellites are alive, but do not have a cfg yet.
# I think so. It is not good. I ask a global redispatch for
# the cfg_id I think is not correctly dispatched.
for r in self.realms:
for cfg_id in r.confs:
push_flavor = r.confs[cfg_id].push_flavor
try:
for kind in ('reactionner', 'poller', 'broker', 'receiver'):
                    # We must have the right number of satellites or we are not happy,
                    # so we are sure to trigger a dispatch on every loop where one is missing
if (len(r.to_satellites_managed_by[kind][cfg_id]) < r.get_nb_of_must_have_satellites(kind)):
logger.warning("Missing satellite %s for configuration %d:" % (kind, cfg_id))
# TODO: less violent! Must only resent to who need?
# must be caught by satellite who sees that
# it already has the conf (hash) and do nothing
self.dispatch_ok = False # so we will redispatch all
r.to_satellites_need_dispatch[kind][cfg_id] = True
r.to_satellites_managed_by[kind][cfg_id] = []
for satellite in r.to_satellites_managed_by[kind][cfg_id]:
# Maybe the sat was marked as not alive, but still in
# to_satellites_managed_by. That means that a new dispatch
# is needed
# Or maybe it is alive but I thought that this reactionner
# managed the conf and it doesn't.
# I ask a full redispatch of these cfg for both cases
if push_flavor == 0 and satellite.alive:
                        logger.warning('[%s] The %s %s manages an unmanaged configuration',
r.get_name(), kind, satellite.get_name())
continue
if not satellite.alive or (
satellite.reachable and not
satellite.do_i_manage(cfg_id, push_flavor)):
logger.warning('[%s] The %s %s seems to be down, '
'I must re-dispatch its role to someone else.',
r.get_name(), kind, satellite.get_name())
self.dispatch_ok = False # so we will redispatch all
r.to_satellites_need_dispatch[kind][cfg_id] = True
r.to_satellites_managed_by[kind][cfg_id] = []
# At the first pass, there is no cfg_id in to_satellites_managed_by
except KeyError:
pass
# Look for receivers. If they got conf, it's ok, if not, need a simple
# conf
for r in self.realms:
for rec in r.receivers:
            # If the receiver does not have a conf, it must get one :)
if rec.reachable and not rec.got_conf():
self.dispatch_ok = False # so we will redispatch all
rec.need_conf = True
# Imagine a world where... oh no, wait...
# Imagine a master got the conf and the network is down
# a spare takes it (good :) ). Like the Empire, the master
# strikes back! It was still alive! (like Elvis). It still got conf
# and is running! not good!
    # Bad dispatch: a link has a conf but I do not allow this,
    # so I ask it to wait for a new conf and stop kidding.
def check_bad_dispatch(self):
for elt in self.elements:
if hasattr(elt, 'conf'):
# If element has a conf, I do not care, it's a good dispatch
# If dead: I do not ask it something, it won't respond..
if elt.conf is None and elt.reachable:
# print("Ask", elt.get_name() , 'if it got conf')
if elt.have_conf():
logger.warning("The element %s have a conf and should "
"not have one! I ask it to idle now",
elt.get_name())
elt.active = False
elt.wait_new_conf()
                    # I do not care whether the order was sent or not:
                    # the next loop will resend it if needed
# else:
# print("No conf")
# I ask satellites which sched_id they manage. If I do not agree, I ask
# them to remove it
for satellite in self.satellites:
kind = satellite.get_my_type()
if satellite.reachable:
cfg_ids = satellite.managed_confs # what_i_managed()
# I do not care about satellites that do nothing, they already
# do what I want :)
if len(cfg_ids) != 0:
id_to_delete = []
for cfg_id in cfg_ids:
# DBG print(kind, ":", satellite.get_name(), "manage cfg id:", cfg_id)
# Ok, we search for realms that have the conf
for r in self.realms:
if cfg_id in r.confs:
# Ok we've got the realm, we check its to_satellites_managed_by
                        # to see if the satellite is in it. If not, we remove the sched_id for it
if satellite not in r.to_satellites_managed_by[kind][cfg_id]:
id_to_delete.append(cfg_id)
# Maybe we removed all cfg_id of this reactionner
# We can put it idle, no active and wait_new_conf
if len(id_to_delete) == len(cfg_ids):
satellite.active = False
logger.info("I ask %s to wait a new conf", satellite.get_name())
satellite.wait_new_conf()
else:
# It is not fully idle, just less cfg
for id in id_to_delete:
logger.info("I ask to remove configuration N%d from %s",
id, satellite.get_name())
satellite.remove_from_conf(id)
# Make an ORDERED list of schedulers so we can
# send them conf in this order for a specific realm
def get_scheduler_ordered_list(self, r):
# get scheds, alive and no spare first
scheds = []
for s in r.schedulers:
scheds.append(s)
        # now the spare scheds of higher realms:
        # they come after the scheds of this realm, so
        # they will be used after this realm's own
        # spares
for higher_r in r.higher_realms:
for s in higher_r.schedulers:
if s.spare:
scheds.append(s)
        # Now we sort the scheds: masters first, then spares,
        # then the dead ones (which we do not care about)
scheds = alive_then_spare_then_deads(scheds)
scheds.reverse() # pop is last, I need first
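        # Illustrative note (an assumption about alive_then_spare_then_deads):
        # the sort yields [alive masters, alive spares, dead ones], so e.g.
        #   scheds == [master, spare, dead]
        #   scheds.reverse()  # -> [dead, spare, master]
        #   scheds.pop()      # -> master, tried first by the dispatch loop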
return scheds
# Manage the dispatch
# REF: doc/shinken-conf-dispatching.png (3)
def dispatch(self):
        # Ok, we have been through dispatch at least once, so from now on errors are real errors
self.first_dispatch_done = True
        # If no dispatch is needed, do not dispatch :)
if not self.dispatch_ok:
for r in self.realms:
conf_to_dispatch = [cfg for cfg in r.confs.values() if not cfg.is_assigned]
nb_conf = len(conf_to_dispatch)
if nb_conf > 0:
logger.info("Dispatching Realm %s", r.get_name())
logger.info('[%s] Dispatching %d/%d configurations',
r.get_name(), nb_conf, len(r.confs))
# Now we get in scheds all scheduler of this realm and upper so
# we will send them conf (in this order)
scheds = self.get_scheduler_ordered_list(r)
if nb_conf > 0:
print_string = '[%s] Schedulers order: %s' % (
r.get_name(), ','.join([s.get_name() for s in scheds]))
logger.info(print_string)
# Try to send only for alive members
scheds = [s for s in scheds if s.alive]
# Now we do the real job
# every_one_need_conf = False
for conf in conf_to_dispatch:
logger.info('[%s] Dispatching configuration %s', r.get_name(), conf.id)
# If there is no alive schedulers, not good...
if len(scheds) == 0:
                    logger.info('[%s] but there are no alive schedulers in this realm!',
r.get_name())
                # we need to loop until the conf is assigned
                # or until there are no more schedulers available
while True:
try:
sched = scheds.pop()
except IndexError: # No more schedulers.. not good, no loop
# need_loop = False
                        # The conf does not need to be dispatched
cfg_id = conf.id
for kind in ('reactionner', 'poller', 'broker', 'receiver'):
r.to_satellites[kind][cfg_id] = None
r.to_satellites_need_dispatch[kind][cfg_id] = False
r.to_satellites_managed_by[kind][cfg_id] = []
break
logger.info('[%s] Trying to send conf %d to scheduler %s',
r.get_name(), conf.id, sched.get_name())
if not sched.need_conf:
                        logger.info('[%s] The scheduler %s does not need a conf, sorry',
                                    r.get_name(), sched.get_name())
continue
# We tag conf with the instance_name = scheduler_name
instance_name = sched.scheduler_name
# We give this configuration a new 'flavor'
conf.push_flavor = random.randint(1, 1000000)
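                    # Sketch (consistent with managed_confs below): the pair
                    # (cfg_id, push_flavor) acts as a version stamp, so a
                    # do_i_manage() check can boil down to:
                    #   self.managed_confs.get(cfg_id) == push_flavor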
# REF: doc/shinken-conf-dispatching.png (3)
# REF: doc/shinken-scheduler-lost.png (2)
override_conf = sched.get_override_configuration()
satellites_for_sched = r.get_satellites_links_for_scheduler()
s_conf = r.serialized_confs[conf.id]
# Prepare the conf before sending it
conf_package = {
'conf': s_conf, 'override_conf': override_conf,
'modules': sched.modules, 'satellites': satellites_for_sched,
'instance_name': sched.scheduler_name, 'push_flavor': conf.push_flavor,
'skip_initial_broks': sched.skip_initial_broks,
'accept_passive_unknown_check_results':
sched.accept_passive_unknown_check_results,
'harakiri_threshold': sched.harakiri_threshold,
# shinken.io part
'api_key': self.conf.api_key,
'secret': self.conf.secret,
'http_proxy': self.conf.http_proxy,
                        # statsd one too because OlivierHA loves statsd
                        # and after some years of effort he managed to make me
                        # understand the power of metrics :)
'statsd_host': self.conf.statsd_host,
'statsd_port': self.conf.statsd_port,
'statsd_prefix': self.conf.statsd_prefix,
'statsd_enabled': self.conf.statsd_enabled,
'statsd_interval': self.conf.statsd_interval,
'statsd_types': self.conf.statsd_types,
'statsd_pattern': self.conf.statsd_pattern,
}
t1 = time.time()
is_sent = sched.put_conf(conf_package)
logger.debug("Conf is sent in %d", time.time() - t1)
if not is_sent:
logger.warning('[%s] configuration dispatching error for scheduler %s',
r.get_name(), sched.get_name())
continue
logger.info('[%s] Dispatch OK of conf in scheduler %s',
r.get_name(), sched.get_name())
sched.conf = conf
sched.push_flavor = conf.push_flavor
sched.need_conf = False
conf.is_assigned = True
conf.assigned_to = sched
# We update all data for this scheduler
sched.managed_confs = {conf.id: conf.push_flavor}
# Now we generate the conf for satellites:
cfg_id = conf.id
for kind in ('reactionner', 'poller', 'broker', 'receiver'):
r.to_satellites[kind][cfg_id] = sched.give_satellite_cfg()
r.to_satellites_need_dispatch[kind][cfg_id] = True
r.to_satellites_managed_by[kind][cfg_id] = []
# Ok, the conf is dispatched, no more loop for this
# configuration
break
            # We popped confs to dispatch, so there should be no more unassigned ones...
conf_to_dispatch = [cfg for cfg in self.conf.confs.values() if not cfg.is_assigned]
nb_missed = len(conf_to_dispatch)
if nb_missed > 0:
logger.warning("All schedulers configurations are not dispatched, %d are missing",
nb_missed)
else:
logger.info("OK, all schedulers configurations are dispatched :)")
self.dispatch_ok = True
        # Schedulers without a conf after a successful dispatch are set to
        # need_conf = False so they do not trigger useless dispatches
if self.dispatch_ok:
for sched in self.schedulers.items.values():
if sched.conf is None:
# print("Tagging sched", sched.get_name(),)
# "so it do not ask anymore for conf"
sched.need_conf = False
arbiters_cfg = {}
for arb in self.arbiters:
arbiters_cfg[arb.id] = arb.give_satellite_cfg()
# We put the satellites conf with the "new" way so they see only what we want
for r in self.realms:
for cfg in r.confs.values():
cfg_id = cfg.id
                # flavor is the push number of this configuration as sent to a scheduler
flavor = cfg.push_flavor
for kind in ('reactionner', 'poller', 'broker', 'receiver'):
if not r.to_satellites_need_dispatch[kind][cfg_id]:
continue
cfg_for_satellite_part = r.to_satellites[kind][cfg_id]
                # make a copy of the potential satellites list for sorting
satellites = []
for satellite in r.get_potential_satellites_by_type(kind):
satellites.append(satellite)
satellites = alive_then_spare_then_deads(satellites)
# Only keep alive Satellites and reachable ones
satellites = [s for s in satellites if s.alive and s.reachable]
                # If we got brokers, we rotate the list so each configuration
                # starts with a different broker, smoothing the load.
                # But the spares must stay at the end ;)
                # WARNING: skip this if we are in a complete broker link realm
if kind == "broker" and not r.broker_complete_links:
nospare = [s for s in satellites if not s.spare]
                    if len(nospare) != 0:
                        idx = cfg_id % len(nospare)
                        spares = [s for s in satellites if s.spare]
                        # rotate the non-spare list by idx so each
                        # configuration starts on a different broker
                        satellites = nospare[idx:] + nospare[:idx]
satellites.extend(spares)
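                        # Worked example of the rotation above (hypothetical
                        # non-spare brokers b0, b1, b2 and cfg_id == 4):
                        #   idx == 4 % 3 == 1
                        #   nospare[1:] + nospare[:1] == [b1, b2, b0]
                        # so consecutive configurations start on different brokers.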
# Dump the order where we will send conf
satellite_string = "[%s] Dispatching %s satellite with order: " % (
r.get_name(), kind)
for satellite in satellites:
satellite_string += '%s (spare:%s), ' % (
satellite.get_name(), satellite.spare
)
logger.info(satellite_string)
                # Now we dispatch the cfg to everyone asking for it
nb_cfg_sent = 0
for satellite in satellites:
# Send only if we need, and if we can
if (nb_cfg_sent < r.get_nb_of_must_have_satellites(kind) and
satellite.alive):
satellite.cfg['schedulers'][cfg_id] = cfg_for_satellite_part
if satellite.manage_arbiters:
satellite.cfg['arbiters'] = arbiters_cfg
# Brokers should have poller/reactionners links too
if kind == "broker":
r.fill_broker_with_poller_reactionner_links(satellite)
is_sent = False
# Maybe this satellite already got this configuration,
# so skip it
if satellite.do_i_manage(cfg_id, flavor):
logger.info('[%s] Skipping configuration %d send '
'to the %s %s: it already got it',
r.get_name(), cfg_id, kind,
satellite.get_name())
is_sent = True
                        else:  # ok, it really needs it :)
logger.info('[%s] Trying to send configuration to %s %s',
r.get_name(), kind, satellite.get_name())
is_sent = satellite.put_conf(satellite.cfg)
if is_sent:
satellite.active = True
logger.info('[%s] Dispatch OK of configuration %s to %s %s',
r.get_name(), cfg_id, kind,
satellite.get_name())
# We change the satellite configuration, update our data
satellite.known_conf_managed_push(cfg_id, flavor)
nb_cfg_sent += 1
r.to_satellites_managed_by[kind][cfg_id].append(satellite)
# If we got a broker, the conf_id must be sent to only ONE
# broker in a classic realm.
if kind == "broker" and not r.broker_complete_links:
break
# If receiver, we must send the hostnames
# of this configuration
if kind == 'receiver':
hnames = [h.get_name() for h in cfg.hosts]
logger.debug("[%s] Sending %s hostnames to the "
"receiver %s",
r.get_name(), len(hnames),
satellite.get_name())
satellite.push_host_names({'sched_id': cfg_id, 'hnames': hnames})
# else:
# #I've got enough satellite, the next ones are considered spares
if nb_cfg_sent == r.get_nb_of_must_have_satellites(kind):
logger.info("[%s] OK, no more %s sent need", r.get_name(), kind)
r.to_satellites_need_dispatch[kind][cfg_id] = False
# And now we dispatch receivers. It's easier, they need ONE conf
# in all their life :)
for r in self.realms:
for rec in r.receivers:
if not rec.need_conf:
continue
logger.info('[%s] Trying to send configuration to receiver %s',
r.get_name(), rec.get_name())
is_sent = False
if rec.reachable:
is_sent = rec.put_conf(rec.cfg)
else:
                logger.info('[%s] Skipping configuration send to offline receiver %s',
                            r.get_name(), rec.get_name())
if is_sent:
rec.active = True
rec.need_conf = False
logger.info('[%s] Dispatch OK of configuration to receiver %s',
r.get_name(), rec.get_name())
else:
logger.error('[%s] Dispatching failed for receiver %s',
r.get_name(), rec.get_name())
| 31,386 | Python | .py | 535 | 37.760748 | 116 | 0.493128 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,470 | commandcall.py | shinken-solutions_shinken/shinken/commandcall.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from shinken.autoslots import AutoSlots
from shinken.property import StringProp, BoolProp, IntegerProp
class DummyCommandCall(object):
"""Ok, slots are fun: you cannot set the __autoslots__
    on the same class you use, fun isn't it? So we define
    a dummy useless class to get it :)
"""
pass
class CommandCall(six.with_metaclass(AutoSlots, DummyCommandCall)):
"""This class is use when a service, contact or host define
a command with args.
"""
# __slots__ = ('id', 'call', 'command', 'valid', 'args', 'poller_tag',
# 'reactionner_tag', 'module_type', '__dict__')
id = 0
my_type = 'CommandCall'
properties = {
'call': StringProp(),
'command': StringProp(),
'poller_tag': StringProp(default='None'),
'reactionner_tag': StringProp(default='None'),
'module_type': StringProp(default='fork'),
'valid': BoolProp(default=False),
'args': StringProp(default=[]),
'timeout': IntegerProp(default=-1),
'late_relink_done': BoolProp(default=False),
'enable_environment_macros': BoolProp(default=False),
'priority': IntegerProp(default=100),
}
def __init__(self, commands, call, poller_tag='None',
reactionner_tag='None', enable_environment_macros=0,
priority=100):
self.id = self.__class__.id
self.__class__.id += 1
self.call = call
self.timeout = -1
# Now split by ! and get command and args
self.get_command_and_args()
self.command = commands.find_by_name(self.command.strip())
        self.late_relink_done = False  # so we do not relink the same commandcall again and again
if self.command is not None:
self.valid = True
else:
self.valid = False
if self.valid:
            # If the host/service does not give an overriding poller_tag, take
            # the one from the command
self.poller_tag = poller_tag # from host/service
self.reactionner_tag = reactionner_tag
self.module_type = self.command.module_type
self.enable_environment_macros = self.command.enable_environment_macros
self.timeout = int(self.command.timeout)
if self.valid and poller_tag == 'None':
# from command if not set
self.poller_tag = self.command.poller_tag
# Same for reactionner tag
if self.valid and reactionner_tag == 'None':
# from command if not set
self.reactionner_tag = self.command.reactionner_tag
        # Item priority has precedence if a value is explicitly set
if int(priority) != self.properties["priority"].default:
self.priority = int(priority)
else:
self.priority = int(self.command.priority)
def get_command_and_args(self):
"""We want to get the command and the args with ! splitting.
but don't forget to protect against the \! to do not split them
"""
# First protect
p_call = self.call.replace(r'\!', '___PROTECT_EXCLAMATION___')
tab = p_call.split('!')
self.command = tab[0]
# Reverse the protection
self.args = [s.replace('___PROTECT_EXCLAMATION___', '!')
for s in tab[1:]]
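    # Illustrative doctest-style sketch (hypothetical call string):
    #   call == 'check_http!80!\!bang'
    #   -> command 'check_http', args ['80', '!bang']
    # the escaped \! survives the split as a literal '!'.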
    # If we didn't already do the late relink, do it
def late_linkify_with_command(self, commands):
if self.late_relink_done:
return
self.late_relink_done = True
c = commands.find_by_name(self.command)
self.command = c
def is_valid(self):
return self.valid
def __str__(self):
return str(self.__dict__)
def get_name(self):
return self.call
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
        # The command is a bit special: we just put its name,
        # or '' if needed
if self.command and not isinstance(self.command, six.string_types):
res['command'] = self.command.get_name()
        # Maybe it's a repickle of an unpickled thing (like with deepcopy); if so,
        # just take the value
elif self.command and isinstance(self.command, six.string_types):
res['command'] = self.command
else:
res['command'] = ''
return res
def __setstate__(self, state):
"""Inverted function of getstate"""
cls = self.__class__
        # We moved to a dict state in 1.0,
        # but retention files from 0.8 used a tuple
if isinstance(state, tuple):
self.__setstate_pre_1_0__(state)
return
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
def __setstate_pre_1_0__(self, state):
"""In 1.0 we move to a dict save. Before, it was
a tuple save, like
({'id': 11}, {'poller_tag': 'None', 'reactionner_tag': 'None',
'command_line': '/usr/local/nagios/bin/rss-multiuser',
'module_type': 'fork', 'command_name': 'notify-by-rss'})
"""
for d in state:
for k, v in d.items():
setattr(self, k, v)
| 6,529 | Python | .py | 152 | 34.592105 | 94 | 0.605507 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,471 | scheduler.py | shinken-solutions_shinken/shinken/scheduler.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import time
import os
import io
import sys
import tempfile
import traceback
import threading
if six.PY2:
from Queue import Empty
else:
from queue import Empty
from shinken.external_command import ExternalCommand
from shinken.check import Check
from shinken.notification import Notification
from shinken.eventhandler import EventHandler
from shinken.brok import Brok
from shinken.downtime import Downtime
from shinken.contactdowntime import ContactDowntime
from shinken.comment import Comment
from shinken.acknowledge import Acknowledge
from shinken.log import logger
from shinken.util import nighty_five_percent, get_memory
from shinken.serializer import serialize, deserialize
from shinken.load import Load
from shinken.http_client import HTTPClient, HTTPException
from shinken.stats import statsmgr
from shinken.misc.common import DICT_MODATTR
class Scheduler(object):
"""Please Add a Docstring to describe the class here"""
def __init__(self, scheduler_daemon):
'''
:type scheduler_daemon: shinken.daemons.schedulerdaemon.Shinken
'''
self.sched_daemon = scheduler_daemon
        # When set to False by us, we die and the arbiter launches a new Scheduler
self.must_run = True
        # protect this unique list
self.waiting_results_lock = threading.RLock()
        self.waiting_results = []  # satellites return us results;
        # so as not to block on them, we put them here and
        # use them later
        # Every N seconds we call functions like consume, del zombies
        # etc. All of these functions are in recurrent_works with the
        # tick (how often) to run. So it must be an integer > 0
# The order is important, so make key an int.
# TODO: at load, change value by configuration one (like reaper time, etc)
self.recurrent_works = {
0: ('update_downtimes_and_comments', self.update_downtimes_and_comments, 1),
1: ('schedule', self.schedule, 1), # just schedule
2: ('consume_results', self.consume_results, 1), # incorporate checks and dependencies
            # now get the new actions (checks, notifs) raised
3: ('get_new_actions', self.get_new_actions, 1),
4: ('get_new_broks', self.get_new_broks, 1), # and broks
5: ('scatter_master_notifications', self.scatter_master_notifications, 1),
6: ('delete_zombie_checks', self.delete_zombie_checks, 1),
7: ('delete_zombie_actions', self.delete_zombie_actions, 1),
# 3: (self.delete_unwanted_notifications, 1),
8: ('check_freshness', self.check_freshness, 10),
9: ('clean_caches', self.clean_caches, 1),
10: ('update_retention_file', self.update_retention_file, 3600),
11: ('check_orphaned', self.check_orphaned, 60),
# For NagVis like tools: update our status every 10s
12: ('get_and_register_update_program_status_brok',
self.get_and_register_update_program_status_brok, 10),
            # Check for a system time change. Do it AFTER getting new checks
            # so they are adjusted too.
13: ('check_for_system_time_change', self.sched_daemon.check_for_system_time_change, 1),
# launch if need all internal checks
14: ('manage_internal_checks', self.manage_internal_checks, 1),
            # from time to time, clean the possibly overgrown queues so memory
            # usage does not explode (the real interval comes from the conf)
15: ('clean_queues', self.clean_queues, 1),
# Look for new business_impact change by modulation every minute
16: ('update_business_values', self.update_business_values, 60),
            # Reset the topology change flag if needed
17: ('reset_topology_change_flag', self.reset_topology_change_flag, 1),
18: ('check_for_expire_acknowledge', self.check_for_expire_acknowledge, 1),
19: ('send_broks_to_modules', self.send_broks_to_modules, 1),
20: ('get_objects_from_from_queues', self.get_objects_from_from_queues, 1),
}
# stats part
self.nb_checks_send = 0
self.nb_actions_send = 0
self.nb_broks_send = 0
self.nb_check_received = 0
# Log init
logger.load_obj(self)
        self.instance_id = 0  # Temporary value, will be erased later
# Ours queues
self.checks = {}
self.actions = {}
self.downtimes = {}
self.contact_downtimes = {}
self.comments = {}
self.broks = []
# Some flags
        self.has_full_broks = False  # do we have an initial_broks in the broks queue?
self.need_dump_memory = False # set by signal 1
self.need_objects_dump = False # set by signal 2
# And a dummy push flavor
self.push_flavor = 0
# Now fake initialize for our satellites
self.brokers = {}
self.pollers = {}
self.reactionners = {}
def reset(self):
self.must_run = True
with self.waiting_results_lock:
del self.waiting_results[:]
for o in self.checks, self.actions, self.downtimes,\
self.contact_downtimes, self.comments,\
self.brokers:
o.clear()
del self.broks[:]
def iter_hosts_and_services(self):
for what in (self.hosts, self.services):
for elt in what:
yield elt
# Load conf for future use
    # we are in_test if the data comes from an arbiter-like object,
    # so only for tests
def load_conf(self, conf, in_test=False):
self.program_start = int(time.time())
self.conf = conf
self.hostgroups = conf.hostgroups
self.services = conf.services
# We need reversed list for search in the retention
# file read
self.services.optimize_service_search(conf.hosts)
self.hosts = conf.hosts
self.notificationways = conf.notificationways
self.checkmodulations = conf.checkmodulations
self.macromodulations = conf.macromodulations
self.contacts = conf.contacts
self.contactgroups = conf.contactgroups
self.servicegroups = conf.servicegroups
self.timeperiods = conf.timeperiods
self.commands = conf.commands
self.triggers = conf.triggers
self.triggers.compile()
self.triggers.load_objects(self)
if not in_test:
# Commands in the host/services/contacts are not real one
# we must relink them
t0 = time.time()
self.conf.late_linkify()
logger.debug("Late command relink in %d", time.time() - t0)
# self.status_file = StatusFile(self)
# External status file
# From Arbiter. Use for Broker to differentiate schedulers
self.instance_id = conf.instance_id
# Tag our hosts with our instance_id
for h in self.hosts:
h.instance_id = conf.instance_id
for s in self.services:
s.instance_id = conf.instance_id
# self for instance_name
self.instance_name = conf.instance_name
# and push flavor
self.push_flavor = conf.push_flavor
# Now we can update our 'ticks' for special calls
# like the retention one, etc
self.update_recurrent_works_tick('update_retention_file',
self.conf.retention_update_interval * 60)
self.update_recurrent_works_tick('clean_queues', self.conf.cleaning_queues_interval)
# Update the 'tick' for a function call in our
# recurrent work
def update_recurrent_works_tick(self, f_name, new_tick):
for i in self.recurrent_works:
(name, f, old_tick) = self.recurrent_works[i]
if name == f_name:
logger.debug("Changing the tick to %d for the function %s", new_tick, name)
self.recurrent_works[i] = (name, f, new_tick)
# Load the pollers from our app master
def load_satellites(self, pollers, reactionners):
self.pollers = pollers
self.reactionners = reactionners
    # Oh... the Arbiter wants us to die, to launch a new Scheduler
    # "Mais qu'a-t-il de plus que je n'ai pas ?"
    # "But... what does it have that I don't?"
def die(self):
self.must_run = False
def dump_objects(self):
d = tempfile.gettempdir()
p = os.path.join(d, 'scheduler-obj-dump-%d' % time.time())
logger.info('Opening the DUMP FILE %s', p)
try:
f = open(p, 'w')
f.write('Scheduler DUMP at %d\n' % time.time())
for c in self.checks.values():
s = 'CHECK: %s:%s:%s:%s:%s:%s\n' % \
(c.id, c.status, c.t_to_go, c.poller_tag, c.command, c.worker)
f.write(s)
for a in self.actions.values():
s = '%s: %s:%s:%s:%s:%s:%s\n' % \
(a.__class__.my_type.upper(), a.id, a.status,
a.t_to_go, a.reactionner_tag, a.command, a.worker)
f.write(s)
for b in self.broks:
s = 'BROK: %s:%s\n' % (b.id, b.type)
f.write(s)
f.close()
except Exception as exp:
logger.error("Error in writing the dump file %s : %s", p, exp)
def dump_config(self):
d = tempfile.gettempdir()
p = os.path.join(d, 'scheduler-conf-dump-%d' % time.time())
logger.info('Opening the DUMP FILE %s', p)
try:
f = open(p, 'w')
f.write('Scheduler config DUMP at %d\n' % time.time())
self.conf.dump(f)
f.close()
except Exception as exp:
logger.error("Error in writing the dump file %s : %s", p, exp)
# Load the external command
def load_external_command(self, e):
self.external_command = e
# We've got activity in the fifo, we get and run commands
def run_external_commands(self, cmds):
for command in cmds:
self.run_external_command(command)
def run_external_command(self, command):
logger.debug("scheduler resolves command '%s'", command)
ext_cmd = ExternalCommand(command)
self.external_command.resolve_command(ext_cmd)
    # add_Brok is a bit more complex than the others because,
    # on startup, broks are put in a global queue: self.broks;
    # then when the first broker connects, it will generate initial_broks
    # in its own queue (so bname != None);
    # and in a "normal" run, we just need to put the brok in all queues
def add_Brok(self, brok, bname=None):
# For brok, we TAG brok with our instance_id
brok.instance_id = self.instance_id
# Maybe it's just for one broker
if bname:
self.brokers[bname]['broks'].append(brok)
else:
# If there are known brokers, give it to them
if len(self.brokers) > 0:
# Or maybe it's for all
for bname in self.brokers:
self.brokers[bname]['broks'].append(brok)
else: # no brokers? maybe at startup for logs
# we will put in global queue, that the first broker
# connection will get all
self.broks.append(brok)
def add_Notification(self, notif):
self.actions[notif.id] = notif
# A notification ask for a brok
if notif.contact is not None:
b = notif.get_initial_status_brok()
self.add(b)
def add_Check(self, c):
self.checks[c.id] = c
        # A new check means the host/service's next_check changed and
        # needs to be refreshed
b = c.ref.get_next_schedule_brok()
self.add(b)
def add_EventHandler(self, action):
# print("Add an event Handler", elt.id)
self.actions[action.id] = action
def add_Downtime(self, dt):
self.downtimes[dt.id] = dt
if dt.extra_comment:
self.add_Comment(dt.extra_comment)
def add_ContactDowntime(self, contact_dt):
self.contact_downtimes[contact_dt.id] = contact_dt
def add_Comment(self, comment):
self.comments[comment.id] = comment
b = comment.ref.get_update_status_brok()
self.add(b)
# Ok one of our modules send us a command? just run it!
def add_ExternalCommand(self, ext_cmd):
self.external_command.resolve_command(ext_cmd)
# Schedulers have some queues. We can simplify call by adding
# elements into the proper queue just by looking at their type
# Brok -> self.broks
# Check -> self.checks
# Notification -> self.actions
# Downtime -> self.downtimes
# ContactDowntime -> self.contact_downtimes
def add(self, elt):
f = self.__add_actions.get(elt.__class__, None)
if f:
# print("found action for %s: %s" % (elt.__class__.__name__, f.__name__))
f(self, elt)
__add_actions = {
Check: add_Check,
Brok: add_Brok,
Notification: add_Notification,
EventHandler: add_EventHandler,
Downtime: add_Downtime,
ContactDowntime: add_ContactDowntime,
Comment: add_Comment,
ExternalCommand: add_ExternalCommand,
}
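    # Usage sketch: thanks to the dispatch table above, a single entry point
    # routes by class, e.g. (hypothetical objects):
    #   sched.add(some_brok)   # -> add_Brok
    #   sched.add(some_check)  # -> add_Check
    # Unknown types are silently ignored (f is None).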
# We call the function of modules that got the
# hook function
# TODO: find a way to merge this and the version in daemon.py
def hook_point(self, hook_name):
for inst in self.sched_daemon.modules_manager.instances:
full_hook_name = 'hook_' + hook_name
logger.debug("hook_point: %s: %s %s",
inst.get_name(), hasattr(inst, full_hook_name), hook_name)
if hasattr(inst, full_hook_name):
f = getattr(inst, full_hook_name)
try:
f(self)
except Exception as exp:
logger.error(
"The instance %s raise an exception %s." "I disable it "
"and set it to restart it later", inst.get_name(), exp
)
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Exception trace follows: %s", output.getvalue())
output.close()
self.sched_daemon.modules_manager.set_to_restart(inst)
    # Our queues may explode if no one asks us for elements.
    # It's very dangerous: you can crash your server... and that's a bad thing :)
    # So we 'just' keep the most recent elements: 5x the max is a good overhead
def clean_queues(self):
# if we set the interval at 0, we bail out
if self.conf.cleaning_queues_interval == 0:
return
max_checks = 5 * (len(self.hosts) + len(self.services))
max_broks = 5 * (len(self.hosts) + len(self.services))
max_actions = 5 * len(self.contacts) * (len(self.hosts) + len(self.services))
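        # Worked example (hypothetical sizing): with 100 hosts, 900 services
        # and 10 contacts:
        #   max_checks  == 5 * (100 + 900)      == 5000
        #   max_broks   == 5 * (100 + 900)      == 5000
        #   max_actions == 5 * 10 * (100 + 900) == 50000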
        # For checks, it's not that simple:
        # checks may be referenced by their host/service, so we do not just
        # del them from the check list, but also from their service/host.
        # We delete ids lower than max_id - max_checks
if len(self.checks) > max_checks:
            # dict keys are not sorted; max() is slow but we have no other way.
id_max = max(self.checks.keys())
to_del_checks = [c for c in self.checks.values() if c.id < id_max - max_checks]
nb_checks_drops = len(to_del_checks)
if nb_checks_drops > 0:
logger.info("I have to del some checks (%d)..., sorry", nb_checks_drops)
for c in to_del_checks:
i = c.id
elt = c.ref
# First remove the link in host/service
elt.remove_in_progress_check(c)
                # Then in dependent checks (ones I depend on, or checks
                # that depend on me)
for dependent_checks in c.depend_on_me:
dependent_checks.depend_on.remove(c.id)
for c_temp in c.depend_on:
                    c_temp.depend_on_me.remove(c)
del self.checks[i] # Final Bye bye ...
else:
nb_checks_drops = 0
        # For broks and actions, it's simpler.
        # For broks, manage the global queue but also every broker's queue
b_lists = [self.broks]
for (bname, e) in self.brokers.items():
b_lists.append(e['broks'])
nb_broks_drops = 0
for broks in b_lists:
if len(broks) > max_broks:
count = len(broks) - max_broks
del broks[-count:]
nb_broks_drops += count
if len(self.actions) > max_actions:
id_max = max(self.actions.keys())
id_to_del_actions = [i for i in self.actions if i < id_max - max_actions]
nb_actions_drops = len(id_to_del_actions)
for i in id_to_del_actions:
# Remember to delete reference of notification in service/host
a = self.actions[i]
if a.is_a == 'notification':
a.ref.remove_in_progress_notification(a)
del self.actions[i]
else:
nb_actions_drops = 0
statsmgr.incr("scheduler.checks.dropped", nb_checks_drops, "queue")
statsmgr.incr("scheduler.broks.dropped", nb_broks_drops, "queue")
statsmgr.incr("scheduler.actions.dropped", nb_actions_drops, "queue")
if nb_checks_drops != 0 or nb_broks_drops != 0 or nb_actions_drops != 0:
logger.warning("We drop %d checks, %d broks and %d actions",
nb_checks_drops, nb_broks_drops, nb_actions_drops)
    # For tuning purposes we use caches, but we do not want them to explode
# So we clean them
def clean_caches(self):
for tp in self.timeperiods:
tp.clean_cache()
# Ask item (host or service) an update_status
# and add it to our broks queue
def get_and_register_status_brok(self, item):
b = item.get_update_status_brok()
self.add(b)
# Ask item (host or service) a check_result_brok
# and add it to our broks queue
def get_and_register_check_result_brok(self, item):
b = item.get_check_result_brok()
self.add(b)
# We do not want this downtime id
def del_downtime(self, dt_id):
if dt_id in self.downtimes:
self.downtimes[dt_id].ref.del_downtime(dt_id)
del self.downtimes[dt_id]
# We do not want this downtime id
def del_contact_downtime(self, dt_id):
if dt_id in self.contact_downtimes:
self.contact_downtimes[dt_id].ref.del_downtime(dt_id)
del self.contact_downtimes[dt_id]
# We do not want this comment id
def del_comment(self, c_id):
if c_id in self.comments:
self.comments[c_id].ref.del_comment(c_id)
del self.comments[c_id]
# We are looking for outdated acks, and if so, remove them
def check_for_expire_acknowledge(self):
for elt in self.iter_hosts_and_services():
elt.check_for_expire_acknowledge()
    # We update all business_impact values to account for new modulations,
    # starting with impacts, and so update brok statuses and
    # problem values too
def update_business_values(self):
for elt in self.iter_hosts_and_services():
if not elt.is_problem:
was = elt.business_impact
elt.update_business_impact_value()
new = elt.business_impact
                # Ok, the business_impact changed, we can update the broks
if new != was:
# print("The elements", i.get_name(), "change it's business_impact value")
self.get_and_register_status_brok(elt)
# When all impacts and classic elements are updated,
        # we can update problems (their value depends on impacts, so
# they must be done after)
for elt in self.iter_hosts_and_services():
# We first update impacts and classic elements
if elt.is_problem:
was = elt.business_impact
elt.update_business_impact_value()
new = elt.business_impact
                # Maybe one of the impacts changed its business_impact to a high value
                # and so asks for the problem to raise too
if new != was:
# print("The elements", i.get_name(),)
# print("change it's business_impact value from", was, "to", new)
self.get_and_register_status_brok(elt)
    # Each second we search for master notifications that can be scattered and do the job:
    # we take the children and put them into our actions queue
def scatter_master_notifications(self):
now = time.time()
for a in list(self.actions.values()):
# We only want notifications
if a.is_a != 'notification':
continue
if a.status == 'scheduled' and a.is_launchable(now):
if not a.contact:
# This is a "master" notification created by create_notifications.
                    # It won't send itself because it has no contact.
# We use it to create "child" notifications (for the contacts and
# notification_commands) which are executed in the reactionner.
item = a.ref
childnotifications = []
if not item.notification_is_blocked_by_item(a.type, now):
# If it is possible to send notifications
# of this type at the current time, then create
# a single notification for each contact of this item.
childnotifications = item.scatter_notification(a)
for c in childnotifications:
c.status = 'scheduled'
self.add(c) # this will send a brok
# If we have notification_interval then schedule
# the next notification (problems only)
if a.type == 'PROBLEM':
                        # Update the ref notif number after raising the notification's one
if len(childnotifications) != 0:
# notif_nb of the master notification
# was already current_notification_number+1.
# If notifications were sent,
# then host/service-counter will also be incremented
item.current_notification_number = a.notif_nb
if item.notification_interval != 0 and a.t_to_go is not None:
# We must continue to send notifications.
# Just leave it in the actions list and set it to "scheduled"
# and it will be found again later
# Ask the service/host to compute the next notif time. It can be just
# a.t_to_go + item.notification_interval*item.__class__.interval_length
# or maybe before because we have an
# escalation that need to raise up before
a.t_to_go = item.get_next_notification_time(a)
a.notif_nb = item.current_notification_number + 1
a.status = 'scheduled'
else:
# Wipe out this master notification. One problem notification is enough.
item.remove_in_progress_notification(a)
self.actions[a.id].status = 'zombie'
else:
# Wipe out this master notification.
# We don't repeat recover/downtime/flap/etc...
item.remove_in_progress_notification(a)
self.actions[a.id].status = 'zombie'
# Called by poller to get checks
# Can get checks and actions (notifications and co)
def get_to_run_checks(self, do_checks=False, do_actions=False,
poller_tags=['None'], reactionner_tags=['None'],
worker_name='none', module_types=['fork'],
max_actions=None
):
res = []
now = time.time()
# As priority attribute may not exist on objects loaded from retention
# backend, we ensure that filtering does not break
def get_prio(o):
return getattr(o, "priority", o.properties["priority"].default)
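        # Example: an action restored from an old retention file may lack the
        # 'priority' attribute; get_prio then falls back to the default
        # declared in the class properties, so sorted() below never raises.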
# If poller want to do checks
if do_checks:
for c in sorted(self.checks.values(), key=get_prio):
if max_actions is not None and len(res) >= max_actions:
break
# If the command is untagged, and the poller too, or if both are tagged
# with same name, go for it
# if do_check, call for poller, and so poller_tags by default is ['None']
# by default poller_tag is 'None' and poller_tags is ['None']
# and same for module_type, the default is the 'fork' type
if c.poller_tag in poller_tags and c.module_type in module_types:
# must be ok to launch, and not an internal one (business rules based)
if c.status == 'scheduled' and c.is_launchable(now) and not c.internal:
c.status = 'inpoller'
c.worker = worker_name
# We do not send c, because it is a link (c.ref) to
# host/service and poller do not need it. It only
# need a shell with id, command and defaults
# parameters. It's the goal of copy_shell
res.append(c.copy_shell())
# If reactionner want to notify too
if do_actions:
for a in sorted(self.actions.values(), key=get_prio):
if max_actions is not None and len(res) >= max_actions:
break
is_master = (a.is_a == 'notification' and not a.contact)
if not is_master:
# if do_action, call the reactionner,
# and so reactionner_tags by default is ['None']
# by default reactionner_tag is 'None' and reactionner_tags is ['None'] too
# So if not the good one, loop for next :)
if a.reactionner_tag not in reactionner_tags:
continue
# same for module_type
if a.module_type not in module_types:
continue
# And now look for can launch or not :)
if a.status == 'scheduled' and a.is_launchable(now):
if not is_master:
# This is for child notifications and eventhandlers
a.status = 'inpoller'
a.worker = worker_name
new_a = a.copy_shell()
res.append(new_a)
return res
# Called by poller and reactionner to send result
def put_results(self, c):
if c.is_a == 'notification':
# We will only see childnotifications here
try:
timeout = False
if c.status == 'timeout':
# Unfortunately the remove_in_progress_notification
# sets the status to zombie, so we need to save it here.
timeout = True
execution_time = c.execution_time
self.actions[c.id].get_return_from(c)
item = self.actions[c.id].ref
item.remove_in_progress_notification(c)
self.actions[c.id].status = 'zombie'
item.last_notification = c.check_time
                # And we ask the item to update its state
self.get_and_register_status_brok(item)
                # If we've got a problem with the notification, raise a warning log
if timeout:
logger.warning("Contact %s %s notification command '%s ' "
"timed out after %d seconds",
self.actions[c.id].contact.contact_name,
self.actions[c.id].ref.__class__.my_type,
self.actions[c.id].command,
int(execution_time))
elif c.exit_status != 0:
logger.warning("The notification command '%s' raised an error "
"(exit code=%d): '%s'", c.command, c.exit_status, c.output)
except KeyError as exp: # bad number for notif, not that bad
logger.warning('put_results:: get unknown notification : %s ', exp)
except AttributeError as exp: # bad object, drop it
logger.warning('put_results:: get bad notification : %s ', exp)
elif c.is_a == 'check':
try:
if c.status == 'timeout':
c.output = "(%s Check Timed Out)" %\
self.checks[c.id].ref.__class__.my_type.capitalize()
c.long_output = c.output
c.exit_status = self.conf.timeout_exit_status
self.checks[c.id].get_return_from(c)
self.checks[c.id].status = 'waitconsume'
except KeyError as exp:
pass
elif c.is_a == 'eventhandler':
try:
old_action = self.actions[c.id]
old_action.status = 'zombie'
except KeyError: # cannot find old action
return
if c.status == 'timeout':
_type = 'event handler'
if c.is_snapshot:
_type = 'snapshot'
logger.warning("%s %s command '%s ' timed out after %d seconds" %
(self.actions[c.id].ref.__class__.my_type.capitalize(),
_type,
self.actions[c.id].command,
int(c.execution_time)))
            # If it's a snapshot we should get the output and export it
if c.is_snapshot:
old_action.get_return_from(c)
b = old_action.ref.get_snapshot_brok(old_action.output, old_action.exit_status)
self.add(b)
else:
logger.error("The received result type in unknown! %s", c.is_a)
    # Get the right links table for the kind. If unknown, return None
def get_links_from_type(self, type):
t = {'poller': self.pollers, 'reactionner': self.reactionners}
if type in t:
return t[type]
return None
    # Check that we do not connect too often to this element
def is_connection_try_too_close(self, elt):
now = time.time()
last_connection = elt['last_connection']
if now - last_connection < 5:
return True
return False
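    # Example: with the 5 second throttle above, reconnection attempts at
    # t=100.0 and t=103.0 -> the second returns True and is skipped.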
# initialize or re-initialize connection with a poller
# or a reactionner
def pynag_con_init(self, id, type='poller'):
# Get good links tab for looping..
links = self.get_links_from_type(type)
if links is None:
logger.debug("Unknown '%s' type for connection!", type)
return
# We want only to initiate connections to the passive
# pollers and reactionners
passive = links[id]['passive']
if not passive:
return
# If we try to connect too much, we slow down our tests
if self.is_connection_try_too_close(links[id]):
return
# Ok, we can now update it
links[id]['last_connection'] = time.time()
logger.debug("Init connection with %s", links[id]['uri'])
uri = links[id]['uri']
try:
links[id]['con'] = HTTPClient(uri=uri, strong_ssl=links[id]['hard_ssl_name_check'])
con = links[id]['con']
except HTTPException as exp:
logger.warning(
"Connection problem to the %s %s: %s",
type, links[id]['name'], exp
)
links[id]['con'] = None
return
try:
# initial ping must be quick
con.get('ping')
except HTTPException as exp:
logger.warning(
"Connection problem to the %s %s: %s",
type, links[id]['name'], exp
)
links[id]['con'] = None
return
except KeyError as exp:
logger.warning(
"The %s '%s' is not initialized: %s",
type, links[id]['name'], exp)
links[id]['con'] = None
return
logger.info("Connection OK to the %s %s", type, links[id]['name'])
    # We should push actions to our passive satellites
def push_actions_to_passives_satellites(self):
# We loop for our passive pollers or reactionners
for p in filter(lambda p: p['passive'], self.pollers.values()):
logger.debug("I will send actions to the poller %s", p)
con = p['con']
poller_tags = p['poller_tags']
if con is not None:
# get actions
lst = self.get_to_run_checks(True, False, poller_tags, worker_name=p['name'])
try:
# initial ping must be quick
logger.debug("Sending %s actions", len(lst))
con.put(
'push_actions',
serialize(
{'actions': lst, 'sched_id': self.instance_id}
)
)
self.nb_checks_send += len(lst)
except HTTPException as exp:
logger.warning(
"Connection problem to the %s %s: %s",
                        'poller', p['name'], exp
)
p['con'] = None
return
except KeyError as exp:
logger.warning(
"The %s '%s' is not initialized: %s",
                        'poller', p['name'], exp
)
p['con'] = None
return
else: # no connection? try to reconnect
self.pynag_con_init(p['instance_id'], type='poller')
# TODO:factorize
# We loop for our passive reactionners
for p in filter(lambda p: p['passive'], self.reactionners.values()):
logger.debug("I will send actions to the reactionner %s", p)
con = p['con']
reactionner_tags = p['reactionner_tags']
if con is not None:
# get actions
lst = self.get_to_run_checks(False, True,
reactionner_tags=reactionner_tags,
worker_name=p['name'])
try:
# initial ping must be quick
logger.debug("Sending %d actions", len(lst))
con.put(
'push_actions',
serialize(
{'actions': lst, 'sched_id': self.instance_id}
)
)
self.nb_checks_send += len(lst)
except HTTPException as exp:
logger.warning(
"Connection problem to the %s %s: %s",
                        'reactionner', p['name'], exp
)
p['con'] = None
return
except KeyError as exp:
logger.warning(
"The %s '%s' is not initialized: %s",
                        'reactionner', p['name'], exp
)
p['con'] = None
return
else: # no connection? try to reconnect
self.pynag_con_init(p['instance_id'], type='reactionner')
# We should get returns from satellites
def get_actions_from_passives_satellites(self):
# We loop for our passive pollers
for p in [p for p in self.pollers.values() if p['passive']]:
logger.debug("I will get actions from the poller %s", p)
con = p['con']
poller_tags = p['poller_tags']
if con is not None:
try:
# initial ping must be quick
                    # Before making a call that can be long, do a simple ping to be sure it is alive
con.get('ping')
payload = con.get('get_returns', {'sched_id': self.instance_id}, wait='long')
try:
                        results = deserialize(payload)
except Exception as exp:
logger.error(
'Cannot load passive results from satellite %s : %s',
p['name'], exp)
continue
nb_received = len(results)
self.nb_check_received += nb_received
logger.debug("Received %d passive results", nb_received)
for result in results:
result.set_type_passive()
with self.waiting_results_lock:
self.waiting_results.extend(results)
except HTTPException as exp:
logger.warning(
"Connection problem to the %s %s: %s",
                        'poller', p['name'], exp
)
p['con'] = None
continue
except KeyError as exp:
logger.warning(
"The %s '%s' is not initialized: %s",
                        'poller', p['name'], exp
)
p['con'] = None
continue
else: # no connection, try reinit
self.pynag_con_init(p['instance_id'], type='poller')
# We loop for our passive reactionners
for p in [p for p in self.reactionners.values() if p['passive']]:
logger.debug("I will get actions from the reactionner %s", p)
con = p['con']
reactionner_tags = p['reactionner_tags']
if con is not None:
try:
# initial ping must be quick
                    # Before making a call that can be long, do a simple ping to be sure it is alive
con.get('ping')
payload = con.get('get_returns', {'sched_id': self.instance_id}, wait='long')
results = deserialize(payload)
nb_received = len(results)
self.nb_check_received += nb_received
logger.debug("Received %d passive results", nb_received)
for result in results:
result.set_type_passive()
with self.waiting_results_lock:
self.waiting_results.extend(results)
except HTTPException as exp:
logger.warning(
"Connection problem to the %s %s: %s",
                        'reactionner', p['name'], exp
)
p['con'] = None
return
except KeyError as exp:
logger.warning(
"The %s '%s' is not initialized: %s",
                        'reactionner', p['name'], exp
)
p['con'] = None
return
else: # no connection, try reinit
self.pynag_con_init(p['instance_id'], type='reactionner')
    # Some checks are purely internal, like business-rule-based ones:
    # simply ask their ref to manage them when it's ok to run
def manage_internal_checks(self):
now = time.time()
for c in self.checks.values():
            # must be an internal one (business rules based), scheduled and ok to launch
if c.internal and c.status == 'scheduled' and c.is_launchable(now):
c.ref.manage_internal_check(self.hosts, self.services, c)
# it manage it, now just ask to consume it
# like for all checks
c.status = 'waitconsume'
    # Called by brokers to get broks.
    # We give them, and clean them!
def get_broks(self, bname, broks_batch=0):
if broks_batch:
try:
broks_batch = int(broks_batch)
except ValueError:
logger.error("Invalid broks_batch in get_broks, should be an "
"integer. Igored.")
broks_batch = 0
res = []
if broks_batch == 0:
count = len(self.broks)
else:
count = min(broks_batch, len(self.broks))
res.extend(self.broks[:count])
del self.broks[:count]
        # If we are here, we are sure the broker entry exists
        if broks_batch == 0:
            count = len(self.brokers[bname]['broks'])
        else:
            # the batch limits the TOTAL returned, so only take what is
            # left after serving from the global queue (never negative)
            count = max(0, min(broks_batch - len(res),
                               len(self.brokers[bname]['broks'])))
res.extend(self.brokers[bname]['broks'][:count])
del self.brokers[bname]['broks'][:count]
return res
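    # Worked example (hypothetical): broks_batch=10 with 7 broks in the global
    # queue and 8 in this broker's queue -> 7 global broks plus
    # min(10 - 7, 8) == 3 broker-specific ones: 10 in total, never more than
    # the requested batch.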
# An element can have its topology changed by an external command
# if so a brok will be generated with this flag. No need to reset all of
# them.
def reset_topology_change_flag(self):
for i in self.hosts:
i.topology_change = False
for i in self.services:
i.topology_change = False
    # Update the retention file and give all the data in
    # a dict so the read function can pick up what it wants
# For now compression is not used, but it can be added easily
# just uncomment :)
def update_retention_file(self, forced=False):
        # If the update interval is set to 0, we do not want this,
        # unless it is forced (like at stopping)
if self.conf.retention_update_interval == 0 and not forced:
return
self.hook_point('save_retention')
# Load the retention file and get status from it. It does not get all checks in progress
# for the moment, just the status and the notifications.
def retention_load(self):
self.hook_point('load_retention')
# Helper function for module, will give the host and service
# data
def get_retention_data(self):
# We create an all_data dict with list of useful retention data dicts
# of our hosts and services
all_data = {'hosts': {}, 'services': {}}
for h in self.hosts:
d = {}
running_properties = h.__class__.running_properties
for prop, entry in running_properties.items():
if entry.retention:
v = getattr(h, prop)
# Maybe we should "prepare" the data before saving it
# like get only names instead of the whole objects
f = entry.retention_preparation
if f:
v = f(h, v)
d[prop] = v
# and some properties are also like this, like
# active checks enabled or not
properties = h.__class__.properties
for prop, entry in properties.items():
if entry.retention:
v = getattr(h, prop)
# Maybe we should "prepare" the data before saving it
# like get only names instead of the whole objects
f = entry.retention_preparation
if f:
v = f(h, v)
d[prop] = v
all_data['hosts'][h.host_name] = d
# Same for services
for s in self.services:
d = {}
running_properties = s.__class__.running_properties
for prop, entry in running_properties.items():
if entry.retention:
v = getattr(s, prop)
# Maybe we should "prepare" the data before saving it
# like get only names instead of the whole objects
f = entry.retention_preparation
if f:
v = f(s, v)
d[prop] = v
# We consider the service ONLY if it has modified attributes.
# If not, then no non-running attributes will be saved for this service.
if s.modified_attributes > 0:
# Same for properties, like active checks enabled or not
properties = s.__class__.properties
for prop, entry in properties.items():
# We save the value only if the attribute
# is selected for retention AND has been modified.
if entry.retention and \
not (prop in DICT_MODATTR and
not DICT_MODATTR[prop].value & s.modified_attributes):
v = getattr(s, prop)
# Maybe we should "prepare" the data before saving it
# like get only names instead of the whole objects
f = entry.retention_preparation
if f:
v = f(s, v)
d[prop] = v
all_data['services'][(s.host.host_name, s.service_description)] = d
return all_data
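    # For reference, a rough sketch of the structure handed to retention
    # modules (actual keys depend on which properties are tagged for
    # retention; host/service names here are made up):
    #
    #   {'hosts': {'srv-01': {'state': 'UP', 'last_chk': 1409821345, ...}},
    #    'services': {('srv-01', 'Load'): {'state': 'OK', ...}}}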
# Get back our broks from a retention module :)
def restore_retention_data(self, data):
"""
Now load interesting properties in hosts/services
Tagging retention=False prop that not be directly load
Items will be with theirs status, but not in checking, so
a new check will be launched like with a normal beginning (random distributed
scheduling)
:param dict data: The loaded retention data
"""
# Restores retention data
objects = []
ret_hosts = data['hosts']
for ret_h_name in ret_hosts:
d = data['hosts'][ret_h_name]
h = self.hosts.find_by_name(ret_h_name)
if h is not None:
self.restore_object_retention_data(h, d)
objects.append(h)
ret_services = data['services']
for (ret_s_h_name, ret_s_desc) in ret_services:
d = data['services'][(ret_s_h_name, ret_s_desc)]
s = self.services.find_srv_by_name_and_hostname(ret_s_h_name, ret_s_desc)
if s is not None:
self.restore_object_retention_data(s, d)
objects.append(s)
        # Re-calculates object status attributes once states have been restored
for o in objects:
o.reprocess_state()
def restore_object_retention_data(self, o, data):
"""
Now load interesting properties in hosts/services
Tagging retention=False prop that not be directly load
Items will be with theirs status, but not in checking, so
a new check will be launched like with a normal beginning (random distributed
scheduling)
:param Item o: The object to load data to
:param dict data: The object's loaded retention data
"""
# First manage all running properties
running_properties = o.__class__.running_properties
for prop, entry in running_properties.items():
if entry.retention:
# Maybe the saved one was not with this value, so
# we just bypass this
if prop in data:
setattr(o, prop, data[prop])
# Ok, some are in properties too (like active check enabled
# or not. Will OVERRIDE THE CONFIGURATION VALUE!
properties = o.__class__.properties
for prop, entry in properties.items():
if entry.retention:
# Maybe the saved one was not with this value, so
# we just bypass this
if prop in data:
setattr(o, prop, data[prop])
        # Now manage all linked objects loaded from the previous run
for a in o.notifications_in_progress.values():
a.ref = o
self.add(a)
            # Also raise the action id, so new ids do not overlap
a.assume_at_least_id(a.id)
# And also add downtimes and comments
for dt in o.downtimes:
dt.ref = o
if hasattr(dt, 'extra_comment'):
dt.extra_comment.ref = o
else:
dt.extra_comment = None
            # raise the downtime id so ids do not overlap
Downtime.id = max(Downtime.id, dt.id + 1)
self.add(dt)
for c in o.comments:
c.ref = o
self.add(c)
            # raise the comment id so ids do not overlap
Comment.id = max(Comment.id, c.id + 1)
if o.acknowledgement is not None:
o.acknowledgement.ref = o
            # Raise the id of future acks so we don't
            # overwrite them
Acknowledge.id = max(Acknowledge.id, o.acknowledgement.id + 1)
        # Relink the notified_contacts as a set() of true contact objects;
        # if it was loaded from the retention, it's now a list of contact
        # names
if 'notified_contacts' in data:
new_notified_contacts = set()
for cname in o.notified_contacts:
c = self.contacts.find_by_name(cname)
# Maybe the contact is gone. Skip it
if c:
new_notified_contacts.add(c)
o.notified_contacts = new_notified_contacts
# Fill the self.broks with broks of self (process id, and co)
# broks of service and hosts (initial status)
def fill_initial_broks(self, bname, with_logs=False):
        # First, a Brok to delete everything from my instance_id
b = Brok('clean_all_my_instance_id', {'instance_id': self.instance_id})
self.add_Brok(b, bname)
# first the program status
b = self.get_program_status_brok()
self.add_Brok(b, bname)
        # We can only call initial_status on these types.
        # The order is important: services need hosts...
initial_status_types = (self.timeperiods, self.commands,
self.contacts, self.contactgroups,
self.hosts, self.hostgroups,
self.services, self.servicegroups)
self.conf.skip_initial_broks = getattr(self.conf, 'skip_initial_broks', False)
logger.debug("Skipping initial broks? %s", self.conf.skip_initial_broks)
if not self.conf.skip_initial_broks:
for tab in initial_status_types:
for i in tab:
b = i.get_initial_status_brok()
self.add_Brok(b, bname)
        # Only raise all the logs at scheduler startup
if with_logs:
# Ask for INITIAL logs for services and hosts
for i in self.hosts:
i.raise_initial_state()
for i in self.services:
i.raise_initial_state()
# Add a brok to say that we finished all initial_pass
b = Brok('initial_broks_done', {'instance_id': self.instance_id})
self.add_Brok(b, bname)
# We now have all full broks
self.has_full_broks = True
logger.info("[%s] Created %d initial Broks for broker %s",
self.instance_name, len(self.brokers[bname]['broks']), bname)
    # Create a brok with program status info
def get_and_register_program_status_brok(self):
b = self.get_program_status_brok()
self.add(b)
    # Create a brok with updated program status info
def get_and_register_update_program_status_brok(self):
b = self.get_program_status_brok()
b.type = 'update_program_status'
self.add(b)
# Get a brok with program status
# TODO: GET REAL VALUES
def get_program_status_brok(self):
now = int(time.time())
data = {"is_running": 1,
"instance_id": self.instance_id,
"instance_name": self.instance_name,
"last_alive": now,
"interval_length": self.conf.interval_length,
"program_start": self.program_start,
"pid": os.getpid(),
"daemon_mode": 1,
"last_command_check": now,
"last_log_rotation": now,
"notifications_enabled": self.conf.enable_notifications,
"active_service_checks_enabled": self.conf.execute_service_checks,
"passive_service_checks_enabled": self.conf.accept_passive_service_checks,
"active_host_checks_enabled": self.conf.execute_host_checks,
"passive_host_checks_enabled": self.conf.accept_passive_host_checks,
"event_handlers_enabled": self.conf.enable_event_handlers,
"flap_detection_enabled": self.conf.enable_flap_detection,
"failure_prediction_enabled": 0,
"process_performance_data": self.conf.process_performance_data,
"obsess_over_hosts": self.conf.obsess_over_hosts,
"obsess_over_services": self.conf.obsess_over_services,
"modified_host_attributes": 0,
"modified_service_attributes": 0,
"global_host_event_handler": self.conf.global_host_event_handler,
'global_service_event_handler': self.conf.global_service_event_handler,
'check_external_commands': self.conf.check_external_commands,
'check_service_freshness': self.conf.check_service_freshness,
'check_host_freshness': self.conf.check_host_freshness,
'command_file': self.conf.command_file
}
b = Brok('program_status', data)
return b
    # Called every 1sec to consume every result in services or hosts;
    # with these results they become OK, CRITICAL, UP/DOWN, etc...
def consume_results(self):
# All results are in self.waiting_results
# We need to get them first
with self.waiting_results_lock:
waiting_results = self.waiting_results
self.waiting_results = []
for c in waiting_results:
self.put_results(c)
# Then we consume them
# print("**********Consume*********")
for c in self.checks.values():
if c.status == 'waitconsume':
item = c.ref
item.consume_result(c)
        # All 'finished' checks (no more deps) release the checks that depend on them
for c in self.checks.values():
if c.status == 'havetoresolvedep':
for dependent_checks in c.depend_on_me:
# Ok, now dependent will no more wait c
dependent_checks.depend_on.remove(c.id)
# REMOVE OLD DEP CHECK -> zombie
c.status = 'zombie'
        # Now, reintegrate dep checks
for c in self.checks.values():
if c.status == 'waitdep' and len(c.depend_on) == 0:
item = c.ref
item.consume_result(c)
# Called every 1sec to delete all checks in a zombie state
# zombie = not useful anymore
def delete_zombie_checks(self):
# print("**********Delete zombies checks****")
id_to_del = []
for c in self.checks.values():
if c.status == 'zombie':
id_to_del.append(c.id)
        # a little pat on the back and off you go, thanks... :)
for id in id_to_del:
del self.checks[id] # ZANKUSEN!
# Called every 1sec to delete all actions in a zombie state
# zombie = not useful anymore
def delete_zombie_actions(self):
# print("**********Delete zombies actions****")
id_to_del = []
for a in self.actions.values():
if a.status == 'zombie':
id_to_del.append(a.id)
        # a little pat on the back and off you go, thanks... :)
for id in id_to_del:
del self.actions[id] # ZANKUSEN!
def get_maintenance_dt_times(self, now, elt):
start_time = None
end_time = None
period = elt.maintenance_period
if period is not None and period.is_time_valid(now):
start_time = period.get_next_valid_time_from_t(now)
end_time = period.get_next_invalid_time_from_t(start_time + 1) - 1
elif elt.maintenance_state_id == 1:
start_time = now
duration = elt.maintenance_check_interval * elt.interval_length
end_time = now + duration * 3
return start_time, end_time
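    # Worked example of the check-driven branch above (values are made up):
    # with maintenance_check_interval=5 and interval_length=60, an element
    # whose maintenance_state_id is 1 at t=now gets a downtime window of
    # 5 * 60 * 3 = 900s, i.e. end_time = now + 900; the window is then
    # extended on each loop while the element stays in maintenance.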
def update_maintenance_downtimes(self, now):
# Check maintenance periods
for elt in self.iter_hosts_and_services():
if elt.maintenance_period is None and elt.maintenance_check_period is None:
continue
if elt.in_maintenance is None:
start_time, end_time = self.get_maintenance_dt_times(now, elt)
# Got a maintenance period or check
if start_time is not None:
dt = Downtime(elt, start_time, end_time, True, 0, 0,
"system",
"this downtime was automatically scheduled "
"through a maintenance_period or "
"a maintenance_check")
elt.add_downtime(dt)
self.add(dt)
self.get_and_register_status_brok(elt)
elt.in_maintenance = dt.id
else:
dt = self.downtimes.get(elt.in_maintenance)
# In case of retention loading issue, avoid crashing the
# scheduler if the downtime could not be found
if dt is None:
logger.error(
"Failed to find maintenance downtime %s, resetting "
"in_maintenance" % elt.in_maintenance
)
elt.in_maintenance = None
continue
                # If detected as under maintenance by the check command, extend
                # the current downtime length, otherwise invalidate it.
if elt.maintenance_state_id == 1 or \
(elt.maintenance_period is not None and
elt.maintenance_period.is_time_valid(now)):
_, end_time = self.get_maintenance_dt_times(now, elt)
dt.end_time = dt.real_end_time = end_time
dt.duration = dt.end_time - dt.start_time
elif elt.maintenance_state_id == 0:
dt.end_time = dt.real_end_time = now - 1
dt.duration = dt.end_time - dt.start_time
def cleanup_maintenance_downtimes(self):
for elt in self.iter_hosts_and_services():
if elt.in_maintenance is not None and \
elt.in_maintenance not in self.downtimes:
            # the maintenance downtime has expired or was manually deleted
elt.in_maintenance = None
# Check for downtimes start and stop, and register
# them if needed
def update_downtimes_and_comments(self):
broks = []
        # Necessary to floor the time value because milliseconds may result
        # in too early downtime expirations.
now = int(time.time())
        # Look for comments in objects, and check if we already have them
for elt in self.iter_hosts_and_services():
for c in elt.comments:
if c.id not in self.comments:
self.comments[c.id] = c
self.update_maintenance_downtimes(now)
# Check the validity of contact downtimes
for elt in self.contacts:
for dt in elt.downtimes:
dt.check_activation()
# Check start and stop times
for dt in self.downtimes.values():
if dt.real_end_time < now:
# this one has expired
broks.extend(dt.exit()) # returns downtimestop notifications
elif now >= dt.start_time and dt.fixed is True and \
dt.is_in_effect is False and dt.can_be_deleted is False:
# this one has to start now
broks.extend(dt.enter()) # returns downtimestart notifications
broks.append(dt.ref.get_update_status_brok())
# A loop where those downtimes are removed
# which were marked for deletion (mostly by dt.exit())
for dt in list(self.downtimes.values()):
if dt.can_be_deleted is True:
ref = dt.ref
self.del_downtime(dt.id)
broks.append(ref.get_update_status_brok())
# Same for contact downtimes:
for dt in list(self.contact_downtimes.values()):
if dt.can_be_deleted is True:
ref = dt.ref
self.del_contact_downtime(dt.id)
broks.append(ref.get_update_status_brok())
# Downtimes are usually accompanied by a comment.
        # An exiting downtime also invalidates its comment.
for c in list(self.comments.values()):
if c.can_be_deleted is True:
ref = c.ref
self.del_comment(c.id)
broks.append(ref.get_update_status_brok())
# If downtimes were previously deleted, cleanup in_maintenance
self.cleanup_maintenance_downtimes()
for b in broks:
self.add(b)
# Main schedule function to make the regular scheduling
def schedule(self):
# ask for service and hosts their next check
for elt in self.iter_hosts_and_services():
elt.schedule()
    # Main actions reaper function: it gets all new checks,
    # notifications and event handlers from hosts and services
def get_new_actions(self):
self.hook_point('get_new_actions')
        # ask hosts and services for their new actions
for elt in self.iter_hosts_and_services():
for a in elt.actions:
self.add(a)
# We take all, we can clear it
elt.actions = []
# Similar as above, but for broks
def get_new_broks(self):
        # ask hosts and services for their broks
        # waiting to be collected
for elt in self.iter_hosts_and_services():
for b in elt.broks:
self.add(b)
# We take all, we can clear it
elt.broks = []
    # Raise checks for non-fresh states of services and hosts
def check_freshness(self):
# print("********** Check freshness******")
for elt in self.iter_hosts_and_services():
c = elt.do_check_freshness()
if c is not None:
self.add(c)
    # Check for orphaned checks: checks that never come back.
    # So if inpoller and t_to_go < now - 300s: problem!
    # Warn only one time for each "worker"
    # XXX I think we should make "time_to_orphanage" configurable per
    # action type, i.e. for notifications, event_handlers & checks;
    # I think it would be a little more useful that way, not sure though
def check_orphaned(self):
worker_names = {}
now = int(time.time())
for c in self.checks.values():
time_to_orphanage = c.ref.get_time_to_orphanage()
if time_to_orphanage:
if c.status == 'inpoller' and c.t_to_go < now - time_to_orphanage:
c.status = 'scheduled'
if c.worker not in worker_names:
worker_names[c.worker] = {"checks": 1}
continue
if "checks" not in worker_names[c.worker]:
worker_names[c.worker]["checks"] = 1
continue
worker_names[c.worker]["checks"] += 1
for a in self.actions.values():
time_to_orphanage = a.ref.get_time_to_orphanage()
if time_to_orphanage:
if a.status == 'inpoller' and a.t_to_go < now - time_to_orphanage:
a.status = 'scheduled'
if a.worker not in worker_names:
worker_names[a.worker] = {"actions": 1}
continue
if "actions" not in worker_names[a.worker]:
worker_names[a.worker]["actions"] = 1
continue
worker_names[a.worker]["actions"] += 1
reenabled = {"checks": 0, "actions": 0}
for w in worker_names:
for _type in worker_names[w]:
reenabled[_type] += worker_names[w][_type]
logger.warning("%d %s never came back for the satellite "
"'%s'. I reenable them for polling",
worker_names[w][_type], _type, w)
for _type in reenabled:
count = reenabled[_type]
if count:
statsmgr.incr("scheduler.%s.reenabled" % _type, count, "queue")
# Each loop we are going to send our broks to our modules (if need)
def send_broks_to_modules(self):
t0 = time.time()
nb_sent = 0
for mod in self.sched_daemon.modules_manager.get_external_instances():
logger.debug("Look for sending to module %s", mod.get_name())
q = mod.to_q
to_send = [b for b in self.broks
if not getattr(b, 'sent_to_sched_externals', False) and mod.want_brok(b)]
q.put(to_send)
nb_sent += len(to_send)
# No more need to send them
for b in self.broks:
b.sent_to_sched_externals = True
logger.debug("Time to send %s broks (after %d secs)", nb_sent, time.time() - t0)
# special one for scheduler ; see Daemon.get_objects_from_from_queues()
def get_objects_from_from_queues(self):
        ''' Same behavior as Daemon.get_objects_from_from_queues(). '''
return self.sched_daemon.get_objects_from_from_queues()
    # Gets internal metrics, for both statsd and the stats structure below
def get_internal_metrics(self):
# Queues
metrics = [
('core.scheduler.mem', get_memory(), 'system'),
('core.scheduler.checks.queue', len(self.checks), 'queue'),
('core.scheduler.actions.queue', len(self.actions), 'queue'),
('core.scheduler.broks.queue', len(self.broks), 'queue'),
('core.scheduler.downtimes.queue', len(self.downtimes), 'queue'),
('core.scheduler.comments.queue', len(self.comments), 'queue'),
]
# Queues
for s in ("scheduled", "inpoller", "zombie", "timeout",
"waitconsume", "waitdep", "havetoresolvedep"):
count = len([c for c in self.checks.values() if c.status == s])
metrics.append(('core.scheduler.checks.%s' % s, count, 'queue'))
# Latency
latencies = [s.latency for s in self.services]
lat_avg, lat_min, lat_max = nighty_five_percent(latencies)
if lat_min:
metrics.append(('core.scheduler.latency.min', lat_min, 'queue'))
metrics.append(('core.scheduler.latency.avg', lat_avg, 'queue'))
metrics.append(('core.scheduler.latency.max', lat_max, 'queue'))
# Objects
for t in ("contacts", "contactgroups", "hosts", "hostgroups",
"services", "servicegroups", "commands"):
count = len(getattr(self, t))
metrics.append(('core.scheduler.%s' % t, count, 'object'))
return metrics
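    # Each entry returned above is a (name, value, family) tuple, e.g.
    # ('core.scheduler.checks.queue', 42, 'queue'); get_stats_struct()
    # below re-packs them with a timestamp before exposing them.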
    # The stats thread asks us for a main structure of stats
def get_stats_struct(self):
now = int(time.time())
res = self.sched_daemon.get_stats_struct()
instance_name = getattr(self, "instance_name", "")
res.update({'name': instance_name, 'type': 'scheduler'})
        # Get an overview of the latencies with just
        # a 95 percentile view, but also min/max values
latencies = [s.latency for s in self.services]
lat_avg, lat_min, lat_max = nighty_five_percent(latencies)
res['latency'] = (0.0, 0.0, 0.0)
if lat_avg:
res['latency'] = {'avg': lat_avg, 'min': lat_min, 'max': lat_max}
# Managed objects
res["objects"] = {}
for t in ("contacts", "contactgroups", "hosts", "hostgroups",
"services", "servicegroups", "commands"):
res["objects"][t] = len(getattr(self, t))
# metrics specific
metrics = res['metrics']
for metric in self.get_internal_metrics():
name, value, mtype = metric
            metrics.append((name, value, now, mtype))
all_commands = {}
# compute some stats
for elt in self.iter_hosts_and_services():
last_cmd = elt.last_check_command
if not last_cmd:
continue
interval = elt.check_interval
if interval == 0:
interval = 1
cmd = os.path.split(last_cmd.split(' ', 1)[0])[1]
u_time = elt.u_time
s_time = elt.s_time
old_u_time, old_s_time = all_commands.get(cmd, (0.0, 0.0))
old_u_time += u_time / interval
old_s_time += s_time / interval
all_commands[cmd] = (old_u_time, old_s_time)
# now sort it
p = []
for (c, e) in all_commands.items():
u_time, s_time = e
p.append({'cmd': c, 'u_time': u_time, 's_time': s_time})
        # py3-safe equivalent of the original cmp-based sort: ascending u_time
        p.sort(key=lambda e: e['u_time'])
        # take the first 10 ones for the output
res['commands'] = p[:10]
return res
# Main function
def run(self):
# Then we see if we've got info in the retention file
self.retention_load()
# Finally start the external modules now we got our data
self.hook_point('pre_scheduler_mod_start')
self.sched_daemon.modules_manager.start_external_instances(late_start=True)
# Ok, now all is initialized, we can make the initial broks
logger.info("[%s] First scheduling launched", self.instance_name)
self.schedule()
logger.info("[%s] First scheduling done", self.instance_name)
# Now connect to the passive satellites if needed
for p_id in self.pollers:
self.pynag_con_init(p_id, type='poller')
for r_id in self.reactionners:
self.pynag_con_init(r_id, type='reactionner')
# Ticks are for recurrent function call like consume
# del zombies etc
ticks = 0
timeout = 1.0 # For the select
gogogo = time.time()
# We must reset it if we received a new conf from the Arbiter.
# Otherwise, the stat check average won't be correct
self.nb_check_received = 0
self.load_one_min = Load(initial_value=1)
logger.debug("First loop at %d", time.time())
while self.must_run:
# print("Loop")
            # Before answering brokers, we send our broks to modules
# Ok, go to send our broks to our external modules
# self.send_broks_to_modules()
elapsed, _, _ = self.sched_daemon.handleRequests(timeout)
if elapsed:
timeout -= elapsed
if timeout > 0:
continue
self.load_one_min.update_load(self.sched_daemon.sleep_time)
            # load of the scheduler is the percent of time it is waiting
load = min(100, 100.0 - self.load_one_min.get_load() * 100)
logger.debug("Load: (sleep) %.2f (average: %.2f) -> %d%%",
self.sched_daemon.sleep_time, self.load_one_min.get_load(), load)
self.sched_daemon.sleep_time = 0.0
# Timeout or time over
timeout = 1.0
ticks += 1
# Do recurrent works like schedule, consume
# delete_zombie_checks
for i in self.recurrent_works:
(name, f, nb_ticks) = self.recurrent_works[i]
# A 0 in the tick will just disable it
if nb_ticks != 0:
if ticks % nb_ticks == 0:
# Call it and save the time spend in it
_t = time.time()
f()
statsmgr.timing('loop.scheduler.%s' % name, time.time() - _t, 'perf')
            # Getting memory has a cost, do not collect it if not needed
# DBG: push actions to passives?
self.push_actions_to_passives_satellites()
self.get_actions_from_passives_satellites()
# stats
nb_scheduled = len([c for c in self.checks.values() if c.status == 'scheduled'])
nb_inpoller = len([c for c in self.checks.values() if c.status == 'inpoller'])
nb_zombies = len([c for c in self.checks.values() if c.status == 'zombie'])
nb_notifications = len(self.actions)
logger.debug("Checks: total %s, scheduled %s,"
"inpoller %s, zombies %s, notifications %s",
len(self.checks), nb_scheduled, nb_inpoller, nb_zombies, nb_notifications)
            # Get an overview of the latencies with just
            # a 95 percentile view, but also min/max values
latencies = [s.latency for s in self.services]
lat_avg, lat_min, lat_max = nighty_five_percent(latencies)
if lat_avg is not None:
logger.debug("Latency (avg/min/max): %.2f/%.2f/%.2f", lat_avg, lat_min, lat_max)
# print("Notifications:", nb_notifications)
now = time.time()
if self.nb_checks_send != 0:
logger.debug("Nb checks/notifications/event send: %s", self.nb_checks_send)
self.nb_checks_send = 0
if self.nb_broks_send != 0:
logger.debug("Nb Broks send: %s", self.nb_broks_send)
self.nb_broks_send = 0
time_elapsed = now - gogogo
logger.debug("Check average = %d checks/s", int(self.nb_check_received / time_elapsed))
if self.need_dump_memory:
self.sched_daemon.dump_memory()
self.need_dump_memory = False
if self.need_objects_dump:
logger.debug('I need to dump my objects!')
self.dump_objects()
self.dump_config()
self.need_objects_dump = False
            # Check that memory consumption did not exceed the allowed threshold
self.sched_daemon.check_memory_usage()
        # WE must save the retention at quit time BY OURSELVES,
        # because our daemon will not be able to do it for us
self.update_retention_file(True)
| 77,081 | Python | .py | 1,591 | 34.900691 | 100 | 0.555142 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) | 6,472 | downtime.py | shinken-solutions_shinken/shinken/downtime.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2017:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import time
from shinken.comment import Comment
from shinken.property import BoolProp, IntegerProp, StringProp
from shinken.brok import Brok
""" Schedules downtime for a specified service. If the "fixed" argument is set
to one (1), downtime will start and end at the times specified by the
"start" and "end" arguments.
Otherwise, downtime will begin between the "start" and "end" times and last
for "duration" seconds. The "start" and "end" arguments are specified
in time_t format (seconds since the UNIX epoch). The specified service
downtime can be triggered by another downtime entry if the "trigger_id"
is set to the ID of another scheduled downtime entry.
Set the "trigger_id" argument to zero (0) if the downtime for the
specified service should not be triggered by another downtime entry.
"""
class Downtime(object):
id = 1
# Just to list the properties we will send as serialized object
# so to others daemons, so all but NOT REF
properties = {
'activate_me': StringProp(default=[]),
'entry_time': IntegerProp(default=0, fill_brok=['full_status']),
'fixed': BoolProp(default=True, fill_brok=['full_status']),
'start_time': IntegerProp(default=0, fill_brok=['full_status']),
'duration': IntegerProp(default=0, fill_brok=['full_status']),
'trigger_id': IntegerProp(default=0),
'end_time': IntegerProp(default=0, fill_brok=['full_status']),
'real_end_time': IntegerProp(default=0),
'author': StringProp(default='', fill_brok=['full_status']),
'comment': StringProp(default=''),
'is_in_effect': BoolProp(default=False),
'has_been_triggered': BoolProp(default=False),
'can_be_deleted': BoolProp(default=False),
# TODO: find a very good way to handle the downtime "ref".
# ref must effectively not be in properties because it points
# onto a real object.
# 'ref': None
}
def __init__(self, ref, start_time, end_time, fixed, trigger_id, duration, author, comment):
now = datetime.datetime.now()
self.id = int(time.mktime(now.timetuple()) * 1e6 + now.microsecond)
self.__class__.id = self.id + 1
        self.ref = ref  # pointer to the srv or host we apply to
self.activate_me = [] # The other downtimes i need to activate
self.entry_time = int(time.time())
self.fixed = fixed
self.start_time = start_time
self.duration = duration
self.trigger_id = trigger_id
if self.trigger_id != 0: # triggered plus fixed makes no sense
self.fixed = False
self.end_time = end_time
if fixed:
self.duration = end_time - start_time
# This is important for flexible downtimes. Here start_time and
# end_time mean: in this time interval it is possible to trigger
# the beginning of the downtime which lasts for duration.
# Later, when a non-ok event happens, real_end_time will be
# recalculated from now+duration
# end_time will be displayed in the web interface, but real_end_time
# is used internally
self.real_end_time = end_time
self.author = author
self.comment = comment
self.is_in_effect = False
# fixed: start_time has been reached,
# flexible: non-ok checkresult
self.has_been_triggered = False # another downtime has triggered me
self.can_be_deleted = False
self.add_automatic_comment()
def __str__(self):
if self.is_in_effect is True:
active = "active"
else:
active = "inactive"
if self.fixed is True:
type = "fixed"
else:
type = "flexible"
return "%s %s Downtime id=%d %s - %s" % (
active, type, self.id, time.ctime(self.start_time), time.ctime(self.end_time))
def trigger_me(self, other_downtime):
self.activate_me.append(other_downtime)
def in_scheduled_downtime(self):
return self.is_in_effect
# The referenced host/service object enters now a (or another) scheduled
# downtime. Write a log message only if it was not already in a downtime
def enter(self):
res = []
self.is_in_effect = True
if self.fixed is False:
now = time.time()
self.real_end_time = now + self.duration
if self.ref.scheduled_downtime_depth == 0:
self.ref.raise_enter_downtime_log_entry()
self.ref.create_notifications('DOWNTIMESTART')
self.ref.scheduled_downtime_depth += 1
self.ref.in_scheduled_downtime = True
for dt in self.activate_me:
res.extend(dt.enter())
return res
# The end of the downtime was reached.
def exit(self):
res = []
if self.is_in_effect is True:
# This was a fixed or a flexible+triggered downtime
self.is_in_effect = False
self.ref.scheduled_downtime_depth -= 1
if self.ref.scheduled_downtime_depth <= 0:
self.ref.raise_exit_downtime_log_entry()
self.ref.create_notifications('DOWNTIMEEND')
self.ref.in_scheduled_downtime = False
else:
# This was probably a flexible downtime which was not triggered
# In this case it silently disappears
pass
self.del_automatic_comment()
self.can_be_deleted = True
# when a downtime ends and the service was critical
# a notification is sent with the next critical check
# So we should set a flag here which signals consume_result
# to send a notification
self.ref.in_scheduled_downtime_during_last_check = True
return res
# A scheduled downtime was prematurely canceled
def cancel(self):
res = []
self.is_in_effect = False
self.ref.scheduled_downtime_depth -= 1
if self.ref.scheduled_downtime_depth == 0:
self.ref.raise_cancel_downtime_log_entry()
self.ref.in_scheduled_downtime = False
self.del_automatic_comment()
self.can_be_deleted = True
self.ref.in_scheduled_downtime_during_last_check = True
        # Nagios does not notify on canceled downtimes, but we do
self.ref.create_notifications('DOWNTIMECANCELLED')
# Also cancel other downtimes triggered by me
for dt in self.activate_me:
res.extend(dt.cancel())
return res
# Scheduling a downtime creates a comment automatically
def add_automatic_comment(self):
if self.fixed is True:
text = (
"This %s has been scheduled for fixed downtime from %s to %s. "
"Notifications for the %s will not be sent out during that time period." % (
self.ref.my_type,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
self.ref.my_type)
)
else:
hours, remainder = divmod(self.duration, 3600)
minutes, seconds = divmod(remainder, 60)
text = ("This %s has been scheduled for flexible downtime starting between %s and %s "
"and lasting for a period of %d hours and %d minutes. "
"Notifications for the %s will not be sent out during that time period." % (
self.ref.my_type,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
hours, minutes, self.ref.my_type)
)
if self.ref.my_type == 'host':
comment_type = 1
else:
comment_type = 2
c = Comment(self.ref, False, "(Nagios Process)", text, comment_type, 2, 0, False, 0)
self.comment_id = c.id
self.extra_comment = c
self.ref.add_comment(c)
def del_automatic_comment(self):
        # Extra comment can be None if we load it from an old version of Shinken
        # TODO: remove it in a future version when everyone has upgraded
if self.extra_comment is not None:
self.extra_comment.can_be_deleted = True
# self.ref.del_comment(self.comment_id)
# Fill data with info of item by looking at brok_type
# in props of properties or running_properties
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
            # entry is the Property object carrying the fill_brok list
            if hasattr(entry, 'fill_brok'):
                if brok_type in entry.fill_brok:
                    data[prop] = getattr(self, prop)
# Get a brok with initial status
def get_initial_status_brok(self):
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
b = Brok('downtime_raise', data)
return b
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverted function of getstate
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
if self.id >= cls.id:
cls.id = self.id + 1
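# A minimal usage sketch (assuming `svc` is a Service object): a fixed
# downtime gets its duration computed from the window, while a flexible one
# keeps the requested duration and only uses the window as a trigger range:
#
#   now = int(time.time())
#   fixed = Downtime(svc, now, now + 3600, True, 0, 0, 'admin', 'planned work')
#   assert fixed.duration == 3600
#   flex = Downtime(svc, now, now + 3600, False, 0, 600, 'admin', 'flex window')
#   assert flex.duration == 600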
| 10,800 | Python | .py | 233 | 37.416309 | 98 | 0.621165 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) | 6,473 | log.py | shinken-solutions_shinken/shinken/log.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
import os
import stat
from logging import Handler, Formatter, StreamHandler, NOTSET, FileHandler
from logging.handlers import TimedRotatingFileHandler
from shinken.brok import Brok
try:
from shinken.misc.termcolor import cprint
except (SyntaxError, ImportError) as exp:
    # Ouch, can't import cprint, fall back to a simple print
def cprint(s, color='', end=''):
print(s)
# obj = None
# name = None
human_timestamp_log = False
_brokhandler_ = None
defaultFormatter = Formatter('[%(created)i] %(levelname)s: %(message)s')
defaultFormatter_named = Formatter('[%(created)i] %(levelname)s: [%(name)s] %(message)s')
humanFormatter = Formatter('[%(asctime)s] %(levelname)s: %(message)s', '%a %b %d %H:%M:%S %Y')
humanFormatter_named = Formatter('[%(asctime)s] %(levelname)s: [%(name)s] %(message)s',
'%a %b %d %H:%M:%S %Y')
nagFormatter = Formatter('[%(created)i] %(message)s')
class BrokHandler(Handler):
"""
This log handler is forwarding log messages as broks to the broker.
    Only messages of level higher than DEBUG are sent to other
    satellites, to not risk overloading them.
"""
def __init__(self, broker):
# Only messages of level INFO or higher are passed on to the
# broker. Other handlers have a different level.
Handler.__init__(self, logging.INFO)
self._broker = broker
def emit(self, record):
try:
msg = self.format(record)
brok = Brok('log', {'log': msg + '\n'})
self._broker.add(brok)
except Exception:
self.handleError(record)
class ColorStreamHandler(StreamHandler):
def emit(self, record):
try:
msg = self.format(record)
colors = {'DEBUG': 'cyan', 'INFO': 'magenta',
'WARNING': 'yellow', 'CRITICAL': 'magenta', 'ERROR': 'red'}
cprint(msg, colors[record.levelname])
except UnicodeEncodeError:
print(msg.encode('ascii', 'ignore'))
except Exception:
self.handleError(record)
class Log(logging.Logger):
"""
Shinken logger class, wrapping access to Python logging standard library.
    See: https://docs.python.org/2/howto/logging.html#logging-flow for more detail about
    how logs are handled"""
def __init__(self, name="Shinken", level=NOTSET, log_set=False):
logging.Logger.__init__(self, name, level)
self.pre_log_buffer = []
self.log_set = log_set
def setLevel(self, level):
""" Set level of logger and handlers.
The logger need the lowest level (see link above)
"""
if not isinstance(level, int):
level = getattr(logging, level, None)
if not level or not isinstance(level, int):
raise TypeError('log level must be an integer')
        # Not very useful; all we have to do is not set the level > INFO for the brok handler
self.level = min(level, logging.INFO)
# Only set level to file and/or console handler
for handler in self.handlers:
if isinstance(handler, BrokHandler):
continue
handler.setLevel(level)
def load_obj(self, object, name_=None):
""" We load the object where we will put log broks
with the 'add' method
"""
global _brokhandler_
_brokhandler_ = BrokHandler(object)
if name_ is not None or self.name is not None:
if name_ is not None:
self.name = name_
            # We need to set the named format on all other handlers
for handler in self.handlers:
handler.setFormatter(defaultFormatter_named)
_brokhandler_.setFormatter(defaultFormatter_named)
else:
_brokhandler_.setFormatter(defaultFormatter)
self.addHandler(_brokhandler_)
def register_local_log(self, path, level=None, purge_buffer=True):
"""The shinken logging wrapper can write to a local file if needed
and return the file descriptor so we can avoid to
close it.
Add logging to a local log-file.
The file will be rotated once a day
"""
self.log_set = True
# Todo : Create a config var for backup count
if os.path.exists(path) and not stat.S_ISREG(os.stat(path).st_mode):
# We don't have a regular file here. Rotate may fail
# It can be one of the stat.S_IS* (FIFO? CHR?)
handler = FileHandler(path)
else:
handler = TimedRotatingFileHandler(path, 'midnight', backupCount=5)
if level is not None:
handler.setLevel(level)
if self.name is not None:
handler.setFormatter(defaultFormatter_named)
else:
handler.setFormatter(defaultFormatter)
self.addHandler(handler)
# Ok now unstack all previous logs
if purge_buffer:
self._destack()
# Todo : Do we need this now we use logging?
return handler.stream.fileno()
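    # A small usage sketch (the path is hypothetical). The handler level can
    # only narrow what the logger itself lets through:
    #
    #   logger.setLevel('INFO')
    #   logger.register_local_log('/tmp/shinken-test.log', logging.INFO)
    #   logger.info("now also written to the rotated file")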
def set_human_format(self, on=True):
"""
Set the output as human format.
If the optional parameter `on` is False, the timestamps format
will be reset to the default format.
"""
global human_timestamp_log
human_timestamp_log = bool(on)
# Apply/Remove the human format to all handlers except the brok one.
for handler in self.handlers:
if isinstance(handler, BrokHandler):
continue
if self.name is not None:
handler.setFormatter(human_timestamp_log and humanFormatter_named or
defaultFormatter_named)
else:
handler.setFormatter(human_timestamp_log and humanFormatter or defaultFormatter)
# Stack logs if we don't open a log file so we will be able to flush them
# Stack max 500 logs (no memory leak please...)
def _stack(self, level, args, kwargs):
if self.log_set:
return
self.pre_log_buffer.append((level, args, kwargs))
if len(self.pre_log_buffer) > 500:
self.pre_log_buffer = self.pre_log_buffer[2:]
# Ok, we are opening a log file, flush all the logs now
def _destack(self):
for (level, args, kwargs) in self.pre_log_buffer:
f = getattr(logging.Logger, level, None)
if f is None:
self.warning('Missing level for a log? %s', level)
continue
f(self, *args, **kwargs)
def debug(self, *args, **kwargs):
self._stack('debug', args, kwargs)
logging.Logger.debug(self, *args, **kwargs)
def info(self, *args, **kwargs):
self._stack('info', args, kwargs)
# super(logging.Logger, self).info(*args, **kwargs)
logging.Logger.info(self, *args, **kwargs)
def warning(self, *args, **kwargs):
self._stack('warning', args, kwargs)
logging.Logger.warning(self, *args, **kwargs)
def error(self, *args, **kwargs):
self._stack('error', args, kwargs)
logging.Logger.error(self, *args, **kwargs)
# --- create the main logger ---
logging.setLoggerClass(Log)
logger = logging.getLogger('Shinken')
if hasattr(sys.stdout, 'isatty'):
csh = ColorStreamHandler(sys.stdout)
if logger.name is not None:
csh.setFormatter(defaultFormatter_named)
else:
csh.setFormatter(defaultFormatter)
logger.addHandler(csh)
def naglog_result(level, result, *args):
"""
    Function used for old Nagios compatibility. We set the format properly for this call only.
    Dirty hack to keep the old format; we should have another logger and
    use one for Shinken logs and another for monitoring data
"""
prev_formatters = []
for handler in logger.handlers:
prev_formatters.append(handler.formatter)
handler.setFormatter(nagFormatter)
log_fun = getattr(logger, level)
if log_fun:
log_fun(result)
for index, handler in enumerate(logger.handlers):
handler.setFormatter(prev_formatters[index])
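# Usage sketch: emit one monitoring-data line in the bare Nagios format
# ("[timestamp] message", no level prefix); formatters are restored right after:
#
#   naglog_result('info', 'SERVICE ALERT: srv-01;Load;OK;HARD;1;OK')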
| 9,182 | Python | .py | 213 | 35.197183 | 96 | 0.645237 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) | 6,474 | brok.py | shinken-solutions_shinken/shinken/brok.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from shinken.serializer import serialize, deserialize
import sys
import io
class Brok(object):
"""A Brok is a piece of information exported by Shinken to the Broker.
    The broker can do whatever it wants with it.
"""
#__slots__ = ('id', 'type', 'data', 'prepared', 'instance_id')
id = 0
my_type = 'brok'
def __init__(self, _type, data):
self.type = _type
self.id = self.__class__.id
self.__class__.id += 1
#self.data = serialize(data)
self.data = data
#self.prepared = False
self.prepared = True
def __str__(self):
return str(self.__dict__)
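    # A brok is just a typed bag of data; a quick sketch:
    #   b = Brok('log', {'log': 'something happened\n'})
    #   b.type -> 'log'
    #   b.data -> {'log': 'something happened\n'}
    # Each instantiation bumps the class-level id counter.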
    # We deserialize the data, and if some props were
    # added after the serialize pass, we integrate them into the data
def prepare(self):
        # It's no longer necessary to deserialize the brok's data, as all broks
        # are already serialized/deserialized when they are transported.
        # For compatibility reasons, this method remains, but does nothing.
pass
# Maybe the brok is a old daemon one or was already prepared
# if so, the data is already ok
# if not self.prepared:
# self.data = deserialize(self.data)
# if hasattr(self, 'instance_id'):
# self.data['instance_id'] = self.instance_id
# self.prepared = True
# def __getstate__(self):
# # id is not in *_properties
# res = {'id': self.id}
# for prop in ("data", "instance_id", "prepared", "type"):
# if hasattr(self, prop):
# res[prop] = getattr(self, prop)
# return res
#
# def __setstate__(self, state):
# cls = self.__class__
#
# for prop in ("id", "data", "instance_id", "prepared", "type"):
# if prop in state:
# setattr(self, prop, state[prop])
#
# # to prevent from duplicating id in comments:
# if self.id >= cls.id:
# cls.id = self.id + 1
| 3,003 | Python | .py | 78 | 35.269231 | 82 | 0.643568 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) | 6,475 | modulesmanager.py | shinken-solutions_shinken/shinken/modulesmanager.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
import sys
import traceback
import io
from os.path import join, isdir, abspath, dirname
from os import listdir
from shinken.basemodule import BaseModule
from shinken.log import logger
import importlib
# We need to map pre-2.0 module types using '_' to the new 2.0 '-' style
def uniform_module_type(s):
return s.replace('_', '-')
class ModulesManager(object):
"""This class is use to manage modules and call callback"""
def __init__(self, modules_type, modules_path, modules):
self.modules_path = modules_path
self.modules_type = modules_type
self.modules = modules
self.allowed_types = [uniform_module_type(plug.module_type) for plug in modules]
self.imported_modules = []
self.modules_assoc = []
self.instances = []
self.to_restart = []
self.max_queue_size = 0
self.manager = None
def load_manager(self, manager):
self.manager = manager
# Set the modules requested for this manager
def set_modules(self, modules):
self.modules = modules
self.allowed_types = [uniform_module_type(mod.module_type) for mod in modules]
def set_max_queue_size(self, max_queue_size):
self.max_queue_size = max_queue_size
    # Import, instantiate & "init" the modules we have been requested to load
def load_and_init(self):
self.load()
self.get_instances()
@classmethod
def _try_load(cls, name, package=None):
try:
mod = importlib.import_module(name, package)
except Exception as err:
logger.warning("Cannot import %s : %s",
'%s.%s' % (package, name) if package else name,
err)
return
        # if the module has a 'properties' and a 'get_instance'
# then we are happy and we'll use that:
try:
mod.properties
mod.get_instance
except AttributeError:
return
return mod
@classmethod
def try_load(cls, mod_name, mod_dir=None):
mod = cls._try_load(mod_name)
if mod:
msg = "Correctly loaded %s as a very-new-style shinken module :)"
logger.info(msg, mod_name)
return mod
mod = cls._try_load('{}.module'.format(mod_name), mod_name)
if mod:
msg = "Correctly loaded %s as an old-new-style shinken module :|"
logger.info(msg, mod_name)
return mod
return None
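    # In other words, both on-disk layouts are accepted (a sketch with a
    # hypothetical module called "mymod", modules_path being on sys.path):
    #   modules/mymod.py        -> import mymod         (very-new-style)
    #   modules/mymod/module.py -> import mymod.module  (old-new-style)
    # as long as the imported module exposes `properties` and `get_instance`.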
# Try to import the requested modules ; put the imported modules in self.imported_modules.
    # The previously imported modules, if any, are cleaned beforehand.
def load(self):
if self.modules_path not in sys.path:
sys.path.append(self.modules_path)
modules_dirs = [
fname for fname in listdir(self.modules_path)
if isdir(join(self.modules_path, fname))
]
del self.imported_modules[:]
for mod_name in modules_dirs:
            # Do not look into the .git folder
if mod_name == ".git":
logger.info("Found '.git' directory in modules, skip it.")
continue
mod_file = abspath(join(self.modules_path, mod_name, 'module.py'))
mod_dir = os.path.normpath(os.path.dirname(mod_file))
mod = self.try_load(mod_name, mod_dir)
if not mod:
continue
try:
is_our_type = self.modules_type in mod.properties['daemons']
except Exception as err:
logger.warning("Bad module file for %s : cannot check its properties['daemons']"
"attribute : %s", mod_file, err)
else: # We want to keep only the modules of our type
if is_our_type:
self.imported_modules.append(mod)
        # Now we want to find in these modules the ones we are looking for
del self.modules_assoc[:]
for mod_conf in self.modules:
module_type = uniform_module_type(mod_conf.module_type)
for module in self.imported_modules:
if uniform_module_type(module.properties['type']) == module_type:
self.modules_assoc.append((mod_conf, module))
break
else: # No module is suitable, we emit a Warning
logger.warning("The module type %s for %s was not found in modules!",
module_type, mod_conf.get_name())
# Try to "init" the given module instance.
# If late_start, don't look for last_init_try
# Returns: True on successful init. False if instance init method raised any Exception.
def try_instance_init(self, inst, late_start=False):
try:
logger.info("Trying to init module: %s", inst.get_name())
inst.init_try += 1
# Maybe it's a retry
if not late_start and inst.init_try > 1:
# Do not try until 5 sec, or it's too loopy
if inst.last_init_try > time.time() - 5:
return False
inst.last_init_try = time.time()
# If it's an external, create/update Queues()
if inst.is_external:
inst.create_queues(self.manager)
inst.init()
except Exception as e:
logger.error("The instance %s raised an exception %s, I remove it!",
inst.get_name(), e)
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Back trace of this remove: %s", output.getvalue())
output.close()
return False
return True
# Request to "remove" the given instances list or all if not provided
def clear_instances(self, insts=None):
if insts is None:
insts = self.instances[:] # have to make a copy of the list
for i in insts:
self.remove_instance(i)
# Put an instance to the restart queue
def set_to_restart(self, inst):
self.to_restart.append(inst)
    # actually only the arbiter calls this method with start_external=False..
# Create, init and then returns the list of module instances that the caller needs.
# If an instance can't be created or init'ed then only log is done.
# That instance is skipped. The previous modules instance(s), if any, are all cleaned.
def get_instances(self):
self.clear_instances()
for (mod_conf, module) in self.modules_assoc:
mod_conf.properties = module.properties.copy()
try:
inst = module.get_instance(mod_conf)
if not isinstance(inst, BaseModule):
raise TypeError('Returned instance is not of type BaseModule (%s) !'
% type(inst))
except Exception as err:
logger.error("The module %s raised an exception %s, I remove it! traceback=%s",
mod_conf.get_name(), err, traceback.format_exc())
else:
                # Tell the instance which daemon type it was loaded from
inst.set_loaded_into(self.modules_type)
self.instances.append(inst)
for inst in self.instances:
# External are not init now, but only when they are started
if not inst.is_external and not self.try_instance_init(inst):
                # If the init failed, we put it in the restart queue
logger.warning("The module '%s' failed to init, I will try to restart it later",
inst.get_name())
self.to_restart.append(inst)
return self.instances
    # Launch external instances that are loaded correctly
def start_external_instances(self, late_start=False):
for inst in [inst for inst in self.instances if inst.is_external]:
            # But maybe the init failed a bit, so bypass these ones for now
if not self.try_instance_init(inst, late_start=late_start):
logger.warning("The module '%s' failed to init, I will try to restart it later",
inst.get_name())
self.to_restart.append(inst)
continue
# ok, init succeed
logger.info("Starting external module %s", inst.get_name())
inst.start()
# Request to cleanly remove the given instance.
    # If the instance is external, also shut it down cleanly
def remove_instance(self, inst):
        # External instances need to be closed first (process + queues)
if inst.is_external:
logger.debug("Ask stop process for %s", inst.get_name())
inst.stop_process()
logger.debug("Stop process done")
inst.clear_queues(self.manager)
        # Then stop listening to it
self.instances.remove(inst)
def check_alive_instances(self):
# Only for external
for inst in self.instances:
if inst not in self.to_restart:
if inst.is_external and not inst.process.is_alive():
logger.error("The external module %s goes down unexpectedly!", inst.get_name())
logger.info("Setting the module %s to restart", inst.get_name())
# We clean its queues, they are no more useful
inst.clear_queues(self.manager)
self.to_restart.append(inst)
# Ok, no need to look at queue size now
continue
            # Now look at the max queue size. If above the limit, the module probably has
            # a huge problem and should bail out. It's not a perfect solution, more a watchdog
# If max_queue_size is 0, don't check this
if self.max_queue_size == 0:
continue
# Ok, go launch the dog!
queue_size = 0
try:
queue_size = inst.to_q.qsize()
except Exception as exp:
pass
if queue_size > self.max_queue_size:
logger.error("The external module %s got a too high brok queue size (%s > %s)!",
inst.get_name(), queue_size, self.max_queue_size)
logger.info("Setting the module %s to restart", inst.get_name())
# We clean its queues, they are no more useful
inst.clear_queues(self.manager)
self.to_restart.append(inst)
def try_to_restart_deads(self):
to_restart = self.to_restart[:]
del self.to_restart[:]
for inst in to_restart:
logger.debug("I should try to reinit %s", inst.get_name())
if self.try_instance_init(inst):
logger.debug("Good, I try to restart %s", inst.get_name())
# If it's an external, it will start it
inst.start()
# Ok it's good now :)
else:
self.to_restart.append(inst)
    # Do not give to others the instances that have problems
def get_internal_instances(self, phase=None):
return [inst for inst in self.instances
if not inst.is_external and phase in inst.phases and inst not in self.to_restart]
def get_external_instances(self, phase=None):
return [inst for inst in self.instances
if inst.is_external and phase in inst.phases and inst not in self.to_restart]
def get_external_to_queues(self):
return [inst.to_q
for inst in self.instances
if inst.is_external and inst not in self.to_restart]
def get_external_from_queues(self):
return [inst.from_q
for inst in self.instances
if inst.is_external and inst not in self.to_restart]
def stop_all(self):
# Ask internal to quit if they can
for inst in self.get_internal_instances():
if hasattr(inst, 'quit') and callable(inst.quit):
inst.quit()
self.clear_instances([inst for inst in self.instances if inst.is_external])
| 13,267 | Python | .py | 279 | 36.121864 | 100 | 0.59839 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) | 6,476 | memoized.py | shinken-solutions_shinken/shinken/memoized.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
            # uncacheable -- for instance, passing a list as an argument.
# Better to not catch it than to blow up entirely.
return self.func(*args)
# Return the function's docstring.
def __repr__(self):
return self.func.__doc__
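# Usage sketch: cache an expensive pure function. Only hashable argument
# tuples are cached; unhashable ones fall through to a direct call (the
# TypeError branch above):
#
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # each distinct n is computed once, then served from the cache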
| 1,789 | Python | .py | 44 | 36 | 82 | 0.690668 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) | 6,477 | action.py | shinken-solutions_shinken/shinken/action.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2017:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import time
import shlex
import sys
import subprocess
import signal
# Try to read in non-blocking mode; for now this is only available on
# Unix systems
try:
import fcntl
except ImportError:
fcntl = None
from shinken.log import logger
__all__ = ('Action',)
valid_exit_status = (0, 1, 2, 3)
only_copy_prop = ('id', 'status', 'command', 't_to_go', 'timeout',
'env', 'module_type', 'execution_time', 'u_time', 's_time')
shellchars = ('!', '$', '^', '&', '*', '(', ')', '~', '[', ']',
'|', '{', '}', ';', '<', '>', '?', '`')
# Try to read a fd in a non blocking mode
def no_block_read(output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read().decode("utf-8")
except Exception:
return ''
class __Action(object):
"""
This abstract class is used just for having a common id for both
actions and checks.
"""
id = 0
    # When we load a previously created element, new objects should
    # not start at 0 again, so we must raise Action.id if needed
@staticmethod
def assume_at_least_id(_id):
Action.id = max(Action.id, _id)
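    # Illustrative example (hypothetical id): after reloading retention data
    # whose highest action id was 4242, calling
    #     Action.assume_at_least_id(4242)
    # guarantees that freshly created actions get ids above 4242, so old and
    # new objects never collide.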
def set_type_active(self):
"Dummy function, only useful for checks"
pass
def set_type_passive(self):
"Dummy function, only useful for checks"
pass
def get_local_environnement(self):
"""
Mix the env and the environment variables into a new local
env dict.
        Note: We cannot just update the global os.environ because this
        would affect all other checks.
"""
# Do not use copy.copy() here, as the resulting copy still
# changes the real environment (it is still a os._Environment
# instance).
local_env = os.environ.copy()
for p in self.env:
local_env[p] = self.env[p].rstrip('\x00')
return local_env
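    # Sketch of the expected behaviour (hypothetical values): with
    # self.env == {'NAGIOS_HOSTNAME': 'srv1\x00'}, the returned dict is a
    # copy of os.environ plus NAGIOS_HOSTNAME='srv1' (trailing NULs stripped),
    # and os.environ itself is left untouched for the other checks.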
def execute(self):
"""
Start this action command. The command will be executed in a
subprocess.
"""
self.status = 'launched'
self.check_time = time.time()
self.wait_time = 0.0001
self.last_poll = self.check_time
# Get a local env variables with our additional values
self.local_env = self.get_local_environnement()
        # Initialize stdout and stderr. We will read them in small parts
        # if fcntl is available
self.stdoutdata = ''
self.stderrdata = ''
return self.execute__() # OS specific part
def get_outputs(self, out, max_plugins_output_length):
# Squeeze all output after max_plugins_output_length
out = out[:max_plugins_output_length]
# manage escaped pipes
out = out.replace('\|', '___PROTECT_PIPE___')
# Then cuts by lines
elts = out.split('\n')
# For perf data
elts_line1 = elts[0].split('|')
# First line before | is output, and strip it
self.output = elts_line1[0].strip().replace('___PROTECT_PIPE___', '|')
# Init perfdata as void
self.perf_data = ''
# After | is perfdata, and strip it
if len(elts_line1) > 1:
self.perf_data = elts_line1[1].strip().replace('___PROTECT_PIPE___', '|')
# Now manage others lines. Before the | it's long_output
# And after it's all perf_data, \n join
long_output = []
in_perfdata = False
for line in elts[1:]:
# if already in perfdata, direct append
if in_perfdata:
self.perf_data += ' ' + line.strip().replace('___PROTECT_PIPE___', '|')
else: # not already in? search for the | part :)
elts = line.split('|', 1)
# The first part will always be long_output
long_output.append(elts[0].strip().replace('___PROTECT_PIPE___', '|'))
if len(elts) > 1:
in_perfdata = True
self.perf_data += ' ' + elts[1].strip().replace('___PROTECT_PIPE___', '|')
# long_output is all non output and perfline, join with \n
self.long_output = '\n'.join(long_output)
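    # Worked example (hypothetical plugin output) of the parsing above:
    #     "DISK OK - free: / 3326 MB|/=2643MB;5948\nmore details|'extra'=1"
    # gives output      = "DISK OK - free: / 3326 MB"
    #       perf_data   = "/=2643MB;5948 'extra'=1"
    #       long_output = "more details"
    # Escaped pipes ("\|") survive as literal "|" in every part.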
def check_finished(self, max_plugins_output_length):
        # We must wait, but checks are variable in time, so we do not
        # wait the same for a little check as for a long ping.
        # We do it like TCP: slow start, doubling the wait each turn,
        # but never more than 0.1s.
if self.status != 'launched':
return
self.last_poll = time.time()
_, _, child_utime, child_stime, _ = os.times()
if self.process.poll() is None:
self.wait_time = min(self.wait_time * 2, 0.1)
now = time.time()
# If the fcntl is available (unix) we try to read in a
# asynchronous mode, so we won't block the PIPE at 64K buffer
# (deadlock...)
if fcntl:
self.stdoutdata += no_block_read(self.process.stdout)
self.stderrdata += no_block_read(self.process.stderr)
if (now - self.check_time) > self.timeout:
self.kill__()
self.status = 'timeout'
self.execution_time = now - self.check_time
self.exit_status = 3
# Do not keep a pointer to the process
self.process.stdout.close()
self.process.stderr.close()
del self.process
# Get the user and system time
_, _, n_child_utime, n_child_stime, _ = os.times()
self.u_time = n_child_utime - child_utime
self.s_time = n_child_stime - child_stime
return
return
# Get standards outputs from the communicate function if we do
# not have the fcntl module (Windows, and maybe some special
# unix like AIX)
if fcntl:
            # The command was too quick and finished even before we could
            # poll it a first time. So finish the read.
self.stdoutdata += no_block_read(self.process.stdout)
self.stderrdata += no_block_read(self.process.stderr)
else:
self.stdoutdata, self.stderrdata = self.process.communicate()
self.exit_status = self.process.returncode
# we should not keep the process now
self.process.stdout.close()
self.process.stderr.close()
del self.process
# check if process was signaled #11 (SIGSEGV)
if self.exit_status == -11:
self.stderrdata += " signaled #11 (SIGSEGV)"
# If abnormal termination of check and no error data, set at least exit status info as error information
if not self.stderrdata.strip() and self.exit_status not in valid_exit_status:
self.stderrdata += "Abnormal termination with code: %r" % (self.exit_status,)
# check for bad syntax in command line:
if ('sh: -c: line 0: unexpected EOF while looking for matching' in self.stderrdata or
('sh: -c:' in self.stderrdata and ': Syntax' in self.stderrdata) or
'Syntax error: Unterminated quoted string' in self.stderrdata):
# Very, very ugly. But subprocess._handle_exitstatus does
# not see a difference between a regular "exit 1" and a
# bailing out shell. Strange, because strace clearly shows
# a difference. (exit_group(1) vs. exit_group(257))
self.stdoutdata = self.stdoutdata + self.stderrdata
self.exit_status = 3
if self.exit_status not in valid_exit_status:
self.exit_status = 3
if not self.stdoutdata.strip():
self.stdoutdata = self.stderrdata
# Now grep what we want in the output
self.get_outputs(self.stdoutdata, max_plugins_output_length)
# We can clean the useless properties now
del self.stdoutdata
del self.stderrdata
self.status = 'done'
self.execution_time = time.time() - self.check_time
# Also get the system and user times
_, _, n_child_utime, n_child_stime, _ = os.times()
self.u_time = n_child_utime - child_utime
self.s_time = n_child_stime - child_stime
def copy_shell__(self, new_i):
"""
Copy all attributes listed in 'only_copy_prop' from `self` to
`new_i`.
"""
for prop in only_copy_prop:
setattr(new_i, prop, getattr(self, prop))
return new_i
def got_shell_characters(self):
for c in self.command:
if c in shellchars:
return True
return False
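    # Example (hypothetical commands): "/bin/check_ping -H srv1" contains no
    # shell character, so it can be exec'd directly after shlex.split();
    # "echo $PATH | grep local" contains '$' and '|', so execute__ will run
    # it through a shell.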
#
# OS specific "execute__" & "kill__" are defined by "Action" class
# definition:
#
if os.name != 'nt':
class Action(__Action):
def execute__(self, force_shell=False):
# If the command line got shell characters, we should go
            # in a shell mode. So look at these parameters
force_shell |= self.got_shell_characters()
try:
if six.PY2:
command = self.command.encode("utf-8")
else:
command = self.command
if force_shell is False:
command = shlex.split(command)
else:
# In case of shell command, only check syntax, do not split
# the chain
shlex.split(command)
except Exception as exp:
self.output = 'Not a valid shell command: ' + exp.__str__()
self.exit_status = 3
self.status = 'done'
self.execution_time = time.time() - self.check_time
return
# Now: GO for launch!
# logger.debug("Launching: %s" % (self.command.encode('utf8', 'ignore')))
# The preexec_fn=os.setsid is set to give sons a same
# process group. See
# http://www.doughellmann.com/PyMOTW/subprocess/ for
# detail about this.
try:
self.process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True, shell=force_shell, env=self.local_env,
preexec_fn=os.setsid)
except OSError as exp:
logger.error("Fail launching command: %s %s %s",
self.command, exp, force_shell)
# Maybe it's just a shell we try to exec. So we must retry
if (not force_shell and exp.errno == 8 and exp.strerror == 'Exec format error'):
return self.execute__(True)
self.process = None
self.output = exp.__str__()
self.exit_status = 2
self.status = 'done'
self.execution_time = time.time() - self.check_time
# Maybe we run out of file descriptor. It's not good at all!
if exp.errno == 24 and exp.strerror == 'Too many open files':
return 'toomanyopenfiles'
def kill__(self):
# We kill a process group because we launched them with
# preexec_fn=os.setsid and so we can launch a whole kill
# tree instead of just the first one
os.killpg(self.process.pid, signal.SIGKILL)
# Try to force close the descriptors, because python seems to have problems with them
for fd in [self.process.stdout, self.process.stderr]:
try:
fd.close()
except Exception:
pass
else:
import ctypes
TerminateProcess = ctypes.windll.kernel32.TerminateProcess
class Action(__Action):
def execute__(self):
try:
cmd = shlex.split(self.command)
except Exception as exp:
self.output = 'Not a valid shell command: ' + exp.__str__()
self.exit_status = 3
self.status = 'done'
self.execution_time = time.time() - self.check_time
return
try:
self.process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=self.local_env, shell=True)
except WindowsError as exp:
logger.info("We kill the process: %s %s", exp, self.command)
self.status = 'timeout'
self.execution_time = time.time() - self.check_time
def kill__(self):
TerminateProcess(int(self.process._handle), -1)
| 13,779 | Python | .py | 312 | 33.682692 | 112 | 0.581281 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,478 | arbiterlink.py | shinken-solutions_shinken/shinken/arbiterlink.py |
'''shinken.arbiterlink is deprecated. Please use shinken.objects.arbiterlink now.'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.old_daemon_link import make_deprecated_daemon_link
from shinken.objects import arbiterlink
make_deprecated_daemon_link(arbiterlink)
| 317 | Python | .py | 5 | 61.6 | 84 | 0.834416 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,479 | satellite.py | shinken-solutions_shinken/shinken/satellite.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This module is an interface for the Reactionner and Poller daemons.
A Reactionner listens on a port for the configuration from the Arbiter.
The conf contains the schedulers from which the actioners will gather actions.
The Reactionner keeps on listening to the Arbiter (on a timeout).
If the Arbiter wants it to have a new conf, the satellite forgets the previous
schedulers (and the actions they held) and takes the new ones.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import sys
# Try to see if we are in an android device or not
try:
import android
is_android = True
except ImportError:
is_android = False
import os
import copy
import time
import traceback
import zlib
import base64
import threading
import multiprocessing
from shinken.http_client import HTTPClient, HTTPException
from shinken.message import Message
from shinken.worker import Worker
from shinken.load import Load
from shinken.daemon import Daemon, Interface
from shinken.log import logger
from shinken.util import get_memory, parse_memory_expr, free_memory
from shinken.serializer import serialize, deserialize, SerializeError
from shinken.stats import statsmgr
if six.PY2:
from Queue import Empty, Queue
else:
from queue import Empty, Queue
# Class to tell that we are facing a non worker module
# but a standard one
class NotWorkerMod(Exception):
pass
# Interface for Arbiter, our big MASTER
# It gives us our conf
class IForArbiter(Interface):
doc = 'Remove a scheduler connection (internal)'
    # The arbiter asks us to stop managing a scheduler_id.
    # I do it and don't ask why
def remove_from_conf(self, sched_id):
try:
del self.app.schedulers[sched_id]
except KeyError:
pass
remove_from_conf.doc = doc
doc = 'Return the managed configuration ids (internal)'
    # The arbiter asks me which sched_ids I manage. If it is not OK with that,
    # it will ask me to remove one or more sched_ids
def what_i_managed(self):
logger.debug("The arbiter asked me what I manage. It's %s", self.app.what_i_managed())
return serialize(self.app.what_i_managed())
what_i_managed.need_lock = False
what_i_managed.doc = doc
doc = 'Ask the daemon to drop its configuration and wait for a new one'
    # Called by the arbiter if it thinks we are running but we should not be
    # (like if I was a spare that took a conf but the master came back:
    # I must die and wait for a new conf)
# Us: No please...
# Arbiter: I don't care, hasta la vista baby!
# Us: ... <- Nothing! We are dead! you don't get it or what??
# Reading code is not a job for eyes only...
def wait_new_conf(self):
logger.debug("Arbiter wants me to wait for a new configuration")
self.app.schedulers.clear()
self.app.cur_conf = None
wait_new_conf.doc = doc
doc = 'Push broks objects to the daemon (internal)'
# NB: following methods are only used by broker
# Used by the Arbiter to push broks to broker
def push_broks(self, broks):
with self.app.arbiter_broks_lock:
self.app.arbiter_broks.extend(broks)
push_broks.method = 'PUT'
    # We are using a Lock just for NOT locking this call from the arbiter :)
push_broks.need_lock = False
push_broks.doc = doc
doc = 'Get the external commands from the daemon (internal)'
    # The arbiter asks us for the external commands in our queue.
    # Same as push_broks: we will not use the global lock here,
    # and only lock for external_commands
def get_external_commands(self):
with self.app.external_commands_lock:
cmds = self.app.get_external_commands()
return serialize(cmds)
get_external_commands.need_lock = False
get_external_commands.encode = 'raw'
get_external_commands.doc = doc
    doc = 'Whether the daemon has got a configuration (receiver)'
# NB: only useful for receiver
def got_conf(self):
return serialize(self.app.cur_conf is not None)
got_conf.need_lock = False
got_conf.doc = doc
doc = 'Push hostname/scheduler links (receiver in direct routing)'
    # Used by the receivers to get the host names managed by the schedulers
def push_host_names(self, data):
sched_id = data["sched_id"]
hnames = data["hnames"]
self.app.push_host_names({'sched_id': sched_id, 'hnames': hnames})
push_host_names.method = 'PUT'
push_host_names.doc = doc
class ISchedulers(Interface):
"""Interface for Schedulers
If we are passive, they connect to this and send/get actions
"""
doc = 'Push new actions to the scheduler (internal)'
# A Scheduler send me actions to do
def push_actions(self, actions, sched_id):
self.app.add_actions(actions, int(sched_id))
push_actions.method = 'PUT'
push_actions.doc = doc
doc = 'Get the returns of the actions (internal)'
# A scheduler ask us the action return value
def get_returns(self, sched_id):
# print("A scheduler ask me the returns", sched_id)
ret = self.app.get_return_for_passive(int(sched_id))
# print("Send mack", len(ret), "returns")
return serialize(ret)
get_returns.doc = doc
class IBroks(Interface):
"""Interface for Brokers
They connect here and get all broks (data for brokers)
data must be ORDERED! (initial status BEFORE update...)
"""
doc = 'Get broks from the daemon'
    # a broker asks us for our broks
def get_broks(self, bname, broks_batch=0):
res = self.app.get_broks(broks_batch)
return serialize(res)
get_broks.encode = 'raw'
get_broks.doc = doc
class IStats(Interface):
"""
Interface for various stats about poller/reactionner activity
"""
doc = 'Get raw stats from the daemon'
def get_raw_stats(self):
app = self.app
res = {}
for sched_id in app.schedulers:
sched = app.schedulers[sched_id]
lst = []
res[sched_id] = lst
for mod in app.q_by_mod:
            # In workers we've got actions sent to the queue - queue size
for (i, q) in app.q_by_mod[mod].items():
lst.append({
'scheduler_name': sched['name'],
'module': mod,
'queue_number': i,
'queue_size': q.qsize(),
'return_queue_len': app.get_returns_queue_len()})
return res
get_raw_stats.doc = doc
class BaseSatellite(Daemon):
"""Please Add a Docstring to describe the class here"""
def __init__(self, name, config_file, is_daemon, do_replace, debug, debug_file):
super(BaseSatellite, self).__init__(name, config_file, is_daemon,
do_replace, debug, debug_file)
# Ours schedulers
self.schedulers = {}
# Now we create the interfaces
self.interface = IForArbiter(self)
self.istats = IStats(self)
# Can have a queue of external_commands given by modules
# will be taken by arbiter to process
self.external_commands = []
self.external_commands_lock = threading.RLock()
    # The arbiter can resend us a new conf on the http_daemon port.
    # We do not want to lose time on it, so it's not a blocking
    # wait, timeout = 0s
    # If it sends us a new conf, we reinit the connections of all schedulers
def watch_for_new_conf(self, timeout):
self.handleRequests(timeout)
def do_stop(self):
if self.http_daemon and self.interface:
logger.info("[%s] Stopping all network connections", self.name)
self.http_daemon.unregister(self.interface)
super(BaseSatellite, self).do_stop()
# Give the arbiter the data about what I manage
# for me it's the ids of my schedulers
def what_i_managed(self):
r = {}
for (k, v) in self.schedulers.items():
r[k] = v['push_flavor']
return r
# Call by arbiter to get our external commands
def get_external_commands(self):
res = self.external_commands
self.external_commands = []
return res
class Satellite(BaseSatellite):
"""Our main APP class"""
def __init__(self, name, config_file, is_daemon, do_replace, debug, debug_file):
super(Satellite, self).__init__(name, config_file, is_daemon, do_replace,
debug, debug_file)
# Keep broks so they can be eaten by a broker
self.broks = []
self.workers = {} # dict of active workers
# Init stats like Load for workers
self.wait_ratio = Load(initial_value=1)
self.brok_interface = IBroks(self)
self.scheduler_interface = ISchedulers(self)
# Just for having these attributes defined here. explicit > implicit ;)
self.uri2 = None
self.uri3 = None
self.s = None
self.returns_queue = None
self.q_by_mod = {}
# Wrapper function for the true con init
def pynag_con_init(self, id):
_t = time.time()
r = self.do_pynag_con_init(id)
statsmgr.timing('con-init.scheduler', time.time() - _t, "perf")
return r
# Initialize or re-initialize connection with scheduler
def do_pynag_con_init(self, id):
sched = self.schedulers[id]
        # If sched is not active, I do not try to init it:
        # it is just useless
if not sched['active']:
return
sname = sched['name']
uri = sched['uri']
running_id = sched['running_id']
timeout = sched['timeout']
data_timeout = sched['data_timeout']
logger.info("[%s] Init connection with %s at %s (%ss,%ss)",
self.name, sname, uri, timeout, data_timeout)
try:
sch_con = sched['con'] = HTTPClient(
uri=uri, strong_ssl=sched['hard_ssl_name_check'],
timeout=timeout, data_timeout=data_timeout)
except HTTPException as exp:
logger.warning(
"[%s] Scheduler %s is not initialized or has network problem: %s",
self.name, sname, exp
)
sched['con'] = None
return
# timeout of 3s by default (short one)
# and get the running id
try:
new_run_id = sch_con.get('get_running_id')
new_run_id = float(new_run_id)
except (HTTPException, SerializeError, KeyError) as exp:
logger.warning(
"[%s] Scheduler %s is not initialized or has network problem: %s",
self.name, sname, exp
)
sched['con'] = None
return
        # The scheduler has been restarted: it has a new run_id.
        # So we clear all verifs, they are obsolete now.
if sched['running_id'] != 0 and new_run_id != running_id:
logger.info("[%s] The running id of the scheduler %s changed, "
"we must clear its actions",
self.name, sname)
del sched['wait_homerun'][:]
sched['running_id'] = new_run_id
logger.info("[%s] Connection OK with scheduler %s", self.name, sname)
    # Manage an action returned from the workers.
    # We just put it into the corresponding sched
    # and we clean unused properties like sched_id
def manage_action_return(self, action):
        # Maybe our workers send us something other than an action;
        # if so, just add it to the other queues and return
cls_type = action.__class__.my_type
if cls_type not in ['check', 'notification', 'eventhandler']:
self.add(action)
return
# Ok, it's a result. We get it, and fill verifs of the good sched_id
sched_id = action.sched_id
        # Now we know where to put the action, we do not need sched_id anymore
del action.sched_id
# Unset the tag of the worker_id too
try:
del action.worker_id
except AttributeError:
pass
# And we remove it from the actions queue of the scheduler too
try:
del self.schedulers[sched_id]['actions'][action.get_id()]
except KeyError:
pass
# We tag it as "return wanted", and move it in the wait return queue
# Stop, if it is "timeout" we need this information later
# in the scheduler
# action.status = 'waitforhomerun'
try:
self.schedulers[sched_id]['wait_homerun'].append(action)
except KeyError:
pass
# Wrapper function for stats
def manage_returns(self):
_t = time.time()
r = self.do_manage_returns()
_type = self.__class__.my_type
statsmgr.timing('core.%s.manage-returns' % _type, time.time() - _t,
'perf')
return r
    # Return the checks to the schedulers and clean them
# REF: doc/shinken-action-queues.png (6)
def do_manage_returns(self):
# For all schedulers, we check for waitforhomerun
# and we send back results
count = 0
for sched_id in self.schedulers:
sched = self.schedulers[sched_id]
            # If sched is not active, I do not even try to return results
if not sched['active']:
continue
            # Now ret has all the verifs, we can return them
send_ok = False
if self.results_batch > 0:
batch = min(self.results_batch, len(sched['wait_homerun']))
else:
batch = len(sched['wait_homerun'])
            ret = sched['wait_homerun'][:batch]
            # Nothing to send for this scheduler? Skip it.
            if not ret:
                continue
            try:
                con = sched['con']
                if con is not None:  # None = not initialized
                    send_ok = deserialize(
                        con.put('put_results', serialize(ret))
                    )
            # Not connected or sched is gone
            except (HTTPException, KeyError) as exp:
                logger.error('manage_returns exception:: %s,%s ', type(exp), exp)
                self.pynag_con_init(sched_id)
                return
            except AttributeError as exp:  # the scheduler may not be initialized yet
                logger.error('manage_returns exception:: %s,%s ', type(exp), exp)
            except Exception as exp:
                logger.error("A satellite raised an unknown exception: %s (%s)", exp, type(exp))
                raise
            # We clean ONLY if the send is OK
            if send_ok:
                count += len(ret)
                del sched['wait_homerun'][:batch]
            else:
                self.pynag_con_init(sched_id)
                logger.warning("Send failed!")
_type = self.__class__.my_type
statsmgr.incr('core.%s.results.out' % _type, count, 'queue')
# Get all returning actions for a call from a
# scheduler
def get_return_for_passive(self, sched_id):
# I do not know this scheduler?
if sched_id not in self.schedulers:
logger.debug("I do not know this scheduler: %s", sched_id)
return []
sched = self.schedulers[sched_id]
logger.debug("Preparing to return %s", sched['wait_homerun'])
# prepare our return
if self.results_batch > 0:
batch = min(self.results_batch, len(sched['wait_homerun']))
else:
batch = len(sched['wait_homerun'])
ret = sched['wait_homerun'][:batch]
        # and remove them from the wait list
del sched['wait_homerun'][:batch]
return ret
# Create and launch a new worker, and put it into self.workers
# It can be mortal or not
def create_and_launch_worker(self, module_name='fork', mortal=True):
# create the input queue of this worker
try:
if is_android:
q = Queue()
else:
q = self.manager.Queue()
        # If we have no /dev/shm on linux, we can get a problem here,
        # so we must raise with a good message
except OSError as exp:
# We look for the "Function not implemented" under Linux
if exp.errno == 38 and os.name == 'posix':
logger.critical(
"Got an exception (%s). If you are under Linux, please "
"check that your /dev/shm directory exists and is read-write.",
exp
)
raise
# If we are in the fork module, we do not specify a target
target = None
if module_name == 'fork':
target = None
else:
for module in self.modules_manager.instances:
if module.properties['type'] == module_name:
# First, see if the module is a 'worker' one or not
if not module.properties.get('worker_capable', False):
raise NotWorkerMod
target = module.work
if target is None:
return
# We want to give to the Worker the name of the daemon (poller or reactionner)
cls_name = self.__class__.__name__.lower()
w = Worker(1, q, self.returns_queue, self.processes_by_worker,
mortal=mortal, max_plugins_output_length=self.max_plugins_output_length,
target=target, loaded_into=cls_name, http_daemon=self.http_daemon)
w.module_name = module_name
# save this worker
self.workers[w.id] = w
# And save the Queue of this worker, with key = worker id
self.q_by_mod[module_name][w.id] = q
logger.info("[%s] Allocating new %s Worker: %s", self.name, module_name, w.id)
# Ok, all is good. Start it!
w.start()
# The main stop of this daemon. Stop all workers
# modules and sockets
def do_stop(self):
logger.info("[%s] Stopping all workers", self.name)
for w in self.workers.values():
try:
w.terminate()
w.join(timeout=1)
            # An already dead worker, or we are inside a worker
except (AttributeError, AssertionError):
pass
# Close the server socket if it was opened
if self.http_daemon:
if self.brok_interface:
self.http_daemon.unregister(self.brok_interface)
if self.scheduler_interface:
self.http_daemon.unregister(self.scheduler_interface)
# And then call our master stop from satellite code
super(Satellite, self).do_stop()
# A simple function to add objects in self
# like broks in self.broks, etc
# TODO: better tag ID?
def add(self, elt):
cls_type = elt.__class__.my_type
if cls_type == 'brok':
# For brok, we TAG brok with our instance_id
elt.instance_id = 0
self.broks.append(elt)
return
elif cls_type == 'externalcommand':
logger.debug("Enqueuing an external command '%s'", elt.__dict__)
with self.external_commands_lock:
self.external_commands.append(elt)
    # Someone asks us for our broks. We send them, and clean the queue
def get_broks(self, broks_batch=0):
if broks_batch:
try:
broks_batch = int(broks_batch)
except ValueError:
logger.error("Invalid broks_batch in get_broks, should be an "
"integer. Igored.")
broks_batch = 0
_type = self.__class__.my_type
if broks_batch == 0:
count = len(self.broks)
else:
count = min(broks_batch, len(self.broks))
res = self.broks[:count]
del self.broks[:count]
statsmgr.incr('core.%s.broks.out' % _type, count, 'queue')
return res
    # Workers are processes; they can die in numerous ways,
    # like:
    # *99.99%: bug in code, sorry:p
    # *0.005 %: a mix between a stupid admin (or an admin without coffee),
    # and a kill command
    # *0.005%: alien attack
    # So they need to be detected, and restarted if needed
def check_and_del_zombie_workers(self):
        # On android, we are using threads, so there is no active_children call
if not is_android:
# Active children make a join with everyone, useful :)
multiprocessing.active_children()
w_to_del = []
for w in self.workers.values():
            # If a worker goes down and we did not ask it to, it's not
            # good: we may believe we have a worker when it's not true.
            # So we del it
if not w.is_alive():
logger.warning("[%s] The worker %s goes down unexpectedly!", self.name, w.id)
# Terminate immediately
w.terminate()
w.join(timeout=1)
w_to_del.append(w.id)
# OK, now really del workers from queues
        # And requeue the actions they were managing
for id in w_to_del:
w = self.workers[id]
# Del the queue of the module queue
del self.q_by_mod[w.module_name][w.id]
for sched_id in self.schedulers:
sched = self.schedulers[sched_id]
for a in sched['actions'].values():
if a.status == 'queue' and a.worker_id == id:
# Got a check that will NEVER return if we do not
# restart it
self.assign_to_a_queue(a)
            # So now we can really forget it
del self.workers[id]
# Here we create new workers if the queue load (len of verifs) is too long
def adjust_worker_number_by_load(self):
to_del = []
logger.debug("[%s] Trying to adjust worker number."
" Actual number : %d, min per module : %d, max per module : %d",
self.name, len(self.workers), self.min_workers, self.max_workers)
# I want at least min_workers by module then if I can, I add worker for load balancing
for mod in self.q_by_mod:
# At least min_workers
while len(self.q_by_mod[mod]) < self.max_workers:
try:
self.create_and_launch_worker(module_name=mod)
                # Maybe this module is not a true worker one;
                # if so, just delete it from q_by_mod
except NotWorkerMod:
to_del.append(mod)
break
"""
# Try to really adjust load if necessary
if self.get_max_q_len(mod) > self.max_q_size:
if len(self.q_by_mod[mod]) >= self.max_workers:
logger.info("Cannot add a new %s worker, even if load is high. "
"Consider changing your max_worker parameter") % mod
else:
try:
self.create_and_launch_worker(module_name=mod)
# Maybe this modules is not a true worker one.
# if so, just delete if from q_by_mod
except NotWorkerMod:
to_del.append(mod)
"""
for mod in to_del:
logger.debug("[%s] The module %s is not a worker one, "
"I remove it from the worker list", self.name, mod)
del self.q_by_mod[mod]
# TODO: if len(workers) > 2*wish, maybe we can kill a worker?
# Get the Queue() from an action by looking at which module
# it wants with a round robin way to scale the load between
# workers
def _got_queue_from_action(self, a):
        # get the module name; if not set, use 'fork'
mod = getattr(a, 'module_type', 'fork')
queues = list(self.q_by_mod[mod].items())
# Maybe there is no more queue, it's very bad!
if not queues:
return (0, None)
        # if not, compute a round-robin index to pick a queue based
        # on the action id
rr_idx = a.id % len(queues)
(i, q) = queues[rr_idx]
# return the id of the worker (i), and its queue
return (i, q)
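    # Round-robin sketch (hypothetical values): with 3 'fork' queues and an
    # action whose id is 7, rr_idx = 7 % 3 = 1, so the action lands in the
    # second worker queue; consecutive ids spread evenly across workers.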
# Add a list of actions to our queues
def add_actions(self, lst, sched_id):
for a in lst:
# First we look if we do not already have it, if so
# do nothing, we are already working!
if a.id in self.schedulers[sched_id]['actions']:
continue
a.sched_id = sched_id
a.status = 'queue'
self.assign_to_a_queue(a)
# Take an action and put it into one queue
def assign_to_a_queue(self, a):
msg = Message(id=0, type='Do', data=a)
(i, q) = self._got_queue_from_action(a)
# Tag the action as "in the worker i"
a.worker_id = i
if q is not None:
q.put(msg)
# Wrapper function for the real function
def get_new_actions(self):
_t = time.time()
self.do_get_new_actions()
_type = self.__class__.my_type
statsmgr.timing('core.%s.get-new-actions' % _type, time.time() - _t,
'perf')
# We get new actions from schedulers, we create a Message and we
# put it in the s queue (from master to slave)
# REF: doc/shinken-action-queues.png (1)
def do_get_new_actions(self):
# Here are the differences between a
# poller and a reactionner:
# Poller will only do checks,
# reactionner do actions (notif + event handlers)
do_checks = self.__class__.do_checks
do_actions = self.__class__.do_actions
        # We check for new checks in each scheduler and put the result in new_checks
count = 0
for sched_id in self.schedulers:
sched = self.schedulers[sched_id]
            # If sched is not active, I skip it
if not sched['active']:
continue
try:
try:
con = sched['con']
except KeyError:
con = None
if con is not None: # None = not initialized
# OK, go for it :)
                    # Before asking for a call that can be long, do a simple ping to be sure it is alive
con.get('ping')
args = {
'do_checks': do_checks,
'do_actions': do_actions,
'poller_tags': ",".join(self.poller_tags),
'reactionner_tags': ",".join(self.reactionner_tags),
'worker_name': self.name,
'module_types': ",".join(self.q_by_mod.keys()),
}
slots = self.get_available_slots()
if slots is not None:
args['max_actions'] = slots
raw = con.get('get_checks', args, wait='long')
actions = deserialize(raw)
logger.debug("Ask actions to %d, got %d", sched_id, len(actions))
# We 'tag' them with sched_id and put into queue for workers
# REF: doc/shinken-action-queues.png (2)
self.add_actions(actions, sched_id)
count += len(actions)
else: # no con? make the connection
self.pynag_con_init(sched_id)
            # Ok, con is unknown, so we create it.
            # Or maybe the connection was lost, so we recreate it
except (HTTPException, KeyError) as exp:
logger.debug('get_new_actions exception:: %s,%s ', type(exp), exp)
self.pynag_con_init(sched_id)
            # the scheduler may not be initialized yet,
            # or may not have checks
except AttributeError as exp:
logger.debug('get_new_actions exception:: %s,%s ', type(exp), exp)
# What the F**k? We do not know what happened,
# log the error message if possible.
except Exception as exp:
logger.error("A satellite raised an unknown exception: %s (%s)", exp, type(exp))
raise
_type = self.__class__.my_type
statsmgr.incr('core.%s.actions.in' % _type, count, 'queue')
    # Returns the maximum number of actions a satellite may accept and
    # ask for from the scheduler.
def get_available_slots(self):
# We limit the maximum number of actions to q_factor times the number of
# allowed concurrent processes.
if self.max_q_size > 0:
slots = self.max_q_size
elif self.q_factor > 0:
slots = self.get_workers_count() * self.processes_by_worker
slots *= self.q_factor
else:
# No limits
return None
actions = self.get_actions_queue_len()
return max(0, slots - actions)
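    # Worked example (hypothetical settings): with max_q_size == 0,
    # q_factor == 3, 4 workers and processes_by_worker == 256, slots is
    # 4 * 256 * 3 = 3072; if 1000 actions are already queued, we ask the
    # scheduler for at most 2072 more.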
# Returns the total number of elements contained in all the workers queues
def get_actions_queue_len(self):
actions = 0
for mod in self.q_by_mod:
for q in self.q_by_mod[mod].values():
actions += q.qsize()
return actions
    # Returns the number of registered workers
def get_workers_count(self):
return len(self.workers)
    # On android we use a plain Queue, and a manager Queue otherwise
def is_returns_queue_empty(self):
return self.returns_queue.empty()
    # On android we use a plain Queue, and a manager Queue otherwise
def get_returns_queue_len(self):
return self.returns_queue.qsize()
    # On android we use a plain Queue, and a manager Queue otherwise
def get_returns_queue_item(self):
return self.returns_queue.get()
    # An arbiter asks us to wait for a new conf, so we must clean
    # all the mess we made, and close modules too
def clean_previous_run(self):
# Clean all lists
self.schedulers.clear()
del self.broks[:]
        with self.external_commands_lock:
            self.external_commands = []
def do_loop_turn(self):
logger.debug("Loop turn")
# Maybe the arbiter ask us to wait for a new conf
# If true, we must restart all...
if self.cur_conf is None:
# Clean previous run from useless objects
# and close modules
self.clean_previous_run()
self.wait_for_initial_conf()
# we may have been interrupted or so; then
# just return from this loop turn
if not self.new_conf:
return
self.setup_new_conf()
        # Now we check if the arbiter speaks to us through the http_daemon.
        # If so, we listen to it.
        # When it pushes a conf, we reinit the connections.
        # Sleep while waiting for a new conf :)
# TODO: manage the diff again.
while self.timeout > 0:
begin = time.time()
self.watch_for_new_conf(self.timeout)
end = time.time()
if self.new_conf:
if self.graceful_enabled and self.switch_process() is True:
# Child successfully spawned, we're exiting
return
self.setup_new_conf()
self.timeout = self.timeout - (end - begin)
logger.debug(" ======================== ")
self.timeout = self.polling_interval
# Check if zombies workers are among us :)
# If so: KILL THEM ALL!!!
self.check_and_del_zombie_workers()
# But also modules
self.check_and_del_zombie_modules()
# Print stats for debug
for sched_id in self.schedulers:
sched = self.schedulers[sched_id]
for mod in self.q_by_mod:
                # In workers we've got actions sent to the queue - queue size
for (i, q) in self.q_by_mod[mod].items():
logger.debug("[%d][%s][%s] Stats: Workers:%d (Queued:%d TotalReturnWait:%d)",
sched_id, sched['name'], mod,
i, q.qsize(), self.get_returns_queue_len())
        # Before returning or getting new actions, see how we manage
        # old ones: are they still in the queue(s)? If so, we
        # must wait more, or at least have more workers
_type = self.__class__.my_type
wait_ratio = self.wait_ratio.get_load()
total_q = 0
for mod in self.q_by_mod:
for q in self.q_by_mod[mod].values():
total_q += q.qsize()
if total_q != 0 and wait_ratio < 2 * self.polling_interval:
logger.debug("I decide to up wait ratio")
self.wait_ratio.update_load(wait_ratio * 2)
# self.wait_ratio.update_load(self.polling_interval)
else:
            # Go back to self.polling_interval on a normal run; if wait_ratio
            # was > 2*self.polling_interval,
            # this brings it back near 2, because if < 2 it goes up :)
self.wait_ratio.update_load(self.polling_interval)
wait_ratio = self.wait_ratio.get_load()
logger.debug("Wait ratio: %f", wait_ratio)
statsmgr.gauge('core.%s.wait-ratio' % _type, wait_ratio, 'queue')
# We can wait more than 1s if needed,
# no more than 5s, but no less than 1
timeout = self.timeout * wait_ratio
timeout = max(self.polling_interval, timeout)
self.timeout = min(5 * self.polling_interval, timeout)
statsmgr.gauge('core.%s.timeout' % _type, self.timeout, 'queue')
# Maybe we do not have enough workers, we check for it
# and launch the new ones if needed
self.adjust_worker_number_by_load()
# Manage all messages we've got in the last timeout
# for queue in self.return_messages:
self.get_workers_results()
        # If we are passive, we do not initiate getting checks
        # or returning results
if not self.passive:
# Now we can get new actions from schedulers
self.get_new_actions()
# We send all finished checks
# REF: doc/shinken-action-queues.png (6)
self.manage_returns()
# Get objects from our modules that are not worker based
self.get_objects_from_from_queues()
# Say to modules it's a new tick :)
self.hook_point('tick')
        # Checks that memory consumption did not exceed the allowed threshold
self.check_memory_usage()
def get_workers_results(self):
while not self.is_returns_queue_empty():
item = self.get_returns_queue_item()
self.manage_action_return(item)
# Do this satellite (poller or reactionner) post "daemonize" init:
# we must register our interfaces for 3 possible callers: arbiter,
# schedulers or brokers.
def do_post_daemon_init(self):
# And we register them
self.uri2 = self.http_daemon.register(self.interface)
self.uri3 = self.http_daemon.register(self.brok_interface)
self.uri4 = self.http_daemon.register(self.scheduler_interface)
self.uri5 = self.http_daemon.register(self.istats)
# self.s = Queue() # Global Master -> Slave
# We can open the Queue for fork AFTER
self.q_by_mod['fork'] = {}
        # Under Android, we do not have the multiprocessing lib,
        # so we use the standard threading Queue;
        # with multiprocessing we also use a Queue(), it's just
        # not the same one
if is_android:
self.returns_queue = Queue()
else:
self.returns_queue = self.manager.Queue()
# For multiprocess things, we should not have
# socket timeouts.
import socket
socket.setdefaulttimeout(None)
# Setup the new received conf from arbiter
def setup_new_conf(self):
conf = self.new_conf
logger.debug("[%s] Sending us a configuration %s", self.name, conf)
self.cur_conf = conf
g_conf = conf['global']
# Got our name from the globals
if 'poller_name' in g_conf:
name = g_conf['poller_name']
service = 'poller'
elif 'reactionner_name' in g_conf:
name = g_conf['reactionner_name']
service = 'reactionner'
else:
name = 'Unnamed satellite'
service = 'unknown'
self.name = name
# kernel.io part
self.api_key = g_conf['api_key']
self.secret = g_conf['secret']
self.http_proxy = g_conf['http_proxy']
# local statsd
self.statsd_host = g_conf['statsd_host']
self.statsd_port = g_conf['statsd_port']
self.statsd_prefix = g_conf['statsd_prefix']
self.statsd_enabled = g_conf['statsd_enabled']
self.statsd_interval = g_conf['statsd_interval']
self.statsd_types = g_conf['statsd_types']
self.statsd_pattern = g_conf['statsd_pattern']
self.harakiri_threshold = parse_memory_expr(g_conf['harakiri_threshold'])
if self.harakiri_threshold is not None:
self.raw_conf = self.new_conf
else:
self.raw_conf = None
self.new_conf = None
if self.aggressive_memory_management:
free_memory()
# we got a name, we can now say it to our statsmgr
statsmgr.register(self, self.name, service,
api_key=self.api_key,
secret=self.secret,
http_proxy=self.http_proxy,
statsd_host=self.statsd_host,
statsd_port=self.statsd_port,
statsd_prefix=self.statsd_prefix,
statsd_enabled=self.statsd_enabled,
statsd_interval=self.statsd_interval,
statsd_types=self.statsd_types,
statsd_pattern=self.statsd_pattern)
self.passive = g_conf['passive']
if self.passive:
logger.info("[%s] Passive mode enabled.", self.name)
# If we've got something in the schedulers, we do not want it anymore
for sched_id in conf['schedulers']:
already_got = False
            # We may already have this conf id, but with another address
if sched_id in self.schedulers:
new_addr = conf['schedulers'][sched_id]['address']
old_addr = self.schedulers[sched_id]['address']
new_port = conf['schedulers'][sched_id]['port']
old_port = self.schedulers[sched_id]['port']
                # Both must be the same for it to count as already got :)
if new_addr == old_addr and new_port == old_port:
already_got = True
if already_got:
logger.info("[%s] We already got the conf %d (%s)",
self.name, sched_id, conf['schedulers'][sched_id]['name'])
wait_homerun = self.schedulers[sched_id]['wait_homerun']
actions = self.schedulers[sched_id]['actions']
s = conf['schedulers'][sched_id]
self.schedulers[sched_id] = s
if s['name'] in g_conf['satellitemap']:
s.update(g_conf['satellitemap'][s['name']])
proto = 'http'
if s['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, s['address'], s['port'])
self.schedulers[sched_id]['uri'] = uri
if already_got:
self.schedulers[sched_id]['wait_homerun'] = wait_homerun
self.schedulers[sched_id]['actions'] = actions
else:
self.schedulers[sched_id]['wait_homerun'] = []
self.schedulers[sched_id]['actions'] = {}
self.schedulers[sched_id]['running_id'] = 0
self.schedulers[sched_id]['active'] = s['active']
self.schedulers[sched_id]['timeout'] = s['timeout']
self.schedulers[sched_id]['data_timeout'] = s['data_timeout']
# Do not connect if we are a passive satellite
if not self.passive and not already_got:
# And then we connect to it :)
self.pynag_con_init(sched_id)
        # Now the limit part; 0 means: the number of cpus of this machine :)
        # If not available, use 4 (modern hardware)
self.max_workers = g_conf['max_workers']
if self.max_workers == 0 and not is_android:
try:
self.max_workers = multiprocessing.cpu_count()
except NotImplementedError:
self.max_workers = 4
logger.info("[%s] Using max workers: %s", self.name, self.max_workers)
self.min_workers = g_conf['min_workers']
if self.min_workers == 0 and not is_android:
try:
self.min_workers = multiprocessing.cpu_count()
except NotImplementedError:
self.min_workers = 4
logger.info("[%s] Using min workers: %s", self.name, self.min_workers)
self.processes_by_worker = g_conf['processes_by_worker']
self.max_q_size = g_conf['max_q_size']
self.q_factor = g_conf['q_factor']
self.results_batch = g_conf['results_batch']
self.polling_interval = g_conf['polling_interval']
self.timeout = self.polling_interval
# Now set tags
# ['None'] is the default tags
self.poller_tags = g_conf.get('poller_tags', ['None'])
self.reactionner_tags = g_conf.get('reactionner_tags', ['None'])
self.max_plugins_output_length = g_conf.get('max_plugins_output_length', 8192)
        # Set the timezone given to us by the arbiter
use_timezone = g_conf['use_timezone']
if use_timezone != 'NOTSET':
logger.info("[%s] Setting our timezone to %s", self.name, use_timezone)
os.environ['TZ'] = use_timezone
time.tzset()
logger.info("We have our schedulers: %s", self.schedulers)
# Now manage modules
# TODO: check how to better handle this with modules_manager..
mods = g_conf['modules']
for module in mods:
# If we already got it, bypass
if module.module_type not in self.q_by_mod:
logger.debug("Add module object %s", module)
self.modules_manager.modules.append(module)
logger.info("[%s] Got module: %s ", self.name, module.module_type)
self.q_by_mod[module.module_type] = {}
    # Gets internal metrics, used both for statsd and for the stats structure
def get_internal_metrics(self):
_type = self.__class__.my_type
# Queues
metrics = [
('core.%s.mem' % _type, get_memory(), 'system'),
('core.%s.workers' % _type, len(self.workers), 'system'),
('core.%s.external-commands.queue' % _type,
len(self.external_commands), 'queue'),
('core.%s.broks.queue' % _type, len(self.broks), 'queue'),
('core.%s.results.queue' % _type, self.get_returns_queue_len(),
'queue'),
]
actions = self.get_actions_queue_len()
metrics.append(('core.%s.actions.queue' % _type, actions, 'queue'))
return metrics
    # The stats thread is asking us for a main structure of stats
def get_stats_struct(self):
now = int(time.time())
# call the daemon one
res = super(Satellite, self).get_stats_struct()
_type = self.__class__.my_type
res.update({'name': self.name, 'type': _type})
        # The receiver does not have a passive prop
if hasattr(self, 'passive'):
res['passive'] = self.passive
# metrics specific
metrics = res['metrics']
for metric in self.get_internal_metrics():
name, value, mtype = metric
            metrics.append((name, value, now, mtype))
return res
def main(self):
try:
self.load_config_file()
# Setting log level
logger.setLevel(self.log_level)
# Force the debug level if the daemon is said to start with such level
if self.debug:
logger.setLevel('DEBUG')
for line in self.get_header():
logger.info(line)
# Look if we are enabled or not. If ok, start the daemon mode
self.look_for_early_exit()
self.load_parent_config()
self.do_daemon_init_and_start()
self.do_post_daemon_init()
self.load_modules_manager()
# We wait for initial conf
self.wait_for_initial_conf()
            if not self.new_conf:  # we either have a big problem or were requested to shut down
return
self.setup_new_conf()
# We can load our modules now
self.modules_manager.set_modules(self.modules_manager.modules)
self.do_load_modules()
# And even start external ones
self.modules_manager.start_external_instances()
# Allocate Mortal Threads
for _ in range(1, self.max_workers):
to_del = []
for mod in self.q_by_mod:
try:
self.create_and_launch_worker(module_name=mod)
                    # Maybe this module is not a true worker one;
                    # if so, just delete it from q_by_mod
except NotWorkerMod:
to_del.append(mod)
for mod in to_del:
logger.debug("The module %s is not a worker one, "
"I remove it from the worker list", mod)
del self.q_by_mod[mod]
# Now main loop
self.do_mainloop()
except Exception:
self.print_unrecoverable(traceback.format_exc())
raise
| 47,014 | Python | .py | 1,044 | 33.953065 | 100 | 0.582035 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,480 | receiverlink.py | shinken-solutions_shinken/shinken/receiverlink.py |
'''shinken.receiverlink is deprecated. Please use shinken.objects.receiverlink now.'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.old_daemon_link import make_deprecated_daemon_link
from shinken.objects import receiverlink
make_deprecated_daemon_link(receiverlink)
| 321 | Python | .py | 5 | 62.4 | 86 | 0.836538 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,481 | stats.py | shinken-solutions_shinken/shinken/stats.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import threading
import time
import json
import hashlib
import base64
import socket
import traceback
from shinken.log import logger
# For old users python-crypto was not mandatory, don't break their setup
try:
from Crypto.Cipher import AES
except ImportError:
    logger.debug('Cannot find python lib crypto: export to kernel.shinken.io is not available')
AES = None
from shinken.http_client import HTTPClient, HTTPException
BLOCK_SIZE = 16
def pad(data):
pad = BLOCK_SIZE - len(data) % BLOCK_SIZE
return data + pad * chr(pad)
def unpad(padded):
pad = ord(padded[-1])
return padded[:-pad]
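# PKCS#7-style example (illustrative): a 13-byte payload gets 3 bytes of
# chr(3) appended by pad() to reach the 16-byte AES block size, and unpad()
# reads that last byte to strip the padding again.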
class Stats(object):
def __init__(self):
self.name = ''
self.type = ''
self.app = None
self.stats = {}
# There are two modes that are not exclusive
# first the kernel mode
self.api_key = ''
self.secret = ''
self.http_proxy = ''
self.con = HTTPClient(uri='http://kernel.shinken.io')
# then the statsd one
self.statsd_interval = 5
        # Default metric types; register() may override this from a
        # comma-separated string
        self.statsd_types = ['system', 'object', 'queue', 'perf']
        self.statsd_sock = None
        self.statsd_addr = None
self.statsd_pattern = None
self.name_cache = {}
def launch_reaper_thread(self):
self.reaper_thread = threading.Thread(None, target=self.reaper,
name='stats-reaper')
self.reaper_thread.daemon = True
self.reaper_thread.start()
def launch_harvester_thread(self):
self.harvester_thread = threading.Thread(None, target=self.harvester,
name='stats-harvester')
self.harvester_thread.daemon = True
self.harvester_thread.start()
def register(self, app, name, _type, api_key='', secret='', http_proxy='',
statsd_host='localhost', statsd_port=8125, statsd_prefix='shinken',
statsd_enabled=False, statsd_interval=5, statsd_types=None,
statsd_pattern=None):
self.app = app
self.name = name
self.type = _type
# kernel.io part
self.api_key = api_key
self.secret = secret
self.http_proxy = http_proxy
# local statsd part
self.statsd_host = statsd_host
self.statsd_port = statsd_port
self.statsd_prefix = statsd_prefix
self.statsd_enabled = statsd_enabled
self.statsd_interval = statsd_interval
if statsd_types is not None:
self.statsd_types = [t.strip() for t in statsd_types.split(",") if t.strip()]
if statsd_pattern is not None:
self.statsd_pattern = statsd_pattern
self.name_cache = {}
if self.statsd_enabled:
logger.debug('Loading statsd communication with %s:%s.%s',
self.statsd_host, self.statsd_port, self.statsd_prefix)
self.load_statsd()
# Also load the proxy if need
self.con.set_proxy(self.http_proxy)
# Tells whether statsd is enabled and stats should be sent
def is_statsd_enabled(self):
return (self.statsd_sock is not None and
self.name and
self.app is not None)
    # Tells whether the kernel.shinken.io exporter is enabled
def is_shinkenio_enabled(self):
return (self.name and self.api_key and self.secret)
# Let be crystal clear about why I don't use the statsd lib in python: it's crappy.
# how guys did you fuck this up to this point? django by default for the conf?? really?...
# So raw socket are far better here
def load_statsd(self):
try:
self.statsd_addr = (socket.gethostbyname(self.statsd_host), self.statsd_port)
self.statsd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except (socket.error, socket.gaierror) as exp:
logger.error('Cannot create statsd socket: %s' % exp)
return
    # Calculates a complete metric name from the prefix or the pattern.
    # The pattern has precedence over the prefix if defined.
# As formatting may involve extra CPU, we cache the calculated metric
# name to speed next calls.
def get_metric_name(self, name):
if name not in self.name_cache:
if self.statsd_pattern is not None:
try:
self.name_cache[name] = self.statsd_pattern.format(
metric=name,
name=self.name,
service=self.type)
except Exception as e:
logger.error("Failed to build metric name, check your "
"statsd_pattern parameter: %s" % e)
            elif self.statsd_prefix:
self.name_cache[name] = "%s.%s" % (self.statsd_prefix, name)
else:
self.name_cache[name] = name
return self.name_cache[name]
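    # Example (hypothetical config): with statsd_pattern
    # "shinken.{name}.{service}.{metric}", name "poller-1" and service
    # "poller", the metric "core.poller.mem" is sent as
    # "shinken.poller-1.poller.core.poller.mem"; the result is cached in
    # name_cache so the format() cost is paid only once per metric.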
# Sends a metric to statsd daemon
def send_metric(self, packet):
try:
self.statsd_sock.sendto(packet.encode("utf-8"), self.statsd_addr)
except (socket.error, socket.gaierror):
# cannot send? ok not a huge problem here and cannot
# log because it will be far too verbose :p
self.load_statsd()
# Updates internal stats for sending to kernel.shinken.io
def update_internal_stats(self, k, v):
_min, _max, nb, _sum = self.stats.get(k, (None, None, 0, 0))
nb += 1
_sum += v
if _min is None or v < _min:
_min = v
if _max is None or v > _max:
_max = v
self.stats[k] = (_min, _max, nb, _sum)
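    # Aggregation sketch (hypothetical values): two successive calls with
    # v=3 then v=5 for the same key leave self.stats[k] == (3, 5, 2, 8),
    # i.e. (min, max, count, sum), from which the reaper derives avg = 4.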
# Will increment a counter key
def incr(self, k, v, t):
if self.statsd_sock and self.name and t in self.statsd_types:
name = self.get_metric_name(k)
self.update_internal_stats(k, v)
packet = '%s:%d|c' % (name, v)
self.send_metric(packet)
# Will send a gauge value
def gauge(self, k, v, t):
if self.statsd_sock and self.name and t in self.statsd_types:
name = self.get_metric_name(k)
self.update_internal_stats(k, v)
packet = '%s:%d|g' % (name, v)
self.send_metric(packet)
    # Will send a timing value for a key (v is given in seconds)
def timing(self, k, v, t):
if self.statsd_sock and self.name and t in self.statsd_types:
name = self.get_metric_name(k)
self.update_internal_stats(k, v)
# beware, we are sending ms here, v is in s
packet = '%s:%d|ms' % (name, v * 1000)
self.send_metric(packet)
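    # Wire-format sketch (hypothetical metric names): the three helpers emit
    # plain statsd datagrams such as
    #     core.poller.broks.out:12|c      (incr, counter)
    #     core.poller.mem:532|g           (gauge)
    #     con-init.scheduler:45|ms        (timing, seconds converted to ms)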
def _encrypt(self, data):
m = hashlib.md5()
m.update(self.secret)
key = m.hexdigest()
m = hashlib.md5()
m.update(self.secret + key)
iv = m.hexdigest()
data = pad(data)
aes = AES.new(key, AES.MODE_CBC, iv[:16])
encrypted = aes.encrypt(data)
return base64.urlsafe_b64encode(encrypted)
def reaper(self):
while True:
try:
now = int(time.time())
stats = self.stats
self.stats = {}
if len(stats) != 0:
s = ', '.join(['%s:%s' % (k, v) for (k, v) in stats.items()])
                    # If we are not in an initialized daemon we skip: we cannot have a
                    # real name, and it would be painful to find the data afterwards
if not self.is_shinkenio_enabled():
time.sleep(60)
continue
metrics = []
for (k, e) in stats.items():
nk = '%s.%s.%s' % (self.type, self.name, k)
_min, _max, nb, _sum = e
_avg = float(_sum) / nb
                    # nb can't be 0 here, and _min/_max can't be None either
s = '%s.avg %f %d' % (nk, _avg, now)
metrics.append(s)
s = '%s.min %f %d' % (nk, _min, now)
metrics.append(s)
s = '%s.max %f %d' % (nk, _max, now)
metrics.append(s)
s = '%s.count %f %d' % (nk, nb, now)
metrics.append(s)
# logger.debug('REAPER metrics to send %s (%d)' % (metrics, len(str(metrics))) )
# get the inner data for the daemon
struct = self.app.get_stats_struct()
for metric in struct['metrics']:
name, val, ts, _ = metric
if "." in str(val):
# Value is a float
metrics.append('%s %f %d' % (name, val, ts))
else:
# Value is an integer
metrics.append('%s %d %d' % (name, val, ts))
struct['metrics'] = metrics
# logger.debug('REAPER whole struct %s' % struct)
j = json.dumps(struct)
if AES is not None and self.secret != '':
logger.debug('Stats PUT to kernel.shinken.io/api/v1/put/ with %s %s' % (
self.api_key, self.secret))
                    # the message is padded to a multiple of 16 bytes (the AES block size)
encrypted_text = self._encrypt(j)
try:
self.con.put('/api/v1/put/?api_key=%s' % (
self.api_key), encrypted_text)
except HTTPException as exp:
logger.error('Stats REAPER cannot put to the metric server %s' % exp)
except Exception as e:
logger.error("Reaper: %s", e)
logger.debug(traceback.format_exc())
time.sleep(60)
def harvester(self):
while True:
try:
if not self.is_statsd_enabled():
time.sleep(self.statsd_interval)
continue
# During daemon config load operation, some attributes may not
# have been initialized when we query stats_struct, so we
# ignore AttributeError.
try:
metrics = self.app.get_internal_metrics()
except AttributeError:
time.sleep(self.statsd_interval)
continue
for metric in metrics:
name, val, _type = metric
self.gauge(name, val, _type)
except Exception as e:
logger.error("Harvester: %s", e)
logger.debug(traceback.format_exc())
time.sleep(self.statsd_interval)
statsmgr = Stats()
| 11,768 | Python | .py | 269 | 32.04461 | 102 | 0.559916 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,482 | property.py | shinken-solutions_shinken/shinken/property.py |
#!/usr/bin/env python
# -*- mode: python ; coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from shinken.util import to_float, to_split, to_char, to_int, unique_value, list_split
import logging
__all__ = ['UnusedProp', 'BoolProp', 'IntegerProp', 'FloatProp',
'CharProp', 'StringProp', 'ListProp',
'FULL_STATUS', 'CHECK_RESULT']
# Suggestion
# Is this useful? see above
__author__ = "Hartmut Goebel <h.goebel@goebel-consult.de>"
__copyright__ = "Copyright 2010-2011 by Hartmut Goebel <h.goebel@goebel-consult.de>"
__licence__ = "GNU Affero General Public License version 3 (AGPL v3)"
FULL_STATUS = 'full_status'
CHECK_RESULT = 'check_result'
none_object = object()
class Property(object):
"""Baseclass of all properties.
Same semantic for all subclasses (except UnusedProp): The property
is required if, and only if, the default value is `None`.
"""
def __init__(self, default=none_object, class_inherit=None,
unmanaged=False, help='', no_slots=False,
fill_brok=None, conf_send_preparation=None,
brok_transformation=None, retention=False,
retention_preparation=None, to_send=False,
override=False, managed=True, split_on_coma=True, merging='uniq'):
"""
`default`: default value to be used if this property is not set.
If default is None, this property is required.
`class_inherit`: List of 2-tuples, (Service, 'blabla'): must
set this property to the Service class with name
blabla. if (Service, None): must set this property
to the Service class with same name
`unmanaged`: ....
`help`: usage text
`no_slots`: do not take this property for __slots__
`fill_brok`: if set, send to broker. There are two categories:
FULL_STATUS for initial and update status,
CHECK_RESULT for check results
`retention`: if set, we will save this property in the retention files
        `retention_preparation`: function; if set, the value goes through
                    this function before being saved to the retention data
        `split_on_coma`: whether a list property value should be split on the
                    comma delimiter (set it to False when values contain
                    commas that must be kept).
Only for the initial call:
        conf_send_preparation: if set, the property is passed through this
                       function before sending. It's used to 'flatten' some
                       dangerous properties, like realms, that are too
                       'linked' to be sent as-is.
        brok_transformation: if set, the function is called with the
                       value of the property when flattening
                       data is necessary (like realm_name instead of
                       the realm object).
        override: for the scheduler; whether the property must override the
                       value of the configuration we send it
        managed: whether the property is managed by Shinken; set to False for
                       properties that exist in Nagios but are not managed here
        merging: when merging duplicate definitions, whether to keep only one
                       value ('uniq') or to join the values with commas ('join')
"""
self.default = default
self.has_default = (default is not none_object)
self.required = not self.has_default
self.class_inherit = class_inherit or []
self.help = help or ''
self.unmanaged = unmanaged
self.no_slots = no_slots
self.fill_brok = fill_brok or []
self.conf_send_preparation = conf_send_preparation
self.brok_transformation = brok_transformation
self.retention = retention
self.retention_preparation = retention_preparation
self.to_send = to_send
self.override = override
self.managed = managed
self.unused = False
self.merging = merging
self.split_on_coma = split_on_coma
def pythonize(self, val):
return val
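# Illustrative declaration (a sketch mirroring real usage further down in
# this document, e.g. SatelliteLink.properties):
#   'timeout': IntegerProp(default=3, fill_brok=['full_status'])
# Because a default is provided the property is optional; omitting `default`
# entirely makes it required.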
class UnusedProp(Property):
    """An unused Property. These are typically used by Nagios but
    are no longer useful/used by Shinken.
    This is just to warn the user that the option used is no longer
    supported in Shinken.
    """
    # Since this property is not used, there is no use for other
    # parameters than 'text'.
    # 'text': optional usage text; if present, it is printed to explain
    # why the option is no longer useful
def __init__(self, text=None):
if text is None:
text = ("This parameter is no longer useful in the "
"Shinken architecture.")
self.text = text
self.has_default = False
self.class_inherit = []
self.unused = True
self.managed = True
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
class BoolProp(Property):
"""A Boolean Property.
    Boolean values are case-insensitive: '0', 'false', 'no', 'off' map to
    False, and '1', 'true', 'yes', 'on' map to True.
"""
@staticmethod
def pythonize(val):
if isinstance(val, bool):
return val
val = unique_value(val).lower()
        if val in _boolean_states:
return _boolean_states[val]
else:
raise PythonizeError("Cannot convert '%s' to a boolean value" % val)
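# Example behaviour (sketch):
#   BoolProp.pythonize('Yes')   -> True
#   BoolProp.pythonize('0')     -> False
#   BoolProp.pythonize('maybe') -> raises PythonizeError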
class IntegerProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_int(val)
class FloatProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_float(val)
class CharProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_char(val)
class StringProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return val
class PathProp(StringProp):
""" A string property representing a "running" (== VAR) file path """
class ConfigPathProp(StringProp):
""" A string property representing a config file path """
class ListProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
if isinstance(val, list):
return [s.strip() for s in list_split(val, self.split_on_coma)]
else:
return [s.strip() for s in to_split(val, self.split_on_coma)]
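# Example behaviour (a sketch, assuming to_split's usual comma splitting):
#   ListProp().pythonize('a, b,c') -> ['a', 'b', 'c']
#   with split_on_coma=False the string is kept as a single list element.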
class LogLevelProp(StringProp):
""" A string property representing a logging level """
def pythonize(self, val):
val = unique_value(val)
return logging.getLevelName(val)
class DictProp(Property):
def __init__(self, elts_prop=None, *args, **kwargs):
"""Dictionary of values.
If elts_prop is not None, must be a Property subclass
All dict values will be casted as elts_prop values when pythonized
elts_prop = Property of dict members
"""
super(DictProp, self).__init__(*args, **kwargs)
if elts_prop is not None and not issubclass(elts_prop, Property):
raise TypeError("DictProp constructor only accept Property"
"sub-classes as elts_prop parameter")
if elts_prop is not None:
self.elts_prop = elts_prop()
def pythonize(self, val):
val = unique_value(val)
        def split(kv):
            m = re.match(r"^\s*(\S+)\s*=\s*(\S+)\s*$", kv)
            if m is None:
                raise ValueError
            return (
                m.group(1),
                m.group(2) if self.elts_prop is None
                else self.elts_prop.pythonize(m.group(2))
            )
if val is None:
return (dict())
if self.elts_prop is None:
return val
        # val is in the form "key1=addr:[port],key2=addr:[port],..."
        return dict(split(kv) for kv in to_split(val))
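# Example behaviour (sketch, matching the satellitemap property used by
# SatelliteLink further down in this document):
#   DictProp(elts_prop=AddrProp).pythonize('broker-1=10.0.0.1:7772')
#   -> {'broker-1': {'address': '10.0.0.1', 'port': 7772}}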
class AddrProp(Property):
"""Address property (host + port)"""
def pythonize(self, val):
"""
i.e: val = "192.168.10.24:445"
NOTE: port is optional
"""
val = unique_value(val)
        m = re.match(r"^([^:]*)(?::(\d+))?$", val)
if m is None:
raise ValueError
addr = {'address': m.group(1)}
if m.group(2) is not None:
addr['port'] = int(m.group(2))
return addr
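# Example behaviour (sketch):
#   AddrProp().pythonize('192.168.10.24:445') -> {'address': '192.168.10.24', 'port': 445}
#   AddrProp().pythonize('localhost')         -> {'address': 'localhost'}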
class ToGuessProp(Property):
"""Unknown property encountered while parsing"""
@staticmethod
def pythonize(val):
if isinstance(val, list) and len(set(val)) == 1:
# If we have a list with a unique value just use it
return val[0]
else:
            # Well, we can't choose to remove something.
return val
class IntListProp(ListProp):
"""Integer List property"""
def pythonize(self, val):
val = super(IntListProp, self).pythonize(val)
try:
return [int(e) for e in val]
except ValueError as value_except:
raise PythonizeError(str(value_except))
class PythonizeError(Exception):
pass
| 10,461 | Python | .py | 240 | 34.825 | 90 | 0.623878 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,483 | safepickle.py | shinken-solutions_shinken/shinken/safepickle.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import sys
import io
# Unpickle while whitelisting classes and stripping all __reduce__ tricks,
# so we don't allow external code to be executed
PICKLE_SAFE = {
'copy_reg' : ['_reconstructor'],
'__builtin__' : ['object', 'set'],
'builtins' : ['object', 'set'],
    # Be sure to whitelist everything we need for pickle.loads so a user cannot exploit other modules (like bottle ^^)
'shinken.acknowledge' : ['Acknowledge'],
'shinken.basemodule' : ['BaseModule'],
'shinken.borg' : ['Borg'],
'shinken.check' : ['Check'],
'shinken.brok' : ['Brok'],
'shinken.commandcall' : ['CommandCall'],
'shinken.comment' : ['Comment'],
'shinken.complexexpression' : ['ComplexExpressionNode'],
'shinken.contactdowntime' : ['ContactDowntime'],
'shinken.daterange' : ['Timerange',
'Daterange',
'CalendarDaterange',
'StandardDaterange',
'MonthWeekDayDaterange',
'MonthDateDaterange',
'WeekDayDaterange',
'MonthDayDaterange',
],
'shinken.dependencynode' : ['DependencyNode'],
'shinken.downtime' : ['Downtime'],
'shinken.discovery.discoverymanager' : ['DiscoveredHost'],
'shinken.eventhandler' : ['EventHandler'],
'shinken.external_command' : ['ExternalCommand'],
'shinken.graph' : ['Graph'],
'shinken.message' : ['Message'],
'shinken.modulesctx' : ['ModulesContext'],
'shinken.modulesmanager' : ['ModulesManager'],
'shinken.notification' : ['Notification'],
'shinken.objects.command' : ['DummyCommand', 'Command', 'Commands'],
'shinken.objects.arbiterlink' : ['ArbiterLink', 'ArbiterLinks'],
'shinken.objects.businessimpactmodulation': ['Businessimpactmodulation', 'Businessimpactmodulations'],
'shinken.objects.brokerlink' : ['BrokerLink', 'BrokerLinks'],
'shinken.objects.checkmodulation' : ['CheckModulation', 'CheckModulations'],
'shinken.objects.config' : ['Config'],
'shinken.objects.contact' : ['Contact', 'Contacts'],
'shinken.objects.contactgroup' : ['Contactgroup', 'Contactgroups'],
'shinken.objects.discoveryrule' : ['Discoveryrule', 'Discoveryrules'],
'shinken.objects.discoveryrun' : ['Discoveryrun', 'Discoveryruns'],
'shinken.objects.escalation' : ['Escalation', 'Escalations'],
'shinken.objects.hostdependency' : ['Hostdependency', 'Hostdependencies'],
'shinken.objects.host' : ['Host', 'Hosts'],
'shinken.objects.hostescalation' : ['Hostescalation', 'Hostescalations'],
'shinken.objects.itemgroup' : ['Itemgroup', 'Itemgroups'],
'shinken.objects.hostgroup' : ['Hostgroup', 'Hostgroups'],
'shinken.objects.hostextinfo' : ['HostExtInfo', 'HostsExtInfo'],
'shinken.objects.item' : ['Item', 'Items'],
'shinken.objects.macromodulation' : ['MacroModulation', 'MacroModulations'],
'shinken.objects.matchingitem' : ['MatchingItem'],
'shinken.objects.pack' : ['Pack', 'Packs'],
'shinken.objects.notificationway' : ['NotificationWay', 'NotificationWays'],
'shinken.objects.module' : ['Module', 'Modules'],
'shinken.objects.pollerlink' : ['PollerLink', 'PollerLinks'],
'shinken.objects.reactionnerlink' : ['ReactionnerLink', 'ReactionnerLinks'],
'shinken.objects.realm' : ['Realm', 'Realms'],
'shinken.objects.receiverlink' : ['ReceiverLink', 'ReceiverLinks'],
'shinken.objects.resultmodulation' : ['Resultmodulation', 'Resultmodulations'],
'shinken.objects.satellitelink' : ['SatelliteLink', 'SatelliteLinks'],
'shinken.objects.schedulingitem' : ['SchedulingItem'],
'shinken.objects.schedulerlink' : ['SchedulerLink', 'SchedulerLinks'],
'shinken.objects.service' : ['Service', 'Services'],
'shinken.objects.servicedependency' : ['Servicedependency', 'Servicedependencies'],
'shinken.objects.serviceescalation' : ['Serviceescalation', 'Serviceescalations'],
'shinken.objects.serviceextinfo' : ['ServiceExtInfo', 'ServicesExtInfo'],
'shinken.objects.servicegroup' : ['Servicegroup', 'Servicegroups'],
'shinken.objects.timeperiod' : ['Timeperiod', 'Timeperiods'],
'shinken.objects.trigger' : ['Trigger', 'Triggers'],
}
def find_class(module, name):
if module not in PICKLE_SAFE:
raise ValueError('Attempting to unpickle unsafe module %s' % module)
__import__(module)
mod = sys.modules[module]
if name not in PICKLE_SAFE[module]:
raise ValueError('Attempting to unpickle unsafe class %s/%s' %
(module, name))
return getattr(mod, name)
if six.PY2:
    # This is a dirty hack for python2, as it's impossible to subclass
    # Unpickler from the cPickle module.
    # This implementation mimics the Unpickler interface but uses an
    # external Unpickler instance
import cPickle
class SafeUnpickler(object):
def __init__(self, _bytes):
self._bytes = _bytes
def load(self):
pickle_obj = cPickle.Unpickler(self._bytes)
pickle_obj.find_global = find_class
return pickle_obj.load()
else:
import pickle
class SafeUnpickler(pickle.Unpickler):
find_class = staticmethod(find_class)
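# A small convenience wrapper (an illustrative addition, not part of the
# original API): both the py2 and py3 SafeUnpickler variants accept a
# file-like object, so untrusted bytes can be deserialized with:
def safe_loads(raw_bytes):
    # Classes outside PICKLE_SAFE raise ValueError instead of being imported
    return SafeUnpickler(io.BytesIO(raw_bytes)).load()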
| 7,392 | Python | .py | 125 | 51.32 | 109 | 0.569084 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,484 | brokerlink.py | shinken-solutions_shinken/shinken/brokerlink.py |
'''shinken.brokerlink is deprecated. Please use shinken.objects.brokerlink now.'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.old_daemon_link import make_deprecated_daemon_link
from shinken.objects import brokerlink
make_deprecated_daemon_link(brokerlink)
| 313 | Python | .py | 5 | 60.8 | 82 | 0.832237 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,485 | load.py | shinken-solutions_shinken/shinken/load.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import math
class Load(object):
"""This class is for having a easy Load calculation
without having to send value at regular interval
(but it's more efficient if you do this :) ) and without
having a list or other stuff. It's just an object, an update and a get
You can define m: the average for m minutes. The val is
the initial value. It's better if it's 0 but you can choose.
"""
def __init__(self, m=1, initial_value=0):
self.exp = 0 # first exp
self.m = m # Number of minute of the avg
self.last_update = 0 # last update of the value
self.val = initial_value # first value
def update_load(self, new_val, forced_interval=None):
        # The first call does not change the value; it just records
        # the start time in last_update.
        # If an interval is forced, we bypass the time bookkeeping
if not forced_interval and self.last_update == 0:
self.last_update = time.time()
return
now = time.time()
try:
if forced_interval:
diff = forced_interval
else:
diff = now - self.last_update
self.exp = 1 / math.exp(diff / (self.m * 60.0))
self.val = new_val + self.exp * (self.val - new_val)
self.last_update = now
        except OverflowError:  # if the system clock jumps without notice, we overflow :(
pass
except ZeroDivisionError: # do not care
pass
def get_load(self):
return self.val
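# A short worked example of the decay above (sketch): with m=1 and samples
# 5 seconds apart, exp = 1 / e**(5/60) ~= 0.92, so updating from val=0 with
# new_val=1 gives 1 + 0.92 * (0 - 1) ~= 0.08 -- the same exponentially
# weighted moving average idea as the UNIX load average.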
if __name__ == '__main__':
load = Load()
t = time.time()
for i in range(1, 300):
load.update_load(1)
print('[', int(time.time() - t), ']', load.get_load(), load.exp)
time.sleep(5)
| 2,767 | Python | .py | 67 | 35.328358 | 82 | 0.651284 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,486 | satellitelink.py | shinken-solutions_shinken/shinken/objects/satellitelink.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import time
import sys
from shinken.util import get_obj_name_two_args_and_void
from shinken.serializer import serialize, deserialize
from shinken.objects.item import Item, Items
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp, DictProp, AddrProp
from shinken.log import logger
from shinken.http_client import HTTPClient, HTTPException
class SatelliteLink(Item):
"""SatelliteLink is a common Class for link to satellite for
Arbiter with Conf Dispatcher.
"""
    # id = 0: each subclass will have its own id
properties = Item.properties.copy()
properties.update({
'address': StringProp(default='localhost', fill_brok=['full_status']),
'timeout': IntegerProp(default=3, fill_brok=['full_status']),
'data_timeout': IntegerProp(default=120, fill_brok=['full_status']),
'check_interval': IntegerProp(default=60, fill_brok=['full_status']),
'max_check_attempts': IntegerProp(default=3, fill_brok=['full_status']),
'spare': BoolProp(default=False, fill_brok=['full_status']),
'manage_sub_realms': BoolProp(default=True, fill_brok=['full_status']),
'manage_arbiters': BoolProp(default=False, fill_brok=['full_status'], to_send=True),
'modules': ListProp(default=[''], to_send=True, split_on_coma=True),
'polling_interval': IntegerProp(default=1, fill_brok=['full_status'], to_send=True),
'use_timezone': StringProp(default='NOTSET', to_send=True),
'realm': StringProp(default='', fill_brok=['full_status'],
brok_transformation=get_obj_name_two_args_and_void),
'satellitemap': DictProp(default={}, elts_prop=AddrProp, to_send=True, override=True),
'use_ssl': BoolProp(default=False, fill_brok=['full_status']),
'hard_ssl_name_check': BoolProp(default=True, fill_brok=['full_status']),
'passive': BoolProp(default=False, fill_brok=['full_status'], to_send=True),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'con': StringProp(default=None),
'alive': BoolProp(default=True, fill_brok=['full_status']),
'broks': StringProp(default=[]),
        # the number of failed attempts
        'attempt': StringProp(default=0, fill_brok=['full_status']),
        # whether it can be reached on the network (not dead, timed out or in error)
'reachable': BoolProp(default=False, fill_brok=['full_status']),
'last_check': IntegerProp(default=0, fill_brok=['full_status']),
'managed_confs': StringProp(default={}),
})
def __init__(self, *args, **kwargs):
super(SatelliteLink, self).__init__(*args, **kwargs)
self.arb_satmap = {'address': '0.0.0.0', 'port': 0}
if hasattr(self, 'address'):
self.arb_satmap['address'] = self.address
if hasattr(self, 'port'):
try:
self.arb_satmap['port'] = int(self.port)
except Exception:
pass
def set_arbiter_satellitemap(self, satellitemap):
"""
arb_satmap is the satellitemap in current context:
- A SatelliteLink is owned by an Arbiter
- satellitemap attribute of SatelliteLink is the map
defined IN THE satellite configuration
        but for creating connections, we need to have the satellitemap of the Arbiter
"""
self.arb_satmap = {'address': self.address, 'port': self.port, 'use_ssl': self.use_ssl,
'hard_ssl_name_check': self.hard_ssl_name_check}
self.arb_satmap.update(satellitemap)
def create_connection(self):
self.con = HTTPClient(
address=self.arb_satmap['address'],
port=self.arb_satmap['port'],
timeout=self.timeout,
data_timeout=self.data_timeout,
use_ssl=self.use_ssl,
strong_ssl=self.hard_ssl_name_check
)
self.uri = self.con.uri
def put_conf(self, conf):
if self.con is None:
self.create_connection()
# Maybe the connection was not ok, bail out
if not self.con:
return False
try:
self.con.get('ping')
self.con.put('put_conf', serialize(conf), wait='long')
print("PUT CONF SUCESS", self.get_name())
return True
#except HTTPException as exp:
except Exception as exp:
self.con = None
logger.error("Failed sending configuration for %s: %s", self.get_name(), exp)
return False
# Get and clean all of our broks
def get_all_broks(self):
res = self.broks
self.broks = []
return res
# Set alive, reachable, and reset attempts.
# If we change state, raise a status brok update
def set_alive(self):
was_alive = self.alive
self.alive = True
self.attempt = 0
self.reachable = True
# We came from dead to alive
# so we must add a brok update
if not was_alive:
b = self.get_update_status_brok()
self.broks.append(b)
def set_dead(self):
was_alive = self.alive
self.alive = False
self.con = None
# We are dead now. Must raise
# a brok to say it
if was_alive:
logger.warning("Setting the satellite %s to a dead state.", self.get_name())
b = self.get_update_status_brok()
self.broks.append(b)
# Go in reachable=False and add a failed attempt
# if we reach the max, go dead
def add_failed_check_attempt(self, reason=''):
self.reachable = False
self.attempt += 1
self.attempt = min(self.attempt, self.max_check_attempts)
# Don't need to warn again and again if the satellite is already dead
if self.alive:
logger.warning("Add failed attempt to %s (%d/%d) %s",
self.get_name(), self.attempt, self.max_check_attempts, reason)
# check when we just go HARD (dead)
if self.attempt == self.max_check_attempts:
self.set_dead()
    # Update satellite info every self.check_interval seconds, so we
    # smooth arbiter actions down to just the useful ones
    # and do not cry over a little timeout
def update_infos(self):
# First look if it's not too early to ping
now = time.time()
since_last_check = now - self.last_check
if since_last_check < self.check_interval:
return
self.last_check = now
# We ping and update the managed list
self.ping()
self.update_managed_list()
# Update the state of this element
b = self.get_update_status_brok()
self.broks.append(b)
    # The element just got a new conf_id; we keep it in our list
    # because the satellite may be too busy to answer right now
def known_conf_managed_push(self, cfg_id, push_flavor):
self.managed_confs[cfg_id] = push_flavor
def ping(self):
logger.debug("Pinging %s", self.get_name())
try:
if self.con is None:
self.create_connection()
logger.debug(" (%s)", self.uri)
# If the connection failed to initialize, bail out
if self.con is None:
self.add_failed_check_attempt()
return
r = self.con.get('ping').decode("utf-8")
# Should return us pong string
if r == 'pong':
self.set_alive()
else:
self.add_failed_check_attempt()
except HTTPException as exp:
self.add_failed_check_attempt(reason=str(exp))
def wait_new_conf(self):
if self.con is None:
self.create_connection()
try:
r = self.con.get('wait_new_conf')
return True
except HTTPException as exp:
self.con = None
return False
    # To know if the satellite has a conf (magic_hash = None)
    # OR if it has THIS conf (magic_hash != None).
    # magic_hash is for arbiter checks only
def have_conf(self, magic_hash=None):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return False
try:
if magic_hash is None:
r = self.con.get('have_conf')
else:
r = self.con.get('have_conf', {'magic_hash': magic_hash})
print("have_conf RAW CALL", r, type(r))
if not isinstance(r, bool):
return False
return r
except HTTPException as exp:
self.con = None
return False
# To know if a receiver got a conf or not
def got_conf(self):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return False
try:
r = deserialize(self.con.get('got_conf'))
# Protect against bad return
if not isinstance(r, bool):
return False
return r
except HTTPException as exp:
self.con = None
return False
def remove_from_conf(self, sched_id):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return
try:
self.con.get('remove_from_conf', {'sched_id': sched_id})
return True
except HTTPException as exp:
self.con = None
return False
def update_managed_list(self):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
self.managed_confs = {}
return
try:
tab = deserialize(self.con.get('what_i_managed'))
print("[%s]What i managed raw value is %s" % (self.get_name(), tab))
            # Protect against bad return
            if not isinstance(tab, dict):
                logger.error("[%s] what_i_managed: bad return value %s",
                             self.get_name(), tab)
                self.con = None
                self.managed_confs = {}
                return
            # Ok, protect against json turning our integer keys into strings
            tab_cleaned = {}
            for (k, v) in tab.items():
                try:
                    tab_cleaned[int(k)] = v
                except ValueError:
                    logger.error("[%s] what_i_managed: bad key %s in return value %s",
                                 self.get_name(), k, tab)
# We can update our list now
self.managed_confs = tab_cleaned
        except HTTPException as exp:
            # A timeout is not a crime, put this case aside
            # TODO: fix the timeout part?
            self.con = None
            logger.warning("[%s] what_i_managed: got exception: %s %s %s",
                           self.get_name(), exp, type(exp), exp.__dict__)
            self.managed_confs = {}
    # Return True if the satellite says it manages this configuration
def do_i_manage(self, cfg_id, push_flavor):
# If not even the cfg_id in the managed_conf, bail out
if cfg_id not in self.managed_confs:
return False
# maybe it's in but with a false push_flavor. check it :)
return self.managed_confs[cfg_id] == push_flavor
def push_broks(self, broks):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return False
try:
# Always do a simple ping to avoid a LOOOONG lock
self.con.get('ping')
self.con.put('push_broks', serialize(broks), wait='long')
return True
except HTTPException as exp:
self.con = None
return False
def get_external_commands(self):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return []
try:
self.con.get('ping')
content = self.con.get('get_external_commands', wait='long')
#raw = zlib.decompress(base64.b64decode(content))
#raw = zlib.decompress(content)
commands = deserialize(content)
            # Protect against bad return
            if not isinstance(commands, list):
self.con = None
return []
return commands
except HTTPException as exp:
self.con = None
return []
except AttributeError:
self.con = None
return []
except:
self.con = None
return []
def prepare_for_conf(self):
self.cfg = {'global': {}, 'schedulers': {}, 'arbiters': {}}
properties = self.__class__.properties
for prop, entry in properties.items():
if entry.to_send:
self.cfg['global'][prop] = getattr(self, prop)
cls = self.__class__
# Also add global values
self.cfg['global']['api_key'] = cls.api_key
self.cfg['global']['secret'] = cls.secret
self.cfg['global']['http_proxy'] = cls.http_proxy
self.cfg['global']['statsd_host'] = cls.statsd_host
self.cfg['global']['statsd_port'] = cls.statsd_port
self.cfg['global']['statsd_prefix'] = cls.statsd_prefix
self.cfg['global']['statsd_enabled'] = cls.statsd_enabled
self.cfg['global']['statsd_interval'] = cls.statsd_interval
self.cfg['global']['statsd_types'] = cls.statsd_types
self.cfg['global']['statsd_pattern'] = cls.statsd_pattern
# Some parameters for satellites are not defined in the satellites conf
# but in the global configuration. We can pass them in the global
# property
def add_global_conf_parameters(self, params):
for prop in params:
self.cfg['global'][prop] = params[prop]
def get_my_type(self):
return self.__class__.my_type
    # Here for poller and reactionner. Schedulers have their own function
def give_satellite_cfg(self):
return {'port': self.port,
'address': self.address,
'use_ssl': self.use_ssl,
'hard_ssl_name_check': self.hard_ssl_name_check,
'name': self.get_name(),
'instance_id': self.id,
'active': True,
'passive': self.passive,
'poller_tags': getattr(self, 'poller_tags', []),
'reactionner_tags': getattr(self, 'reactionner_tags', []),
'api_key': self.__class__.api_key,
'secret': self.__class__.secret,
}
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if prop != 'realm':
if hasattr(self, prop):
res[prop] = getattr(self, prop)
for prop in cls.running_properties:
if prop != 'con':
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverted function of getstate
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
for prop in cls.running_properties:
if prop in state:
setattr(self, prop, state[prop])
# con needs to be explicitly set:
self.con = None
class SatelliteLinks(Items):
"""Please Add a Docstring to describe the class here"""
# name_property = "name"
# inner_class = SchedulerLink
# We must have a realm property, so we find our realm
def linkify(self, realms, modules):
self.linkify_s_by_p(realms)
self.linkify_s_by_plug(modules)
def linkify_s_by_p(self, realms):
for s in self:
p_name = s.realm.strip()
# If no realm name, take the default one
if p_name == '':
p = realms.get_default()
s.realm = p
else: # find the realm one
p = realms.find_by_name(p_name)
s.realm = p
# Check if what we get is OK or not
if p is not None:
s.register_to_my_realm()
else:
err = "The %s %s got a unknown realm '%s'" % \
(s.__class__.my_type, s.get_name(), p_name)
s.configuration_errors.append(err)
| 18,299 | Python | .py | 425 | 32.621176 | 100 | 0.577911 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,487 | hostdependency.py | shinken-solutions_shinken/shinken/objects/hostdependency.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.property import BoolProp, StringProp, ListProp
from shinken.log import logger
class Hostdependency(Item):
id = 0
my_type = 'hostdependency'
    # Example: service F (on host C) depends on service D (on host B)
    #   host_name                       Host B
    #   service_description             Service D
    #   dependent_host_name             Host C
    #   dependent_service_description   Service F
    #   execution_failure_criteria      o
    #   notification_failure_criteria   w,u
    #   inherits_parent                 1
    #   dependency_period               24x7
properties = Item.properties.copy()
properties.update({
'dependent_host_name': StringProp(),
'dependent_hostgroup_name': StringProp(default=''),
'host_name': StringProp(),
'hostgroup_name': StringProp(default=''),
'inherits_parent': BoolProp(default=False),
'execution_failure_criteria': ListProp(default=['n'], split_on_coma=True),
'notification_failure_criteria': ListProp(default=['n'], split_on_coma=True),
'dependency_period': StringProp(default='')
})
# Give a nice name output, for debugging purpose
# (debugging happens more often than expected...)
def get_name(self):
dependent_host_name = 'unknown'
if getattr(self, 'dependent_host_name', None):
dependent_host_name = getattr(
getattr(self, 'dependent_host_name'), 'host_name', 'unknown'
)
host_name = 'unknown'
if getattr(self, 'host_name', None):
host_name = getattr(getattr(self, 'host_name'), 'host_name', 'unknown')
return dependent_host_name + '/' + host_name
class Hostdependencies(Items):
inner_class = Hostdependency # use for know what is in items
def delete_hostsdep_by_id(self, ids):
for id in ids:
del self[id]
# We create new hostdep if necessary (host groups and co)
def explode(self, hostgroups):
# The "old" dependencies will be removed. All dependencies with
# more than one host or a host group will be in it
hstdep_to_remove = []
# Then for every host create a copy of the dependency with just the host
# because we are adding services, we can't just loop in it
hostdeps = list(self.items.keys())
for id in hostdeps:
hd = self.items[id]
# We explode first the dependent (son) part
dephnames = []
if hasattr(hd, 'dependent_hostgroup_name'):
dephg_names = [n.strip() for n in hd.dependent_hostgroup_name.split(',')]
for dephg_name in dephg_names:
dephg = hostgroups.find_by_name(dephg_name)
if dephg is None:
err = "ERROR: the hostdependency got " \
"an unknown dependent_hostgroup_name '%s'" % dephg_name
hd.configuration_errors.append(err)
continue
dephnames.extend([m.strip() for m in dephg.members])
if hasattr(hd, 'dependent_host_name'):
dephnames.extend([n.strip() for n in hd.dependent_host_name.split(',')])
# Ok, and now the father part :)
hnames = []
if hasattr(hd, 'hostgroup_name'):
hg_names = [n.strip() for n in hd.hostgroup_name.split(',')]
for hg_name in hg_names:
hg = hostgroups.find_by_name(hg_name)
if hg is None:
err = "ERROR: the hostdependency got" \
" an unknown hostgroup_name '%s'" % hg_name
hd.configuration_errors.append(err)
continue
hnames.extend([m.strip() for m in hg.members])
if hasattr(hd, 'host_name'):
hnames.extend([n.strip() for n in hd.host_name.split(',')])
# Loop over all sons and fathers to get S*F host deps
for dephname in dephnames:
dephname = dephname.strip()
for hname in hnames:
new_hd = hd.copy()
new_hd.dependent_host_name = dephname
new_hd.host_name = hname
self.add_item(new_hd)
hstdep_to_remove.append(id)
self.delete_hostsdep_by_id(hstdep_to_remove)
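    # Worked example (sketch): one dependency with hostgroup_name='db'
    # (members db1, db2) and dependent_host_name='web' explodes into two
    # concrete dependencies, web->db1 and web->db2, and the original grouped
    # definition is queued in hstdep_to_remove and deleted afterwards.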
def linkify(self, hosts, timeperiods):
self.linkify_hd_by_h(hosts)
self.linkify_hd_by_tp(timeperiods)
self.linkify_h_by_hd()
def linkify_hd_by_h(self, hosts):
for hd in self:
try:
h_name = hd.host_name
dh_name = hd.dependent_host_name
h = hosts.find_by_name(h_name)
if h is None:
err = "Error: the host dependency got a bad host_name definition '%s'" % h_name
hd.configuration_errors.append(err)
dh = hosts.find_by_name(dh_name)
if dh is None:
err = "Error: the host dependency got " \
"a bad dependent_host_name definition '%s'" % dh_name
hd.configuration_errors.append(err)
hd.host_name = h
hd.dependent_host_name = dh
except AttributeError as exp:
err = "Error: the host dependency miss a property '%s'" % exp
hd.configuration_errors.append(err)
    # For each hostdep we look up the dependency_period by name
    # and replace the name with the timeperiod object
def linkify_hd_by_tp(self, timeperiods):
for hd in self:
try:
tp_name = hd.dependency_period
tp = timeperiods.find_by_name(tp_name)
hd.dependency_period = tp
except AttributeError as exp:
logger.error("[hostdependency] fail to linkify by timeperiod: %s", exp)
    # We backport host deps onto the hosts themselves, so the HD objects are no longer needed
def linkify_h_by_hd(self):
for hd in self:
# if the host dep conf is bad, pass this one
if getattr(hd, 'host_name', None) is None or\
getattr(hd, 'dependent_host_name', None) is None:
continue
# Ok, link!
depdt_hname = hd.dependent_host_name
dp = getattr(hd, 'dependency_period', None)
depdt_hname.add_host_act_dependency(
hd.host_name, hd.notification_failure_criteria, dp, hd.inherits_parent
)
depdt_hname.add_host_chk_dependency(
hd.host_name, hd.execution_failure_criteria, dp, hd.inherits_parent
)
def is_correct(self):
r = super(Hostdependencies, self).is_correct()
return r and self.no_loop_in_parents("host_name", "dependent_host_name")
| 7,995 | Python | .py | 166 | 37.072289 | 99 | 0.580472 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,488 | module.py | shinken-solutions_shinken/shinken/objects/module.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.property import StringProp, ListProp
from shinken.util import strip_and_uniq
from shinken.log import logger
class Module(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'module'
properties = Item.properties.copy()
properties.update({
'module_name': StringProp(),
'module_type': StringProp(),
'modules': ListProp(default=[''], split_on_coma=True),
})
macros = {}
# For debugging purpose only (nice name)
def get_name(self):
return self.module_name
def __repr__(self):
return '<module type=%s name=%s />' % (self.module_type, self.module_name)
class Modules(Items):
name_property = "module_name"
inner_class = Module
def linkify(self):
self.linkify_s_by_plug()
def linkify_s_by_plug(self):
for s in self:
new_modules = []
mods = strip_and_uniq(s.modules)
for plug_name in mods:
plug_name = plug_name.strip()
# don't read void names
if plug_name == '':
continue
# We are the modules, we search them :)
plug = self.find_by_name(plug_name)
if plug is not None:
new_modules.append(plug)
else:
err = "[module] unknown %s module from %s" % (plug_name, s.get_name())
logger.error(err)
s.configuration_errors.append(err)
s.modules = new_modules
    # Nothing to explode for modules
def explode(self):
pass
| 2,710 | Python | .py | 68 | 32.941176 | 90 | 0.64596 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,489 | reactionnerlink.py | shinken-solutions_shinken/shinken/objects/reactionnerlink.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.satellitelink import SatelliteLink, SatelliteLinks
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
class ReactionnerLink(SatelliteLink):
"""Please Add a Docstring to describe the class here"""
id = 0
my_type = 'reactionner'
properties = SatelliteLink.properties.copy()
properties.update({
'reactionner_name': StringProp(fill_brok=['full_status'], to_send=True),
'port': IntegerProp(default=7769, fill_brok=['full_status']),
'min_workers': IntegerProp(default=1, fill_brok=['full_status'], to_send=True),
'max_workers': IntegerProp(default=30, fill_brok=['full_status'], to_send=True),
'processes_by_worker': IntegerProp(default=256, fill_brok=['full_status'], to_send=True),
'max_q_size': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'q_factor': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'results_batch': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'reactionner_tags': ListProp(default=['None'], to_send=True),
'harakiri_threshold': StringProp(default=None, fill_brok=['full_status'], to_send=True),
})
def get_name(self):
return self.reactionner_name
def register_to_my_realm(self):
self.realm.reactionners.append(self)
class ReactionnerLinks(SatelliteLinks): # (Items):
"""Please Add a Docstring to describe the class here"""
name_property = "reactionner_name"
inner_class = ReactionnerLink
| 2,603 | Python | .py | 50 | 48.02 | 97 | 0.709957 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,490 | item.py | shinken-solutions_shinken/shinken/objects/item.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This class is a base class for nearly all configuration
elements like service, hosts or contacts.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import time
import itertools
from shinken.util import safe_print
from copy import copy
from shinken.commandcall import CommandCall
from shinken.property import (StringProp, ListProp, BoolProp,
IntegerProp, ToGuessProp, PythonizeError)
from shinken.brok import Brok
from shinken.util import strip_and_uniq, is_complex_expr
from shinken.acknowledge import Acknowledge
from shinken.comment import Comment
from shinken.log import logger
from shinken.complexexpression import ComplexExpressionFactory
from shinken.graph import Graph
INHERITANCE_DEEP_LIMIT = 32
class Item(object):
properties = {
'imported_from': StringProp(default='unknown'),
'use': ListProp(default=None, split_on_coma=True),
'name': StringProp(default=''),
'definition_order': IntegerProp(default=100),
# TODO: find why we can't uncomment this line below.
'register': BoolProp(default=True),
}
running_properties = {
# All errors and warning raised during the configuration parsing
# and that will raised real warning/errors during the is_correct
'configuration_warnings': ListProp(default=[]),
'configuration_errors': ListProp(default=[]),
'hash': StringProp(default=''),
# We save all template we asked us to load from
'tags': ListProp(default=set(), fill_brok=['full_status']),
}
macros = {
}
def __init__(self, params={}):
# We have our own id of My Class type :)
# use set attr for going into the slots
# instead of __dict__ :)
cls = self.__class__
self.id = self.get_newid()
self.customs = {} # for custom variables
self.plus = {} # for value with a +
self.init_running_properties()
# [0] = + -> new key-plus
# [0] = _ -> new custom entry in UPPER case
for key in params:
# We want to create instance of object with the good type.
# Here we've just parsed config files so everything is a list.
# We use the pythonize method to get the good type.
try:
if key in self.properties:
val = self.properties[key].pythonize(params[key])
elif key in self.running_properties:
warning = "using a the running property %s in a config file" % key
self.configuration_warnings.append(warning)
val = self.running_properties[key].pythonize(params[key])
elif hasattr(self, 'old_properties') and key in self.old_properties:
val = self.properties[self.old_properties[key]].pythonize(params[key])
                elif key.startswith('_'):  # custom macro, no need to detect anything here
_t = params[key]
# If it's a string, directly use this
if isinstance(_t, six.string_types):
val = _t
                    # a list for a custom macro is not managed (conceptually
                    # invalid), so take the first defined value
elif isinstance(_t, list) and len(_t) > 0:
val = _t[0]
                    # otherwise (an empty list), just use an empty string
else:
val = ''
else:
warning = "Guessing the property %s type because it is not in %s object properties" % \
(key, cls.__name__)
self.configuration_warnings.append(warning)
val = ToGuessProp.pythonize(params[key])
except (PythonizeError, ValueError) as expt:
err = "Error while pythonizing parameter '%s': %s" % (key, expt)
self.configuration_errors.append(err)
continue
# checks for attribute value special syntax (+ or _)
# we can have '+param' or ['+template1' , 'template2']
if isinstance(val, six.string_types) and val.startswith('+'):
err = "A + value for a single string is not handled"
self.configuration_errors.append(err)
elif isinstance(val, list) and val and \
isinstance(val[0], six.string_types) and val[0].startswith('+'):
# Special case: a _MACRO can be a plus. so add to plus
# but upper the key for the macro name
val[0] = val[0].lstrip("+")
if key.startswith("_"):
self.plus[key.upper()] = val # we remove the +
else:
self.plus[key] = val # we remove the +
elif key.startswith("_"):
if isinstance(val, list):
err = "no support for _ syntax in multiple valued attributes"
self.configuration_errors.append(err)
continue
custom_name = key.upper()
self.customs[custom_name] = val
else:
setattr(self, key, val)
def get_newid(self):
cls = self.__class__
value = cls.id
cls.id += 1
return value
# When values to set on attributes are unique (single element list),
# return the value directly rather than setting list element.
def compact_unique_attr_value(self, val):
if isinstance(val, list):
if len(val) > 1:
return val
elif len(val) == 0:
return ''
else:
return val[0]
else:
return val
def init_running_properties(self):
for prop, entry in self.__class__.running_properties.items():
            # Copy is slow, so we check the type first.
            # Types with __iter__ are list, dict or tuple;
            # each Item needs its own copy of those
val = entry.default
if hasattr(val, '__iter__'):
setattr(self, prop, copy(val))
else:
setattr(self, prop, val)
                # each instance must have its own running props!
def copy(self):
""" Return a copy of the item, but give him a new id """
cls = self.__class__
        i = cls({})  # Dummy item, but with its own running properties
for prop in cls.properties:
if hasattr(self, prop):
val = getattr(self, prop)
setattr(i, prop, val)
# Also copy the customs tab
i.customs = copy(self.customs)
# And tags/templates
if hasattr(self, "tags"):
i.tags = copy(self.tags)
if hasattr(self, "templates"):
i.templates = copy(self.templates)
return i
def clean(self):
""" Clean useless things not requested once item has been fully initialized&configured.
Like temporary attributes such as "imported_from", etc.. """
for name in ('imported_from', 'use', 'plus', 'templates',):
try:
delattr(self, name)
except AttributeError:
pass
def __str__(self):
return str(self.__dict__)
def is_tpl(self):
""" Return if the elements is a template """
return not getattr(self, "register", True)
# If a prop is absent and is not required, put the default value
def fill_default(self):
""" Fill missing properties if they are missing """
cls = self.__class__
for prop, entry in cls.properties.items():
if not hasattr(self, prop) and entry.has_default:
setattr(self, prop, entry.default)
# We load every useful parameter so no need to access global conf later
# Must be called after a change in a global conf parameter
def load_global_conf(cls, conf):
""" Used to put global values in the sub Class like
hosts or services """
# conf have properties, if 'enable_notifications':
# { [...] 'class_inherit': [(Host, None), (Service, None),
# (Contact, None)]}
        # if change_name is None we store the value under the original
        # property name, otherwise under change_name
for prop, entry in conf.properties.items():
# If we have a class_inherit, and the arbiter really send us it
# if 'class_inherit' in entry and hasattr(conf, prop):
if hasattr(conf, prop):
for (cls_dest, change_name) in entry.class_inherit:
if cls_dest == cls: # ok, we've got something to get
value = getattr(conf, prop)
if change_name is None:
setattr(cls, prop, value)
else:
setattr(cls, change_name, value)
# Make this method a classmethod
load_global_conf = classmethod(load_global_conf)
def get_templates(self):
use = getattr(self, 'use', '')
if isinstance(use, list):
return [n.strip() for n in use if n.strip()]
else:
return [n.strip() for n in use.split(',') if n.strip()]
    # We fill in properties from our templates if needed
def get_property_by_inheritance(self, prop, deep_level):
if prop == 'register':
return None # We do not inherit from register
# Don't allow to loop too much over the inheritance, to avoid infinite
# recursive calls. This loop will raise an error at global configuration
# check.
if deep_level > INHERITANCE_DEEP_LIMIT:
return None
# If I have the prop, I take mine but I check if I must
# add a plus property
if hasattr(self, prop):
value = getattr(self, prop)
# Manage the additive inheritance for the property,
# if property is in plus, add or replace it
# Template should keep the '+' at the beginning of the chain
if self.has_plus(prop):
value.insert(0, self.get_plus_and_delete(prop))
if self.is_tpl():
value = list(value)
value.insert(0, '+')
return value
# Ok, I do not have prop, Maybe my templates do?
# Same story for plus
# We reverse list, so that when looking for properties by inheritance,
# the least defined template wins (if property is set).
for i in self.templates:
value = i.get_property_by_inheritance(prop, deep_level + 1)
if value:
                # If our template gives us a '+' value, we should continue to loop
still_loop = False
if isinstance(value, list) and value[0] == '+':
# Templates should keep their + inherited from their parents
if not self.is_tpl():
value = [x for x in value if x != '+']
still_loop = True
# Maybe in the previous loop, we set a value, use it too
if hasattr(self, prop):
# If the current value is strong, it will simplify the problem
if not isinstance(value, list) and value[0] == '+':
# In this case we can remove the + from our current
# tpl because our value will be final
new_val = list(getattr(self, prop))
new_val.extend(value[1:])
value = new_val
                    else:  # if not, we should keep the '+' sign if needed
new_val = list(getattr(self, prop))
new_val.extend(value)
value = new_val
# Ok, we can set it
setattr(self, prop, value)
# If we only got some '+' values, we must still loop
# for an end value without it
if not still_loop:
                    # And set my own value at the end if needed
if self.has_plus(prop):
value = list(value)
value = list(getattr(self, prop))
value.extend(self.get_plus_and_delete(prop))
# Template should keep their '+'
if self.is_tpl() and not value[0] == '+':
value.insert(0, '+')
setattr(self, prop, value)
return value
# Maybe templates only give us + values, so we didn't quit, but we already got a
# self.prop value after all
template_with_only_plus = hasattr(self, prop)
        # I do not have an ending prop, and neither do my templates... maybe a plus?
        # warning: if all my templates gave me '+' values, do not forget to
        # add the already-set self.prop value
if self.has_plus(prop):
if template_with_only_plus:
value = list(getattr(self, prop))
value.extend(self.get_plus_and_delete(prop))
else:
value = self.get_plus_and_delete(prop)
            # Templates should keep their '+' chain.
            # We must mark it as a '+' value, so our son will know that it
            # must still loop
if self.is_tpl() and value != [] and not value[0] == '+':
value.insert(0, '+')
setattr(self, prop, value)
return value
# Ok so in the end, we give the value we got if we have one, or None
# Not even a plus... so None :)
return getattr(self, prop, None)
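    # Worked example of the '+' handling above (a sketch with hypothetical
    # names): a host defining contact_groups as '+ops' while inheriting from
    # a template with contact_groups=['admins'] resolves to ['admins', 'ops']
    # -- the '+' marker extends the inherited list instead of replacing it.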
    # We fill in custom properties from our templates if needed
def get_customs_properties_by_inheritance(self, deep_level):
# protect against infinite recursive loop
if deep_level > INHERITANCE_DEEP_LIMIT:
return self.customs
# We reverse list, so that when looking for properties by inheritance,
# the least defined template wins (if property is set).
for i in self.templates:
tpl_cv = i.get_customs_properties_by_inheritance(deep_level + 1)
            if tpl_cv:
for prop in tpl_cv:
if prop not in self.customs:
value = tpl_cv[prop]
else:
value = self.customs[prop]
if self.has_plus(prop):
value.insert(0, self.get_plus_and_delete(prop))
# value = self.get_plus_and_delete(prop) + ',' + value
self.customs[prop] = value
for prop in self.customs:
value = self.customs[prop]
if self.has_plus(prop):
value.insert(0, self.get_plus_and_delete(prop))
self.customs[prop] = value
        # We can get custom properties in plus, so we need to fetch all
        # entries and put them into customs
cust_in_plus = self.get_all_plus_and_delete()
for prop in cust_in_plus:
self.customs[prop] = cust_in_plus[prop]
return self.customs
def has_plus(self, prop):
return prop in self.plus
def get_all_plus_and_delete(self):
res = {}
props = self.plus.keys() # we delete entries, so no for ... in ...
for prop in props:
res[prop] = self.get_plus_and_delete(prop)
return res
def get_plus_and_delete(self, prop):
val = self.plus[prop]
del self.plus[prop]
return val
    # Check if required props are set:
    # templates are always correct
def is_correct(self):
state = True
properties = self.__class__.properties
        # Raise all previously seen errors, like unknown contacts and co
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[item::%s] %s", self.get_name(), err)
for prop, entry in properties.items():
if not hasattr(self, prop) and entry.required:
logger.warning("[item::%s] %s property is missing", self.get_name(), prop)
state = False
return state
    # This function is used by services and hosts
    # to transform Nagios2 parameters into Nagios3
    # ones, like normal_check_interval to
    # check_interval. There is an old_properties dict
    # in the classes that describes such renamings.
def old_properties_names_to_new(self):
old_properties = getattr(self.__class__, "old_properties", {})
for old_name, new_name in old_properties.items():
# Ok, if we got old_name and NO new name,
# we switch the name
if hasattr(self, old_name) and not hasattr(self, new_name):
value = getattr(self, old_name)
setattr(self, new_name, value)
delattr(self, old_name)
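
    # Usage sketch (hypothetical mapping, not taken from a real class): a
    # class declaring
    #     old_properties = {'normal_check_interval': 'check_interval'}
    # will, after this call, expose check_interval with the value the
    # configuration originally supplied under normal_check_interval.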
    # The arbiter is asking us our raw values, before any explode or linking
def get_raw_import_values(self):
r = {}
        properties = list(self.__class__.properties.keys())
        # Register is not in the properties by default
        if 'register' not in properties:
            properties.append('register')
for prop in properties:
if hasattr(self, prop):
v = getattr(self, prop)
# print(prop, ":", v)
r[prop] = v
return r
def add_downtime(self, downtime):
self.downtimes.append(downtime)
def del_downtime(self, downtime_id):
d_to_del = None
for dt in self.downtimes:
if dt.id == downtime_id:
d_to_del = dt
dt.can_be_deleted = True
if d_to_del is not None:
self.downtimes.remove(d_to_del)
def add_comment(self, comment):
self.comments.append(comment)
def del_comment(self, comment_id):
c_to_del = None
for c in self.comments:
if c.id == comment_id:
c_to_del = c
c.can_be_deleted = True
if c_to_del is not None:
self.comments.remove(c_to_del)
def acknowledge_problem(self, sticky, notify, persistent, author, comment, end_time=0):
if self.state != self.ok_up:
if notify:
self.create_notifications('ACKNOWLEDGEMENT')
self.problem_has_been_acknowledged = True
if sticky == 2:
sticky = True
else:
sticky = False
a = Acknowledge(self, sticky, notify, persistent, author, comment, end_time=end_time)
self.acknowledgement = a
if self.my_type == 'host':
comment_type = 1
else:
comment_type = 2
c = Comment(self, persistent, author, comment,
comment_type, 4, 0, False, 0)
self.add_comment(c)
self.broks.append(self.get_update_status_brok())
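
    # For reference, this is typically driven by the standard external
    # command format (well-known from Nagios, noted here as an assumption):
    # ACKNOWLEDGE_SVC_PROBLEM;<host>;<service>;<sticky>;<notify>;
    # <persistent>;<author>;<comment>, where a <sticky> value of 2 maps to
    # True above.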
    # Look if we got an ack that is too old, with an expire date, and should
    # be deleted
def check_for_expire_acknowledge(self):
if (self.acknowledgement and
self.acknowledgement.end_time != 0 and
self.acknowledgement.end_time < time.time()):
self.unacknowledge_problem()
# Delete the acknowledgement object and reset the flag
# but do not remove the associated comment.
def unacknowledge_problem(self):
if self.problem_has_been_acknowledged:
logger.debug("[item::%s] deleting acknowledge of %s",
self.get_name(),
self.get_dbg_name())
self.problem_has_been_acknowledged = False
# Should not be deleted, a None is Good
self.acknowledgement = None
# del self.acknowledgement
        # find non-persistent ack-comments and delete them too
for c in self.comments:
if c.entry_type == 4 and not c.persistent:
self.del_comment(c.id)
self.broks.append(self.get_update_status_brok())
# Check if we have an acknowledgement and if this is marked as sticky.
# This is needed when a non-ok state changes
def unacknowledge_problem_if_not_sticky(self):
if hasattr(self, 'acknowledgement') and self.acknowledgement is not None:
if not self.acknowledgement.sticky:
self.unacknowledge_problem()
    # Will flatten some parameters tagged by the 'conf_send_preparation'
    # property because they are too "linked" to be sent as is (like realms)
def prepare_for_conf_sending(self):
cls = self.__class__
for prop, entry in cls.properties.items():
            # Does this property need preparation for sending?
if entry.conf_send_preparation is not None:
val = entry.conf_send_preparation(getattr(self, prop))
setattr(self, prop, val)
running_properties = getattr(cls, 'running_properties', {})
for prop, entry in running_properties.items():
            # Does this property need preparation for sending?
if entry.conf_send_preparation is not None:
val = entry.conf_send_preparation(getattr(self, prop))
setattr(self, prop, val)
    # Get the property for an object, with a good value
    # and brok_transformation applied if needed
def get_property_value_for_brok(self, prop, tab):
entry = tab[prop]
        # Get the current value, or the default if needed
value = getattr(self, prop, entry.default)
        # Apply brok_transformation if needed:
        # look if we must preprocess the value first
pre_op = entry.brok_transformation
if pre_op is not None:
value = pre_op(self, value)
return value
# Fill data with info of item by looking at brok_type
# in props of properties or running_properties
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
# Is this property intended for broking?
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.properties)
        # Maybe the class does not have running_properties
if hasattr(cls, 'running_properties'):
# We've got prop in running_properties too
for prop, entry in cls.running_properties.items():
# if 'fill_brok' in cls.running_properties[prop]:
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.running_properties)
# Get a brok with initial status
def get_initial_status_brok(self):
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
return Brok('initial_' + self.my_type + '_status', data)
# Get a brok with update item status
def get_update_status_brok(self):
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
return Brok('update_' + self.my_type + '_status', data)
# Get a brok with check_result
def get_check_result_brok(self):
data = {}
self.fill_data_brok_from(data, 'check_result')
return Brok(self.my_type + '_check_result', data)
# Get brok about the new schedule (next_check)
def get_next_schedule_brok(self):
data = {}
self.fill_data_brok_from(data, 'next_schedule')
return Brok(self.my_type + '_next_schedule', data)
    # A snapshot brok is like a check_result, but also carries the
    # output of the snapshot command
def get_snapshot_brok(self, snap_output, exit_status):
data = {
'snapshot_output': snap_output,
'snapshot_time': int(time.time()),
'snapshot_exit_status': exit_status,
}
self.fill_data_brok_from(data, 'check_result')
return Brok(self.my_type + '_snapshot', data)
# Link one command property to a class (for globals like oc*p_command)
def linkify_one_command_with_commands(self, commands, prop):
if not hasattr(self, prop):
return
command = getattr(self, prop).strip()
if command:
parms = {}
for parm in ('poller_tag', 'reactionner_tag', 'priority'):
if hasattr(self, parm):
parms[parm] = getattr(self, parm)
cmdCall = CommandCall(commands, command, **parms)
setattr(self, prop, cmdCall)
else:
setattr(self, prop, None)
# We look at the 'trigger' prop and we create a trigger for it
def explode_trigger_string_into_triggers(self, triggers):
src = getattr(self, 'trigger', '')
if src:
# Change on the fly the characters
src = src.replace(r'\n', '\n').replace(r'\t', '\t')
t = triggers.create_trigger(
src,
'inner-trigger-%s%s' % (self.__class__.my_type, self.id)
)
if t:
                # Maybe the trigger factory gives me an already existing trigger,
                # so my name can be dropped
self.triggers.append(t.get_name())
# Link with triggers. Can be with a "in source" trigger, or a file name
def linkify_with_triggers(self, triggers):
# Get our trigger string and trigger names in the same list
self.triggers.extend([self.trigger_name])
# print("I am linking my triggers", self.get_full_name(), self.triggers)
new_triggers = []
for tname in self.triggers:
if tname == '':
continue
t = triggers.find_by_name(tname)
if t:
setattr(t, 'trigger_broker_raise_enabled', self.trigger_broker_raise_enabled)
new_triggers.append(t)
else:
self.configuration_errors.append(
                    'the %s %s has an unknown trigger_name %s' %
(self.__class__.my_type, self.get_full_name(), tname)
)
self.triggers = new_triggers
def dump(self):
dmp = {}
for prop in self.properties.keys():
if not hasattr(self, prop):
continue
attr = getattr(self, prop)
if isinstance(attr, list) and attr and isinstance(attr[0], Item):
dmp[prop] = [i.dump() for i in attr]
elif isinstance(attr, Item):
dmp[prop] = attr.dump()
elif attr:
dmp[prop] = getattr(self, prop)
return dmp
def _get_name(self):
if hasattr(self, 'get_name'):
return self.get_name()
name = getattr(self, 'name', None)
host_name = getattr(self, 'host_name', None)
return '%s(host_name=%s)' % (name or 'no-name', host_name or '')
class Items(object):
def __init__(self, items, index_items=True, conflict_policy="loose"):
self.conflict_policy = conflict_policy
self.items = {}
self.name_to_item = {}
self.templates = {}
self.name_to_template = {}
self.configuration_warnings = []
self.configuration_errors = []
self.add_items(items, index_items)
def get_source(self, item):
source = getattr(item, 'imported_from', None)
if source:
return " in %s" % source
else:
return ""
def add_items(self, items, index_items):
"""
Add items into the `items` or `templates` container depending on the
is_tpl method result.
:param items: The items list to add.
:param index_items: Flag indicating if the items should be indexed
on the fly.
"""
for i in items:
if i.is_tpl():
self.add_template(i)
else:
self.add_item(i, index_items)
def manage_conflict(self, item, name):
"""
        Checks if an object holding the same name already exists in the index.
        If so, it compares their definition order: the lowest definition order
        is kept. If the definition orders are equal, the conflict policy
        decides: "strict" records a configuration error, otherwise the lastly
        defined item wins with a warning.
        The method returns the item that should be added after it has decided
        which one should be kept.
        If the new item has precedence over the existing one, the existing
        one is removed for the new one to replace it.
        :param item: The new item to check for conflict
        :param name: The existing object name
:return The retained object
"""
if item.is_tpl():
existing = self.name_to_template[name]
else:
existing = self.name_to_item[name]
existing_prio = getattr(
existing,
"definition_order",
existing.properties["definition_order"].default)
item_prio = getattr(
item,
"definition_order",
item.properties["definition_order"].default)
if existing_prio < item_prio:
# Existing item has lower priority, so it has precedence.
return existing
elif existing_prio > item_prio:
# New item has lower priority, so it has precedence.
# Existing item will be deleted below
pass
else:
# Don't know which one to keep, lastly defined has precedence
objcls = getattr(self.inner_class, "my_type", "[unknown]")
if objcls == "service":
objname = "%s/%s" % (item.host_name, item.service_description)
else:
objname = item.get_name()
if self.conflict_policy == "strict":
mesg = "duplicate %s name %s%s. "\
"You have to manually set the definition_order " \
"parameter to avoid this error." % \
(objcls, objname, self.get_source(item))
item.configuration_errors.append(mesg)
else:
mesg = "duplicate %s name %s%s, using lastly defined. "\
"You may manually set the definition_order " \
"parameter to avoid this message." % \
(objcls, objname, self.get_source(item))
item.configuration_warnings.append(mesg)
if item.is_tpl():
self.remove_template(existing)
else:
self.remove_item(existing)
return item
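
    # Illustration (invented names): two hosts both named "web-1", one with
    # definition_order 1 and one with definition_order 10, resolve to the
    # definition_order 1 host (lowest wins). With equal orders, the "strict"
    # policy records a configuration error, while the default "loose" policy
    # keeps the lastly defined object and only warns.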
def add_template(self, tpl):
"""
        Adds and indexes a template into the `templates` container.
:param tpl: The template to add
"""
tpl = self.index_template(tpl)
self.templates[tpl.id] = tpl
def index_template(self, tpl):
"""
        Indexes a template by `name` into the `name_to_template` dictionary.
:param tpl: The template to index
"""
objcls = self.inner_class.my_type
name = getattr(tpl, 'name', '')
if not name:
mesg = "a %s template has been defined without name%s%s" % \
(objcls, tpl.imported_from, self.get_source(tpl))
tpl.configuration_errors.append(mesg)
elif name in self.name_to_template:
tpl = self.manage_conflict(tpl, name)
self.name_to_template[name] = tpl
return tpl
def remove_template(self, tpl):
"""
        Removes and unindexes a template from the `templates` container.
:param tpl: The template to remove
"""
try:
del self.templates[tpl.id]
except KeyError:
pass
self.unindex_template(tpl)
def unindex_template(self, tpl):
"""
        Unindexes a template from the `name_to_template` dictionary.
:param tpl: The template to unindex
"""
name = getattr(tpl, 'name', '')
try:
del self.name_to_template[name]
except KeyError:
pass
def add_item(self, item, index=True):
"""Adds an item into our containers, and index it depending on the `index` flag.
:param item: The item to add
:param index: Flag indicating if the item should be indexed
"""
name_property = getattr(self.__class__, "name_property", None)
if index is True and name_property:
item = self.index_item(item)
self.items[item.id] = item
def remove_item(self, item):
"""Removes (and un-index) an item from our containers.
:param item: The item to be removed.
:type item: Item # or subclass of
"""
self.unindex_item(item)
try:
self.items.pop(item.id)
except KeyError:
safe_print("ERROR: Internal Issue, this case should not happen %s " % item)
pass
def index_item(self, item):
""" Indexes an item into our `name_to_item` dictionary.
If an object holding the same item's name/key already exists in the index
then the conflict is managed by the `manage_conflict` method.
        :param item: The item to index
        """
# TODO: simplify this function (along with its opposite: unindex_item)
# it's too complex for what it does.
        # moreover:
# There are cases (in unindex_item) where some item is tried to be removed
# from name_to_item while it's not present in it !
# so either it wasn't added or it was added with another key or the item key changed
# between the index and unindex calls..
# -> We should simply not have to call unindex_item() with a non-indexed item !
name_property = getattr(self.__class__, "name_property", None)
        # if there is no 'name_property' set (it is None), then the following getattr() will
        # "hopefully" evaluate to '',
        # unless some(thing|one) has done setattr(item, None, 'with_something'),
        # which would be rather odd:
name = getattr(item, name_property, '')
if not name:
objcls = self.inner_class.my_type
mesg = "a %s item has been defined without %s%s" % \
(objcls, name_property, self.get_source(item))
item.configuration_errors.append(mesg)
elif name in self.name_to_item:
if item.id != self.name_to_item[name].id:
item = self.manage_conflict(item, name)
self.name_to_item[name] = item
return item
def unindex_item(self, item):
""" Unindex an item from our name_to_item dict.
:param item: The item to unindex
"""
name_property = getattr(self.__class__, "name_property", None)
if name_property is None:
return
self.name_to_item.pop(getattr(item, name_property, ''), None)
def __iter__(self):
return iter(self.items.values())
def __len__(self):
return len(self.items)
def __delitem__(self, key):
try:
self.unindex_item(self.items[key])
del self.items[key]
except KeyError: # we don't want it, we do not have it. All is perfect
pass
def __setitem__(self, key, value):
self.items[key] = value
name_property = getattr(self.__class__, "name_property", None)
if name_property:
self.index_item(value)
def __getitem__(self, key):
return self.items[key]
def __contains__(self, key):
return key in self.items
def find_by_name(self, name):
return self.name_to_item.get(name, None)
    # Search items using a list of filter callbacks. Each callback is passed
    # the item instance and should return a boolean indicating whether it
    # matched the filter.
    # Returns a list of items matching all filters.
def find_by_filter(self, filters):
items = []
for i in self:
failed = False
for f in filters:
if not f(i):
failed = True
break
if failed is False:
items.append(i)
return items
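
    # Usage sketch (hypothetical callables): every filter must accept an item
    # and return a boolean, e.g.
    #     items.find_by_filter([lambda i: getattr(i, 'register', True),
    #                           lambda i: 'linux' in getattr(i, 'tags', [])])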
# prepare_for_conf_sending to flatten some properties
def prepare_for_sending(self):
for i in self:
i.prepare_for_conf_sending()
# It's used to change old Nagios2 names to
# Nagios3 ones
def old_properties_names_to_new(self):
for i in itertools.chain(self.items.values(),
self.templates.values()):
i.old_properties_names_to_new()
def pythonize(self):
for id in self.items:
self.items[id].pythonize()
def find_tpl_by_name(self, name):
return self.name_to_template.get(name, None)
def get_all_tags(self, item):
all_tags = item.get_templates()
for t in item.templates:
all_tags.append(t.name)
all_tags.extend(self.get_all_tags(t))
return list(set(all_tags))
def linkify_item_templates(self, item):
tpls = []
tpl_names = item.get_templates()
for name in tpl_names:
t = self.find_tpl_by_name(name)
if t is None:
# TODO: Check if this should not be better to report as an error ?
self.configuration_warnings.append("%s %r use/inherit from an unknown template "
"(%r) ! Imported from: "
"%s" % (type(item).__name__,
item._get_name(),
name,
item.imported_from))
else:
if t is item:
self.configuration_errors.append(
'%s %r use/inherits from itself ! Imported from: %s' %
(type(item).__name__, item._get_name(), item.imported_from)
)
else:
tpls.append(t)
item.templates = tpls
# We will link all templates, and create the template
# graph too
def linkify_templates(self):
# First we create a list of all templates
for i in itertools.chain(self.items.values(),
self.templates.values()):
self.linkify_item_templates(i)
for i in self:
i.tags = self.get_all_tags(i)
        # Look if there are loops in our parents definition
if not self.no_loop_in_parents("self", "templates", templates=True):
err = '[items] There are loops in the %s templates definition.' % i.__class__.my_type
self.configuration_errors.append(err)
def is_correct(self):
        # we are ok at the beginning. Hope we are still ok at the end...
r = True
        # Some classes do not have twins, because they do not have names,
        # like servicedependencies
        twins = getattr(self, 'twins', None)
        if twins is not None:
            # Ok, warn about the twins (it's bad!)
for id in twins:
i = self.items[id]
logger.warning("[items] %s.%s is duplicated from %s",
i.__class__.my_type,
i.get_name(),
getattr(i, 'imported_from', "unknown source"))
        # Then look if we have some errors in the conf
        # Just print warnings, but raise errors
for err in self.configuration_warnings:
logger.warning("[items] %s", err)
for err in self.configuration_errors:
logger.error("[items] %s", err)
r = False
# Then look for individual ok
for i in self:
            # Alias and display_name hook
prop_name = getattr(self.__class__, 'name_property', None)
if prop_name and not hasattr(i, 'alias') and hasattr(i, prop_name):
setattr(i, 'alias', getattr(i, prop_name))
if prop_name and getattr(i, 'display_name', '') == '' and hasattr(i, prop_name):
setattr(i, 'display_name', getattr(i, prop_name))
# Now other checks
if not i.is_correct():
n = getattr(i, 'imported_from', "unknown source")
logger.error("[items] In %s is incorrect ; from %s", i.get_name(), n)
r = False
return r
def remove_templates(self):
""" Remove useless templates (& properties) of our items
otherwise we could get errors on config.is_correct()
"""
del self.templates
def clean(self):
""" Request to remove the unnecessary attributes/others from our items """
for i in self:
i.clean()
Item.clean(self)
# If a prop is absent and is not required, put the default value
def fill_default(self):
for i in self:
i.fill_default()
def __str__(self):
s = ''
cls = self.__class__
for id in self.items:
s += "%s:%s%s\n" % (cls, id, self.items[id])
return s
# Inheritance for just a property
def apply_partial_inheritance(self, prop):
for i in itertools.chain(self.items.values(),
self.templates.values()):
i.get_property_by_inheritance(prop, 0)
# If a "null" attribute was inherited, delete it
try:
if getattr(i, prop) == 'null':
delattr(i, prop)
except AttributeError:
pass
def apply_inheritance(self):
""" For all items and templates inherite properties and custom
variables.
"""
        # We check for all class properties if the host has them;
        # if not, it checks all host templates for a value
cls = self.inner_class
for prop in cls.properties:
self.apply_partial_inheritance(prop)
for i in itertools.chain(self.items.values(),
self.templates.values()):
i.get_customs_properties_by_inheritance(0)
    # We've got a contacts property with comma-separated contact names
    # and we want a list of Contact objects
def linkify_with_contacts(self, contacts):
for i in self:
if hasattr(i, 'contacts'):
contacts_tab = strip_and_uniq(i.contacts)
new_contacts = []
for c_name in contacts_tab:
if c_name != '':
c = contacts.find_by_name(c_name)
if c is not None:
new_contacts.append(c)
# Else: Add in the errors tab.
# will be raised at is_correct
else:
err = "the contact '%s' defined for '%s' is unknown" % (c_name,
i.get_name())
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.contacts = list(set(new_contacts))
# Make link between an object and its escalations
def linkify_with_escalations(self, escalations):
for i in self:
if hasattr(i, 'escalations'):
escalations_tab = strip_and_uniq(i.escalations)
new_escalations = []
for es_name in [e for e in escalations_tab if e != '']:
es = escalations.find_by_name(es_name)
if es is not None:
new_escalations.append(es)
                    else:  # Escalation not found, not good!
err = "the escalation '%s' defined for '%s' is unknown" % (es_name,
i.get_name())
i.configuration_errors.append(err)
i.escalations = new_escalations
    # Make link between item and its resultmodulations
def linkify_with_resultmodulations(self, resultmodulations):
for i in self:
if hasattr(i, 'resultmodulations'):
resultmodulations_tab = strip_and_uniq(i.resultmodulations)
new_resultmodulations = []
for rm_name in resultmodulations_tab:
rm = resultmodulations.find_by_name(rm_name)
if rm is not None:
new_resultmodulations.append(rm)
else:
err = ("the result modulation '%s' defined on the %s "
"'%s' do not exist" % (rm_name, i.__class__.my_type, i.get_name()))
i.configuration_warnings.append(err)
continue
i.resultmodulations = new_resultmodulations
    # Make link between item and its business_impact_modulations
def linkify_with_business_impact_modulations(self, business_impact_modulations):
for i in self:
if hasattr(i, 'business_impact_modulations'):
business_impact_modulations_tab = strip_and_uniq(i.business_impact_modulations)
new_business_impact_modulations = []
for rm_name in business_impact_modulations_tab:
rm = business_impact_modulations.find_by_name(rm_name)
if rm is not None:
new_business_impact_modulations.append(rm)
else:
err = ("the business impact modulation '%s' defined on the %s "
"'%s' do not exist" % (rm_name, i.__class__.my_type, i.get_name()))
i.configuration_errors.append(err)
continue
i.business_impact_modulations = new_business_impact_modulations
    # If we've got a contact_groups property, we search for all
    # these groups and ask them their contacts, and then add them
    # all into our contacts property
def explode_contact_groups_into_contacts(self, item, contactgroups):
if hasattr(item, 'contact_groups'):
# TODO : See if we can remove this if
if isinstance(item.contact_groups, list):
cgnames = item.contact_groups
else:
cgnames = item.contact_groups.split(',')
cgnames = strip_and_uniq(cgnames)
for cgname in cgnames:
cg = contactgroups.find_by_name(cgname)
if cg is None:
err = "The contact group '%s' defined on the %s '%s' do " \
"not exist" % (cgname, item.__class__.my_type,
item.get_name())
item.configuration_errors.append(err)
continue
cnames = contactgroups.get_members_by_name(cgname)
# We add contacts into our contacts
if cnames != []:
if hasattr(item, 'contacts'):
item.contacts.extend(cnames)
else:
item.contacts = cnames
# Link a timeperiod property (prop)
def linkify_with_timeperiods(self, timeperiods, prop):
for i in self:
if hasattr(i, prop):
tpname = getattr(i, prop).strip()
# some default values are '', so set None
if tpname == '':
setattr(i, prop, None)
continue
# Ok, get a real name, search for it
tp = timeperiods.find_by_name(tpname)
# If not found, it's an error
if tp is None:
err = ("The %s of the %s '%s' named "
"'%s' is unknown!" % (prop, i.__class__.my_type, i.get_name(), tpname))
i.configuration_errors.append(err)
continue
# Got a real one, just set it :)
setattr(i, prop, tp)
    def create_commandcall(self, prop, commands, command):
        commandcall = dict(commands=commands, call=command)
        for parm in ('enable_environment_macros', 'poller_tag',
                     'reactionner_tag', 'priority'):
            if hasattr(prop, parm):
                commandcall[parm] = getattr(prop, parm)
        return CommandCall(**commandcall)
# Link one command property
def linkify_one_command_with_commands(self, commands, prop):
for i in self:
if not hasattr(i, prop):
continue
command = getattr(i, prop).strip()
if command:
cmdCall = self.create_commandcall(i, commands, command)
# TODO: catch None?
setattr(i, prop, cmdCall)
else:
setattr(i, prop, None)
    # Link a command list (comma-separated commands) into real CommandCalls
def linkify_command_list_with_commands(self, commands, prop):
for i in self:
if not hasattr(i, prop):
continue
coms = strip_and_uniq(getattr(i, prop))
com_list = []
for com in coms:
if com:
#print("com: %s" % com)
cmdCall = self.create_commandcall(i, commands, com)
# TODO: catch None?
com_list.append(cmdCall)
else: # TODO: catch?
pass
setattr(i, prop, com_list)
# Link with triggers. Can be with a "in source" trigger, or a file name
def linkify_with_triggers(self, triggers):
for i in self:
i.linkify_with_triggers(triggers)
    # We've got a checkmodulations property with comma-separated names
    # and we want a list of CheckModulation objects
def linkify_with_checkmodulations(self, checkmodulations):
for i in self:
if not hasattr(i, 'checkmodulations'):
continue
new_checkmodulations = []
for cw_name in i.checkmodulations:
cw = checkmodulations.find_by_name(cw_name)
if cw is not None:
new_checkmodulations.append(cw)
else:
err = ("The checkmodulations of the %s '%s' named "
"'%s' is unknown!" % (i.__class__.my_type, i.get_name(), cw_name))
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.checkmodulations = new_checkmodulations
    # We've got a list of macro modulation names, and
    # we want the real objects
def linkify_with_macromodulations(self, macromodulations):
for i in self:
if not hasattr(i, 'macromodulations'):
continue
new_macromodulations = []
for cw_name in i.macromodulations:
cw = macromodulations.find_by_name(cw_name)
if cw is not None:
new_macromodulations.append(cw)
else:
err = ("The macromodulations of the %s '%s' named "
"'%s' is unknown!" % (i.__class__.my_type, i.get_name(), cw_name))
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.macromodulations = new_macromodulations
# Linkify with modules
def linkify_s_by_plug(self, modules):
for s in self:
new_modules = []
for plug_name in s.modules:
plug_name = plug_name.strip()
            # don't treat void names
if plug_name == '':
continue
plug = modules.find_by_name(plug_name)
if plug is not None:
new_modules.append(plug)
else:
err = "Error: the module %s is unknown for %s" % (plug_name, s.get_name())
s.configuration_errors.append(err)
s.modules = new_modules
def evaluate_hostgroup_expression(self, expr, hosts, hostgroups, look_in='hostgroups'):
# print("\n"*10, "looking for expression", expr)
        # Maybe expr is a list, like numerous hostgroup entries in a service; join them
if isinstance(expr, list):
expr = '|'.join(expr)
# print("\n"*10, "looking for expression", expr)
if look_in == 'hostgroups':
f = ComplexExpressionFactory(look_in, hostgroups, hosts)
else: # templates
f = ComplexExpressionFactory(look_in, hosts, hosts)
expr_tree = f.eval_cor_pattern(expr)
set_res = expr_tree.resolve_elements()
# HOOK DBG
return list(set_res)
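
    # Illustration (invented group names): an expression such as
    # "(linux|windows)&prod" resolves to the hosts that belong to linux or
    # windows AND to prod; a plain list of group names, joined with '|'
    # above, behaves as a simple union.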
def get_hosts_from_hostgroups(self, hgname, hostgroups):
if not isinstance(hgname, list):
hgname = [e.strip() for e in hgname.split(',') if e.strip()]
host_names = []
for name in hgname:
hg = hostgroups.find_by_name(name)
if hg is None:
                raise ValueError("the hostgroup '%s' is unknown" % name)
mbrs = [h.strip() for h in hg.get_hosts() if h.strip()]
host_names.extend(mbrs)
return host_names
    # If we've got a hostgroup_name property, we search for all
    # these groups and ask them their hosts, and then add them
    # all into our host_name property
def explode_host_groups_into_hosts(self, item, hosts, hostgroups):
hnames_list = []
# Gets item's hostgroup_name
hgnames = getattr(item, "hostgroup_name", '')
# Defines if hostgroup is a complex expression
# Expands hostgroups
if is_complex_expr(hgnames):
hnames_list.extend(self.evaluate_hostgroup_expression(
item.hostgroup_name, hosts, hostgroups))
elif hgnames:
try:
hnames_list.extend(
self.get_hosts_from_hostgroups(hgnames, hostgroups))
except ValueError as e:
item.configuration_errors.append(str(e))
# Expands host names
hname = getattr(item, "host_name", '')
hnames_list.extend([n.strip() for n in hname.split(',') if n.strip()])
hnames = set()
for h in hnames_list:
            # If the host starts with a !, it must be removed from
            # the list we got from the hostgroups
if h.startswith('!'):
hst_to_remove = h[1:].strip()
try:
hnames.remove(hst_to_remove)
except KeyError:
pass
            # Else it's a host to add, but maybe it's ALL ('*')
            elif h == '*':
                for known_host in hosts.items.values():
                    if getattr(known_host, 'host_name', ''):
                        hnames.add(known_host.host_name)
            else:
                hnames.add(h)
item.host_name = ','.join(hnames)
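
    # Worked example (invented names): if hostgroup "web" contains srv1, srv2
    # and srv3 and the item sets host_name to "!srv2,srv4", the resulting
    # host_name is the set {srv1, srv3, srv4}: the '!' entry removes a host
    # already pulled in by the group, and '*' would add every known host.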
# Take our trigger strings and create true objects with it
def explode_trigger_string_into_triggers(self, triggers):
for i in self:
i.explode_trigger_string_into_triggers(triggers)
    # Parent graph: used to quickly find relations between all items, and loops.
    # Returns True if there is NO loop
def no_loop_in_parents(self, attr1, attr2, templates=False):
""" Find loop in dependencies.
For now, used with the following attributes :
:(self, parents):
host dependencies from host object
:(host_name, dependent_host_name):\
host dependencies from hostdependencies object
:(service_description, dependent_service_description):
service dependencies from servicedependencies object
"""
# Ok, we say "from now, no loop :) "
r = True
# Create parent graph
parents = Graph()
elts_lst = self
if templates:
elts_lst = self.templates.values()
# Start with all items as nodes
for item in elts_lst:
            # Hack to get self here. Used when looping on hosts and host parents
if attr1 == "self":
obj = item # obj is a host/service [list]
else:
obj = getattr(item, attr1, None)
if obj is not None:
if isinstance(obj, list):
for sobj in obj:
parents.add_node(sobj)
else:
parents.add_node(obj)
# And now fill edges
for item in elts_lst:
if attr1 == "self":
obj1 = item
else:
obj1 = getattr(item, attr1, None)
obj2 = getattr(item, attr2, None)
if obj2 is not None:
if isinstance(obj2, list):
for sobj2 in obj2:
if isinstance(obj1, list):
for sobj1 in obj1:
parents.add_edge(sobj1, sobj2)
else:
parents.add_edge(obj1, sobj2)
else:
if isinstance(obj1, list):
for sobj1 in obj1:
parents.add_edge(sobj1, obj2)
else:
parents.add_edge(obj1, obj2)
# Now get the list of all item in a loop
items_in_loops = parents.loop_check()
# and raise errors about it
for item in items_in_loops:
logger.error("The %s object '%s' is part of a circular parent/child chain!",
item.my_type,
item.get_name())
r = False
return r
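

# A minimal, self-contained sketch (NOT the Shinken Graph class; the real
# implementation is only reached through no_loop_in_parents above) of the
# kind of cycle detection loop_check() performs: a DFS over a mapping of
# node -> successors that returns some nodes sitting on a cycle, or an
# empty set when the graph is acyclic.
def _find_cycle_nodes(edges):
    WHITE, GRAY, BLACK = 0, 1, 2
    color = {}
    in_cycle = set()

    def visit(node):
        color[node] = GRAY
        for succ in edges.get(node, ()):
            state = color.get(succ, WHITE)
            if state == GRAY:
                # Back edge: succ is still on the DFS stack, so it sits
                # on a cycle
                in_cycle.add(succ)
            elif state == WHITE:
                visit(succ)
        color[node] = BLACK

    for node in list(edges):
        if color.get(node, WHITE) == WHITE:
            visit(node)
    return in_cycle

# e.g. _find_cycle_nodes({'a': ['b'], 'b': ['c'], 'c': ['a']}) returns a
# non-empty set, while an acyclic parent/template graph returns an empty one.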

# ==== File: shinken/objects/businessimpactmodulation.py ====

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# The businessimpactmodulation class is used for in-scheduler modulation of
# the business impact of objects during a given modulation period.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken.objects.item import Item, Items
from shinken.property import StringProp, IntegerProp
class Businessimpactmodulation(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'businessimpactmodulation'
properties = Item.properties.copy()
properties.update({'business_impact_modulation_name': StringProp(),
'business_impact': IntegerProp(),
'modulation_period': StringProp(default=''),
})
# For debugging purpose only (nice name)
def get_name(self):
return self.business_impact_modulation_name
class Businessimpactmodulations(Items):
name_property = "business_impact_modulation_name"
inner_class = Businessimpactmodulation
def linkify(self, timeperiods):
self.linkify_cm_by_tp(timeperiods)
    # We just search the timeperiod object for each modulation
    # and replace the name by the object
def linkify_cm_by_tp(self, timeperiods):
for rm in self:
mtp_name = rm.modulation_period.strip()
            # Find the timeperiod object by its name
mtp = timeperiods.find_by_name(mtp_name)
if mtp_name != '' and mtp is None:
err = ("Error: the business impact modulation '%s' got an unknown "
"modulation_period '%s'" % (rm.get_name(), mtp_name))
rm.configuration_errors.append(err)
rm.modulation_period = mtp
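

# Illustrative configuration snippet (invented values) for the object
# defined above:
#
#     define businessimpactmodulation {
#         business_impact_modulation_name   lower-at-night
#         business_impact                   1
#         modulation_period                 night
#     }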

# ==== File: shinken/objects/pack.py ====

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import os
import re
try:
import json
except ImportError:
json = None
from shinken.objects.item import Item, Items
from shinken.property import StringProp
from shinken.log import logger
class Pack(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'pack'
properties = Item.properties.copy()
properties.update({'pack_name': StringProp(fill_brok=['full_status'])})
running_properties = Item.running_properties.copy()
running_properties.update({'macros': StringProp(default={})})
# For debugging purpose only (nice name)
def get_name(self):
try:
return self.pack_name
except AttributeError:
return 'UnnamedPack'
class Packs(Items):
name_property = "pack_name"
inner_class = Pack
# We will dig into the path and load all .pack files
def load_file(self, path):
# Now walk for it
for root, dirs, files in os.walk(path):
for file in files:
if re.search(r"\.pack$", file):
p = os.path.join(root, file)
try:
fd = open(p, 'r')
buf = fd.read()
fd.close()
except IOError as exp:
logger.error("Cannot open pack file '%s' for reading: %s", p, exp)
# ok, skip this one
continue
self.create_pack(buf, file[:-5])
# Create a pack from the string buf, and get a real object from it
def create_pack(self, buf, name):
if not json:
logger.warning("[Pack] cannot load the pack file '%s': missing json lib", name)
return
        # Ok, parse the JSON content
try:
d = json.loads(buf)
if 'name' not in d:
logger.error("[Pack] no name in the pack '%s'", name)
return
p = Pack({})
p.pack_name = d['name']
p.description = d.get('description', '')
p.macros = d.get('macros', {})
p.templates = d.get('templates', [p.pack_name])
p.path = d.get('path', 'various/')
p.doc_link = d.get('doc_link', '')
p.services = d.get('services', {})
p.commands = d.get('commands', [])
if not p.path.endswith('/'):
p.path += '/'
# Ok, add it
self[p.id] = p
except ValueError as exp:
logger.error("[Pack] error in loading pack file '%s': '%s'", name, exp)

# ==== File: shinken/objects/servicedependency.py ====

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.property import BoolProp, StringProp, ListProp
from shinken.log import logger
from .item import Item, Items
from .service import Service
class Servicedependency(Item):
id = 0
my_type = "servicedependency"
# F is dep of D
# host_name Host B
# service_description Service D
# dependent_host_name Host C
# dependent_service_description Service F
# execution_failure_criteria o
# notification_failure_criteria w,u
# inherits_parent 1
# dependency_period 24x7
properties = Item.properties.copy()
properties.update({
'dependent_host_name': StringProp(),
'dependent_hostgroup_name': StringProp(default=''),
'dependent_service_description': StringProp(),
'host_name': StringProp(),
'hostgroup_name': StringProp(default=''),
'service_description': StringProp(),
'inherits_parent': BoolProp(default=False),
'execution_failure_criteria': ListProp(default=['n'], split_on_coma=True),
'notification_failure_criteria': ListProp(default=['n'], split_on_coma=True),
'dependency_period': StringProp(default=''),
'explode_hostgroup': BoolProp(default=False)
})
# Give a nice name output, for debugging purpose
# (Yes, debugging CAN happen...)
def get_name(self):
return getattr(self, 'dependent_host_name', '') + '/'\
+ getattr(self, 'dependent_service_description', '') \
+ '..' + getattr(self, 'host_name', '') + '/' \
+ getattr(self, 'service_description', '')
class Servicedependencies(Items):
    inner_class = Servicedependency  # used to know what is in items
def delete_servicesdep_by_id(self, ids):
for id in ids:
del self[id]
# Add a simple service dep from another (dep -> par)
def add_service_dependency(self, dep_host_name, dep_service_description,
par_host_name, par_service_description):
# We create a "standard" service_dep
prop = {
'dependent_host_name': dep_host_name,
'dependent_service_description': dep_service_description,
'host_name': par_host_name,
'service_description': par_service_description,
'notification_failure_criteria': 'u,c,w',
'inherits_parent': '1',
}
sd = Servicedependency(prop)
self.add_item(sd)
# If we have explode_hostgroup parameter we have to create a
# service dependency for each host of the hostgroup
def explode_hostgroup(self, sd, hostgroups):
# We will create a service dependency for each host part of the host group
# First get services
snames = [d.strip() for d in sd.service_description.split(',')]
# And dep services
dep_snames = [d.strip() for d in sd.dependent_service_description.split(',')]
# Now for each host into hostgroup we will create a service dependency object
hg_names = [n.strip() for n in sd.hostgroup_name.split(',')]
for hg_name in hg_names:
hg = hostgroups.find_by_name(hg_name)
if hg is None:
err = "ERROR: the servicedependecy got an unknown hostgroup_name '%s'" % hg_name
self.configuration_errors.append(err)
continue
hnames = []
hnames.extend([m.strip() for m in hg.members])
for hname in hnames:
for dep_sname in dep_snames:
for sname in snames:
new_sd = sd.copy()
new_sd.host_name = hname
new_sd.service_description = sname
new_sd.dependent_host_name = hname
new_sd.dependent_service_description = dep_sname
self.add_item(new_sd)
# We create new servicedep if necessary (host groups and co)
def explode(self, hostgroups):
# The "old" services will be removed. All services with
# more than one host or a host group will be in it
srvdep_to_remove = []
        # Then for every host create a copy of the service dep with just the host
        # because we are adding service deps, we can't just loop over it
servicedeps = list(self.items.keys())
for id in servicedeps:
sd = self.items[id]
            # Do we have to explode the hostgroup into many services?
if bool(getattr(sd, 'explode_hostgroup', 0)) and \
hasattr(sd, 'hostgroup_name'):
self.explode_hostgroup(sd, hostgroups)
srvdep_to_remove.append(id)
continue
# Get the list of all FATHER hosts and service deps
hnames = []
if hasattr(sd, 'hostgroup_name'):
                hg_names = [n.strip() for n in sd.hostgroup_name.split(',')]
for hg_name in hg_names:
hg = hostgroups.find_by_name(hg_name)
                    if hg is None:
                        err = "ERROR: the servicedependency got an" \
                              " unknown hostgroup_name '%s'" % hg_name
                        self.configuration_errors.append(err)
continue
hnames.extend([m.strip() for m in hg.members])
if not hasattr(sd, 'host_name'):
sd.host_name = ''
if sd.host_name != '':
hnames.extend([n.strip() for n in sd.host_name.split(',')])
snames = [d.strip() for d in sd.service_description.split(',')]
couples = []
for hname in hnames:
for sname in snames:
couples.append((hname.strip(), sname.strip()))
if not hasattr(sd, 'dependent_hostgroup_name') and hasattr(sd, 'hostgroup_name'):
sd.dependent_hostgroup_name = sd.hostgroup_name
# Now the dep part (the sons)
dep_hnames = []
if hasattr(sd, 'dependent_hostgroup_name'):
                hg_names = [n.strip() for n in sd.dependent_hostgroup_name.split(',')]
for hg_name in hg_names:
hg = hostgroups.find_by_name(hg_name)
                    if hg is None:
                        err = "ERROR: the servicedependency got an " \
                              "unknown dependent_hostgroup_name '%s'" % hg_name
                        self.configuration_errors.append(err)
continue
dep_hnames.extend([m.strip() for m in hg.members])
if not hasattr(sd, 'dependent_host_name'):
sd.dependent_host_name = getattr(sd, 'host_name', '')
if sd.dependent_host_name != '':
dep_hnames.extend([n.strip() for n in sd.dependent_host_name.split(',')])
dep_snames = [d.strip() for d in sd.dependent_service_description.split(',')]
dep_couples = []
for dep_hname in dep_hnames:
for dep_sname in dep_snames:
dep_couples.append((dep_hname.strip(), dep_sname.strip()))
# Create the new service deps from all of this.
for (dep_hname, dep_sname) in dep_couples: # the sons, like HTTP
for (hname, sname) in couples: # the fathers, like MySQL
new_sd = sd.copy()
new_sd.host_name = hname
new_sd.service_description = sname
new_sd.dependent_host_name = dep_hname
new_sd.dependent_service_description = dep_sname
self.add_item(new_sd)
# Ok so we can remove the old one
srvdep_to_remove.append(id)
self.delete_servicesdep_by_id(srvdep_to_remove)
def linkify(self, hosts, services, timeperiods):
self.linkify_sd_by_s(hosts, services)
self.linkify_sd_by_tp(timeperiods)
self.linkify_s_by_sd()
    # We just search for each srvdep the service objects
    # and replace the names by the objects
def linkify_sd_by_s(self, hosts, services):
to_del = []
errors = self.configuration_errors
warns = self.configuration_warnings
for sd in self:
try:
s_name = sd.dependent_service_description
hst_name = sd.dependent_host_name
                # Find the service object from the host and service names
s = services.find_srv_by_name_and_hostname(hst_name, s_name)
if s is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(sd)
continue
sd.dependent_service_description = s
s_name = sd.service_description
hst_name = sd.host_name
                # Find the service object from the host and service names
s = services.find_srv_by_name_and_hostname(hst_name, s_name)
if s is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(sd)
continue
sd.service_description = s
except AttributeError as err:
logger.error("[servicedependency] fail to linkify by service %s: %s", sd, err)
to_del.append(sd)
for sd in to_del:
self.remove_item(sd)
    # We just search for each srvdep the timeperiod object
    # and replace the name by the object
def linkify_sd_by_tp(self, timeperiods):
for sd in self:
try:
tp_name = sd.dependency_period
tp = timeperiods.find_by_name(tp_name)
sd.dependency_period = tp
except AttributeError as exp:
logger.error("[servicedependency] fail to linkify by timeperiods: %s", exp)
    # We backport service deps to the services. So the SD objects are not needed anymore
def linkify_s_by_sd(self):
for sd in self:
dsc = sd.dependent_service_description
sdval = sd.service_description
if dsc is not None and sdval is not None:
dp = getattr(sd, 'dependency_period', None)
dsc.add_service_act_dependency(sdval, sd.notification_failure_criteria,
dp, sd.inherits_parent)
dsc.add_service_chk_dependency(sdval, sd.execution_failure_criteria,
dp, sd.inherits_parent)
def is_correct(self):
r = super(Servicedependencies, self).is_correct()
return r and self.no_loop_in_parents("service_description", "dependent_service_description")
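

# Illustrative configuration (invented names) of what explode() expands:
#
#     define servicedependency {
#         hostgroup_name                 db-servers
#         service_description            MySQL
#         dependent_service_description  WebApp
#         explode_hostgroup              1
#     }
#
# creates one WebApp-depends-on-MySQL dependency per host of the db-servers
# hostgroup.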

# ==== File: shinken/objects/hostextinfo.py ====

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
""" This is the main class for the Host ext info. In fact it's mainly
about the configuration part. Parameters are merged into Hosts, so it's
of no use in the running part.
"""
import six
from shinken.objects.item import Item, Items
from shinken.autoslots import AutoSlots
from shinken.util import to_hostnames_list
from shinken.property import StringProp, ListProp
class HostExtInfo(six.with_metaclass(AutoSlots, Item)):
id = 1 # zero is reserved for host (primary node for parents)
my_type = 'hostextinfo'
# properties defined by configuration
# *required: is required in conf
# *default: default value if no set in conf
# *pythonize: function to call when transforming string to python object
# *fill_brok: if set, send to broker.
# there are two categories:
# full_status for initial and update status, check_result for check results
# *no_slots: do not take this property for __slots__
# Only for the initial call
# conf_send_preparation: if set, will pass the property to this function. It's used to "flatten"
    # some dangerous properties like realms that are too 'linked' to be sent as is.
# brok_transformation: if set, will call the function with the value of the property
    # most of the time it will be used to flatten the data (like realm_name instead of the realm object).
properties = Item.properties.copy()
properties.update({
'host_name': StringProp(),
'notes': StringProp(default=''),
'notes_url': StringProp(default=''),
'icon_image': StringProp(default=''),
'icon_image_alt': StringProp(default=''),
'vrml_image': StringProp(default=''),
'statusmap_image': StringProp(default=''),
        # No slots for these 2 because a property name beginning with a number
        # is not a valid Python identifier, which __slots__ requires
'2d_coords': StringProp(default='', no_slots=True),
'3d_coords': StringProp(default='', no_slots=True),
})
# Hosts macros and prop that give the information
# the prop can be callable or not
macros = {
'HOSTNAME': 'host_name',
'HOSTNOTESURL': 'notes_url',
'HOSTNOTES': 'notes',
}
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
    # Check if required props are set:
    # host_name is needed
def is_correct(self):
state = True
cls = self.__class__
return state
    # To get a nice name
def get_name(self):
if not self.is_tpl():
try:
return self.host_name
            except AttributeError:  # ouch, no host_name
return 'UNNAMEDHOST'
else:
try:
return self.name
            except AttributeError:  # ouch, no name for this template
return 'UNNAMEDHOSTTEMPLATE'
# For debugging purpose only
def get_dbg_name(self):
return self.host_name
# Same but for clean call, no debug
def get_full_name(self):
return self.host_name
# Class for the host ext info lists. It's mainly for the configuration
# part
class HostsExtInfo(Items):
name_property = "host_name" # use for the search by name
inner_class = HostExtInfo # use for know what is in items
# Merge extended host information into host
def merge(self, hosts):
for ei in self:
host_name = ei.get_name()
h = hosts.find_by_name(host_name)
if h is not None:
# FUUUUUUUUUUsion
self.merge_extinfo(h, ei)
def merge_extinfo(self, host, extinfo):
properties = ['notes',
'notes_url',
'icon_image',
'icon_image_alt',
'vrml_image',
'statusmap_image']
# host properties have precedence over hostextinfo properties
for p in properties:
if getattr(host, p) == '' and getattr(extinfo, p) != '':
setattr(host, p, getattr(extinfo, p))
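

# Worked example (invented values): if a host defines notes_url and its
# hostextinfo block defines both notes_url and icon_image, the merge above
# keeps the host's own notes_url (host wins) and only copies icon_image,
# because that host property is still empty.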

# ==== File: shinken/objects/config.py ====

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" Config is the class to read, load and manipulate the user
configuration. It read a main cfg (nagios.cfg) and get all informations
from it. It create objects, make link between them, clean them, and cut
them into independent parts. The main user of this is Arbiter, but schedulers
use it too (but far less)"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import re
import sys
import string
import os
import io
import socket
import itertools
import time
import random
import tempfile
import json
from multiprocessing import Process, Manager
from shinken.objects.item import Item
from shinken.objects.timeperiod import Timeperiod, Timeperiods
from shinken.objects.service import Service, Services
from shinken.objects.command import Command, Commands
from shinken.objects.resultmodulation import Resultmodulation, Resultmodulations
from shinken.objects.businessimpactmodulation import Businessimpactmodulation, Businessimpactmodulations
from shinken.objects.escalation import Escalation, Escalations
from shinken.objects.serviceescalation import Serviceescalation, Serviceescalations
from shinken.objects.hostescalation import Hostescalation, Hostescalations
from shinken.objects.host import Host, Hosts
from shinken.objects.hostgroup import Hostgroup, Hostgroups
from shinken.objects.realm import Realm, Realms
from shinken.objects.contact import Contact, Contacts
from shinken.objects.contactgroup import Contactgroup, Contactgroups
from shinken.objects.notificationway import NotificationWay, NotificationWays
from shinken.objects.checkmodulation import CheckModulation, CheckModulations
from shinken.objects.macromodulation import MacroModulation, MacroModulations
from shinken.objects.servicegroup import Servicegroup, Servicegroups
from shinken.objects.servicedependency import Servicedependency, Servicedependencies
from shinken.objects.hostdependency import Hostdependency, Hostdependencies
from shinken.objects.module import Module, Modules
from shinken.objects.discoveryrule import Discoveryrule, Discoveryrules
from shinken.objects.discoveryrun import Discoveryrun, Discoveryruns
from shinken.objects.hostextinfo import HostExtInfo, HostsExtInfo
from shinken.objects.serviceextinfo import ServiceExtInfo, ServicesExtInfo
from shinken.objects.trigger import Triggers
from shinken.objects.pack import Packs
from shinken.objects.arbiterlink import ArbiterLink, ArbiterLinks
from shinken.objects.schedulerlink import SchedulerLink, SchedulerLinks
from shinken.objects.reactionnerlink import ReactionnerLink, ReactionnerLinks
from shinken.objects.brokerlink import BrokerLink, BrokerLinks
from shinken.objects.receiverlink import ReceiverLink, ReceiverLinks
from shinken.objects.pollerlink import PollerLink, PollerLinks
from shinken.graph import Graph
from shinken.log import logger
from shinken.property import (UnusedProp, BoolProp, IntegerProp, CharProp,
StringProp, LogLevelProp, ListProp, ToGuessProp)
from shinken.daemon import get_cur_user, get_cur_group
from shinken.util import split_semicolon, jsonify_r
from shinken.serializer import serialize
no_longer_used_txt = ('This parameter is no longer taken from the main file, but must be defined '
                      'in the status_dat broker module instead. But Shinken will create one for '
                      'you if none is present and use this parameter in it, so no worries.')
not_interresting_txt = 'We do not think such an option is interesting to manage.'
class Config(Item):
cache_path = "objects.cache"
my_type = "config"
# Properties:
    # *required: if True, there is no default, and the config must set them
    # *default: if not set, take this value
    # *pythonize: function to call to transform the string into a python value
    # *class_inherit: (Service, 'blabla'): must set this property to the
    # Service class with name blabla
    # if (Service, None): must set this property to the Service class with
    # same name
    # *unused: just to warn the user that the option they use is no longer used
    # in Shinken
    # *usage_text: if present, will print it to explain why it's no longer useful
properties = {
'prefix':
StringProp(default='/usr/local/shinken/'),
'workdir':
StringProp(default='/var/run/shinken/'),
'config_base_dir':
StringProp(default=''), # will be set when we will load a file
'modules_dir':
StringProp(default='/var/lib/shinken/modules'),
'conflict_policy':
StringProp(default='loose'),
'use_local_log':
BoolProp(default=True),
'log_level':
LogLevelProp(default='WARNING'),
'local_log':
StringProp(default='/var/log/shinken/arbiterd.log'),
'log_file':
UnusedProp(text=no_longer_used_txt),
'object_cache_file':
UnusedProp(text=no_longer_used_txt),
'precached_object_file':
UnusedProp(text='Shinken does not use precached_object_files. Skipping.'),
'resource_file':
StringProp(default='/tmp/resources.txt'),
'temp_file':
UnusedProp(text='Temporary files are not used in the shinken architecture. Skipping'),
'status_file':
UnusedProp(text=no_longer_used_txt),
'status_update_interval':
UnusedProp(text=no_longer_used_txt),
'shinken_user':
StringProp(default=get_cur_user()),
'shinken_group':
StringProp(default=get_cur_group()),
'enable_notifications':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None), (Contact, None)]),
'execute_service_checks':
BoolProp(default=True, class_inherit=[(Service, 'execute_checks')]),
'accept_passive_service_checks':
BoolProp(default=True, class_inherit=[(Service, 'accept_passive_checks')]),
'execute_host_checks':
BoolProp(default=True, class_inherit=[(Host, 'execute_checks')]),
'accept_passive_host_checks':
BoolProp(default=True, class_inherit=[(Host, 'accept_passive_checks')]),
'enable_event_handlers':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None)]),
'log_rotation_method':
CharProp(default='d'),
'log_archive_path':
StringProp(default='/usr/local/shinken/var/archives'),
'check_external_commands':
BoolProp(default=True),
'command_check_interval':
UnusedProp(text='any value other than always watching the file is useless, so we fix it.'),
'command_file':
StringProp(default=''),
'external_command_buffer_slots':
UnusedProp(text='We do not limit the external command slot.'),
'check_for_updates':
UnusedProp(text='network administrators will never allow such communication between '
'a server and the external world. Use your distribution package manager '
'to know if updates are available, or go to the '
'http://www.shinken-monitoring.org website instead.'),
'bare_update_checks':
UnusedProp(text=None),
'lock_file':
StringProp(default='/var/run/shinken/arbiterd.pid'),
'retain_state_information':
UnusedProp(text='sorry, retaining state information will not be implemented '
'because it is useless.'),
'state_retention_file':
StringProp(default=''),
'retention_update_interval':
IntegerProp(default=60),
'use_retained_program_state':
UnusedProp(text=not_interresting_txt),
'use_retained_scheduling_info':
UnusedProp(text=not_interresting_txt),
'retained_host_attribute_mask':
UnusedProp(text=not_interresting_txt),
'retained_service_attribute_mask':
UnusedProp(text=not_interresting_txt),
'retained_process_host_attribute_mask':
UnusedProp(text=not_interresting_txt),
'retained_process_service_attribute_mask':
UnusedProp(text=not_interresting_txt),
'retained_contact_host_attribute_mask':
UnusedProp(text=not_interresting_txt),
'retained_contact_service_attribute_mask':
UnusedProp(text=not_interresting_txt),
'use_syslog':
BoolProp(default=False),
'log_notifications':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None)]),
'log_service_retries':
BoolProp(default=True, class_inherit=[(Service, 'log_retries')]),
'log_host_retries':
BoolProp(default=True, class_inherit=[(Host, 'log_retries')]),
'log_event_handlers':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None)]),
'log_initial_states':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None)]),
'log_external_commands':
BoolProp(default=True),
'log_passive_checks':
BoolProp(default=True),
'global_host_event_handler':
StringProp(default='', class_inherit=[(Host, 'global_event_handler')]),
'global_service_event_handler':
StringProp(default='', class_inherit=[(Service, 'global_event_handler')]),
'sleep_time':
UnusedProp(text='this deprecated option is useless in the Shinken way of doing things.'),
'service_inter_check_delay_method':
UnusedProp(text='This option is useless in the Shinken scheduling. '
'The only way is the smart way.'),
'max_service_check_spread':
IntegerProp(default=30, class_inherit=[(Service, 'max_check_spread')]),
'service_interleave_factor':
UnusedProp(text='This option is useless in the Shinken scheduling '
'because it uses a random distribution for initial checks.'),
'max_concurrent_checks':
UnusedProp(text='Limiting the max concurrent checks is not helpful '
'for a smoothly running monitoring server.'),
'check_result_reaper_frequency':
UnusedProp(text='Shinken does not use a reaper process.'),
'max_check_result_reaper_time':
UnusedProp(text='Shinken does not use a reaper process.'),
'check_result_path':
UnusedProp(text='Shinken uses in-memory returns, not check results in flat files.'),
'max_check_result_file_age':
UnusedProp(text='Shinken does not use flat-file check result files.'),
'host_inter_check_delay_method':
UnusedProp(text='This option is unused in the Shinken scheduling because distribution '
'of the initial check is a random one.'),
'max_host_check_spread':
IntegerProp(default=30, class_inherit=[(Host, 'max_check_spread')]),
'interval_length':
IntegerProp(default=60, class_inherit=[(Host, None), (Service, None)]),
'auto_reschedule_checks':
BoolProp(managed=False, default=True),
'auto_rescheduling_interval':
IntegerProp(managed=False, default=1),
'auto_rescheduling_window':
IntegerProp(managed=False, default=180),
'use_aggressive_host_checking':
BoolProp(default=False, class_inherit=[(Host, None)]),
'translate_passive_host_checks':
BoolProp(managed=False, default=True),
'passive_host_checks_are_soft':
BoolProp(managed=False, default=True),
'enable_predictive_host_dependency_checks':
BoolProp(managed=False,
default=True,
class_inherit=[(Host, 'enable_predictive_dependency_checks')]),
'enable_predictive_service_dependency_checks':
BoolProp(managed=False, default=True),
'cached_host_check_horizon':
IntegerProp(default=0, class_inherit=[(Host, 'cached_check_horizon')]),
'cached_service_check_horizon':
IntegerProp(default=0, class_inherit=[(Service, 'cached_check_horizon')]),
'use_large_installation_tweaks':
UnusedProp(text='this option is deprecated because in shinken it is just an alias '
'for enable_environment_macros=0'),
'free_child_process_memory':
UnusedProp(text='this option is automatic in Python processes'),
'child_processes_fork_twice':
UnusedProp(text='fork twice is not used.'),
'enable_environment_macros':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None)]),
'enable_flap_detection':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None)]),
'low_service_flap_threshold':
IntegerProp(default=20, class_inherit=[(Service, 'global_low_flap_threshold')]),
'high_service_flap_threshold':
IntegerProp(default=30, class_inherit=[(Service, 'global_high_flap_threshold')]),
'low_host_flap_threshold':
IntegerProp(default=20, class_inherit=[(Host, 'global_low_flap_threshold')]),
'high_host_flap_threshold':
IntegerProp(default=30, class_inherit=[(Host, 'global_high_flap_threshold')]),
'soft_state_dependencies':
BoolProp(managed=False, default=False),
'service_check_timeout':
IntegerProp(default=60, class_inherit=[(Service, 'check_timeout')]),
'host_check_timeout':
IntegerProp(default=30, class_inherit=[(Host, 'check_timeout')]),
'timeout_exit_status':
IntegerProp(default=2),
'event_handler_timeout':
IntegerProp(default=30, class_inherit=[(Host, None), (Service, None)]),
'notification_timeout':
IntegerProp(default=30, class_inherit=[(Host, None), (Service, None)]),
'ocsp_timeout':
IntegerProp(default=15, class_inherit=[(Service, None)]),
'ochp_timeout':
IntegerProp(default=15, class_inherit=[(Host, None)]),
'perfdata_timeout':
IntegerProp(default=5, class_inherit=[(Host, None), (Service, None)]),
'obsess_over_services':
BoolProp(default=False, class_inherit=[(Service, 'obsess_over')]),
'ocsp_command':
StringProp(default='', class_inherit=[(Service, None)]),
'obsess_over_hosts':
BoolProp(default=False, class_inherit=[(Host, 'obsess_over')]),
'ochp_command':
StringProp(default='', class_inherit=[(Host, None)]),
'process_performance_data':
BoolProp(default=True, class_inherit=[(Host, None), (Service, None)]),
'host_perfdata_command':
StringProp(default='', class_inherit=[(Host, 'perfdata_command')]),
'service_perfdata_command':
StringProp(default='', class_inherit=[(Service, 'perfdata_command')]),
'host_perfdata_file':
StringProp(default='', class_inherit=[(Host, 'perfdata_file')]),
'service_perfdata_file':
StringProp(default='', class_inherit=[(Service, 'perfdata_file')]),
'host_perfdata_file_template':
StringProp(default='/tmp/host.perf', class_inherit=[(Host, 'perfdata_file_template')]),
'service_perfdata_file_template':
StringProp(default='/tmp/host.perf',
class_inherit=[(Service, 'perfdata_file_template')]),
'host_perfdata_file_mode':
CharProp(default='a', class_inherit=[(Host, 'perfdata_file_mode')]),
'service_perfdata_file_mode':
CharProp(default='a', class_inherit=[(Service, 'perfdata_file_mode')]),
'host_perfdata_file_processing_interval':
IntegerProp(managed=False, default=15),
'service_perfdata_file_processing_interval':
IntegerProp(managed=False, default=15),
'host_perfdata_file_processing_command':
StringProp(managed=False,
default='',
class_inherit=[(Host, 'perfdata_file_processing_command')]),
'service_perfdata_file_processing_command':
StringProp(managed=False, default=None),
'check_for_orphaned_services':
BoolProp(default=True, class_inherit=[(Service, 'check_for_orphaned')]),
'check_for_orphaned_hosts':
BoolProp(default=True, class_inherit=[(Host, 'check_for_orphaned')]),
'check_service_freshness':
BoolProp(default=True, class_inherit=[(Service, 'global_check_freshness')]),
'service_freshness_check_interval':
IntegerProp(default=60),
'check_host_freshness':
BoolProp(default=True, class_inherit=[(Host, 'global_check_freshness')]),
'host_freshness_check_interval':
IntegerProp(default=60),
'additional_freshness_latency':
IntegerProp(default=15, class_inherit=[(Host, None), (Service, None)]),
'enable_embedded_perl':
BoolProp(managed=False,
default=True,
help='It will surely never be managed; '
'it should not be needed given poller performance.'),
'use_embedded_perl_implicitly':
BoolProp(managed=False, default=False),
'date_format':
StringProp(managed=False, default=None),
'use_timezone':
StringProp(default='', class_inherit=[(Host, None), (Service, None), (Contact, None)]),
'illegal_object_name_chars':
StringProp(default="""`~!$%^&*"|'<>?,()=""",
class_inherit=[(Host, None), (Service, None),
(Contact, None), (HostExtInfo, None)]),
'illegal_macro_output_chars':
StringProp(default='',
class_inherit=[(Host, None), (Service, None), (Contact, None)]),
'use_regexp_matching':
BoolProp(managed=False,
default=False,
help='If you have host or service definitions like prod*, '
'they will surely fail from now on, sorry.'),
'use_true_regexp_matching':
BoolProp(managed=False, default=None),
'admin_email':
UnusedProp(text='sorry, not yet implemented.'),
'admin_pager':
UnusedProp(text='sorry, not yet implemented.'),
'event_broker_options':
UnusedProp(text='event brokers are replaced by modules '
'with a real configuration template.'),
'broker_module':
StringProp(default=''),
'debug_file':
UnusedProp(text=None),
'debug_level':
UnusedProp(text=None),
'debug_verbosity':
UnusedProp(text=None),
'max_debug_file_size':
UnusedProp(text=None),
'modified_attributes':
IntegerProp(default=0),
# '$USERn$': {'required': False, 'default': ''}  # Added at runtime in __init__
# SHINKEN SPECIFIC
'idontcareaboutsecurity':
BoolProp(default=False),
'daemon_enabled':
BoolProp(default=True), # Put to 0 to disable the arbiter to run
'graceful_enabled':
BoolProp(default=False),
'aggressive_memory_management':
BoolProp(default=False),
'daemon_thread_pool_size':
IntegerProp(default=16),
'flap_history':
IntegerProp(default=20, class_inherit=[(Host, None), (Service, None)]),
'max_plugins_output_length':
IntegerProp(default=8192, class_inherit=[(Host, None), (Service, None)]),
'no_event_handlers_during_downtimes':
BoolProp(default=False, class_inherit=[(Host, None), (Service, None)]),
# Interval between cleaning-queue passes
'cleaning_queues_interval':
IntegerProp(default=900),
# Enable or not the notice about old Nagios parameters
'disable_old_nagios_parameters_whining':
BoolProp(default=False),
# Now for problem/impact states changes
'enable_problem_impacts_states_change':
BoolProp(default=False, class_inherit=[(Host, None), (Service, None)]),
'enable_problem_impacts_states_reprocessing':
BoolProp(default=False, class_inherit=[(Host, None), (Service, None)]),
# More a running value in fact
'resource_macros_names':
ListProp(default=[]),
'http_backend':
StringProp(default='auto'),
# SSL PART
# global boolean to know if we use SSL or not
'use_ssl':
BoolProp(default=False,
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'ca_cert':
StringProp(default='etc/certs/ca.pem'),
'server_cert':
StringProp(default='etc/certs/server.cert'),
'server_key':
StringProp(default='etc/certs/server.key'),
'hard_ssl_name_check':
BoolProp(default=False),
# Log format
'human_timestamp_log':
BoolProp(default=False),
# Discovery part
'strip_idname_fqdn':
BoolProp(default=True),
'runners_timeout':
IntegerProp(default=3600),
# pack_distribution_file keeps a history of the host distribution
# across the several "packs", so the same scheduler has a better
# chance of getting the same hosts back
'pack_distribution_file':
StringProp(default='pack_distribution.dat'),
# WEBUI part
'webui_lock_file':
StringProp(default='webui.pid'),
'webui_port':
IntegerProp(default=8080),
'webui_host':
StringProp(default='0.0.0.0'),
# Large environment tweaks
'use_multiprocesses_serializer':
BoolProp(default=False),
# About shinken.io part
'api_key':
StringProp(default='',
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'secret':
StringProp(default='',
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'http_proxy':
StringProp(default='',
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
# and local statsd one
'statsd_host':
StringProp(default='localhost',
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'statsd_port':
IntegerProp(default=8125,
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'statsd_prefix': StringProp(default='shinken',
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'statsd_enabled': BoolProp(default=False,
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'statsd_interval':
IntegerProp(default=5,
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'statsd_types':
StringProp(default=None,
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
'statsd_pattern':
StringProp(default=None,
class_inherit=[(SchedulerLink, None), (ReactionnerLink, None),
(BrokerLink, None), (PollerLink, None),
(ReceiverLink, None), (ArbiterLink, None)]),
}
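# Illustrative sketch (comment only, assuming the usual Nagios-style
# boolean strings for BoolProp.pythonize): a main-file line like
# "enable_notifications=0" is applied roughly as
#   entry = Config.properties['enable_notifications']
#   val = entry.pythonize('0')   # -> False
# and the class_inherit list then pushes the value onto the listed
# classes (see explode_global_conf() below).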
macros = {
'PREFIX': 'prefix',
'MAINCONFIGFILE': '',
'STATUSDATAFILE': '',
'COMMENTDATAFILE': '',
'DOWNTIMEDATAFILE': '',
'RETENTIONDATAFILE': '',
'OBJECTCACHEFILE': '',
'TEMPFILE': '',
'TEMPPATH': '',
'LOGFILE': '',
'RESOURCEFILE': '',
'COMMANDFILE': 'command_file',
'HOSTPERFDATAFILE': '',
'SERVICEPERFDATAFILE': '',
'ADMINEMAIL': '',
'ADMINPAGER': ''
# 'USERn': '$USERn$' # Add at run time
}
# We create a dict of objects
# Type: 'name in objects': (Class of object, Class of objects,
# 'property in self for the objects', initial_index flag)
types_creations = {
'timeperiod':
(Timeperiod, Timeperiods, 'timeperiods', True),
'service':
(Service, Services, 'services', False),
'servicegroup':
(Servicegroup, Servicegroups, 'servicegroups', True),
'command':
(Command, Commands, 'commands', True),
'host':
(Host, Hosts, 'hosts', True),
'hostgroup':
(Hostgroup, Hostgroups, 'hostgroups', True),
'contact':
(Contact, Contacts, 'contacts', True),
'contactgroup':
(Contactgroup, Contactgroups, 'contactgroups', True),
'notificationway':
(NotificationWay, NotificationWays, 'notificationways', True),
'checkmodulation':
(CheckModulation, CheckModulations, 'checkmodulations', True),
'macromodulation':
(MacroModulation, MacroModulations, 'macromodulations', True),
'servicedependency':
(Servicedependency, Servicedependencies, 'servicedependencies', True),
'hostdependency':
(Hostdependency, Hostdependencies, 'hostdependencies', True),
'arbiter':
(ArbiterLink, ArbiterLinks, 'arbiters', True),
'scheduler':
(SchedulerLink, SchedulerLinks, 'schedulers', True),
'reactionner':
(ReactionnerLink, ReactionnerLinks, 'reactionners', True),
'broker':
(BrokerLink, BrokerLinks, 'brokers', True),
'receiver':
(ReceiverLink, ReceiverLinks, 'receivers', True),
'poller':
(PollerLink, PollerLinks, 'pollers', True),
'realm':
(Realm, Realms, 'realms', True),
'module':
(Module, Modules, 'modules', True),
'resultmodulation':
(Resultmodulation, Resultmodulations, 'resultmodulations', True),
'businessimpactmodulation':
(Businessimpactmodulation, Businessimpactmodulations,
'businessimpactmodulations', True),
'escalation':
(Escalation, Escalations, 'escalations', True),
'serviceescalation':
(Serviceescalation, Serviceescalations, 'serviceescalations', False),
'hostescalation':
(Hostescalation, Hostescalations, 'hostescalations', False),
'discoveryrule':
(Discoveryrule, Discoveryrules, 'discoveryrules', True),
'discoveryrun':
(Discoveryrun, Discoveryruns, 'discoveryruns', True),
'hostextinfo':
(HostExtInfo, HostsExtInfo, 'hostsextinfo', True),
'serviceextinfo':
(ServiceExtInfo, ServicesExtInfo, 'servicesextinfo', True),
}
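# Illustrative sketch: each types_creations entry is unpacked as
# (cls, clss, prop, initial_index) by create_objects_for_type(), e.g.
#   cls, clss, prop, initial_index = Config.types_creations['command']
#   # -> (Command, Commands, 'commands', True)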
# This table is used to transform old parameter names into new ones,
# i.e. from the Nagios2 format to the Nagios3 one
old_properties = {
'nagios_user': 'shinken_user',
'nagios_group': 'shinken_group',
'modulesdir': 'modules_dir',
}
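# Example sketch: a legacy main-file line such as "nagios_user=shinken"
# is renamed to shinken_user by old_properties_names_to_new() (called at
# the end of load_params) before the value is applied.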
read_config_silent = 0
early_created_types = ['arbiter', 'module']
configuration_types = ['void', 'timeperiod', 'command', 'contactgroup', 'hostgroup',
'contact', 'notificationway', 'checkmodulation',
'macromodulation', 'host', 'service', 'servicegroup',
'servicedependency', 'hostdependency', 'arbiter', 'scheduler',
'reactionner', 'broker', 'receiver', 'poller', 'realm', 'module',
'resultmodulation', 'escalation', 'serviceescalation', 'hostescalation',
'discoveryrun', 'discoveryrule', 'businessimpactmodulation',
'hostextinfo', 'serviceextinfo']
def __init__(self):
self.params = {}
self.resource_macros_names = []
# By default the conf is correct
self.conf_is_correct = True
# We tag the conf with a magic_hash, a random value to
# identify this conf
random.seed(time.time())
self.magic_hash = random.randint(1, 100000)
self.configuration_errors = []
self.triggers_dirs = []
self.triggers = Triggers({})
self.packs_dirs = []
self.packs = Packs({})
# Initialize conflict_policy early, as it's required during object list
# creation.
self.conflict_policy = self.properties["conflict_policy"].default
def get_name(self):
return 'global configuration file'
# We've got macros in the resource file and we want
# to update our MACRO dict with them
def fill_resource_macros_names_macros(self):
""" fill the macro dict will all value
from self.resource_macros_names"""
properties = self.__class__.properties
macros = self.__class__.macros
for macro_name in self.resource_macros_names:
properties['$' + macro_name + '$'] = StringProp(default='')
macros[macro_name] = '$' + macro_name + '$'
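# Illustrative sketch, assuming a resource file declared $PLUGINSDIR$:
#   cfg.resource_macros_names = ['PLUGINSDIR']
#   cfg.fill_resource_macros_names_macros()
#   # Config.macros now maps 'PLUGINSDIR' -> '$PLUGINSDIR$' and the
#   # properties dict gains a '$PLUGINSDIR$' StringProp entry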
def clean_params(self, params):
clean_p = {}
for elt in params:
elts = elt.split('=', 1)
if len(elts) == 1: # error, there is no = !
self.conf_is_correct = False
logger.error("[config] the parameter %s is malformed! (no = sign)", elts[0])
elif elts[1] == '':
self.conf_is_correct = False
logger.error("[config] the parameter %s is malformed! (no value after =)", elts[0])
else:
clean_p[elts[0]] = elts[1]
return clean_p
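# Usage sketch (hypothetical values):
#   cfg.clean_params(['log_level=INFO', 'broken_line', 'empty='])
#   # -> {'log_level': 'INFO'}; the two malformed entries are logged
#   # as errors and flip conf_is_correct to False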
def load_params(self, params):
clean_params = self.clean_params(params)
for key, value in clean_params.items():
if key in self.properties:
val = self.properties[key].pythonize(clean_params[key])
elif key in self.running_properties:
logger.warning("using a the running property %s in a config file", key)
val = self.running_properties[key].pythonize(clean_params[key])
elif key.startswith('$') or key in ['cfg_file', 'cfg_dir']:
# it's a macro or a useless now param, we don't touch this
val = value
else:
logger.warning("Guessing the property %s type because it is not in "
"%s object properties", key, self.__class__.__name__)
val = ToGuessProp.pythonize(clean_params[key])
setattr(self, key, val)
# Maybe it's a variable like $USER$ or $ANOTHERVARIABLE$,
# so look at the first and last characters:
# if both are $, it's a variable
if key[0] == '$' and key[-1] == '$':
macro_name = key[1:-1]
self.resource_macros_names.append(macro_name)
# Change Nagios2 names to Nagios3 ones (before using them)
self.old_properties_names_to_new()
def _cut_line(self, line):
# punct = '"#$%&\'()*+/<=>?@[\\]^`{|}~'
tmp = re.split("[" + string.whitespace + "]+", line, 1)
r = [elt for elt in tmp if elt != '']
return r
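# Usage sketch: the first whitespace run separates the property name
# from its (possibly multi-word) value:
#   cfg._cut_line('host_name srv1')         # -> ['host_name', 'srv1']
#   cfg._cut_line('alias My pretty host')   # -> ['alias', 'My pretty host']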
def read_config(self, files):
# just a first pass to get the cfg_file and all files in a buf
res = io.StringIO()
for file in files:
# We add a \n (or \r\n) to be sure config files are separated
# if the previous one does not finish with a line return
linesep = six.u(os.linesep)
res.write(linesep)
res.write('# IMPORTEDFROM=%s' % (file) + linesep)
if self.read_config_silent == 0:
logger.info("[config] opening '%s' configuration file", file)
try:
# Open in Universal way for Windows, Mac, Linux
with open(file, 'r') as f:
buf = map(six.u, f.readlines())
self.config_base_dir = os.path.dirname(file)
except IOError as exp:
logger.error("[config] cannot open config file '%s' for reading: %s", file, exp)
# The configuration is invalid because we have a bad file!
self.conf_is_correct = False
continue
for line in buf:
res.write(line)
if line.endswith('\n'):
line = line[:-1]
line = line.strip()
if re.search("^cfg_file", line) or re.search("^resource_file", line):
elts = line.split('=', 1)
if os.path.isabs(elts[1]):
cfg_file_name = elts[1]
else:
cfg_file_name = os.path.join(self.config_base_dir, elts[1])
cfg_file_name = cfg_file_name.strip()
try:
fd = open(cfg_file_name, 'r')
if self.read_config_silent == 0:
logger.info("Processing object config file '%s'", cfg_file_name)
res.write(linesep + '# IMPORTEDFROM=%s' % (cfg_file_name) + linesep)
if six.PY2:
res.write(fd.read().decode("utf-8"))
else:
res.write(fd.read())
# Be sure to add a line return so we won't mix files
res.write(linesep)
fd.close()
except IOError as exp:
logger.error("Cannot open config file '%s' for reading: %s",
cfg_file_name, exp)
# The configuration is invalid because we have a bad file!
self.conf_is_correct = False
elif re.search("^cfg_dir", line):
elts = line.split('=', 1)
if os.path.isabs(elts[1]):
cfg_dir_name = elts[1]
else:
cfg_dir_name = os.path.join(self.config_base_dir, elts[1])
# Ok, look if it's really a directory
if not os.path.isdir(cfg_dir_name):
logger.error("Cannot open config dir '%s' for reading", cfg_dir_name)
self.conf_is_correct = False
# Look for .pack file into it :)
self.packs_dirs.append(cfg_dir_name)
# Now walk for it.
for root, dirs, files in os.walk(cfg_dir_name, followlinks=True):
for file in files:
if re.search("\.cfg$", file):
if self.read_config_silent == 0:
logger.info("Processing object config file '%s'",
os.path.join(root, file))
try:
res.write(linesep + '# IMPORTEDFROM=%s' %
(os.path.join(root, file)) + linesep)
fd = open(os.path.join(root, file), 'r')
res.write(six.u(fd.read()))
# Be sure to separate files data
res.write(linesep)
fd.close()
except IOError as exp:
logger.error("Cannot open config file '%s' for reading: %s",
os.path.join(root, file), exp)
# The configuration is invalid
# because we have a bad file!
self.conf_is_correct = False
elif re.search("^triggers_dir", line):
elts = line.split('=', 1)
if os.path.isabs(elts[1]):
trig_dir_name = elts[1]
else:
trig_dir_name = os.path.join(self.config_base_dir, elts[1])
# Ok, look if it's really a directory
if not os.path.isdir(trig_dir_name):
logger.error("Cannot open triggers dir '%s' for reading", trig_dir_name)
self.conf_is_correct = False
continue
# Ok it's a valid one, I keep it
self.triggers_dirs.append(trig_dir_name)
# Early read conflict_policy because it's necessary when
# parsing configuration files
elif re.search("^conflict_policy", line):
elts = line.split('=', 1)
self.conflict_policy = elts[1]
config = res.getvalue()
res.close()
return config
# self.read_config_buf(res)
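# Illustrative input (hypothetical /etc/shinken/nagios.cfg):
#   cfg_file=commands.cfg
#   cfg_dir=hosts
#   resource_file=resource.cfg
# read_config() inlines every referenced file into one buffer, each
# chunk preceded by a '# IMPORTEDFROM=<path>' marker so that
# read_config_buf() can track file names and line numbers.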
def read_config_buf(self, buf):
params = []
objectscfg = {}
types = self.__class__.configuration_types
for t in types:
objectscfg[t] = []
tmp = []
tmp_type = 'void'
in_define = False
almost_in_define = False
continuation_line = False
tmp_line = ''
lines = buf.split('\n')
line_nb = 0 # Keep the line number for the file path
for line in lines:
if line.startswith("# IMPORTEDFROM="):
filefrom = line.split('=')[1]
line_nb = 0 # reset the line number too
continue
line_nb += 1
# Remove comments
line = split_semicolon(line)[0].strip()
# A backslash means, there is more to come
if re.search(r"\\\s*$", line) is not None:
continuation_line = True
line = re.sub(r"\\\s*$", "", line)
line = re.sub(r"^\s+", " ", line)
tmp_line += line
continue
elif continuation_line:
# Now the continuation line is complete
line = re.sub(r"^\s+", "", line)
line = tmp_line + line
tmp_line = ''
continuation_line = False
# } alone in a line means stop the object reading
if re.search(r"^\s*}\s*$", line) is not None:
in_define = False
# { alone in a line can mean start object reading
if re.search(r"^\s*\{\s*$", line) is not None and almost_in_define:
almost_in_define = False
in_define = True
continue
if re.search(r"^\s*#|^\s*$|^\s*}", line) is not None:
pass
# A define must be caught and its type saved.
# The old entry must be saved first
elif re.search("^define", line) is not None:
if re.search(r".*\{.*$", line) is not None:
in_define = True
else:
almost_in_define = True
if tmp_type not in objectscfg:
objectscfg[tmp_type] = []
objectscfg[tmp_type].append(tmp)
tmp = []
tmp.append("imported_from " + filefrom + ':%d' % line_nb)
# Get new type
elts = re.split(r'\s', line)
# Maybe there was space before and after the type
# so we must get all and strip it
tmp_type = ' '.join(elts[1:]).strip()
tmp_type = tmp_type.split('{')[0].strip()
else:
if in_define:
tmp.append(line)
else:
params.append(line)
# Maybe the type of the last element is unknown, declare it
if tmp_type not in objectscfg:
objectscfg[tmp_type] = []
objectscfg[tmp_type].append(tmp)
objects = {}
# print("Params", params)
self.load_params(params)
# And then update our MACRO dict
self.fill_resource_macros_names_macros()
for type in objectscfg:
objects[type] = []
for items in objectscfg[type]:
tmp = {}
for line in items:
elts = self._cut_line(line)
if elts == []:
continue
prop = elts[0]
if prop not in tmp:
tmp[prop] = []
value = ' '.join(elts[1:])
tmp[prop].append(value)
if tmp != {}:
objects[type].append(tmp)
return objects
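# Shape sketch of the returned dict: every property is kept as a list
# of raw string values, e.g. for a one-host config:
#   {'host': [{'imported_from': ['hosts.cfg:3'],
#              'host_name': ['srv1'],
#              'use': ['generic-host']}],
#    'service': [], ...}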
# We need to have some ghost objects like
# the check_command bp_rule for business
# correlator rules
def add_ghost_objects(self, raw_objects):
bp_rule = {'command_name': 'bp_rule', 'command_line': 'bp_rule'}
raw_objects['command'].append(bp_rule)
host_up = {'command_name': '_internal_host_up', 'command_line': '_internal_host_up'}
raw_objects['command'].append(host_up)
echo_obj = {'command_name': '_echo', 'command_line': '_echo'}
raw_objects['command'].append(echo_obj)
# We've got raw objects in string, now create real Instances
def create_objects(self, raw_objects):
""" Create real 'object' from dicts of prop/value """
types_creations = self.__class__.types_creations
# some types have already been created by this time
early_created_types = self.__class__.early_created_types
# Before really creating the objects, we add
# ghost ones like the bp_rule for correlation
self.add_ghost_objects(raw_objects)
for t in types_creations:
if t not in early_created_types:
self.create_objects_for_type(raw_objects, t)
def create_objects_for_type(self, raw_objects, type):
types_creations = self.__class__.types_creations
t = type
# Ex: what the code below does for timeperiods:
# timeperiods = []
# for timeperiodcfg in objects['timeperiod']:
# t = Timeperiod(timeperiodcfg)
# t.clean()
# timeperiods.append(t)
# self.timeperiods = Timeperiods(timeperiods)
(cls, clss, prop, initial_index) = types_creations[t]
# List where we put objects
lst = []
for obj_cfg in raw_objects[t]:
# We create the object
o = cls(obj_cfg)
# Change Nagios2 names to Nagios3 ones (before using them)
o.old_properties_names_to_new()
lst.append(o)
# we create the objects Class and we set it in prop
setattr(self, prop, clss(lst, initial_index, self.conflict_policy))
# Here arbiter and module objects should be prepared and linked
# before all other types
def early_arbiter_linking(self):
""" Prepare the arbiter for early operations """
if len(self.arbiters) == 0:
logger.warning("There is no arbiter, I add one in localhost:7770")
a = ArbiterLink({'arbiter_name': 'Default-Arbiter',
'host_name': six.u(socket.gethostname()),
'address': 'localhost', 'port': '7770',
'spare': '0'})
self.arbiters = ArbiterLinks([a])
# Should look at hacking command_file module first
self.hack_old_nagios_parameters_for_arbiter()
# First fill default
self.arbiters.fill_default()
self.modules.fill_default()
# print("****************** Linkify ******************")
self.arbiters.linkify(self.modules)
self.modules.linkify()
# We will load all triggers .trig files from all triggers_dir
def load_triggers(self):
for p in self.triggers_dirs:
self.triggers.load_file(p)
# We will load all packs .pack files from all packs_dirs
def load_packs(self):
for p in self.packs_dirs:
self.packs.load_file(p)
# We use linkify to make the config more efficient: elements will be
# linked, like pointers. For example, a host will have its services
# and contacts directly in its properties
# REMEMBER: linkify AFTER explode...
def linkify(self):
""" Make 'links' between elements, like a host got a services list
with all it's services in it """
self.services.optimize_service_search(self.hosts)
# First linkify myself like for some global commands
self.linkify_one_command_with_commands(self.commands, 'ocsp_command')
self.linkify_one_command_with_commands(self.commands, 'ochp_command')
self.linkify_one_command_with_commands(self.commands, 'host_perfdata_command')
self.linkify_one_command_with_commands(self.commands, 'service_perfdata_command')
self.linkify_one_command_with_commands(self.commands, 'global_host_event_handler')
self.linkify_one_command_with_commands(self.commands, 'global_service_event_handler')
# print("Hosts")
# link hosts with timeperiods and commands
self.hosts.linkify(self.timeperiods, self.commands,
self.contacts, self.realms,
self.resultmodulations, self.businessimpactmodulations,
self.escalations, self.hostgroups,
self.triggers, self.checkmodulations,
self.macromodulations
)
self.hostsextinfo.merge(self.hosts)
# Do the simplification AFTER exploding groups
# print("Hostgroups")
# link hostgroups with hosts
self.hostgroups.linkify(self.hosts, self.realms)
# print("Services")
# link services with other objects
self.services.linkify(self.hosts, self.commands,
self.timeperiods, self.contacts,
self.resultmodulations, self.businessimpactmodulations,
self.escalations, self.servicegroups,
self.triggers, self.checkmodulations,
self.macromodulations
)
self.servicesextinfo.merge(self.services)
# print("Service groups")
# link servicegroups members with services
self.servicegroups.linkify(self.hosts, self.services)
# link notificationways with timeperiods and commands
self.notificationways.linkify(self.timeperiods, self.commands)
# link checkmodulations with timeperiods and commands
self.checkmodulations.linkify(self.timeperiods, self.commands)
# Link with timeperiods
self.macromodulations.linkify(self.timeperiods)
# print("Contactgroups")
# link contactgroups with contacts
self.contactgroups.linkify(self.contacts)
# print("Contacts")
# link contacts with timeperiods and commands
self.contacts.linkify(self.timeperiods, self.commands,
self.notificationways)
# print("Timeperiods")
# link timeperiods with timeperiods (exclude part)
self.timeperiods.linkify()
# print("Servicedependency")
self.servicedependencies.linkify(self.hosts, self.services,
self.timeperiods)
# print("Hostdependency")
self.hostdependencies.linkify(self.hosts, self.timeperiods)
# print("Resultmodulations")
self.resultmodulations.linkify(self.timeperiods)
self.businessimpactmodulations.linkify(self.timeperiods)
# print("Escalations")
self.escalations.linkify(self.timeperiods, self.contacts,
self.services, self.hosts)
# Link discovery commands
self.discoveryruns.linkify(self.commands)
# print("Realms")
self.realms.linkify()
# print("Schedulers and satellites")
# Link all links with realms
# self.arbiters.linkify(self.modules)
self.schedulers.linkify(self.realms, self.modules)
self.brokers.linkify(self.realms, self.modules)
self.receivers.linkify(self.realms, self.modules)
self.reactionners.linkify(self.realms, self.modules)
self.pollers.linkify(self.realms, self.modules)
# Ok, now update all realms with backlinks of
# satellites
self.realms.prepare_for_satellites_conf()
# Removes service exceptions based on host configuration
def remove_exclusions(self):
return self.services.remove_exclusions(self.hosts)
def set_initial_state(self):
"""
Sets services and hosts initial states.
"""
self.hosts.set_initial_state()
self.services.set_initial_state()
# Some elements may be flagged as wrong by an is_correct() pass, so clean
# them if possible
def clean(self):
self.services.clean()
# In the scheduler we need to relink the commandCall with
# the real commands
def late_linkify(self):
props = ['ocsp_command', 'ochp_command',
'service_perfdata_command', 'host_perfdata_command',
'global_host_event_handler', 'global_service_event_handler']
for prop in props:
cc = getattr(self, prop, None)
if cc:
cc.late_linkify_with_command(self.commands)
# But also other objects like hosts and services
self.hosts.late_linkify_h_by_commands(self.commands)
self.services.late_linkify_s_by_commands(self.commands)
self.contacts.late_linkify_c_by_commands(self.commands)
# Some properties are dangerous to be sent as-is,
# like the realms linked in hosts: realms are too big (too linked) to send.
# We also pre-serialize the confs so the sending phase will
# be quicker.
def prepare_for_sending(self):
# Preparing hosts and hostgroups for sending. Some properties
# should be "flattened" before sending, like the .realm object that
# should be changed into a name
self.hosts.prepare_for_sending()
self.hostgroups.prepare_for_sending()
t1 = time.time()
logger.info('[Arbiter] Serializing the configurations...')
# There are two ways of serializing the configuration:
# one is the serial way; the other, with use_multiprocesses_serializer,
# calls sub-workers to do the job.
# TODO: enable on Windows? Not sure it will work, must give it a test
if os.name == 'nt' or not self.use_multiprocesses_serializer:
logger.info('Using the default serialization pass')
for r in self.realms:
for (i, conf) in r.confs.items():
# Remember to protect the local conf hostgroups too!
conf.hostgroups.prepare_for_sending()
logger.debug('[%s] Serializing the configuration %d', r.get_name(), i)
t0 = time.time()
r.serialized_confs[i] = serialize(conf)
logger.debug("[config] time to serialize the conf %s:%s is %s (size:%s)",
r.get_name(), i, time.time() - t0, len(r.serialized_confs[i]))
t0 = time.time()
whole_conf_pack = serialize(self)
logger.debug("[config] time to serialize the global conf : %s (size:%s)",
time.time() - t0, len(whole_conf_pack))
self.whole_conf_pack = whole_conf_pack
logger.debug("[config]serializing total: %s" % (time.time() - t1))
else:
logger.info('Using the multiprocessing serialization pass')
t1 = time.time()
# We ask a manager to manage the communication with our children
m = Manager()
# The list will collect all the strings from the children
q = m.list()
for r in self.realms:
processes = []
for (i, conf) in r.confs.items():
# This function will be called in the children; it serializes
# one conf and appends the result to the shared list
def Serialize_config(q, rname, i, conf):
# Remember to protect the local conf hostgroups too!
conf.hostgroups.prepare_for_sending()
logger.debug('[%s] Serializing the configuration %d', rname, i)
t0 = time.time()
res = serialize(conf)
logger.debug("[config] time to serialize the conf %s:%s is %s (size:%s)",
rname, i, time.time() - t0, len(res))
q.append((i, res))
# Prepare a sub-process that will manage the serialization computation
p = Process(target=Serialize_config,
name="serializer-%s-%d" % (r.get_name(), i),
args=(q, r.get_name(), i, conf))
p.start()
processes.append((i, p))
# Here all sub-processes are launched for this realm, now wait for them to finish
while len(processes) != 0:
to_del = []
for (i, p) in processes:
if p.exitcode is not None:
to_del.append((i, p))
# remember to join() so the children can die
p.join()
for (i, p) in to_del:
logger.debug("The sub process %s is done with the return code %d",
p.name, p.exitcode)
processes.remove((i, p))
# Don't be too quick to poll!
time.sleep(0.1)
# Check if we got the right number of configurations;
# maybe one of the children had problems?
if len(q) != len(r.confs):
logger.error("Something goes wrong in the configuration serializations, "
"please restart Shinken Arbiter")
sys.exit(2)
# Now get the serialized configurations and save them into self
for (i, cfg) in q:
r.serialized_confs[i] = cfg
whole_queue = m.list()
t0 = time.time()
# The function that just computes the serialized whole-conf string, but in a child process
def create_whole_conf_pack(whole_queue, self):
logger.debug("[config] sub processing the whole configuration pack creation")
whole_queue.append(serialize(self))
logger.debug("[config] sub processing the whole configuration pack creation "
"finished")
# Go for it
p = Process(target=create_whole_conf_pack,
args=(whole_queue, self),
name='serializer-whole-configuration')
p.start()
# Wait for it to die
while p.exitcode is None:
time.sleep(0.1)
p.join()
# Maybe we don't have our result?
if len(whole_queue) != 1:
logger.error("Something goes wrong in the whole configuration pack creation, "
"please restart Shinken Arbiter")
sys.exit(2)
# Get it and save it
self.whole_conf_pack = whole_queue.pop()
logger.debug("[config] time to serialize the global conf : %s (size:%s)",
time.time() - t0, len(self.whole_conf_pack))
# Shutdown the manager, the sub-process should be gone now
m.shutdown()
# It's used to warn about useless parameters and print why they are not used
def notice_about_useless_parameters(self):
if not self.disable_old_nagios_parameters_whining:
properties = self.__class__.properties
for prop, entry in properties.items():
if isinstance(entry, UnusedProp):
logger.warning("The parameter %s is useless and can be removed "
"from the configuration (Reason: %s)", prop, entry.text)
# It's used to raise a warning if the user set a parameter
# that we do not manage for now
def warn_about_unmanaged_parameters(self):
properties = self.__class__.properties
unmanaged = []
for prop, entry in properties.items():
if not entry.managed and hasattr(self, prop):
if entry.help:
s = "%s: %s" % (prop, entry.help)
else:
s = prop
unmanaged.append(s)
if len(unmanaged) != 0:
mailing_list_uri = "https://lists.sourceforge.net/lists/listinfo/shinken-devel"
logger.warning("The following parameter(s) are not currently managed.")
for s in unmanaged:
logger.info(s)
logger.warning("Unmanaged configuration statement, do you really need it?"
"Ask for it on the developer mailinglist %s or submit a pull "
"request on the Shinken github ", mailing_list_uri)
# Overrides specific instances properties
def override_properties(self):
self.services.override_properties(self.hosts)
# Used to fill group values on hosts and create new services
# (for hostgroup-generated ones)
def explode(self):
# first elements, after groups
# print("Contacts")
self.contacts.explode(self.contactgroups, self.notificationways)
# print("Contactgroups")
self.contactgroups.explode()
# print("Hosts")
self.hosts.explode(self.hostgroups, self.contactgroups, self.triggers)
# print("Hostgroups")
self.hostgroups.explode()
# print("Services")
# print("Initially got nb of services: %d" % len(self.services.items))
self.services.explode(self.hosts, self.hostgroups, self.contactgroups,
self.servicegroups, self.servicedependencies,
self.triggers)
# print("finally got nb of services: %d" % len(self.services.items))
# print("Servicegroups")
self.servicegroups.explode()
# print("Timeperiods")
self.timeperiods.explode()
self.hostdependencies.explode(self.hostgroups)
# print("Servicedependency")
self.servicedependencies.explode(self.hostgroups)
# Serviceescalations and hostescalations will create new escalations
self.serviceescalations.explode(self.escalations)
self.hostescalations.explode(self.escalations)
self.escalations.explode(self.hosts, self.hostgroups,
self.contactgroups)
# Now the architecture part
# print("Realms")
self.realms.explode()
# Dependencies are important for scheduling.
# This function creates dependency links between elements.
def apply_dependencies(self):
self.hosts.apply_dependencies()
self.services.apply_dependencies()
# Used to apply inheritance (template and implicit)
# so elements will have their configured properties
def apply_inheritance(self):
# inheritance properties by template
# print("Hosts")
self.hosts.apply_inheritance()
# print("Contacts")
self.contacts.apply_inheritance()
# print("Services")
self.services.apply_inheritance()
# print("Servicedependencies")
self.servicedependencies.apply_inheritance()
# print("Hostdependencies")
self.hostdependencies.apply_inheritance()
# Also timeperiods
self.timeperiods.apply_inheritance()
# Also "Hostextinfo"
self.hostsextinfo.apply_inheritance()
# Also "Serviceextinfo"
self.servicesextinfo.apply_inheritance()
# Now escalations too
self.serviceescalations.apply_inheritance()
self.hostescalations.apply_inheritance()
self.escalations.apply_inheritance()
# Used to apply implicit inheritance
def apply_implicit_inheritance(self):
# print("Services")
self.services.apply_implicit_inheritance(self.hosts)
# Will fill default properties for elements so they have all their properties
def fill_default(self):
# Fill default for config (self)
super(Config, self).fill_default()
self.hosts.fill_default()
self.hostgroups.fill_default()
self.contacts.fill_default()
self.contactgroups.fill_default()
self.notificationways.fill_default()
self.checkmodulations.fill_default()
self.macromodulations.fill_default()
self.services.fill_default()
self.servicegroups.fill_default()
self.resultmodulations.fill_default()
self.businessimpactmodulations.fill_default()
self.hostsextinfo.fill_default()
self.servicesextinfo.fill_default()
# Now escalations
self.escalations.fill_default()
# Also fill default of host/servicedep objects
self.servicedependencies.fill_default()
self.hostdependencies.fill_default()
# Discovery part
self.discoveryrules.fill_default()
self.discoveryruns.fill_default()
# first we create missing satellites, so no other satellite will
# be created after this point
self.fill_default_satellites()
# now we have all elements, we can create a default
# realm if needed, and it will be tagged on satellites that do
# not have a realm
self.fill_default_realm()
self.realms.fill_default() # also put default inside the realms themselves
self.reactionners.fill_default()
self.pollers.fill_default()
self.brokers.fill_default()
self.receivers.fill_default()
self.schedulers.fill_default()
# The arbiters are already done.
# self.arbiters.fill_default()
# Now fill some fields we can predict (like address for hosts)
self.fill_predictive_missing_parameters()
# Here is a special function to fill some special
# properties that are not set but should be, like
# address for hosts (if not set, use host_name)
def fill_predictive_missing_parameters(self):
self.hosts.fill_predictive_missing_parameters()
# Will check if a realm is defined; if not,
# create a new one (default) and tag everyone that does not have
# a realm prop to be put in this realm
def fill_default_realm(self):
if len(self.realms) == 0:
# Create a default realm with default=1
# so all hosts without a realm will be linked with it
default = Realm({'realm_name': 'Default', 'default': '1'})
self.realms = Realms([default])
logger.warning("No realms defined, I add one at %s", default.get_name())
lists = [self.pollers, self.brokers, self.reactionners, self.receivers, self.schedulers]
for l in lists:
for elt in l:
if not hasattr(elt, 'realm'):
elt.realm = 'Default'
logger.info("Tagging %s with realm %s", elt.get_name(), default.get_name())
# If a satellite type is missing, we add one on localhost
# with default values
def fill_default_satellites(self):
if len(self.schedulers) == 0:
logger.warning("No scheduler defined, I add one at localhost:7768")
s = SchedulerLink({'scheduler_name': 'Default-Scheduler',
'address': 'localhost', 'port': '7768'})
self.schedulers = SchedulerLinks([s])
if len(self.pollers) == 0:
logger.warning("No poller defined, I add one at localhost:7771")
p = PollerLink({'poller_name': 'Default-Poller',
'address': 'localhost', 'port': '7771'})
self.pollers = PollerLinks([p])
if len(self.reactionners) == 0:
logger.warning("No reactionner defined, I add one at localhost:7769")
r = ReactionnerLink({'reactionner_name': 'Default-Reactionner',
'address': 'localhost', 'port': '7769'})
self.reactionners = ReactionnerLinks([r])
if len(self.brokers) == 0:
logger.warning("No broker defined, I add one at localhost:7772")
b = BrokerLink({'broker_name': 'Default-Broker',
'address': 'localhost', 'port': '7772',
'manage_arbiters': '1'})
self.brokers = BrokerLinks([b])
# Return whether one broker has a module of type mod_type
def got_broker_module_type_defined(self, mod_type):
for b in self.brokers:
for m in b.modules:
if hasattr(m, 'module_type') and m.module_type == mod_type:
return True
return False
# Return whether one scheduler has a module of type mod_type
def got_scheduler_module_type_defined(self, mod_type):
for b in self.schedulers:
for m in b.modules:
if hasattr(m, 'module_type') and m.module_type == mod_type:
return True
return False
# Return whether one arbiter has a module of type mod_type,
# but this time it's tricky: the linkify pass is not done yet,
# so compare with strings!
def got_arbiter_module_type_defined(self, mod_type):
for a in self.arbiters:
# Do like the linkify will do after....
for m in getattr(a, 'modules', []):
# So look at what the arbiter try to call as module
m = m.strip()
# Ok, now look in modules...
for mod in self.modules:
# try to see if this module is the good type
if getattr(mod, 'module_type', '').strip() == mod_type.strip():
# if so, the good name?
if getattr(mod, 'module_name', '').strip() == m:
return True
return False
# Will ask for each host/service if the
# check_command is a bp rule. If so, it will create
# a tree structure with the rules
def create_business_rules(self):
self.hosts.create_business_rules(self.hosts, self.services)
self.services.create_business_rules(self.hosts, self.services)
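# Illustrative service definition using the bp_rule ghost command
# registered in add_ghost_objects() (hypothetical names):
#   define service {
#       service_description   All-Web
#       check_command         bp_rule!srv1,Web & srv2,Web
#       ...
#   }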
# Will fill dep list for business rules
def create_business_rules_dependencies(self):
self.hosts.create_business_rules_dependencies()
self.services.create_business_rules_dependencies()
# It's used to hack some old Nagios parameters like
# log_file or status_file: if they are present in
# the global configuration and there are no such modules
# in a Broker, we create them on the fly for all Brokers
def hack_old_nagios_parameters(self):
""" Create some 'modules' from all nagios parameters if they are set and
the modules are not created """
# We list all modules we will add to brokers
mod_to_add = []
mod_to_add_to_schedulers = []
# For status_dat
if (hasattr(self, 'status_file') and
self.status_file != '' and
hasattr(self, 'object_cache_file')):
# Ok, the user put such a value, we must look
# if they forgot to define a module for the Brokers
got_status_dat_module = self.got_broker_module_type_defined('status_dat')
# We need to create the module on the fly?
if not got_status_dat_module:
data = {'object_cache_file': self.object_cache_file,
'status_file': self.status_file,
'module_name': 'Status-Dat-Autogenerated',
'module_type': 'status_dat'}
mod = Module(data)
mod.status_update_interval = getattr(self, 'status_update_interval', 15)
mod_to_add.append(mod)
# Now the log_file
if hasattr(self, 'log_file') and self.log_file != '':
# Ok, the user put such a value, we must look
# if they forgot to define a module for the Brokers
got_simple_log_module = self.got_broker_module_type_defined('simple_log')
# We need to create the module on the fly?
if not got_simple_log_module:
data = {'module_type': 'simple_log', 'path': self.log_file,
'archive_path': self.log_archive_path,
'module_name': 'Simple-log-Autogenerated'}
mod = Module(data)
mod_to_add.append(mod)
# Now the syslog facility
if self.use_syslog:
# Ok, the user wants syslog logging, why not after all
got_syslog_module = self.got_broker_module_type_defined('syslog')
# We need to create the module on the fly?
if not got_syslog_module:
data = {'module_type': 'syslog',
'module_name': 'Syslog-Autogenerated'}
mod = Module(data)
mod_to_add.append(mod)
# Now the service_perfdata module
if self.service_perfdata_file != '':
# Ok, we've got a path for a service perfdata file
got_service_perfdata_module = self.got_broker_module_type_defined('service_perfdata')
# We need to create the module on the fly?
if not got_service_perfdata_module:
data = {'module_type': 'service_perfdata',
'module_name': 'Service-Perfdata-Autogenerated',
'path': self.service_perfdata_file,
'mode': self.service_perfdata_file_mode,
'template': self.service_perfdata_file_template}
mod = Module(data)
mod_to_add.append(mod)
# Now the old retention file module
if self.state_retention_file != '' and self.retention_update_interval != 0:
# Ok, we've got an old retention file
got_retention_file_module = \
self.got_scheduler_module_type_defined('nagios_retention_file')
# We need to create the module on the fly?
if not got_retention_file_module:
data = {'module_type': 'nagios_retention_file',
'module_name': 'Nagios-Retention-File-Autogenerated',
'path': self.state_retention_file}
mod = Module(data)
mod_to_add_to_schedulers.append(mod)
# Now the host_perfdata module
if self.host_perfdata_file != '':
# Ok, we've got a path for a host perfdata file
got_host_perfdata_module = self.got_broker_module_type_defined('host_perfdata')
# We need to create the module on the fly?
if not got_host_perfdata_module:
data = {'module_type': 'host_perfdata',
'module_name': 'Host-Perfdata-Autogenerated',
'path': self.host_perfdata_file, 'mode': self.host_perfdata_file_mode,
'template': self.host_perfdata_file_template}
mod = Module(data)
mod_to_add.append(mod)
# We add them to the brokers if we need it
if mod_to_add != []:
logger.warning("I autogenerated some Broker modules, please look at your configuration")
for m in mod_to_add:
logger.warning("The module %s is autogenerated", m.module_name)
for b in self.brokers:
b.modules.append(m)
# Then for schedulers
if mod_to_add_to_schedulers != []:
logger.warning("I autogenerated some Scheduler modules, "
"please look at your configuration")
for m in mod_to_add_to_schedulers:
logger.warning("The module %s is autogenerated", m.module_name)
for b in self.schedulers:
b.modules.append(m)
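# Illustrative legacy main-file lines (hypothetical paths) that trigger
# this hack when no matching module is defined:
#   log_file=/var/log/nagios/nagios.log
#   state_retention_file=/var/lib/nagios/retention.dat
# -> a 'Simple-log-Autogenerated' broker module and a
# 'Nagios-Retention-File-Autogenerated' scheduler module are created
# on the fly.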
# It's used to hack some old Nagios parameters too,
# but for the arbiter, so very early in the run
def hack_old_nagios_parameters_for_arbiter(self):
""" Create some 'modules' from all nagios parameters if they are set and
the modules are not created """
# We list all modules we will add to arbiters
mod_to_add = []
# For command_file
if getattr(self, 'command_file', '') != '':
# Ok, the user put such a value, we must look
# if they forgot to define a module for the arbiters
got_named_pipe_module = self.got_arbiter_module_type_defined('named_pipe')
# We need to create the module on the fly?
if not got_named_pipe_module:
data = {'command_file': self.command_file,
'module_name': 'NamedPipe-Autogenerated',
'module_type': 'named_pipe'}
mod = Module(data)
mod_to_add.append((mod, data))
# We add them to the arbiters if needed
if mod_to_add != []:
logger.warning("I autogenerated some Arbiter modules, "
"please look at your configuration")
for (mod, data) in mod_to_add:
logger.warning("Module %s was autogenerated", data['module_name'])
for a in self.arbiters:
a.modules = getattr(a, 'modules', []) + [data['module_name']]
self.modules.add_item(mod)
# Set our timezone value and give it to unset satellites too
def propagate_timezone_option(self):
if self.use_timezone != '':
# first apply myself
os.environ['TZ'] = self.use_timezone
time.tzset()
tab = [self.schedulers, self.pollers, self.brokers, self.receivers, self.reactionners]
for t in tab:
for s in t:
if s.use_timezone == 'NOTSET':
setattr(s, 'use_timezone', self.use_timezone)
# Link templates with elements
def linkify_templates(self):
""" Like for normal object, we link templates with each others """
self.hosts.linkify_templates()
self.contacts.linkify_templates()
self.services.linkify_templates()
self.servicedependencies.linkify_templates()
self.hostdependencies.linkify_templates()
self.timeperiods.linkify_templates()
self.hostsextinfo.linkify_templates()
self.servicesextinfo.linkify_templates()
self.escalations.linkify_templates()
# But also old srv and host escalations
self.serviceescalations.linkify_templates()
self.hostescalations.linkify_templates()
# Some parameters are just not managed like O*HP commands
# and regexp capabilities
# True: OK
# False: error in conf
def check_error_on_hard_unmanaged_parameters(self):
r = True
if self.use_regexp_matching:
logger.error("use_regexp_matching parameter is not managed.")
r &= False
# if self.ochp_command != '':
# logger.error("ochp_command parameter is not managed.")
# r &= False
# if self.ocsp_command != '':
# logger.error("ocsp_command parameter is not managed.")
# r &= False
return r
# Check if elements are correct or not (filled with defaults, etc).
# Warning: this function can be called from an Arbiter AND
# from a Scheduler. The first one has everything, the second
# does not have the satellites.
def is_correct(self):
""" Check if all elements got a good configuration """
logger.info('Running pre-flight check on configuration data...')
r = self.conf_is_correct
# Globally unmanaged parameters
if self.read_config_silent == 0:
logger.info('Checking global parameters...')
if not self.check_error_on_hard_unmanaged_parameters():
r = False
logger.error("Check global parameters failed")
for x in ('hosts', 'hostgroups', 'contacts', 'contactgroups', 'notificationways',
'escalations', 'services', 'servicegroups', 'timeperiods', 'commands',
'hostsextinfo', 'servicesextinfo', 'checkmodulations', 'macromodulations'):
if self.read_config_silent == 0:
logger.info('Checking %s...', x)
cur = getattr(self, x)
if not cur.is_correct():
r = False
logger.error("\t%s conf incorrect!!", x)
if self.read_config_silent == 0:
logger.info('\tChecked %d %s', len(cur), x)
# Hosts got a special check for loops
if not self.hosts.no_loop_in_parents("self", "parents"):
r = False
logger.error("Hosts: detected loop in parents ; conf incorrect")
for x in ('servicedependencies', 'hostdependencies', 'arbiters', 'schedulers',
'reactionners', 'pollers', 'brokers', 'receivers', 'resultmodulations',
'discoveryrules', 'discoveryruns', 'businessimpactmodulations'):
try:
cur = getattr(self, x)
except AttributeError:
continue
if self.read_config_silent == 0:
logger.info('Checking %s...', x)
if not cur.is_correct():
r = False
logger.error("\t%s conf incorrect!!", x)
if self.read_config_silent == 0:
logger.info('\tChecked %d %s', len(cur), x)
# Check that every scheduler has a broker that will take its broks.
# If not, raise an error
for s in self.schedulers:
rea = s.realm
if rea:
if len(rea.potential_brokers) == 0:
logger.error("The scheduler %s got no broker in its realm or upper",
s.get_name())
self.add_error("Error: the scheduler %s got no broker in its realm "
"or upper" % s.get_name())
r = False
# Check that for each poller_tag of a host, a poller exists with this tag
        # TODO: need to check that pollers are in the right realm too
hosts_tag = set()
services_tag = set()
pollers_tag = set()
for h in self.hosts:
hosts_tag.add(h.poller_tag)
for s in self.services:
services_tag.add(s.poller_tag)
for p in self.pollers:
for t in p.poller_tags:
pollers_tag.add(t)
if not hosts_tag.issubset(pollers_tag):
for tag in hosts_tag.difference(pollers_tag):
logger.error("Hosts exist with poller_tag %s but no poller got this tag", tag)
self.add_error("Error: hosts exist with poller_tag %s but no poller "
"got this tag" % tag)
r = False
if not services_tag.issubset(pollers_tag):
for tag in services_tag.difference(pollers_tag):
logger.error("Services exist with poller_tag %s but no poller got this tag", tag)
self.add_error("Error: services exist with poller_tag %s but no poller "
"got this tag" % tag)
r = False
# Check that all hosts involved in business_rules are from the same realm
for l in [self.services, self.hosts]:
for e in l:
if e.got_business_rule:
e_ro = e.get_realm()
# Something was wrong in the conf, will be raised elsewhere
if not e_ro:
continue
e_r = e_ro.realm_name
for elt in e.business_rule.list_all_elements():
r_o = elt.get_realm()
# Something was wrong in the conf, will be raised elsewhere
if not r_o:
continue
                    elt_r = r_o.realm_name
if not elt_r == e_r:
logger.error("Business_rule '%s' got hosts from another realm: %s",
e.get_full_name(), elt_r)
self.add_error("Error: Business_rule '%s' got hosts from another "
"realm: %s" % (e.get_full_name(), elt_r))
r = False
if len([realm for realm in self.realms if hasattr(realm, 'default') and realm.default]) > 1:
err = "Error : More than one realm are set to the default realm"
logger.error(err)
self.add_error(err)
r = False
self.conf_is_correct = r
    # Explode global parameters into the classes that use them, e.g.
    # cached_service_check_horizon becomes cached_check_horizon on the
    # Service class; same for the o*hp commands, etc.
def explode_global_conf(self):
clss = [Service, Host, Contact, SchedulerLink,
PollerLink, ReactionnerLink, BrokerLink,
ReceiverLink, ArbiterLink, HostExtInfo]
for cls in clss:
cls.load_global_conf(self)
# Clean useless elements like templates because they are not needed anymore
def remove_templates(self):
self.hosts.remove_templates()
self.contacts.remove_templates()
self.services.remove_templates()
self.servicedependencies.remove_templates()
self.hostdependencies.remove_templates()
self.timeperiods.remove_templates()
self.discoveryrules.remove_templates()
self.discoveryruns.remove_templates()
    # Add an error to the configuration error list so we can print them
    # all in one place
    def add_error(self, txt):
        self.configuration_errors.append(txt)
        self.conf_is_correct = False
# Now it's time to show all configuration errors
def show_errors(self):
for err in self.configuration_errors:
logger.error(err)
def get_least_loaded_scheduler_id(self, scheduler_ids, distribution):
"""
Returns the scheduler id having the lowest number of objects to manage
:param dict distribution: The packs distribution
:rtype: int
:return: The least weighted scheduler id
"""
distribution_ids = list(distribution.values())
        # Schedulers not having any conf yet take precedence
no_conf = [i for i in scheduler_ids if i not in distribution_ids]
if no_conf:
return random.choice(no_conf)
# Returns the scheduler id having the least managed objects
weighted_ids = dict(
[
(distribution_ids.count(i), i)
for i in set(distribution_ids)
]
)
return weighted_ids[min(weighted_ids.keys())]
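    # A small illustration of the selection above (hypothetical values):
    # scheduler_ids repeats each scheduler id once per weight point, and
    # distribution maps object names to the scheduler id they went to.
    #
    #   ids = [1, 1, 2]                 # scheduler 1 weight 2, scheduler 2 weight 1
    #   assoc = {'h1': 1, 'h2': 1}
    #   conf.get_least_loaded_scheduler_id(ids, assoc)  # -> 2 (no conf yet)
    #   assoc['h3'] = 2
    #   conf.get_least_loaded_scheduler_id(ids, assoc)  # -> 2 (1 object vs 2)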
    # Create packs of hosts and services, so that within a pack
    # all dependencies are resolved.
    # It creates a graph: all hosts are connected to their
    # parents, and hosts without a parent are connected to host 'root'.
    # Services are linked to their host. Dependencies are managed.
    # REF: doc/pack-creation.png
def create_packs(self, nb_packs):
# We create a graph with host in nodes
g = Graph()
g.add_nodes(self.hosts)
# links will be used for relations between hosts
links = set()
# Now the relations
for h in self.hosts:
# Add parent relations
for p in h.parents:
if p is not None:
links.add((p, h))
            # Add the other dependencies
for (dep, tmp, tmp2, tmp3, tmp4) in h.act_depend_of:
links.add((dep, h))
for (dep, tmp, tmp2, tmp3, tmp4) in h.chk_depend_of:
links.add((dep, h))
        # For services: they are linked with their own host, but we need
        # to have the hosts of service deps in the same pack too
for s in self.services:
for (dep, tmp, tmp2, tmp3, tmp4) in s.act_depend_of:
# I don't care about dep host: they are just the host
# of the service...
if hasattr(dep, 'host'):
links.add((dep.host, s.host))
# The other type of dep
for (dep, tmp, tmp2, tmp3, tmp4) in s.chk_depend_of:
links.add((dep.host, s.host))
# For host/service that are business based, we need to
# link them too
for s in [s for s in self.services if s.got_business_rule]:
for e in s.business_rule.list_all_elements():
if hasattr(e, 'host'): # if it's a service
                    if e.host != s.host:  # do not link a host with itself
links.add((e.host, s.host))
else: # it's already a host
if e != s.host:
links.add((e, s.host))
# Same for hosts of course
for h in [h for h in self.hosts if h.got_business_rule]:
for e in h.business_rule.list_all_elements():
if hasattr(e, 'host'): # if it's a service
if e.host != h:
links.add((e.host, h))
else: # e is a host
if e != h:
links.add((e, h))
        # Now we create the edges in the graph. Using the links set,
        # we are sure to call add_edge as few times as possible
for (dep, h) in links:
g.add_edge(dep, h)
g.add_edge(h, dep)
        # The accessibility pack of a node is all nodes connected
        # to it: the result is a list of our mini-packs
tmp_packs = g.get_accessibility_packs()
        # Now we find the default realm
default_realm = None
for r in self.realms:
if hasattr(r, 'default') and r.default:
default_realm = r
        # Now we check whether all elements of a pack share the
        # same realm. If not, the configuration is wrong!
for pack in tmp_packs:
tmp_realms = set()
for elt in pack:
if elt.realm is not None:
tmp_realms.add(elt.realm)
if len(tmp_realms) > 1:
self.add_error("Error: the realm configuration of yours hosts is not good "
"because there a more than one realm in one pack (host relations):")
for h in pack:
if h.realm is None:
                        err = ' the host %s does not have a realm' % h.get_name()
self.add_error(err)
else:
err = ' the host %s is in the realm %s' % (h.get_name(),
h.realm.get_name())
self.add_error(err)
if len(tmp_realms) == 1: # Ok, good
r = tmp_realms.pop() # There is just one element
r.packs.append(pack)
elif len(tmp_realms) == 0: # Hum.. no realm value? So default Realm
if default_realm is not None:
default_realm.packs.append(pack)
else:
err = ("Error: some hosts do not have a realm and you do not "
"defined a default realm!")
self.add_error(err)
for h in pack:
err = ' Impacted host: %s ' % h.get_name()
self.add_error(err)
        # The load balancing is done per realm: all hosts of a realm
        # (grouped in packs) will be dispatched among the schedulers
        # of this realm
        # REF: doc/pack-agregation.png
        # Count the number of elements in all the realms, to compare it with the total number of hosts
nb_elements_all_realms = 0
for r in self.realms:
# print("Load balancing realm", r.get_name())
packs = {}
            # Create a round-robin list over the conf ids so dispatching is
            # load-balanced within the realm, with one entry per weight
            # point of each scheduler (i.e. weighted round robin)
no_spare_schedulers = [s for s in r.schedulers if not s.spare]
nb_schedulers = len(no_spare_schedulers)
            # Maybe there is no scheduler in the realm: it can be a
            # big problem if the packs contain elements
nb_elements = 0
for pack in r.packs:
nb_elements += len(pack)
nb_elements_all_realms += len(pack)
logger.info("Number of hosts in the realm %s: %d "
"(distributed in %d linked packs)",
r.get_name(), nb_elements, len(r.packs))
if nb_schedulers == 0 and nb_elements != 0:
err = "The realm %s has hosts but no scheduler!" % r.get_name()
self.add_error(err)
                r.packs = []  # Dummy pack list
continue
packindices = {}
weight_scheduler_ids = []
for i, s in enumerate(no_spare_schedulers):
packindices[s.id] = i
for _ in range(0, s.weight):
weight_scheduler_ids.append(s.id)
# We must have nb_schedulers packs
for i in range(0, nb_schedulers):
packs[i] = []
            # Association dict: remembers which scheduler each object was
            # sent to, so related hosts end up in the same "pack"
assoc = {}
            # Now we explode the numerous packs into nb_packs real packs:
            # we 'load balance' them, trying to even out the number of
            # objects to check
for pack in r.packs:
scheduler_id = self.get_least_loaded_scheduler_id(weight_scheduler_ids, assoc)
for elt in pack:
packs[packindices[scheduler_id]].append(elt)
                    assoc[elt.get_name()] = scheduler_id
                    for svc in elt.services:
                        assoc[svc.get_full_name()] = scheduler_id
            # Now packs holds one list of hosts per scheduler,
            # i.e. as many packs as non-spare schedulers.
r.packs = packs
logger.info("Total number of hosts : %d",
nb_elements_all_realms)
if len(self.hosts) != nb_elements_all_realms:
logger.warning("There are %d hosts defined, and %d hosts dispatched in the realms. "
"Some hosts have been ignored", len(self.hosts), nb_elements_all_realms)
self.add_error("There are %d hosts defined, and %d hosts dispatched in the realms. "
"Some hosts have been "
"ignored" % (len(self.hosts), nb_elements_all_realms))
    # Use self.conf and make nb_parts new confs.
    # nb_parts is equal to the number of SchedulerLinks.
    # The new confs are independent regarding checks. The only communication
    # that can be needed is macros in commands
def cut_into_parts(self):
# print("Scheduler configured:", self.schedulers)
        # I do not care about alive or not. The user must have set a spare if one is needed
nb_parts = len([s for s in self.schedulers if not s.spare])
if nb_parts == 0:
nb_parts = 1
        # We create dummy configurations for schedulers:
        # they are clones of the master conf, but without hosts and services
        # (because those are dispatched among these configurations)
self.confs = {}
for i in range(0, nb_parts):
# print("Create Conf:", i, '/', nb_parts -1)
cur_conf = self.confs[i] = Config()
# Now we copy all properties of conf into the new ones
for prop, entry in Config.properties.items():
if entry.managed and not isinstance(entry, UnusedProp):
val = getattr(self, prop)
setattr(cur_conf, prop, val)
# print("Copy", prop, val)
# we need a deepcopy because each conf
# will have new hostgroups
cur_conf.id = i
cur_conf.commands = self.commands
cur_conf.timeperiods = self.timeperiods
# Create hostgroups with just the name and same id, but no members
new_hostgroups = []
for hg in self.hostgroups:
new_hostgroups.append(hg.copy_shell())
cur_conf.hostgroups = Hostgroups(new_hostgroups)
cur_conf.notificationways = self.notificationways
cur_conf.checkmodulations = self.checkmodulations
cur_conf.macromodulations = self.macromodulations
cur_conf.contactgroups = self.contactgroups
cur_conf.contacts = self.contacts
cur_conf.triggers = self.triggers
            # Create servicegroups with just the name and same id, but no members
new_servicegroups = []
for sg in self.servicegroups:
new_servicegroups.append(sg.copy_shell())
cur_conf.servicegroups = Servicegroups(new_servicegroups)
            cur_conf.hosts = []  # will be filled later
            cur_conf.services = []  # will be filled later
            # The elements of the other confs will be tagged here
            cur_conf.other_elements = {}
            # Whether a scheduler has accepted the conf
            cur_conf.is_assigned = False
logger.info("Creating packs for realms")
        # Just create the packs. There can be numerous ones.
        # A pack holds hosts and services,
        # and the packs live in the realms
# REF: doc/pack-creation.png
self.create_packs(nb_parts)
        # We've got all the big packs; now put their elements into the configurations
# REF: doc/pack-agregation.png
offset = 0
for r in self.realms:
for i in r.packs:
pack = r.packs[i]
for h in pack:
h.pack_id = i
self.confs[i + offset].hosts.append(h)
for s in h.services:
self.confs[i + offset].services.append(s)
                # Now the conf can be linked to the realm
r.confs[i + offset] = self.confs[i + offset]
offset += len(r.packs)
del r.packs
        # By now the confs hold plain host and service lists. Now we want REAL
        # Hosts/Services objects (classes), and we want the groups too
# print("Finishing packs")
for i in self.confs:
# print("Finishing pack Nb:", i)
cfg = self.confs[i]
            # Create our container classes
cfg.hosts = Hosts(cfg.hosts)
cfg.services = Services(cfg.services)
# Fill host groups
for ori_hg in self.hostgroups:
hg = cfg.hostgroups.find_by_name(ori_hg.get_name())
mbrs = ori_hg.members
mbrs_id = []
for h in mbrs:
if h is not None:
mbrs_id.append(h.id)
for h in cfg.hosts:
if h.id in mbrs_id:
hg.members.append(h)
# And also relink the hosts with the valid hostgroups
for h in cfg.hosts:
orig_hgs = h.hostgroups
nhgs = []
for ohg in orig_hgs:
nhg = cfg.hostgroups.find_by_name(ohg.get_name())
nhgs.append(nhg)
h.hostgroups = nhgs
# Fill servicegroup
for ori_sg in self.servicegroups:
sg = cfg.servicegroups.find_by_name(ori_sg.get_name())
mbrs = ori_sg.members
mbrs_id = []
for s in mbrs:
if s is not None:
mbrs_id.append(s.id)
for s in cfg.services:
if s.id in mbrs_id:
sg.members.append(s)
            # And also relink the services with the valid servicegroups
            for s in cfg.services:
                orig_sgs = s.servicegroups
                nsgs = []
                for osg in orig_sgs:
                    nsg = cfg.servicegroups.find_by_name(osg.get_name())
                    nsgs.append(nsg)
                s.servicegroups = nsgs
        # Now we fill other_elements by host (services are with their host,
        # so they are not tagged)
        for i in self.confs:
            for h in self.confs[i].hosts:
                for j in [j for j in self.confs if j != i]:  # So other than i
                    self.confs[j].other_elements[h.get_name()] = i
# We tag conf with instance_id
for i in self.confs:
self.confs[i].instance_id = i
random.seed(time.time())
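    # A rough sketch of the outcome (hypothetical ids and names): with two
    # non-spare schedulers, cut_into_parts() leaves self.confs == {0: conf0,
    # 1: conf1}, each with its own Hosts/Services containers and shell
    # copies of the groups, while other_elements lets a conf locate hosts
    # it does not manage:
    #
    #   self.confs[1].other_elements['host_in_conf0']  # -> 0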
def dump(self, f=None):
dmp = {}
for category in ("hosts",
"hostgroups",
"hostdependencies",
"contactgroups",
"contacts",
"notificationways",
"checkmodulations",
"macromodulations",
"servicegroups",
"services",
"servicedependencies",
"resultmodulations",
"businessimpactmodulations",
"escalations",
"discoveryrules",
"discoveryruns",
"schedulers",
"realms",
):
objs = [jsonify_r(i) for i in getattr(self, category)]
container = getattr(self, category)
if category == "services":
objs = sorted(objs, key=lambda o: "%s/%s" %
(o["host_name"], o["service_description"]))
elif hasattr(container, "name_property"):
np = container.name_property
                objs = sorted(objs, key=lambda o: o.get(np, ''))
dmp[category] = objs
if f is None:
d = tempfile.gettempdir()
p = os.path.join(d, 'shinken-config-dump-%d' % time.time())
f = open(p, "w")
close = True
else:
close = False
f.write(
json.dumps(
dmp,
indent=4,
separators=(',', ': '),
sort_keys=True
)
)
if close is True:
f.close()
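    # Typical use (a sketch; the temp path is what dump() builds itself
    # when no file object is given, typically under /tmp on Linux):
    #
    #   conf.dump()                       # writes shinken-config-dump-<timestamp>
    #   with open('/tmp/conf.json', 'w') as out:
    #       conf.dump(out)                # caller keeps ownership of `out`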
# ...
def lazy():
# let's compute the "USER" properties and macros..
for n in range(1, 256):
Config.properties['$USER%s$' % n] = StringProp(default='')
Config.macros['USER%s' % n] = '$USER%s$' % n
lazy()
del lazy


# =============================================================================
# shinken-solutions_shinken/shinken/objects/checkmodulation.py
# (dataset record 6,496; repo shinken-solutions/shinken; AGPL-3.0)
# =============================================================================

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.property import StringProp
from shinken.util import to_name_if_possible
from shinken.log import logger
class CheckModulation(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'checkmodulation'
properties = Item.properties.copy()
properties.update({
'checkmodulation_name':
StringProp(fill_brok=['full_status']),
'check_command':
StringProp(fill_brok=['full_status']),
'check_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
})
running_properties = Item.running_properties.copy()
_special_properties = ('check_period',)
macros = {}
# For debugging purpose only (nice name)
def get_name(self):
return self.checkmodulation_name
    # Check whether our check_period allows t_to_go, and return our check_command if so
def get_check_command(self, t_to_go):
if not self.check_period or self.check_period.is_time_valid(t_to_go):
return self.check_command
return None
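    # Behaviour sketch (hypothetical objects): outside the modulation
    # period the caller gets None and keeps its original command; a void
    # check_period means "always":
    #
    #   cm.get_check_command(t_inside_period)   # -> cm.check_command
    #   cm.get_check_command(t_outside_period)  # -> None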
    # Should have all required properties; a void check_period is allowed (means 24x7)
def is_correct(self):
state = True
cls = self.__class__
        # Raise all previously seen errors, like unknown commands or timeperiods
        if self.configuration_errors:
state = False
for err in self.configuration_errors:
logger.error("[item::%s] %s", self.get_name(), err)
for prop, entry in cls.properties.items():
if prop not in cls._special_properties:
if not hasattr(self, prop) and entry.required:
logger.warning("[checkmodulation::%s] %s property not set",
self.get_name(), prop)
state = False # Bad boy...
        # Ok now we manage special cases...
        # Check command part
if not hasattr(self, 'check_command'):
logger.warning("[checkmodulation::%s] do not have any check_command defined",
self.get_name())
state = False
else:
            if self.check_command is None:
                logger.warning("[checkmodulation::%s] a check_command is missing", self.get_name())
                state = False
            elif not self.check_command.is_valid():
                logger.warning("[checkmodulation::%s] a check_command is invalid", self.get_name())
                state = False
# Ok just put None as check_period, means 24x7
if not hasattr(self, 'check_period'):
self.check_period = None
return state
    # In the scheduler we need to relink the CommandCall with
    # the real Command objects
def late_linkify_cw_by_commands(self, commands):
if self.check_command:
self.check_command.late_linkify_with_command(commands)
class CheckModulations(Items):
name_property = "checkmodulation_name"
inner_class = CheckModulation
def linkify(self, timeperiods, commands):
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_one_command_with_commands(commands, 'check_command')
    def new_inner_member(self, name=None, params=None):
        if params is None:
            params = {}
        if name is None:
            name = CheckModulation.id
params['checkmodulation_name'] = name
# print("Asking a new inner checkmodulation from name %s with params %s" % (name, params))
cw = CheckModulation(params)
self.add_item(cw)


# =============================================================================
# shinken-solutions_shinken/shinken/objects/schedulerlink.py
# (dataset record 6,497; repo shinken-solutions/shinken; AGPL-3.0)
# =============================================================================

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.satellitelink import SatelliteLink, SatelliteLinks
from shinken.property import BoolProp, IntegerProp, StringProp
from shinken.log import logger
from shinken.http_client import HTTPException
from shinken.serializer import serialize
class SchedulerLink(SatelliteLink):
"""Please Add a Docstring to describe the class here"""
id = 0
# Ok we lie a little here because we are a mere link in fact
my_type = 'scheduler'
properties = SatelliteLink.properties.copy()
properties.update({
'scheduler_name': StringProp(fill_brok=['full_status']),
'port': IntegerProp(default=7768, fill_brok=['full_status']),
'weight': IntegerProp(default=1, fill_brok=['full_status']),
'skip_initial_broks': BoolProp(default=False, fill_brok=['full_status']),
'accept_passive_unknown_check_results': BoolProp(default=False, fill_brok=['full_status']),
'harakiri_threshold': StringProp(default=None, fill_brok=['full_status'], to_send=True),
})
running_properties = SatelliteLink.running_properties.copy()
running_properties.update({
'conf': StringProp(default=None),
'need_conf': StringProp(default=True),
'external_commands': StringProp(default=[]),
'push_flavor': IntegerProp(default=0),
})
def get_name(self):
return self.scheduler_name
def run_external_commands(self, commands):
if self.con is None:
self.create_connection()
if not self.alive:
return None
logger.debug("[SchedulerLink] Sending %d commands", len(commands))
try:
self.con.put('run_external_commands', serialize(commands))
except HTTPException as exp:
self.con = None
logger.debug(exp)
return False
def register_to_my_realm(self):
self.realm.schedulers.append(self)
def give_satellite_cfg(self):
return {'port': self.port, 'address': self.address,
'name': self.scheduler_name, 'instance_id': self.id,
'active': self.conf is not None, 'push_flavor': self.push_flavor,
'timeout': self.timeout, 'data_timeout': self.data_timeout,
'use_ssl': self.use_ssl, 'hard_ssl_name_check': self.hard_ssl_name_check}
    # Some parameters can be given as 'override parameters', like use_timezone,
    # so they will be merged (in the scheduler) with the standard conf sent by the arbiter
def get_override_configuration(self):
r = {}
properties = self.__class__.properties
for prop, entry in properties.items():
if entry.override:
r[prop] = getattr(self, prop)
return r
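    # Sketch of the result (assuming use_timezone is one of the properties
    # declared with override=True on the satellite link):
    #
    #   sched_link.get_override_configuration()
    #   # -> {'use_timezone': 'Europe/Paris', ...}  (override entries only)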
class SchedulerLinks(SatelliteLinks):
"""Please Add a Docstring to describe the class here"""
name_property = "scheduler_name"
inner_class = SchedulerLink


# =============================================================================
# shinken-solutions_shinken/shinken/objects/serviceescalation.py
# (dataset record 6,498; repo shinken-solutions/shinken; AGPL-3.0)
# =============================================================================

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.objects.escalation import Escalation
from shinken.property import IntegerProp, StringProp, ListProp
import uuid
class Serviceescalation(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'serviceescalation'
properties = Item.properties.copy()
properties.update({
'host_name': StringProp(),
'hostgroup_name': StringProp(),
'service_description': StringProp(),
'first_notification': IntegerProp(),
'last_notification': IntegerProp(),
'notification_interval': IntegerProp(default=30), # like Nagios value
'escalation_period': StringProp(default=''),
'escalation_options': ListProp(default=['d', 'u', 'r', 'w', 'c'], split_on_coma=True),
'contacts': StringProp(),
'contact_groups': StringProp(),
'first_notification_time': IntegerProp(),
'last_notification_time': IntegerProp(),
})
def get_newid(self):
cls = self.__class__
value = uuid.uuid1().hex
cls.id += 1
return value
# For debugging purpose only (nice name)
def get_name(self):
return ''
class Serviceescalations(Items):
name_property = ""
inner_class = Serviceescalation
    # We look for the contacts property in contacts and contact_groups
def explode(self, escalations):
# Now we explode all escalations (host_name, service_description) to escalations
for es in self:
properties = es.__class__.properties
creation_dict = {'escalation_name': 'Generated-Serviceescalation-%s' % es.id}
for prop in properties:
if hasattr(es, prop):
creation_dict[prop] = getattr(es, prop)
# print("Creation an escalation with:", creation_dict)
s = Escalation(creation_dict)
escalations.add_escalation(s)
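# A small sketch of what explode() produces (hypothetical definition): a
# serviceescalation for host 'srv1' / service 'Load' becomes an Escalation
# named 'Generated-Serviceescalation-<id>' carrying the same
# first/last_notification, contacts, escalation_period, etc., and is
# registered through escalations.add_escalation().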


# =============================================================================
# shinken-solutions_shinken/shinken/objects/command.py
# (dataset record 6,499; repo shinken-solutions/shinken; AGPL-3.0)
# =============================================================================

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from shinken.objects.item import Item, Items
from shinken.brok import Brok
from shinken.property import StringProp, IntegerProp, BoolProp
from shinken.autoslots import AutoSlots
# Ok, slots are fun: you cannot set the __autoslots__
# on the same class you use, fun isn't it? So we define
# a dummy, useless class to get it :)
class DummyCommand(object):
pass
class Command(six.with_metaclass(AutoSlots, Item)):
id = 0
my_type = "command"
properties = Item.properties.copy()
properties.update({
'command_name': StringProp(fill_brok=['full_status']),
'command_line': StringProp(fill_brok=['full_status']),
'poller_tag': StringProp(default='None'),
'reactionner_tag': StringProp(default='None'),
'module_type': StringProp(default=None),
'timeout': IntegerProp(default=-1),
'enable_environment_macros': BoolProp(default=False),
'priority': IntegerProp(default=100),
})
    def __init__(self, params=None):
        if params is None:
            params = {}
        setattr(self, 'id', self.__class__.id)
        # self.id = self.__class__.id
        self.__class__.id += 1
self.init_running_properties()
self.customs = {}
for key in params:
# delistify attributes if there is only one value
params[key] = self.compact_unique_attr_value(params[key])
# Manage customs values
if key.startswith('_'):
self.customs[key.upper()] = params[key]
else:
setattr(self, key, params[key])
        if not hasattr(self, 'timeout'):
            self.timeout = -1  # same as the IntegerProp default
        if not hasattr(self, 'poller_tag'):
            self.poller_tag = 'None'
        if not hasattr(self, 'enable_environment_macros'):
            self.enable_environment_macros = False
if not hasattr(self, 'reactionner_tag'):
self.reactionner_tag = 'None'
if not hasattr(self, 'module_type'):
            # If the command starts with a _, set the module_type
            # to the name of the command, without the leading _
            if getattr(self, 'command_line', '').startswith('_'):
                module_type = getattr(self, 'command_line', '').split(' ')[0]
                # and we remove the leading _
                self.module_type = module_type[1:]
            # If the command does not start with _, fork it :)
            else:
                self.module_type = 'fork'
if not hasattr(self, 'priority'):
self.priority = 100
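    # module_type inference in practice (hypothetical command lines):
    #
    #   Command({'command_name': 'c1',
    #            'command_line': 'check_ping -H $HOSTADDRESS$'}).module_type
    #   # -> 'fork'
    #   Command({'command_name': 'c2',
    #            'command_line': '_snmp poll $HOSTADDRESS$'}).module_type
    #   # -> 'snmp' (leading '_' stripped)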
def get_name(self):
return self.command_name
def __str__(self):
return str(self.__dict__)
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
# Is this property intended for broking?
# if 'fill_brok' in entry[prop]:
if brok_type in entry.fill_brok:
if hasattr(self, prop):
data[prop] = getattr(self, prop)
# elif 'default' in entry[prop]:
# data[prop] = entry.default
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
    # Inverse function of __getstate__
def __setstate__(self, state):
cls = self.__class__
# We move during 1.0 to a dict state
# but retention file from 0.8 was tuple
if isinstance(state, tuple):
self.__setstate_pre_1_0__(state)
return
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
# In 1.0 we move to a dict save. Before, it was
# a tuple save, like
# ({'id': 11}, {'poller_tag': 'None', 'reactionner_tag': 'None',
# 'command_line': '/usr/local/nagios/bin/rss-multiuser',
# 'module_type': 'fork', 'command_name': 'notify-by-rss'})
def __setstate_pre_1_0__(self, state):
for d in state:
for k, v in d.items():
setattr(self, k, v)
class Commands(Items):
inner_class = Command
name_property = "command_name"