| id (int64, 0-458k) | file_name (stringlengths, 4-119) | file_path (stringlengths, 14-227) | content (stringlengths, 24-9.96M) | size (int64, 24-9.96M) | language (stringclasses, 1 value) | extension (stringclasses, 14 values) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (stringlengths, 7-101) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (stringclasses, 12 values) | repo_extraction_date (stringclasses, 433 values) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 6,500 | itemgroup.py | shinken-solutions_shinken/shinken/objects/itemgroup.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# An itemgroup is like an item, but it's a group of items :)
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.brok import Brok
from shinken.property import StringProp, ListProp, ToGuessProp
from shinken.log import logger
# TODO: subclass Item & Items for Itemgroup & Itemgroups?
class Itemgroup(Item):
id = 0
properties = Item.properties.copy()
properties.update({
'members': ListProp(fill_brok=['full_status'], default=None, split_on_coma=True),
# Shinken specific
'unknown_members': ListProp(default=None),
})
def __init__(self, params={}):
self.id = self.__class__.id
self.__class__.id += 1
cls = self.__class__
self.init_running_properties()
for key in params:
if key in self.properties:
val = self.properties[key].pythonize(params[key])
elif key in self.running_properties:
warning = "using a the running property %s in a config file" % key
self.configuration_warnings.append(warning)
val = self.running_properties[key].pythonize(params[key])
else:
warning = "Guessing the property %s type because it is not in %s object properties" % \
(key, cls.__name__)
self.configuration_warnings.append(warning)
val = ToGuessProp.pythonize(params[key])
setattr(self, key, val)
# Copy the group's properties EXCEPT the members;
# members need to be filled in manually afterwards
def copy_shell(self):
cls = self.__class__
old_id = cls.id
new_i = cls() # create a new group
new_i.id = self.id # with the same id
cls.id = old_id # Reset the Class counter
# Copy all properties
for prop in cls.properties:
if prop != 'members':
if self.has(prop):
val = getattr(self, prop)
setattr(new_i, prop, val)
# but no members
new_i.members = []
return new_i
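# Editor's sketch (not part of the original file): copy_shell keeps the
# instance id and the class counter, but leaves members empty:
#
#   g = Itemgroup({'members': 'srv1, srv2'})
#   shell = g.copy_shell()
#   assert shell.id == g.id and shell.members == []
#   assert Itemgroup.id == g.id + 1  # class counter untouched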
def replace_members(self, members):
self.members = members
# If a prop is absent and is not required, put the default value
def fill_default(self):
cls = self.__class__
for prop, entry in cls.properties.items():
if not hasattr(self, prop) and not entry.required:
value = entry.default
setattr(self, prop, value)
def add_string_member(self, member):
add_fun = list.extend if isinstance(member, list) else list.append
if not hasattr(self, "members"):
self.members = []
add_fun(self.members, member)
def add_string_unknown_member(self, member):
add_fun = list.extend if isinstance(member, list) else list.append
if not self.unknown_members:
self.unknown_members = []
add_fun(self.unknown_members, member)
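# Editor's sketch (not part of the original file) of the append-vs-extend
# behavior above: a plain string is appended as one member, a list is
# merged element by element:
#
#   g = Itemgroup()
#   g.add_string_member('srv1')
#   g.add_string_member(['srv2', 'srv3'])
#   # g.members == ['srv1', 'srv2', 'srv3']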
def __str__(self):
return str(self.__dict__)
def __iter__(self):
return self.members.__iter__()
def __delitem__(self, i):
try:
self.members.remove(i)
except ValueError:
pass
# An itemgroup is correct if all members actually exist,
# so if unknown_members is still []
def is_correct(self):
res = True
if self.unknown_members:
for m in self.unknown_members:
logger.error("[itemgroup::%s] as %s, got unknown member %s",
self.get_name(), self.__class__.my_type, m)
res = False
if self.configuration_errors != []:
for err in self.configuration_errors:
logger.error("[itemgroup] %s", err)
res = False
return res
def has(self, prop):
return hasattr(self, prop)
# Get a brok with hostgroup info (like id, name)
# members is special: list of (id, host_name) for database info
def get_initial_status_brok(self):
cls = self.__class__
data = {}
# Now config properties
for prop, entry in cls.properties.items():
if entry.fill_brok != []:
if self.has(prop):
data[prop] = getattr(self, prop)
# Here members is just a bunch of hosts; I need (id, name) pairs instead
data['members'] = []
for i in self.members:
# it looks like Lisp! ((( ..))), sorry....
data['members'].append((i.id, i.get_name()))
b = Brok('initial_' + cls.my_type + '_status', data)
return b
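# Editor's note (not part of the original file): the resulting brok data
# is a plain dict; for a hypothetical group of two hosts it would hold
# something like:
#
#   {'members': [(1, 'srv1'), (2, 'srv2')], ...other fill_brok props...}
#
# members are flattened to (id, name) pairs so the brok stays serializable.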
class Itemgroups(Items):
# If a prop is absent and is not required, put the default value
def fill_default(self):
for i in self:
i.fill_default()
def add(self, ig):
self.add_item(ig)
def get_members_by_name(self, gname):
g = self.find_by_name(gname)
if g is None:
return []
return getattr(g, 'members', [])
| 5,980 | Python | .py | 145 | 32.655172 | 103 | 0.610459 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,501 | realm.py | shinken-solutions_shinken/shinken/objects/realm.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
from shinken.objects.item import Item
from shinken.objects.itemgroup import Itemgroup, Itemgroups
from shinken.property import BoolProp, IntegerProp, StringProp, DictProp, ListProp
from shinken.log import logger
# It differs from the hostgroup class because there is no members
# property, just the realm_members that we rewrite onto it.
class Realm(Itemgroup):
id = 1 # zero is always a little bit special... like in database
my_type = 'realm'
properties = Itemgroup.properties.copy()
properties.update({
'id': IntegerProp(default=0, fill_brok=['full_status']),
'realm_name': StringProp(fill_brok=['full_status']),
# No status_broker_name because it puts hosts, not host_name
'realm_members': ListProp(default=[], split_on_coma=True),
'higher_realms': ListProp(default=[], split_on_coma=True),
'default': BoolProp(default=False),
'broker_complete_links': BoolProp(default=False),
# 'alias': {'required': True, 'fill_brok': ['full_status']},
# 'notes': {'required': False, 'default':'', 'fill_brok': ['full_status']},
# 'notes_url': {'required': False, 'default':'', 'fill_brok': ['full_status']},
# 'action_url': {'required': False, 'default':'', 'fill_brok': ['full_status']},
})
running_properties = Item.running_properties.copy()
running_properties.update({
'serialized_confs': DictProp(default={}),
})
macros = {
'REALMNAME': 'realm_name',
'REALMMEMBERS': 'members',
}
def get_name(self):
return self.realm_name
def get_realms(self):
return self.realm_members
def add_string_member(self, member):
# member may be a single realm name or a list of names (from explosion)
add_fun = list.extend if isinstance(member, list) else list.append
add_fun(self.realm_members, member)
def get_realm_members(self):
if self.has('realm_members'):
return [r.strip() for r in self.realm_members]
else:
return []
# We fill the properties with template ones if needed.
# Because a realm we call may not have its members yet,
# we call get_realms_by_explosion on it
def get_realms_by_explosion(self, realms):
# First we tag the realm so it will not be exploded
# again if a son of it has already called it
self.already_explode = True
# Now the recursive part
# rec_tag is set to False on every realm we explode,
# so if it is True here, there must be a loop in the realm
# calls... not GOOD!
if self.rec_tag:
err = "Error: we've got a loop in realm definition %s" % self.get_name()
self.configuration_errors.append(err)
if self.has('members'):
return self.members
else:
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
p_mbrs = self.get_realm_members()
for p_mbr in p_mbrs:
p = realms.find_by_name(p_mbr.strip())
if p is not None:
value = p.get_realms_by_explosion(realms)
if value is not None:
self.add_string_member(value)
if self.has('members'):
return self.members
else:
return ''
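# Editor's sketch (not part of the original file) of the loop detection
# above, assuming two realms that reference each other:
#
#   a = Realm({'realm_name': 'A', 'realm_members': 'B'})
#   b = Realm({'realm_name': 'B', 'realm_members': 'A'})
#   # after tagging rec_tag = False on both and calling
#   # a.get_realms_by_explosion(realms), a.configuration_errors
#   # holds the "loop in realm definition" error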
def get_all_subs_satellites_by_type(self, sat_type):
r = copy.copy(getattr(self, sat_type))
for p in self.realm_members:
tmps = p.get_all_subs_satellites_by_type(sat_type)
for s in tmps:
r.append(s)
return r
def count_reactionners(self):
self.nb_reactionners = 0
for reactionner in self.reactionners:
if not reactionner.spare:
self.nb_reactionners += 1
for realm in self.higher_realms:
for reactionner in realm.reactionners:
if not reactionner.spare and reactionner.manage_sub_realms:
self.nb_reactionners += 1
def count_pollers(self):
self.nb_pollers = 0
for poller in self.pollers:
if not poller.spare:
self.nb_pollers += 1
for realm in self.higher_realms:
for poller in realm.pollers:
if not poller.spare and poller.manage_sub_realms:
self.nb_pollers += 1
def count_brokers(self):
self.nb_brokers = 0
for broker in self.brokers:
if not broker.spare:
self.nb_brokers += 1
for realm in self.higher_realms:
for broker in realm.brokers:
if not broker.spare and broker.manage_sub_realms:
self.nb_brokers += 1
def count_receivers(self):
self.nb_receivers = 0
for receiver in self.receivers:
if not receiver.spare:
self.nb_receivers += 1
for realm in self.higher_realms:
for receiver in realm.receivers:
if not receiver.spare and receiver.manage_sub_realms:
self.nb_receivers += 1
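# Editor's note (not part of the original file): each count_* above follows
# the same rule: local non-spare satellites, plus non-spare satellites of
# higher realms that have manage_sub_realms set. E.g. with 2 local pollers
# (1 of them spare) and 1 non-spare managing poller in a higher realm,
# count_pollers() yields nb_pollers == 2.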
# Return the list of satellites of a certain type
# like reactionner -> self.reactionners
def get_satellites_by_type(self, type):
if hasattr(self, type + 's'):
return getattr(self, type + 's')
else:
logger.debug("[realm] do not have this kind of satellites: %s", type)
return []
def fill_potential_satellites_by_type(self, sat_type):
setattr(self, 'potential_%s' % sat_type, [])
for satellite in getattr(self, sat_type):
getattr(self, 'potential_%s' % sat_type).append(satellite)
for realm in self.higher_realms:
for satellite in getattr(realm, sat_type):
if satellite.manage_sub_realms:
getattr(self, 'potential_%s' % sat_type).append(satellite)
# Return the list of potential satellites of a certain type
# like reactionner -> self.potential_reactionners
def get_potential_satellites_by_type(self, type):
if hasattr(self, 'potential_' + type + 's'):
return getattr(self, 'potential_' + type + 's')
else:
logger.debug("[realm] do not have this kind of satellites: %s", type)
return []
# Return the number of must-have satellites of a certain type
# like reactionner -> self.nb_reactionners
def get_nb_of_must_have_satellites(self, type):
if hasattr(self, 'nb_' + type + 's'):
return getattr(self, 'nb_' + type + 's')
else:
logger.debug("[realm] do not have this kind of satellites: %s", type)
return 0
# Fill dict of realms for managing the satellites confs
def prepare_for_satellites_conf(self):
self.to_satellites = {}
self.to_satellites['reactionner'] = {}
self.to_satellites['poller'] = {}
self.to_satellites['broker'] = {}
self.to_satellites['receiver'] = {}
self.to_satellites_need_dispatch = {}
self.to_satellites_need_dispatch['reactionner'] = {}
self.to_satellites_need_dispatch['poller'] = {}
self.to_satellites_need_dispatch['broker'] = {}
self.to_satellites_need_dispatch['receiver'] = {}
self.to_satellites_managed_by = {}
self.to_satellites_managed_by['reactionner'] = {}
self.to_satellites_managed_by['poller'] = {}
self.to_satellites_managed_by['broker'] = {}
self.to_satellites_managed_by['receiver'] = {}
self.count_reactionners()
self.fill_potential_satellites_by_type('reactionners')
self.count_pollers()
self.fill_potential_satellites_by_type('pollers')
self.count_brokers()
self.fill_potential_satellites_by_type('brokers')
self.count_receivers()
self.fill_potential_satellites_by_type('receivers')
s = "%s: (in/potential) (schedulers:%d) (pollers:%d/%d) "\
"(reactionners:%d/%d) (brokers:%d/%d) (receivers:%d/%d)" % \
(self.get_name(),
len(self.schedulers),
self.nb_pollers, len(self.potential_pollers),
self.nb_reactionners, len(self.potential_reactionners),
self.nb_brokers, len(self.potential_brokers),
self.nb_receivers, len(self.potential_receivers)
)
logger.info(s)
# TODO: find a better name...
# TODO: and if he goes active?
def fill_broker_with_poller_reactionner_links(self, broker):
# First we create/void these links
broker.cfg['pollers'] = {}
broker.cfg['reactionners'] = {}
broker.cfg['receivers'] = {}
# First our own level
for p in self.pollers:
cfg = p.give_satellite_cfg()
broker.cfg['pollers'][p.id] = cfg
for r in self.reactionners:
cfg = r.give_satellite_cfg()
broker.cfg['reactionners'][r.id] = cfg
for b in self.receivers:
cfg = b.give_satellite_cfg()
broker.cfg['receivers'][b.id] = cfg
# Then the sub-realms, if we must do it
if broker.manage_sub_realms:
# Now pollers
for p in self.get_all_subs_satellites_by_type('pollers'):
cfg = p.give_satellite_cfg()
broker.cfg['pollers'][p.id] = cfg
# Now reactionners
for r in self.get_all_subs_satellites_by_type('reactionners'):
cfg = r.give_satellite_cfg()
broker.cfg['reactionners'][r.id] = cfg
# Now receivers
for r in self.get_all_subs_satellites_by_type('receivers'):
cfg = r.give_satellite_cfg()
broker.cfg['receivers'][r.id] = cfg
# Get a conf package of satellites links that can be useful for
# a scheduler
def get_satellites_links_for_scheduler(self):
cfg = {}
# First we create/void these links
cfg['pollers'] = {}
cfg['reactionners'] = {}
# First our own level
for p in self.pollers:
c = p.give_satellite_cfg()
cfg['pollers'][p.id] = c
for r in self.reactionners:
c = r.give_satellite_cfg()
cfg['reactionners'][r.id] = c
# print("***** Preparing a satellites conf for a scheduler", cfg)
return cfg
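# Editor's note (not part of the original file): the returned cfg is a
# two-level dict, satellite type -> satellite id -> link configuration:
#
#   {'pollers': {0: {...}, 1: {...}}, 'reactionners': {4: {...}}}
#
# where each inner dict comes from give_satellite_cfg() on the link.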
class Realms(Itemgroups):
name_property = "realm_name" # is used for finding hostgroups
inner_class = Realm
def get_members_by_name(self, pname):
realm = self.find_by_name(pname)
if realm is None:
return []
return realm.get_realms()
def linkify(self):
self.linkify_p_by_p()
# prepare list of satellites and confs
for p in self:
p.pollers = []
p.schedulers = []
p.reactionners = []
p.brokers = []
p.receivers = []
p.packs = []
p.confs = {}
# For each realm we look up the other realms it names
# and replace the names with the realm objects
def linkify_p_by_p(self):
for p in self.items.values():
mbrs = p.get_realm_members()
# The new member list, as realm objects
new_mbrs = []
for mbr in mbrs:
new_mbr = self.find_by_name(mbr)
if new_mbr is not None:
new_mbrs.append(new_mbr)
# We found them, so we replace the names with the objects
p.realm_members = new_mbrs
# Now put the higher realms in the sub realms,
# so afterwards they can look up who manages them
for p in self.items.values():
p.higher_realms = []
for p in self.items.values():
self.recur_higher_realms(p, p.realm_members)
# I add the realm r to each son's higher_realms, and
# also to the sons' sons, and so on
def recur_higher_realms(self, r, sons):
for sub_p in sons:
sub_p.higher_realms.append(r)
# and call for our sons too
self.recur_higher_realms(r, sub_p.realm_members)
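# Editor's sketch (not part of the original file): assuming realm World
# has member Europe, which has member Paris, then after linkify()
# Paris.higher_realms contains both Europe and World, because
# recur_higher_realms() pushes each realm down through all of its
# descendants.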
# Used to fill realm_members with the exploded sub-realm members
def explode(self):
# We do not want the same realm to be exploded again and again,
# so we tag it
for tmp_p in self.items.values():
tmp_p.already_explode = False
for p in self:
if p.has('realm_members') and not p.already_explode:
# get_realms_by_explosion is a recursive
# function, so we must tag realms so we do not loop
for tmp_p in self:
tmp_p.rec_tag = False
p.get_realms_by_explosion(self)
# We clean the tags
for tmp_p in self.items.values():
if hasattr(tmp_p, 'rec_tag'):
del tmp_p.rec_tag
del tmp_p.already_explode
def get_default(self):
for r in self:
if getattr(r, 'default', False):
return r
return None
def prepare_for_satellites_conf(self):
for r in self:
r.prepare_for_satellites_conf()
| 13,932 | Python | .py | 325 | 33.129231 | 88 | 0.595184 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,502 | trigger.py | shinken-solutions_shinken/shinken/objects/trigger.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import re
import sys
import traceback
from shinken.objects.item import Item, Items
from shinken.property import BoolProp, StringProp
from shinken.log import logger
from shinken.trigger_functions import objs, trigger_functions, set_value
class Trigger(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'trigger'
properties = Item.properties.copy()
properties.update({'trigger_name': StringProp(fill_brok=['full_status']),
'code_src': StringProp(default='', fill_brok=['full_status']),
})
running_properties = Item.running_properties.copy()
running_properties.update({'code_bin': StringProp(default=None),
'trigger_broker_raise_enabled': BoolProp(default=False)
})
# For debugging purpose only (nice name)
def get_name(self):
try:
return self.trigger_name
except AttributeError:
return 'UnnamedTrigger'
def compile(self):
self.code_bin = compile(self.code_src, "<irc>", "exec")
# ctx is the object we are evaluating the code against. In the code
# it will be "self".
def eval(myself, ctx):
self = ctx
# Ok we can declare for this trigger call our functions
for (n, f) in trigger_functions.items():
locals()[n] = f
code = myself.code_bin # Comment? => compile(myself.code_bin, "<irc>", "exec")
try:
six.exec_(code)
except Exception as err:
set_value(self, "UNKNOWN: Trigger error: %s" % err, "", 3)
logger.error('%s Trigger %s failed: %s ; '
'%s' % (self.host_name, myself.trigger_name, err, traceback.format_exc()))
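# Editor's sketch (not part of the original file): a trigger source is
# plain Python executed with "self" bound to the evaluated object and the
# trigger_functions available as locals. A hypothetical .trig body:
#
#   if self.state == 'DOWN':
#       set_value(self, "CRITICAL: host is down", "", 2)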
def __getstate__(self):
return {'trigger_name': self.trigger_name,
'code_src': self.code_src,
'trigger_broker_raise_enabled': self.trigger_broker_raise_enabled}
def __setstate__(self, d):
self.trigger_name = d['trigger_name']
self.code_src = d['code_src']
self.trigger_broker_raise_enabled = d['trigger_broker_raise_enabled']
class Triggers(Items):
name_property = "trigger_name"
inner_class = Trigger
# We will dig into the path and load all .trig files
def load_file(self, path):
# Now walk for it
for root, dirs, files in os.walk(path):
for file in files:
if re.search("\.trig$", file):
p = os.path.join(root, file)
try:
fd = open(p, 'r')
buf = fd.read()
fd.close()
except IOError as exp:
logger.error("Cannot open trigger file '%s' for reading: %s", p, exp)
# ok, skip this one
continue
self.create_trigger(buf, file[:-5])
# Create a trigger from the string src, with the given name
def create_trigger(self, src, name):
# Ok, go compile the code
t = Trigger({'trigger_name': name, 'code_src': src})
t.compile()
# Ok, add it
self[t.id] = t
return t
def compile(self):
for i in self:
i.compile()
def load_objects(self, conf):
global objs
objs['hosts'] = conf.hosts
objs['services'] = conf.services
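# Editor's sketch (not part of the original file), assuming a directory
# of *.trig files at a hypothetical path:
#
#   triggers = Triggers([])
#   triggers.load_file('/etc/shinken/trigger.d')  # compiles each trigger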
| 4,483 | Python | .py | 107 | 33.317757 | 99 | 0.61277 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,503 | host.py | shinken-solutions_shinken/shinken/objects/host.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This is the main class for the Host. In fact it's mainly
about the configuration part. for the running one, it's better
to look at the schedulingitem class that manage all
scheduling/consume check smart things :)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import time
import itertools
from shinken.objects.item import Items
from shinken.objects.schedulingitem import SchedulingItem
from shinken.autoslots import AutoSlots
from shinken.util import (format_t_into_dhms_format, to_hostnames_list, get_obj_name,
to_svc_hst_distinct_lists, to_list_string_of_names, to_list_of_names,
to_name_if_possible, strip_and_uniq, get_exclude_match_expr)
from shinken.property import BoolProp, IntegerProp, FloatProp, CharProp, StringProp, ListProp
from shinken.graph import Graph
from shinken.macroresolver import MacroResolver
from shinken.eventhandler import EventHandler
from shinken.log import logger, naglog_result
import uuid
class Host(six.with_metaclass(AutoSlots, SchedulingItem)):
id = 1 # zero is reserved for host (primary node for parents)
ok_up = 'UP'
my_type = 'host'
# properties defined by configuration
# *required: is required in conf
# *default: default value if not set in conf
# *pythonize: function to call when transforming a string into a python object
# *fill_brok: if set, send to broker.
# there are two categories:
# full_status for initial and update status, check_result for check results
# *no_slots: do not take this property for __slots__
# Only for the initial call
# conf_send_preparation: if set, will pass the property to this function. It's used to "flatten"
# some dangerous properties like realms that are too 'linked' to be sent like that.
# brok_transformation: if set, will call the function with the value of the property;
# most of the time it will be used to flatten the data (like realm_name instead of the realm object).
properties = SchedulingItem.properties.copy()
properties.update({
'host_name':
StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
'alias':
StringProp(fill_brok=['full_status']),
'display_name':
StringProp(default='', fill_brok=['full_status']),
'address':
StringProp(fill_brok=['full_status']),
'parents':
ListProp(brok_transformation=to_hostnames_list, default=[],
fill_brok=['full_status'], merging='join', split_on_coma=True),
'hostgroups':
ListProp(brok_transformation=to_list_string_of_names, default=[],
fill_brok=['full_status'], merging='join', split_on_coma=True),
'check_command':
StringProp(default='_internal_host_up', fill_brok=['full_status']),
'initial_state':
CharProp(default='', fill_brok=['full_status']),
'initial_output':
StringProp(default='', fill_brok=['full_status']),
'max_check_attempts':
IntegerProp(default=1, fill_brok=['full_status']),
'check_interval':
IntegerProp(default=0, fill_brok=['full_status', 'check_result']),
'retry_interval':
IntegerProp(default=0, fill_brok=['full_status', 'check_result']),
'active_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'passive_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'check_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'obsess_over_host':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'check_freshness':
BoolProp(default=False, fill_brok=['full_status']),
'freshness_threshold':
IntegerProp(default=0, fill_brok=['full_status']),
'event_handler':
StringProp(default='', fill_brok=['full_status']),
'event_handler_enabled':
BoolProp(default=False, fill_brok=['full_status']),
'low_flap_threshold':
IntegerProp(default=25, fill_brok=['full_status']),
'high_flap_threshold':
IntegerProp(default=50, fill_brok=['full_status']),
'flap_detection_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'flap_detection_options':
ListProp(default=['o', 'd', 'u'], fill_brok=['full_status'],
merging='join', split_on_coma=True),
'process_perf_data':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'retain_status_information':
BoolProp(default=True, fill_brok=['full_status']),
'retain_nonstatus_information':
BoolProp(default=True, fill_brok=['full_status']),
'contacts':
ListProp(default=[], brok_transformation=to_list_of_names,
fill_brok=['full_status'], merging='join', split_on_coma=True),
'contact_groups':
ListProp(default=[], fill_brok=['full_status'],
merging='join', split_on_coma=True),
'notification_interval':
IntegerProp(default=60, fill_brok=['full_status']),
'first_notification_delay':
IntegerProp(default=0, fill_brok=['full_status']),
'notification_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'notification_options':
ListProp(default=['d', 'u', 'r', 'f'], fill_brok=['full_status'],
merging='join', split_on_coma=True),
'notifications_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'stalking_options':
ListProp(default=[''], fill_brok=['full_status']),
'notes':
StringProp(default='', fill_brok=['full_status']),
'notes_url':
StringProp(default='', fill_brok=['full_status']),
'action_url':
StringProp(default='', fill_brok=['full_status']),
'icon_image':
StringProp(default='', fill_brok=['full_status']),
'icon_image_alt':
StringProp(default='', fill_brok=['full_status']),
'icon_set':
StringProp(default='', fill_brok=['full_status']),
'vrml_image':
StringProp(default='', fill_brok=['full_status']),
'statusmap_image':
StringProp(default='', fill_brok=['full_status']),
# No slots for these 2 because beginning a property with a number seems bad
# it's stupid!
'2d_coords':
StringProp(default='', fill_brok=['full_status'], no_slots=True),
'3d_coords':
StringProp(default='', fill_brok=['full_status'], no_slots=True),
'failure_prediction_enabled':
BoolProp(default=False, fill_brok=['full_status']),
# New to shinken
# 'fill_brok' is ok because in scheduler it's already
# a string from conf_send_preparation
'realm':
StringProp(default=None, fill_brok=['full_status'], conf_send_preparation=get_obj_name),
'poller_tag':
StringProp(default='None'),
'reactionner_tag':
StringProp(default='None'),
'resultmodulations':
ListProp(default=[], merging='join'),
'business_impact_modulations':
ListProp(default=[], merging='join'),
'escalations':
ListProp(default=[], fill_brok=['full_status'], merging='join', split_on_coma=True),
'time_to_orphanage':
IntegerProp(default=300, fill_brok=['full_status']),
'service_overrides':
ListProp(default=[], merging='duplicate', split_on_coma=False),
'service_excludes':
ListProp(default=[], merging='duplicate', split_on_coma=True),
'service_includes':
ListProp(default=[], merging='duplicate', split_on_coma=True),
'labels':
StringProp(default=[], fill_brok=['full_status'], merging='join', split_on_coma=True),
# BUSINESS CORRELATOR PART
# Business rules output format template
'business_rule_output_template':
StringProp(default='', fill_brok=['full_status']),
# Business rules notifications mode
'business_rule_smart_notifications':
BoolProp(default=False, fill_brok=['full_status']),
# Treat downtimes as acknowledgements in smart notifications
'business_rule_downtime_as_ack':
BoolProp(default=False, fill_brok=['full_status']),
# Enforces child nodes notification options
'business_rule_host_notification_options':
ListProp(default=[], fill_brok=['full_status']),
'business_rule_service_notification_options':
ListProp(default=[], fill_brok=['full_status']),
# Business impact value
'business_impact':
IntegerProp(default=2, fill_brok=['full_status']),
# Load some triggers
'trigger':
StringProp(default=''),
'trigger_name':
StringProp(default=''),
'trigger_broker_raise_enabled':
BoolProp(default=False),
# Trending
'trending_policies':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
# Our modulations. Void by default, but will be filled by an inner one if needed
'checkmodulations':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'macromodulations':
ListProp(default=[], merging='join'),
# Custom views
'custom_views':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
# Snapshot part
'snapshot_enabled':
BoolProp(default=False),
'snapshot_command':
StringProp(default=''),
'snapshot_period':
StringProp(default=''),
'snapshot_criteria':
ListProp(default=['d', 'u'], fill_brok=['full_status'], merging='join'),
'snapshot_interval':
IntegerProp(default=5),
# Maintenance part
'maintenance_check_command':
StringProp(default='', fill_brok=['full_status']),
'maintenance_period':
StringProp(default='', brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'maintenance_checks_enabled':
BoolProp(default=False, fill_brok=['full_status']),
'maintenance_check_period':
StringProp(default='', brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'maintenance_check_interval':
IntegerProp(default=0, fill_brok=['full_status', 'check_result']),
'maintenance_retry_interval':
IntegerProp(default=0, fill_brok=['full_status', 'check_result']),
# Check/notification priority
'priority':
IntegerProp(default=100, fill_brok=['full_status']),
})
# properties set only for running purpose
# retention: save/load this property from retention
running_properties = SchedulingItem.running_properties.copy()
running_properties.update({
'modified_attributes':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'last_chk':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'next_chk':
IntegerProp(default=0, fill_brok=['full_status', 'next_schedule'], retention=True),
'in_maintenance':
IntegerProp(default=None, fill_brok=['full_status'], retention=True),
'latency':
FloatProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'attempt':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'state':
StringProp(default='PENDING', fill_brok=['full_status', 'check_result'],
retention=True),
'state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'state_type_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_event_id':
StringProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_event_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state':
StringProp(default='PENDING', fill_brok=['full_status', 'check_result'],
retention=True),
'last_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'last_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state':
StringProp(default='PENDING', fill_brok=['full_status'], retention=True),
'last_hard_state_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'last_time_up':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_down':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_unreachable':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'duration_sec':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'long_output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'is_flapping':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'flapping_comment_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
# No broks for _depend_of because of too many links to hosts/services
# dependencies for actions like notif of event handler, so AFTER check return
'act_depend_of':
ListProp(default=[]),
# dependencies for checks raise, so BEFORE checks
'chk_depend_of':
ListProp(default=[]),
# elements that depend of me, so the reverse than just upper
'act_depend_of_me':
ListProp(default=[]),
# elements that depend of me
'chk_depend_of_me':
ListProp(default=[]),
'last_state_update':
StringProp(default=0, fill_brok=['full_status'], retention=True),
# no brok, too many links
'services':
StringProp(default=[]),
# No broks, it's just internal, and checks have too many links
'checks_in_progress':
ListProp(default=[]),
# No broks, it's just internal, and checks have too many links
'notifications_in_progress':
StringProp(default={}, retention=True),
'downtimes':
StringProp(default=[], fill_brok=['full_status'], retention=True),
'comments':
StringProp(default=[], fill_brok=['full_status'], retention=True),
'flapping_changes':
StringProp(default=[], fill_brok=['full_status'], retention=True),
'percent_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'problem_has_been_acknowledged':
BoolProp(default=False, fill_brok=['full_status', 'check_result']),
'acknowledgement':
StringProp(default=None, retention=True),
'acknowledgement_type':
IntegerProp(default=1, fill_brok=['full_status', 'check_result'], retention=True),
'check_type':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'has_been_checked':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'should_be_scheduled':
IntegerProp(default=1, fill_brok=['full_status'], retention=True),
'last_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'execution_time':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'u_time':
FloatProp(default=0.0),
's_time':
FloatProp(default=0.0),
'last_notification':
FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
'current_notification_number':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'current_notification_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'check_flapping_recovery_notification':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'scheduled_downtime_depth':
IntegerProp(default=0, fill_brok=['full_status']),
'pending_flex_downtime':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'start_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'end_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'early_timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'return_code':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'perf_data':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'last_perf_data':
StringProp(default='', retention=True),
'customs':
StringProp(default={}, fill_brok=['full_status']),
'got_default_realm':
BoolProp(default=False),
# used to keep all the contacts we have notified
# Warning: for the notified_contacts retention save, we save only the names of the
# contacts, and we should RELINK
# them when we load it.
'notified_contacts':
StringProp(default=set(), retention=True, retention_preparation=to_list_of_names),
'in_scheduled_downtime':
BoolProp(default=False, fill_brok=['full_status', 'check_result']),
'in_scheduled_downtime_during_last_check':
BoolProp(default=False, retention=True),
# put here checks and notif raised
'actions':
StringProp(default=[]),
# and here broks raised
'broks':
StringProp(default=[]),
# For knowing with which elements we are in relation
# of dep.
# childs are the hosts that have US as parent, so
# only a network dep
'childs':
StringProp(brok_transformation=to_hostnames_list, default=[],
fill_brok=['full_status']),
# Here it's the elements we are depending on
# so our parents as network relation, or a host
# we are depending in a hostdependency
# or even if we are business based.
'parent_dependencies':
StringProp(brok_transformation=to_svc_hst_distinct_lists, default=set(),
fill_brok=['full_status']),
# Here it's the guys that depend on us. So it's the total
# opposite of the parent_dependencies
'child_dependencies':
StringProp(brok_transformation=to_svc_hst_distinct_lists,
default=set(),
fill_brok=['full_status']),
# Problem/impact part
'is_problem':
StringProp(default=False, fill_brok=['full_status']),
'is_impact':
StringProp(default=False, fill_brok=['full_status']),
# the save value of our business_impact for "problems"
'my_own_business_impact':
IntegerProp(default=-1, fill_brok=['full_status']),
# list of problems that make us an impact
'source_problems':
StringProp(brok_transformation=to_svc_hst_distinct_lists, default=[],
fill_brok=['full_status']),
# list of the impact I'm the cause of
'impacts':
StringProp(brok_transformation=to_svc_hst_distinct_lists, default=[],
fill_brok=['full_status']),
# keep a trace of the old state before being an impact
'state_before_impact':
StringProp(default='PENDING'),
# keep a trace of the old state id before being an impact
'state_id_before_impact':
StringProp(default=0),
# if the state change, we know so we do not revert it
'state_changed_since_impact':
StringProp(default=False),
# BUSINESS CORRELATOR PART
# Say if we are business based rule or not
'got_business_rule':
BoolProp(default=False, fill_brok=['full_status']),
# Previously processed business rule (with macro expanded)
'processed_business_rule':
StringProp(default="", fill_brok=['full_status']),
# Our Dependency node for the business rule
'business_rule':
StringProp(default=None),
# Manage the unknown/unreach during hard state
# From now its not really used
'in_hard_unknown_reach_phase':
BoolProp(default=False, retention=True),
'was_in_hard_unknown_reach_phase':
BoolProp(default=False, retention=True),
'state_before_hard_unknown_reach_phase':
StringProp(default='UP', retention=True),
# Set if the element just change its father/son topology
'topology_change':
BoolProp(default=False, fill_brok=['full_status']),
# Keep in mind our pack id after the cutting phase
'pack_id':
IntegerProp(default=-1),
# Trigger list
'triggers':
StringProp(default=[]),
# snapshots part
'last_snapshot': IntegerProp(default=0, fill_brok=['full_status'], retention=True),
# Keep the string of the last command launched for this element
'last_check_command': StringProp(default=''),
# Maintenance states: PRODUCTION (0), MAINTENANCE (1), UNKNOWN (2)
'last_maintenance_chk':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'next_maintenance_chk':
IntegerProp(default=0, fill_brok=['full_status', 'next_schedule'], retention=True),
'maintenance_check_output':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'maintenance_state':
StringProp(default='PENDING', fill_brok=['full_status', 'check_result'], retention=True),
'maintenance_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_maintenance_state':
StringProp(default='PENDING', fill_brok=['full_status', 'check_result'], retention=True),
'last_maintenance_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_maintenance_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
})
# Hosts macros and prop that give the information
# the prop can be callable or not
macros = {
'HOSTNAME': 'host_name',
'HOSTDISPLAYNAME': 'display_name',
'HOSTALIAS': 'alias',
'HOSTADDRESS': 'address',
'HOSTSTATE': 'state',
'HOSTSTATEID': 'state_id',
'LASTHOSTSTATE': 'last_state',
'LASTHOSTSTATEID': 'last_state_id',
'HOSTSTATETYPE': 'state_type',
'HOSTATTEMPT': 'attempt',
'MAXHOSTATTEMPTS': 'max_check_attempts',
'HOSTEVENTID': 'current_event_id',
'LASTHOSTEVENTID': 'last_event_id',
'HOSTPROBLEMID': 'current_problem_id',
'LASTHOSTPROBLEMID': 'last_problem_id',
'HOSTLATENCY': 'latency',
'HOSTEXECUTIONTIME': 'execution_time',
'HOSTDURATION': 'get_duration',
'HOSTDURATIONSEC': 'get_duration_sec',
'HOSTDOWNTIME': 'get_downtime',
'HOSTPERCENTCHANGE': 'percent_state_change',
'HOSTGROUPNAME': 'get_groupname',
'HOSTGROUPNAMES': 'get_groupnames',
'LASTHOSTCHECK': 'last_chk',
'LASTHOSTSTATECHANGE': 'last_state_change',
'LASTHOSTUP': 'last_time_up',
'LASTHOSTDOWN': 'last_time_down',
'LASTHOSTUNREACHABLE': 'last_time_unreachable',
'HOSTOUTPUT': 'output',
'LONGHOSTOUTPUT': 'long_output',
'HOSTPERFDATA': 'perf_data',
'LASTHOSTPERFDATA': 'last_perf_data',
'HOSTCHECKCOMMAND': 'get_check_command',
'HOSTACKAUTHOR': 'get_ack_author_name',
'HOSTACKAUTHORNAME': 'get_ack_author_name',
'HOSTACKAUTHORALIAS': 'get_ack_author_name',
'HOSTACKCOMMENT': 'get_ack_comment',
'HOSTACTIONURL': 'action_url',
'HOSTNOTESURL': 'notes_url',
'HOSTNOTES': 'notes',
'HOSTREALM': 'get_realm',
'TOTALHOSTSERVICES': 'get_total_services',
'TOTALHOSTSERVICESOK': 'get_total_services_ok',
'TOTALHOSTSERVICESWARNING': 'get_total_services_warning',
'TOTALHOSTSERVICESUNKNOWN': 'get_total_services_unknown',
'TOTALHOSTSERVICESCRITICAL': 'get_total_services_critical',
'HOSTBUSINESSIMPACT': 'business_impact',
# Business rules output formatting related macros
'STATUS': 'get_status',
'SHORTSTATUS': 'get_short_status',
'FULLNAME': 'get_full_name',
}
# Manage ADDRESSX macros by adding them dynamically
for _i in range(32):
macros['HOSTADDRESS%d' % _i] = 'address%d' % _i
# This tab is used to transform old parameters name into new ones
# so from Nagios2 format, to Nagios3 ones.
# Or Shinken deprecated names like criticity
old_properties = {
'normal_check_interval': 'check_interval',
'retry_check_interval': 'retry_interval',
'criticity': 'business_impact',
'hostgroup': 'hostgroups',
# 'criticitymodulations': 'business_impact_modulations',
}
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
def get_newid(self):
cls = self.__class__
value = uuid.uuid1().hex
cls.id += 1
return value
def set_initial_state(self):
mapping = {
"o": {
"state": "UP",
"state_id": 0
},
"d": {
"state": "DOWN",
"state_id": 1
},
"u": {
"state": "UNREACHABLE",
"state_id": 2
},
}
SchedulingItem.set_initial_state(self, mapping)
# Fill address with host_name if not already set
def fill_predictive_missing_parameters(self):
if hasattr(self, 'host_name') and not hasattr(self, 'address'):
self.address = self.host_name
if hasattr(self, 'host_name') and not hasattr(self, 'alias'):
self.alias = self.host_name
# Check if the required props are set:
# contacts OR contact_groups is needed
def is_correct(self):
state = True
cls = self.__class__
source = getattr(self, 'imported_from', 'unknown')
special_properties = ['check_period', 'notification_interval',
'notification_period']
for prop, entry in cls.properties.items():
if prop not in special_properties:
if not hasattr(self, prop) and entry.required:
logger.error("[host::%s] %s property not set", self.get_name(), prop)
state = False # Bad boy...
# Then look if we have some errors in the conf
# Just print warnings, but raise errors
for err in self.configuration_warnings:
logger.warning("[host::%s] %s", self.get_name(), err)
# Raise all previously seen errors, like unknown contacts and co
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[host::%s] %s", self.get_name(), err)
if not hasattr(self, 'notification_period'):
self.notification_period = None
# Ok now we manage special cases...
if self.notifications_enabled and self.contacts == []:
logger.warning("The host %s has no contacts nor contact_groups in (%s)",
self.get_name(), source)
if getattr(self, 'event_handler', None) and not self.event_handler.is_valid():
logger.error("%s: my event_handler %s is invalid",
self.get_name(), self.event_handler.command)
state = False
if getattr(self, 'check_command', None) is None:
logger.error("%s: I've got no check_command", self.get_name())
state = False
# Ok got a command, but maybe it's invalid
else:
if not self.check_command.is_valid():
logger.error("%s: my check_command %s is invalid",
self.get_name(), self.check_command.command)
state = False
if self.got_business_rule:
if not self.business_rule.is_valid():
logger.error("%s: my business rule is invalid", self.get_name(),)
for bperror in self.business_rule.configuration_errors:
logger.error("[host::%s] %s", self.get_name(), bperror)
state = False
if (not hasattr(self, 'notification_interval') and
self.notifications_enabled is True):
logger.error("%s: I've got no notification_interval but "
"I've got notifications enabled", self.get_name())
state = False
# if no check_period, means 24x7, like for services
if not hasattr(self, 'check_period'):
self.check_period = None
if hasattr(self, 'host_name'):
for c in cls.illegal_object_name_chars:
if c in self.host_name:
logger.error("%s: My host_name got the character %s that is not allowed.",
self.get_name(), c)
state = False
return state
# Search my services to see if I've got the given one
def find_service_by_name(self, service_description):
for s in self.services:
if getattr(s, 'service_description', '__UNNAMED_SERVICE__') == service_description:
return s
return None
# Return all of the services on a host
def get_services(self):
return self.services
# Get a nice name
def get_name(self):
if not self.is_tpl():
try:
return self.host_name
except AttributeError: # outch, no hostname
return 'UNNAMEDHOST'
else:
try:
return self.name
except AttributeError: # outch, no name for this template
return 'UNNAMEDHOSTTEMPLATE'
def get_groupname(self):
groupname = ''
for hg in self.hostgroups:
# naglog_result('info', 'get_groupname : %s %s %s' % (hg.id, hg.alias, hg.get_name()))
# groupname = "%s [%s]" % (hg.alias, hg.get_name())
groupname = hg.alias
return groupname
def get_groupnames(self):
groupnames = ''
for hg in self.hostgroups:
# naglog_result('info', 'get_groupnames : %s' % (hg.get_name()))
if groupnames == '':
groupnames = hg.get_name()
else:
groupnames = "%s, %s" % (groupnames, hg.get_name())
return groupnames
# For debugging purpose only
def get_dbg_name(self):
return self.host_name
# Same but for clean call, no debug
def get_full_name(self):
return self.host_name
# Get our realm
def get_realm(self):
return self.realm
def get_hostgroups(self):
return self.hostgroups
def get_host_tags(self):
return self.tags
# Say if the other host is in one of our dep lists
def is_linked_with_host(self, other):
for (h, status, type, timeperiod, inherits_parent) in self.act_depend_of:
if h == other:
return True
return False
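# Editor's note (not part of the original file): entries of act_depend_of
# are 5-tuples (host, status, dep type, timeperiod, inherits_parent), e.g.
# (router, ['d', 'u'], 'network_dep', None, False), which is why the loops
# here and below unpack five values.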
# Delete all links in the act_depend_of list of self and other
def del_host_act_dependency(self, other):
to_del = []
# First we remove in my list
for (h, status, type, timeperiod, inherits_parent) in self.act_depend_of:
if h == other:
to_del.append((h, status, type, timeperiod, inherits_parent))
for t in to_del:
self.act_depend_of.remove(t)
# And now in the father part
to_del = []
for (h, status, type, timeperiod, inherits_parent) in other.act_depend_of_me:
if h == self:
to_del.append((h, status, type, timeperiod, inherits_parent))
for t in to_del:
other.act_depend_of_me.remove(t)
# Remove in child/parents deps too
# Me in father list
other.child_dependencies.remove(self)
# and father list in mine
self.parent_dependencies.remove(other)
# Add a dependency for actions (event handler, notification, etc.)
# and add ourselves to its dep list
def add_host_act_dependency(self, h, status, timeperiod, inherits_parent):
# I add him in MY list
self.act_depend_of.append((h, status, 'logic_dep', timeperiod, inherits_parent))
# And I add me in it's list
h.act_depend_of_me.append((self, status, 'logic_dep', timeperiod, inherits_parent))
# And the parent/child dep lists too
h.register_son_in_parent_child_dependencies(self)
# Register the dependency between 2 services for actions (notification etc),
# but based on a BUSINESS rule, so in fact:
# ERP depends on the database, so we fill just database.act_depend_of_me,
# because we will want ERP mails to go on! So call this
# on the database service with srv=the ERP service
def add_business_rule_act_dependency(self, h, status, timeperiod, inherits_parent):
# first I add the other one I depend on to MY list;
# I only register so it knows that I WILL be an impact
self.act_depend_of_me.append((h, status, 'business_dep',
timeperiod, inherits_parent))
# And the parent/child dep lists too
self.register_son_in_parent_child_dependencies(h)
# Add a dependency for check (so before launch)
def add_host_chk_dependency(self, h, status, timeperiod, inherits_parent):
# I add him in MY list
self.chk_depend_of.append((h, status, 'logic_dep', timeperiod, inherits_parent))
# And I add me in it's list
h.chk_depend_of_me.append((self, status, 'logic_dep', timeperiod, inherits_parent))
# And we fill parent/childs dep for brok purpose
# Here self depend on h
h.register_son_in_parent_child_dependencies(self)
# Add one of our service to services (at linkify)
def add_service_link(self, service):
self.services.append(service)
def __repr__(self):
return '<Host host_name=%r name=%r use=%r />' % (
getattr(self, 'host_name', None),
getattr(self, 'name', None),
getattr(self, 'use', None))
__str__ = __repr__
def is_excluded_for(self, service):
''' Check whether this host should have the passed service be "excluded" or "not included".
A host can define service_includes and/or service_excludes directives to either
white-list-only or black-list some services from itself.
:type service: shinken.objects.service.Service
'''
return self.is_excluded_for_sdesc(service.service_description, service.is_tpl())
def is_excluded_for_sdesc(self, sdesc, is_tpl=False):
''' Check whether this host should have the passed service *description*
be "excluded" or "not included".
'''
if not is_tpl and hasattr(self, "service_includes"):
incl = False
for d in self.service_includes:
try:
fct = get_exclude_match_expr(d)
if fct(sdesc):
incl = True
except Exception as e:
self.configuration_errors.append(
"Invalid include expression: %s: %s" % (d, e))
return not incl
if hasattr(self, "service_excludes"):
for d in self.service_excludes:
try:
fct = get_exclude_match_expr(d)
if fct(sdesc):
return True
except Exception as e:
self.configuration_errors.append(
"Invalid exclude expression: %s: %s" % (d, e))
return False
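# Editor's sketch (not part of the original file) of the include/exclude
# semantics above, for a hypothetical host h (expressions are matched via
# get_exclude_match_expr, here plain service descriptions):
#
#   h.service_includes = ['Http', 'Load']   # white-list-only
#   h.is_excluded_for_sdesc('Http')  # -> False (included)
#   h.is_excluded_for_sdesc('Ssh')   # -> True (not on the white list)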
#####
# _
# (_)
# _ __ _ _ _ __ _ __ _ _ __ __ _
# | '__| | | | '_ \| '_ \| | '_ \ / _` |
# | | | |_| | | | | | | | | | | | (_| |
# |_| \__,_|_| |_|_| |_|_|_| |_|\__, |
# __/ |
# |___/
####
# Set unreachable: all our parents are down!
# We have a special state, but state was already set; we just need to
# update it. We are not DOWN, we are UNREACHABLE,
# and the state id is 2
def set_unreachable(self):
now = time.time()
self.state_id = 2
self.state = 'UNREACHABLE'
self.last_time_unreachable = int(now)
# We just got an impact, so we go unreachable
# But only if we enable this state change in the conf
def set_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change:
# Keep a trace of the old state (in case the problem goes away
# before a new check)
self.state_before_impact = self.state
self.state_id_before_impact = self.state_id
# This flag will know if we override the impact state
self.state_changed_since_impact = False
self.state = 'UNREACHABLE' # exit code UNDETERMINED
self.state_id = 2
# Ok, we are no longer an impact; if no new checks
# have overridden the impact state, we come back to the old
# state
# And only if impact state change is set in configuration
def unset_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact:
self.state = self.state_before_impact
self.state_id = self.state_id_before_impact
# set the state to UP, DOWN, or UNDETERMINED
# from the status of a check. Also update last_state
def set_state_from_exit_status(self, status):
now = time.time()
self.last_state_update = now
# we should put the right last state in last_state:
# if the state was not just changed by a problem/impact,
# we can take the current state. But if it was, the
# real old state is self.state_before_impact (it's the TRUE
# state in fact)
# And only if we enable the impact state change
cls = self.__class__
if (cls.enable_problem_impacts_states_change and
self.is_impact and
not self.state_changed_since_impact):
self.last_state = self.state_before_impact
else:
self.last_state = self.state
# There is no 1 case because it should have been managed by the caller for a host
# like the schedulingitem::consume method.
if status == 0:
self.state = 'UP'
self.state_id = 0
self.last_time_up = int(self.last_state_update)
state_code = 'o'  # 'o' is the low format for UP in flap_detection_options
elif status in (2, 3):
self.state = 'DOWN'
self.state_id = 1
self.last_time_down = int(self.last_state_update)
state_code = 'd'
else:
self.state = 'DOWN' # exit code UNDETERMINED
self.state_id = 1
self.last_time_down = int(self.last_state_update)
state_code = 'd'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
if self.state != self.last_state and \
not(self.state == "DOWN" and self.last_state == "UNREACHABLE"):
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change
# See if the state matches the given status. Can be low or high format (o/UP, d/DOWN, ...)
def is_state(self, status):
if status == self.state:
return True
# Now low status
elif status == 'o' and self.state == 'UP':
return True
elif status == 'd' and self.state == 'DOWN':
return True
elif status == 'u' and self.state == 'UNREACHABLE':
return True
return False
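# Editor's note (not part of the original file): is_state accepts both
# formats, so after a failed check both h.is_state('DOWN') and
# h.is_state('d') return True.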
# The last time when the state was not UP
def last_time_non_ok_or_up(self):
if self.last_time_down > self.last_time_up:
last_time_non_up = self.last_time_down
else:
last_time_non_up = 0
return last_time_non_up
# Add a log entry with a HOST ALERT like:
# HOST ALERT: server;DOWN;HARD;1;I don't know what to say...
def raise_alert_log_entry(self, check_variant=None):
if check_variant is None:
check_variant = SchedulingItem.default_check_variant
if check_variant == SchedulingItem.default_check_variant:
naglog_result('critical', 'HOST ALERT: %s;%s;%s;%d;%s' % (
self.get_name(), self.state, self.state_type, self.attempt,
self.output))
elif check_variant == "maintenance":
naglog_result('critical', 'HOST MAINTENANCE ALERT: %s;%s;%s' % (
self.get_name(), self.maintenance_state,
self.maintenance_check_output))
# If the configuration allow it, raise an initial log like
# CURRENT HOST STATE: server;DOWN;HARD;1;I don't know what to say...
def raise_initial_state(self):
if self.__class__.log_initial_states:
naglog_result('info',
'CURRENT HOST STATE: %s;%s;%s;%d;%s' % (self.get_name(),
self.state, self.state_type,
self.attempt, self.output))
# Add a log entry with a Freshness alert like:
# Warning: The results of host 'Server' are stale by 0d 0h 0m 58s (threshold=0d 1h 0m 0s).
# I'm forcing an immediate check of the host.
def raise_freshness_log_entry(self, t_stale_by, t_threshold):
logger.warning("The results of host '%s' are stale by %s "
"(threshold=%s). I'm forcing an immediate check "
"of the host.",
self.get_name(),
format_t_into_dhms_format(t_stale_by),
format_t_into_dhms_format(t_threshold))
# Raise a log entry with a Notification alert like
# HOST NOTIFICATION: superadmin;server;UP;notify-by-rss;no output
def raise_notification_log_entry(self, n):
contact = n.contact
command = n.command_call
if n.type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'CUSTOM',
'ACKNOWLEDGEMENT', 'FLAPPINGSTART', 'FLAPPINGSTOP',
'FLAPPINGDISABLED'):
state = '%s (%s)' % (n.type, self.state)
else:
state = self.state
if self.__class__.log_notifications:
naglog_result('critical',
"HOST NOTIFICATION: %s;%s;%s;%s;%s" % (contact.get_name(),
self.get_name(), state,
command.get_name(), self.output))
# Raise a log entry with an event handler alert like
# HOST EVENT HANDLER: superadmin;server;UP;notify-by-rss;no output
def raise_event_handler_log_entry(self, command):
if self.__class__.log_event_handlers:
naglog_result('critical',
"HOST EVENT HANDLER: %s;%s;%s;%s;%s" % (self.get_name(),
self.state, self.state_type,
self.attempt, command.get_name()))
# Raise a log entry with a Snapshot alert like
# HOST SNAPSHOT: superadmin;server;UP;notify-by-rss;no output
def raise_snapshot_log_entry(self, command):
if self.__class__.log_event_handlers:
naglog_result('critical',
"HOST SNAPSHOT: %s;%s;%s;%s;%s" % (self.get_name(),
self.state, self.state_type,
self.attempt, command.get_name()))
# Raise a log entry with FLAPPING START alert like
# HOST FLAPPING ALERT: server;STARTED; Host appears to have started ...
# .... flapping (50.6% change >= 50.0% threshold)
def raise_flapping_start_log_entry(self, change_ratio, threshold):
naglog_result('critical',
"HOST FLAPPING ALERT: %s;STARTED; "
"Host appears to have started flapping "
"(%.1f%% change >= %.1f%% threshold)"
% (self.get_name(), change_ratio, threshold))
# Raise a log entry with FLAPPING STOP alert like
# HOST FLAPPING ALERT: server;STOPPED; host appears to have stopped ...
# ..... flapping (23.0% change < 25.0% threshold)
def raise_flapping_stop_log_entry(self, change_ratio, threshold):
naglog_result('critical',
"HOST FLAPPING ALERT: %s;STOPPED; "
"Host appears to have stopped flapping "
"(%.1f%% change < %.1f%% threshold)"
% (self.get_name(), change_ratio, threshold))
# If there is no valid time for next check, raise a log entry
def raise_no_next_check_log_entry(self):
logger.warning("I cannot schedule the check for the host '%s' "
"because there is not future valid time",
self.get_name())
# Raise a log entry when a downtime begins
# HOST DOWNTIME ALERT: test_host_0;STARTED; Host has entered a period of scheduled downtime
def raise_enter_downtime_log_entry(self):
naglog_result('critical',
"HOST DOWNTIME ALERT: %s;STARTED; "
"Host has entered a period of scheduled downtime"
% (self.get_name()))
# Raise a log entry when a downtime has finished
# HOST DOWNTIME ALERT: test_host_0;STOPPED; Host has exited from a period of scheduled downtime
def raise_exit_downtime_log_entry(self):
naglog_result('critical',
"HOST DOWNTIME ALERT: %s;STOPPED; Host has "
"exited from a period of scheduled downtime"
% (self.get_name()))
# Raise a log entry when a downtime prematurely ends
# HOST DOWNTIME ALERT: test_host_0;CANCELLED; Scheduled downtime for host has been cancelled.
def raise_cancel_downtime_log_entry(self):
naglog_result('critical',
"HOST DOWNTIME ALERT: %s;CANCELLED; "
"Scheduled downtime for host has been cancelled."
% (self.get_name()))
# Stalking: log the output when the check is freshly consumed
# (status 'waitconsume'), its exit status is covered by
# self.stalking_options, and the output changed
def manage_stalking(self, c):
need_stalk = False
if c.status == 'waitconsume':
if c.exit_status == 0 and 'o' in self.stalking_options:
need_stalk = True
elif c.exit_status == 1 and 'd' in self.stalking_options:
need_stalk = True
elif c.exit_status == 2 and 'd' in self.stalking_options:
need_stalk = True
elif c.exit_status == 3 and 'u' in self.stalking_options:
need_stalk = True
# Only stalk when the output actually changed
if c.output == self.output:
need_stalk = False
if need_stalk:
logger.info("Stalking %s: %s", self.get_name(), self.output)
# Fill act_depend_of with my parents (network dependency),
# register myself as an impact on each parent (no timeperiod
# restriction), and follow the parents' inheritance
def fill_parents_dependency(self):
for parent in self.parents:
if parent is not None:
# I add my parent in my list
self.act_depend_of.append((parent, ['d', 'u', 's', 'f'], 'network_dep', None, True))
# And I register myself in my parent list too
parent.register_child(self)
# And add the parent/child dep filling too, for broking
parent.register_son_in_parent_child_dependencies(self)
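# Sketch of what one act_depend_of entry built above looks like
# (field meanings inferred from how the tuple is used at runtime):
#   (parent,               # the item we depend on
#    ['d', 'u', 's', 'f'], # parent states that activate the dependency
#    'network_dep',        # dependency type
#    None,                 # restriction timeperiod (None = always)
#    True)                 # inherits_parent flag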
# Register a child in our lists
def register_child(self, child):
# We keep two lists: self.childs just holds the pointers (used
# for broking) while act_depend_of_me holds the full dependency
# data used by the running part
self.childs.append(child)
self.act_depend_of_me.append((child, ['d', 'u', 's', 'f'], 'network_dep', None, True))
# Give data for the checks' macros
def get_data_for_checks(self):
return [self]
# Give data for the event handlers' macros
def get_data_for_event_handler(self):
return [self]
# Give data for the notifications' macros
def get_data_for_notifications(self, contact, n):
return [self, contact, n]
# Check whether the notification is blocked at the contact level
# (notification period and contact preferences)
def notification_is_blocked_by_contact(self, n, contact):
return not contact.want_host_notification(self.last_chk, self.state, n.type,
self.business_impact, n.command_call)
# MACRO PART
def get_duration_sec(self):
return str(int(self.duration_sec))
def get_duration(self):
m, s = divmod(self.duration_sec, 60)
h, m = divmod(m, 60)
return "%02dh %02dm %02ds" % (h, m, s)
# Check if a notification for this host is suppressed at this time
# This is a check at the host level. Do not look at contacts here
def notification_is_blocked_by_item(self, type, t_wished=None):
if t_wished is None:
t_wished = time.time()
# TODO
# forced notification -> false
# custom notification -> false
# Block if notifications are program-wide disabled
if not self.enable_notifications:
return True
# Does the notification period allow sending out this notification?
if (self.notification_period is not None and
not self.notification_period.is_time_valid(t_wished)):
return True
# Block if notifications are disabled for this host
if not self.notifications_enabled:
return True
# Block if notifications are disabled for every state ('n' in notification_options)
if 'n' in self.notification_options:
return True
if type in ('PROBLEM', 'RECOVERY'):
if self.state == 'DOWN' and 'd' not in self.notification_options:
return True
if self.state == 'UP' and 'r' not in self.notification_options:
return True
if self.state == 'UNREACHABLE' and 'u' not in self.notification_options:
return True
if (type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED') and
'f' not in self.notification_options):
return True
if (type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED') and
's' not in self.notification_options):
return True
# Acknowledgements make no sense when the status is ok/up
if type == 'ACKNOWLEDGEMENT':
if self.state == self.ok_up:
return True
# Flapping
if type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
# TODO block if not notify_on_flapping
if self.scheduled_downtime_depth > 0:
return True
# When deep in downtime, only allow end-of-downtime notifications;
# at depth 1 the downtime has just started and may still be notified
if self.scheduled_downtime_depth > 1 and type not in ('DOWNTIMEEND', 'DOWNTIMECANCELLED'):
return True
# Block if in a scheduled downtime and a problem arises
if self.scheduled_downtime_depth > 0 and type in ('PROBLEM', 'RECOVERY'):
return True
# Block if the status is SOFT
if self.state_type == 'SOFT' and type == 'PROBLEM':
return True
# Block if the problem has already been acknowledged
if self.problem_has_been_acknowledged and type != 'ACKNOWLEDGEMENT':
return True
# Block if flapping
if self.is_flapping and type not in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return True
# Block if business rule smart notifications is enabled and all its
# childs have been acknowledged or are under downtime.
if self.got_business_rule is True \
and self.business_rule_smart_notifications is True \
and self.business_rule_notification_is_blocked() is True \
and type == 'PROBLEM':
return True
return False
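# Illustrative sketch (hypothetical host object, assuming notifications
# are otherwise enabled and the notification period is valid):
#   host.state_type = 'SOFT'
#   host.notification_is_blocked_by_item('PROBLEM')   # -> True (SOFT state)
#   host.problem_has_been_acknowledged = True
#   host.notification_is_blocked_by_item('RECOVERY')  # -> True (acked, not an ACK type)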
# Get an oc*p command if the item has an obsess_over_* command.
# It must be enabled both locally and globally
def get_obsessive_compulsive_processor_command(self):
cls = self.__class__
if not cls.obsess_over or not self.obsess_over_host:
return
m = MacroResolver()
data = self.get_data_for_event_handler()
cmd = m.resolve_command(cls.ochp_command, data)
e = EventHandler(cmd, timeout=cls.ochp_timeout)
# ok we can put it in our temp action queue
self.actions.append(e)
# Macro part
def get_total_services(self):
return str(len(self.services))
def get_total_services_ok(self):
return str(len([s for s in self.services if s.state_id == 0]))
def get_total_services_warning(self):
return str(len([s for s in self.services if s.state_id == 1]))
def get_total_services_critical(self):
return str(len([s for s in self.services if s.state_id == 2]))
def get_total_services_unknown(self):
return str(len([s for s in self.services if s.state_id == 3]))
def get_ack_author_name(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.author
def get_ack_comment(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.comment
def get_check_command(self):
return self.check_command.get_name()
def get_short_status(self):
mapping = {
0: "U",
1: "D",
2: "N",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return mapping.get(self.state_id, "n/a")
def get_status(self):
if self.got_business_rule:
mapping = {
0: "UP",
1: "DOWN",
2: "UNREACHABLE",
}
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return self.state
def get_downtime(self):
return str(self.scheduled_downtime_depth)
# Class for the hosts list. It's mainly used during the
# configuration phase
class Hosts(Items):
name_property = "host_name" # use for the search by name
inner_class = Host # use for know what is in items
# Create link between elements:
# hosts -> timeperiods
# hosts -> hosts (parents, etc)
# hosts -> commands (check_command)
# hosts -> contacts
def linkify(self, timeperiods=None, commands=None, contacts=None, realms=None,
resultmodulations=None, businessimpactmodulations=None, escalations=None,
hostgroups=None, triggers=None, checkmodulations=None, macromodulations=None):
self.linkify_with_timeperiods(timeperiods, 'notification_period')
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
self.linkify_with_timeperiods(timeperiods, 'snapshot_period')
self.linkify_with_timeperiods(timeperiods, 'maintenance_check_period')
self.linkify_h_by_h()
self.linkify_h_by_hg(hostgroups)
self.linkify_one_command_with_commands(commands, 'check_command')
self.linkify_one_command_with_commands(commands, 'event_handler')
self.linkify_one_command_with_commands(commands, 'snapshot_command')
self.linkify_one_command_with_commands(commands, 'maintenance_check_command')
self.linkify_with_contacts(contacts)
self.linkify_h_by_realms(realms)
self.linkify_with_resultmodulations(resultmodulations)
self.linkify_with_business_impact_modulations(businessimpactmodulations)
# WARNING: not all escalations are linked here (only plain
# escalation objects, not serviceescalation or hostescalation ones).
# Those are linked in the escalations' own linkify.
self.linkify_with_escalations(escalations)
self.linkify_with_triggers(triggers)
self.linkify_with_checkmodulations(checkmodulations)
self.linkify_with_macromodulations(macromodulations)
# Fill address by host_name if not set
def fill_predictive_missing_parameters(self):
for h in self:
h.fill_predictive_missing_parameters()
# Link host with hosts (parents)
def linkify_h_by_h(self):
for h in self:
parents = h.parents
# The new member list
new_parents = []
for parent in parents:
parent = parent.strip()
p = self.find_by_name(parent)
if p is not None:
new_parents.append(p)
else:
err = "the parent '%s' on host '%s' is unknown!" % (parent, h.get_name())
self.configuration_warnings.append(err)
# print("Me,", h.host_name, "define my parents", new_parents)
# We find the id, we replace the names
h.parents = new_parents
# Link with realms and set a default realm if none
def linkify_h_by_realms(self, realms):
default_realm = None
for r in realms:
if getattr(r, 'default', False):
default_realm = r
# if default_realm is None:
# print("Error: there is no default realm defined!")
for h in self:
if h.realm is not None:
p = realms.find_by_name(h.realm.strip())
if p is None:
err = "the host %s got an invalid realm (%s)!" % (h.get_name(), h.realm)
h.configuration_errors.append(err)
h.realm = p
else:
# print("Notice: applying default realm %s to host %s"
# % (default_realm.get_name(), h.get_name()))
h.realm = default_realm
h.got_default_realm = True
# We look for hostgroups property in hosts and link them
def linkify_h_by_hg(self, hostgroups):
# Register host in the hostgroups
for h in self:
new_hostgroups = []
if hasattr(h, 'hostgroups') and h.hostgroups != []:
hgs = [n.strip() for n in h.hostgroups if n.strip()]
for hg_name in hgs:
# TODO: should an unknown hostgroup raise an error ?
hg = hostgroups.find_by_name(hg_name)
if hg is not None:
new_hostgroups.append(hg)
else:
err = ("the hostgroup '%s' of the host '%s' is "
"unknown" % (hg_name, h.host_name))
h.configuration_errors.append(err)
h.hostgroups = new_hostgroups
# Explode the hosts: expand trigger strings into triggers, pull
# contacts from contact groups, and register each host into its
# hostgroups
def explode(self, hostgroups, contactgroups, triggers):
# items::explode_trigger_string_into_triggers
self.explode_trigger_string_into_triggers(triggers)
for t in self.templates.values():
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(t, contactgroups)
# Register host in the hostgroups
for h in self:
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(h, contactgroups)
if hasattr(h, 'host_name') and hasattr(h, 'hostgroups'):
hname = h.host_name
for hg in h.hostgroups:
hostgroups.add_member(hname, hg.strip())
# In the scheduler we need to relink the commandCall with
# the real commands
def late_linkify_h_by_commands(self, commands):
props = ['check_command', 'maintenance_check_command', 'event_handler',
'snapshot_command']
for h in self:
for prop in props:
cc = getattr(h, prop, None)
if cc:
cc.late_linkify_with_command(commands)
# Ok also link checkmodulations
for cw in h.checkmodulations:
cw.late_linkify_cw_by_commands(commands)
# Create dependencies:
# Dependencies at the host level: host parent
def apply_dependencies(self):
for h in self:
h.fill_parents_dependency()
def set_initial_state(self):
"""
Sets hosts initial state if required in configuration
"""
for h in self:
h.set_initial_state()
# Return the host_name of every host that uses the template
# named tpl_name, directly or through a template that inherits
# from it
def find_hosts_that_use_template(self, tpl_name):
return [h.host_name for h in self if hasattr(h, "host_name") and tpl_name in h.tags]
# Create the business rules trees for the hosts
def create_business_rules(self, hosts, services):
for h in self:
h.create_business_rules(hosts, services)
# Link every business-rule host/service with its dependencies
# for the problem/impact links
def create_business_rules_dependencies(self):
for h in self:
h.create_business_rules_dependencies()
# ===== File: shinken/objects/pollerlink.py =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.satellitelink import SatelliteLink, SatelliteLinks
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
class PollerLink(SatelliteLink):
"""This class is the link between Arbiter and Poller. With it, arbiter
can see if a poller is alive, and can send it new configuration
"""
id = 0
my_type = 'poller'
# To_send: send or not to satellite conf
properties = SatelliteLink.properties.copy()
properties.update({
'poller_name': StringProp(fill_brok=['full_status'], to_send=True),
'port': IntegerProp(default=7771, fill_brok=['full_status']),
'min_workers': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'max_workers': IntegerProp(default=30, fill_brok=['full_status'], to_send=True),
'processes_by_worker': IntegerProp(default=256, fill_brok=['full_status'], to_send=True),
'max_q_size': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'q_factor': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'results_batch': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'poller_tags': ListProp(default=['None'], to_send=True),
'harakiri_threshold': StringProp(default=None, fill_brok=['full_status'], to_send=True),
})
def get_name(self):
return getattr(self, 'poller_name', 'UNNAMED-POLLER')
def register_to_my_realm(self):
self.realm.pollers.append(self)
class PollerLinks(SatelliteLinks):
"""Please Add a Docstring to describe the class here"""
name_property = "poller_name"
inner_class = PollerLink
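# Illustrative poller definition in Shinken's cfg syntax (values are
# examples only; 'address' is assumed to come from the SatelliteLink
# base properties):
#
#   define poller {
#       poller_name     poller-1
#       address         127.0.0.1
#       port            7771
#       poller_tags     None
#   }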
# ===== File: shinken/objects/resultmodulation.py =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# The resultmodulation class is used for in-scheduler modulation of
# check results, like the return code or the output.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from .item import Item, Items
from shinken.property import StringProp, IntegerProp, IntListProp
class Resultmodulation(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'resultmodulation'
properties = Item.properties.copy()
properties.update({
'resultmodulation_name': StringProp(),
'exit_codes_match': IntListProp(default=[]),
'exit_code_modulation': IntegerProp(default=None),
'modulation_period': StringProp(default=None),
})
# For debugging purpose only (nice name)
def get_name(self):
return self.resultmodulation_name
# Make the return code modulation if needed
def module_return(self, return_code):
# Only modulate inside the modulation_period, or when modulation_period is None
if self.modulation_period is None or self.modulation_period.is_time_valid(time.time()):
# Try to change the exit code only if a new one is defined
if self.exit_code_modulation is not None:
# Only if the code is listed in exit_codes_match
if return_code in self.exit_codes_match:
return_code = self.exit_code_modulation
return return_code
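# Worked example for module_return (hypothetical values): with
# exit_codes_match = [1, 2], exit_code_modulation = 0 and a currently
# valid modulation_period, a return_code of 2 is rewritten to 0 while
# a return_code of 3 passes through unchanged.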
# We override pythonize because we have special cases that we
# do not want to be done at runtime
def pythonize(self):
# First apply Item pythonize
super(Resultmodulation, self).pythonize()
# Then very special cases
# Intify the exit_codes_match, and make list
self.exit_codes_match = [int(ec) for ec in getattr(self, 'exit_codes_match', [])]
if hasattr(self, 'exit_code_modulation'):
self.exit_code_modulation = int(self.exit_code_modulation)
else:
self.exit_code_modulation = None
class Resultmodulations(Items):
name_property = "resultmodulation_name"
inner_class = Resultmodulation
def linkify(self, timeperiods):
self.linkify_rm_by_tp(timeperiods)
# For each result modulation, look up the modulation_period
# timeperiod and replace the name with the object
def linkify_rm_by_tp(self, timeperiods):
for rm in self:
mtp_name = rm.modulation_period.strip()
# The new member list, in id
mtp = timeperiods.find_by_name(mtp_name)
if mtp_name != '' and mtp is None:
err = "Error: the result modulation '%s' got an unknown modulation_period '%s'" % \
(rm.get_name(), mtp_name)
rm.configuration_errors.append(err)
rm.modulation_period = mtp
# ===== File: shinken/objects/__init__.py =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
The objects package contains definition classes of the different objects
that can be declared in configuration files.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from .item import Item, Items
from .timeperiod import Timeperiod, Timeperiods
from .schedulingitem import SchedulingItem
from .matchingitem import MatchingItem
from .service import Service, Services
from .command import Command, Commands
from .resultmodulation import Resultmodulation, Resultmodulations
from .escalation import Escalation, Escalations
from .serviceescalation import Serviceescalation, Serviceescalations
from .hostescalation import Hostescalation, Hostescalations
from .host import Host, Hosts
from .hostgroup import Hostgroup, Hostgroups
from .realm import Realm, Realms
from .contact import Contact, Contacts
from .contactgroup import Contactgroup, Contactgroups
from .notificationway import NotificationWay, NotificationWays
from .servicegroup import Servicegroup, Servicegroups
from .servicedependency import Servicedependency, Servicedependencies
from .hostdependency import Hostdependency, Hostdependencies
from .module import Module, Modules
from .discoveryrule import Discoveryrule, Discoveryrules
from .discoveryrun import Discoveryrun, Discoveryruns
from .trigger import Trigger, Triggers
from .businessimpactmodulation import Businessimpactmodulation, Businessimpactmodulations
from .macromodulation import MacroModulation, MacroModulations
# from config import Config
# ===== File: shinken/objects/discoveryrun.py =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import copy
from shinken.objects.item import Item, Items
from shinken.objects.matchingitem import MatchingItem
from shinken.property import StringProp
from shinken.eventhandler import EventHandler
from shinken.macroresolver import MacroResolver
class Discoveryrun(MatchingItem):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'discoveryrun'
properties = Item.properties.copy()
properties.update({
'discoveryrun_name': StringProp(),
'discoveryrun_command': StringProp(),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'current_launch': StringProp(default=None),
})
# The init of a discoveryrun sets the keys listed in
# Discoveryrun.properties with setattr; every other key goes into
# the matching rules, so that no name is lost from __dict__
def __init__(self, params={}):
cls = self.__class__
# We have our own id of My Class type :)
# use set attr for going into the slots
# instead of __dict__ :)
setattr(self, 'id', cls.id)
cls.id += 1
self.matches = {} # for matching rules
self.not_matches = {} # for rules that should NOT match
# Sort each incoming parameter:
# - a declared property goes into __dict__
# - a key starting with '!' is a negative rule -> self.not_matches
# - anything else is a positive matching rule -> self.matches
# (a leading '+' in writable properties means 'add this')
for key in params:
# delistify attributes if there is only one value
params[key] = self.compact_unique_attr_value(params[key])
if key in cls.properties:
setattr(self, key, params[key])
else:
if key.startswith('!'):
key = key.split('!')[1]
self.not_matches[key] = params['!' + key]
else:
self.matches[key] = params[key]
# Then running prop :)
cls = self.__class__
# adding running properties like latency, dependency list, etc
for prop, entry in cls.running_properties.items():
# copy() is slow, so we check the type first: anything with
# __iter__ (list, dict or tuple) gets its own copy, so that
# each item has its own list
val = entry.default
if hasattr(val, '__iter__'):
setattr(self, prop, copy(val))
else:
setattr(self, prop, val)
# each instance gets its own running properties!
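# Illustrative split of incoming params (hypothetical keys): given
#   params = {'discoveryrun_name': 'nmap', 'os': 'linux', '!osversion': '2.6'}
# 'discoveryrun_name' is a declared property and lands in __dict__,
# 'os' becomes a positive rule in self.matches, and '!osversion'
# becomes a negative rule stored as self.not_matches['osversion'].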
# Output name
def get_name(self):
try:
return self.discoveryrun_name
except AttributeError:
return "UnnamedDiscoveryRun"
# A first-level run is one that does not have
# any matching filter
def is_first_level(self):
return len(self.not_matches) + len(self.matches) == 0
# Get an eventhandler object and launch it
def launch(self, ctx=[], timeout=300):
m = MacroResolver()
cmd = m.resolve_command(self.discoveryrun_command, ctx)
self.current_launch = EventHandler(cmd, timeout=timeout)
self.current_launch.execute()
def check_finished(self):
max_output = 10 ** 9
# print("Max output", max_output)
self.current_launch.check_finished(max_output)
# Look if the current launch is done or not
def is_finished(self):
if self.current_launch is None:
return True
if self.current_launch.status in ('done', 'timeout'):
return True
return False
# We use an EventHandler object, so output holds the first line
# and long_output the rest; we just return both joined
def get_output(self):
return '\n'.join([self.current_launch.output, self.current_launch.long_output])
class Discoveryruns(Items):
name_property = "discoveryrun_name"
inner_class = Discoveryrun
def linkify(self, commands):
for r in self:
r.linkify_one_command_with_commands(commands, 'discoveryrun_command')
# ===== File: shinken/objects/contact.py =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.util import strip_and_uniq
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
from shinken.log import logger, naglog_result
_special_properties = (
'service_notification_commands', 'host_notification_commands',
'service_notification_period', 'host_notification_period',
'service_notification_options', 'host_notification_options',
'contact_name'
)
_simple_way_parameters = (
'service_notification_period', 'host_notification_period',
'service_notification_options', 'host_notification_options',
'service_notification_commands', 'host_notification_commands',
'min_business_impact'
)
class Contact(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'contact'
properties = Item.properties.copy()
properties.update({
'contact_name': StringProp(fill_brok=['full_status']),
'alias': StringProp(default='none', fill_brok=['full_status']),
'contactgroups': ListProp(default=[], fill_brok=['full_status']),
'host_notifications_enabled': BoolProp(default=True, fill_brok=['full_status']),
'service_notifications_enabled': BoolProp(default=True, fill_brok=['full_status']),
'host_notification_period': StringProp(fill_brok=['full_status']),
'service_notification_period': StringProp(fill_brok=['full_status']),
'host_notification_options': ListProp(default=[''], fill_brok=['full_status'],
split_on_coma=True),
'service_notification_options': ListProp(default=[''], fill_brok=['full_status'],
split_on_coma=True),
# To be consistent with notificationway object attributes
'host_notification_commands': ListProp(fill_brok=['full_status']),
'service_notification_commands': ListProp(fill_brok=['full_status']),
'min_business_impact': IntegerProp(default=0, fill_brok=['full_status']),
'email': StringProp(default='none', fill_brok=['full_status']),
'pager': StringProp(default='none', fill_brok=['full_status']),
'address1': StringProp(default='none', fill_brok=['full_status']),
'address2': StringProp(default='none', fill_brok=['full_status']),
'address3': StringProp(default='none', fill_brok=['full_status']),
'address4': StringProp(default='none', fill_brok=['full_status']),
'address5': StringProp(default='none', fill_brok=['full_status']),
'address6': StringProp(default='none', fill_brok=['full_status']),
'can_submit_commands': BoolProp(default=False, fill_brok=['full_status']),
'is_admin': BoolProp(default=False, fill_brok=['full_status']),
'expert': BoolProp(default=False, fill_brok=['full_status']),
'retain_status_information': BoolProp(default=True, fill_brok=['full_status']),
'notificationways': ListProp(default=[], fill_brok=['full_status']),
'password': StringProp(default='NOPASSWORDSET', fill_brok=['full_status']),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'modified_attributes': IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'downtimes': StringProp(default=[], fill_brok=['full_status'], retention=True),
})
# This table is used to transform old parameter names into new ones,
# e.g. from the Nagios2 format to the Nagios3 one,
# or deprecated Shinken names like criticity
old_properties = {
'min_criticity': 'min_business_impact',
}
macros = {
'CONTACTNAME': 'contact_name',
'CONTACTALIAS': 'alias',
'CONTACTEMAIL': 'email',
'CONTACTPAGER': 'pager',
'CONTACTADDRESS1': 'address1',
'CONTACTADDRESS2': 'address2',
'CONTACTADDRESS3': 'address3',
'CONTACTADDRESS4': 'address4',
'CONTACTADDRESS5': 'address5',
'CONTACTADDRESS6': 'address6',
'CONTACTGROUPNAME': 'get_groupname',
'CONTACTGROUPNAMES': 'get_groupnames'
}
# For debugging purpose only (nice name)
def get_name(self):
try:
return self.contact_name
except AttributeError:
return 'UnnamedContact'
# Check the notification_options against the state, and whether t
# falls within the service_notification_period
def want_service_notification(self, t, state, type, business_impact, cmd=None):
if not self.service_notifications_enabled:
return False
# If we are in downtime, we do not want notifications
for dt in self.downtimes:
if dt.is_in_effect:
return False
# Now the rest is for sub notificationways. If one is OK, we are ok
# We will filter in another phase
for nw in self.notificationways:
nw_b = nw.want_service_notification(t, state, type, business_impact, cmd)
if nw_b:
return True
# Oh... no one is ok for it? so no, sorry
return False
# Check the notification_options against the state, and whether t
# falls within the host_notification_period
def want_host_notification(self, t, state, type, business_impact, cmd=None):
if not self.host_notifications_enabled:
return False
# If we are in downtime, we do not want notifications
for dt in self.downtimes:
if dt.is_in_effect:
return False
# Now it's all for sub notificationways. If one is OK, we are OK
# We will filter in another phase
for nw in self.notificationways:
nw_b = nw.want_host_notification(t, state, type, business_impact, cmd)
if nw_b:
return True
# Oh, nobody..so NO :)
return False
# Call to get our commands to launch a Notification
def get_notification_commands(self, type):
r = []
# service_notification_commands for service
notif_commands_prop = type + '_notification_commands'
for nw in self.notificationways:
r.extend(getattr(nw, notif_commands_prop))
return r
# Check that the required properties are set
def is_correct(self):
state = True
cls = self.__class__
# The special properties above are checked in the notificationways part
for prop, entry in cls.properties.items():
if prop not in _special_properties:
if not hasattr(self, prop) and entry.required:
logger.error("[contact::%s] %s property not set", self.get_name(), prop)
state = False # Bad boy...
# There is a case with no notificationway at all: when none of the
# special properties is defined!!
if self.notificationways == []:
for p in _special_properties:
if not hasattr(self, p):
logger.error("[contact::%s] %s property is missing", self.get_name(), p)
state = False
if hasattr(self, 'contact_name'):
for c in cls.illegal_object_name_chars:
if c in self.contact_name:
logger.error("[contact::%s] %s character not allowed in contact_name",
self.get_name(), c)
state = False
else:
if hasattr(self, 'alias'): # take the alias if we miss the contact_name
self.contact_name = self.alias
return state
# Raise a log entry when a downtime begins
# CONTACT DOWNTIME ALERT:
# test_contact;STARTED; Contact has entered a period of scheduled downtime
def raise_enter_downtime_log_entry(self):
naglog_result('info', "CONTACT DOWNTIME ALERT: %s;STARTED; Contact has "
"entered a period of scheduled downtime" % self.get_name())
# Raise a log entry when a downtime has finished
# CONTACT DOWNTIME ALERT:
# test_contact;STOPPED; Contact has exited from a period of scheduled downtime
def raise_exit_downtime_log_entry(self):
naglog_result('info', "CONTACT DOWNTIME ALERT: %s;STOPPED; Contact has "
"exited from a period of scheduled downtime" % self.get_name())
# Raise a log entry when a downtime prematurely ends
# CONTACT DOWNTIME ALERT:
# test_contact;CANCELLED; Contact has entered a period of scheduled downtime
def raise_cancel_downtime_log_entry(self):
naglog_result('info', "CONTACT DOWNTIME ALERT: %s;CANCELLED; Scheduled "
"downtime for contact has been cancelled." % self.get_name())
class Contacts(Items):
name_property = "contact_name"
inner_class = Contact
def linkify(self, timeperiods, commands, notificationways):
# self.linkify_with_timeperiods(timeperiods, 'service_notification_period')
# self.linkify_with_timeperiods(timeperiods, 'host_notification_period')
# self.linkify_command_list_with_commands(commands, 'service_notification_commands')
# self.linkify_command_list_with_commands(commands, 'host_notification_commands')
self.linkify_with_notificationways(notificationways)
# We've got a notificationways property with comma-separated names
# and we want a list of NotificationWay objects
def linkify_with_notificationways(self, notificationways):
for i in self:
if not hasattr(i, 'notificationways'):
continue
new_notificationways = []
for nw_name in strip_and_uniq(i.notificationways):
nw = notificationways.find_by_name(nw_name)
if nw is not None:
new_notificationways.append(nw)
else:
err = "The 'notificationways' of the %s '%s' named '%s' is unknown!" %\
(i.__class__.my_type, i.get_name(), nw_name)
i.configuration_errors.append(err)
# Get the list, but first make the elements unique
i.notificationways = list(set(new_notificationways))
def late_linkify_c_by_commands(self, commands):
for i in self:
for nw in i.notificationways:
nw.late_linkify_nw_by_commands(commands)
# Explode the contacts: inherit the special properties, register
# each contact into its contactgroups, and build the inner
# notification ways
def explode(self, contactgroups, notificationways):
# The contactgroups property needs to be filled in first to get that information
self.apply_partial_inheritance('contactgroups')
# The special properties may come from a template, so inherit
# them before processing ourselves
for prop in _special_properties:
if prop == 'contact_name':
continue
self.apply_partial_inheritance(prop)
# Register ourselves into the contactgroups we are in
for c in self:
if not (hasattr(c, 'contact_name') and hasattr(c, 'contactgroups')):
continue
for cg in c.contactgroups:
contactgroups.add_member(c.contact_name, cg.strip())
# Now create a notification way from the simple parameters of
# the contact
for c in self:
need_notificationway = False
params = {}
for p in _simple_way_parameters:
if hasattr(c, p):
need_notificationway = True
params[p] = getattr(c, p)
else:
# not set on the contact: fall back to the property's default value
setattr(c, p, c.properties[p].default)
if need_notificationway:
# print("Create notif way with", params)
cname = getattr(c, 'contact_name', getattr(c, 'alias', ''))
nw_name = cname + '_inner_notificationway'
notificationways.new_inner_member(nw_name, params)
if not hasattr(c, 'notificationways'):
c.notificationways = [nw_name]
else:
c.notificationways = list(c.notificationways)
c.notificationways.append(nw_name)
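# Illustrative result (hypothetical contact): a contact 'admin' that
# defines host_notification_period directly gets an inner notification
# way named 'admin_inner_notificationway' carrying those simple
# parameters, appended to its notificationways list.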
# ===== File: shinken/objects/notificationway.py =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from .item import Item, Items
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
from shinken.log import logger
_special_properties = ('service_notification_commands', 'host_notification_commands',
'service_notification_period', 'host_notification_period')
class NotificationWay(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'notificationway'
properties = Item.properties.copy()
properties.update({
'notificationway_name':
StringProp(fill_brok=['full_status']),
'host_notifications_enabled':
BoolProp(default=True, fill_brok=['full_status']),
'service_notifications_enabled':
BoolProp(default=True, fill_brok=['full_status']),
'host_notification_period':
StringProp(fill_brok=['full_status']),
'service_notification_period':
StringProp(fill_brok=['full_status']),
'host_notification_options':
ListProp(default=[''], fill_brok=['full_status'], split_on_coma=True),
'service_notification_options':
ListProp(default=[''], fill_brok=['full_status'], split_on_coma=True),
'host_notification_commands':
ListProp(fill_brok=['full_status']),
'service_notification_commands':
ListProp(fill_brok=['full_status']),
'min_business_impact':
IntegerProp(default=0, fill_brok=['full_status']),
})
running_properties = Item.running_properties.copy()
# This table is used to transform old parameter names into new ones,
# e.g. from the Nagios2 format to the Nagios3 one,
# or deprecated Shinken names like criticity
old_properties = {
'min_criticity': 'min_business_impact',
}
macros = {}
# For debugging purpose only (nice name)
def get_name(self):
return self.notificationway_name
# Check the notification_options against the state, and whether t
# falls within the service_notification_period
def want_service_notification(self, t, state, type, business_impact, cmd=None):
if not self.service_notifications_enabled:
return False
# The command asked for may not be ours but belong to another
# notification way on the same contact. If so, bail out
if cmd and cmd not in self.service_notification_commands:
return False
# If the business_impact is not high enough, we bail out
if business_impact < self.min_business_impact:
return False
b = self.service_notification_period.is_time_valid(t)
if 'n' in self.service_notification_options:
return False
# Map high-format states/types onto low-format notification options
# (use a separate name so we do not shadow the time parameter t)
state_map = {'WARNING': 'w', 'UNKNOWN': 'u', 'CRITICAL': 'c',
'RECOVERY': 'r', 'FLAPPING': 'f', 'DOWNTIME': 's'}
if type == 'PROBLEM':
if state in state_map:
return b and state_map[state] in self.service_notification_options
elif type == 'RECOVERY':
if type in state_map:
return b and state_map[type] in self.service_notification_options
elif type == 'ACKNOWLEDGEMENT':
return b
elif type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return b and 'f' in self.service_notification_options
elif type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED'):
# No notification when a downtime was cancelled. Is that true??
# According to the documentation we need to look at _host_ options
return b and 's' in self.host_notification_options
return False
# Check the notification_options against the state, and whether t
# falls within the host_notification_period
def want_host_notification(self, t, state, type, business_impact, cmd=None):
if not self.host_notifications_enabled:
return False
# If the business_impact is not high enough, we bail out
if business_impact < self.min_business_impact:
return False
# The command asked for may not be ours but belong to another
# notification way on the same contact. If so, bail out
if cmd and cmd not in self.host_notification_commands:
return False
b = self.host_notification_period.is_time_valid(t)
if 'n' in self.host_notification_options:
return False
# Same mapping idea as in want_service_notification, without
# shadowing the time parameter t
state_map = {'DOWN': 'd', 'UNREACHABLE': 'u', 'RECOVERY': 'r',
'FLAPPING': 'f', 'DOWNTIME': 's'}
if type == 'PROBLEM':
if state in state_map:
return b and state_map[state] in self.host_notification_options
elif type == 'RECOVERY':
if type in state_map:
return b and state_map[type] in self.host_notification_options
elif type == 'ACKNOWLEDGEMENT':
return b
elif type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return b and 'f' in self.host_notification_options
elif type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED'):
return b and 's' in self.host_notification_options
return False
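# Worked example (hypothetical values, t inside the notification
# period): state 'DOWN' with type 'PROBLEM' and
# host_notification_options ['d', 'r'] returns True; with options
# ['n'] the method bails out with False before the state is even
# considered.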
# Call to get our commands to launch a Notification
def get_notification_commands(self, type):
# service_notification_commands for service
notif_commands_prop = type + '_notification_commands'
notif_commands = getattr(self, notif_commands_prop)
return notif_commands
# Check that the required properties are set
def is_correct(self):
state = True
cls = self.__class__
# Raise all previously seen errors, like unknown commands or timeperiods
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[item::%s] %s", self.get_name(), err)
# A null notif way is a notif way that will do nothing (service = n, host = n)
is_null_notifway = False
if (hasattr(self, 'service_notification_options') and
self.service_notification_options == ['n']):
if (hasattr(self, 'host_notification_options') and
self.host_notification_options == ['n']):
is_null_notifway = True
return True
for prop, entry in cls.properties.items():
if prop not in _special_properties:
if not hasattr(self, prop) and entry.required:
logger.warning("[notificationway::%s] %s property not set",
self.get_name(), prop)
state = False # Bad boy...
# Ok now we manage special cases...
# Service part
if not hasattr(self, 'service_notification_commands'):
logger.warning("[notificationway::%s] do not have any "
"service_notification_commands defined", self.get_name())
state = False
else:
for cmd in self.service_notification_commands:
if cmd is None:
logger.warning("[notificationway::%s] a "
"service_notification_command is missing", self.get_name())
state = False
if not cmd.is_valid():
logger.warning("[notificationway::%s] a "
"service_notification_command is invalid", self.get_name())
state = False
if getattr(self, 'service_notification_period', None) is None:
logger.warning("[notificationway::%s] the "
"service_notification_period is invalid", self.get_name())
state = False
# Now host part
if not hasattr(self, 'host_notification_commands'):
logger.warning("[notificationway::%s] do not have any "
"host_notification_commands defined", self.get_name())
state = False
else:
for cmd in self.host_notification_commands:
if cmd is None:
logger.warning("[notificationway::%s] a "
"host_notification_command is missing", self.get_name())
state = False
if not cmd.is_valid():
logger.warning(
"[notificationway::%s] a host_notification_command "
"is invalid (%s)", cmd.get_name(), cmd.__dict__)
state = False
if getattr(self, 'host_notification_period', None) is None:
logger.warning("[notificationway::%s] the host_notification_period "
"is invalid", self.get_name())
state = False
return state
# In the scheduler we need to relink the commandCall with
# the real commands
def late_linkify_nw_by_commands(self, commands):
props = ['service_notification_commands', 'host_notification_commands']
for prop in props:
for cc in getattr(self, prop, []):
cc.late_linkify_with_command(commands)
class NotificationWays(Items):
name_property = "notificationway_name"
inner_class = NotificationWay
def linkify(self, timeperiods, commands):
self.linkify_with_timeperiods(timeperiods, 'service_notification_period')
self.linkify_with_timeperiods(timeperiods, 'host_notification_period')
self.linkify_command_list_with_commands(commands, 'service_notification_commands')
self.linkify_command_list_with_commands(commands, 'host_notification_commands')
def new_inner_member(self, name=None, params={}):
if name is None:
name = NotificationWay.id
params['notificationway_name'] = name
# print("Asking a new inner notificationway from name %s with params %s" % (name, params))
nw = NotificationWay(params)
self.add_item(nw)
# ===== File: shinken/objects/schedulingitem.py =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
# Thibault Cohen, thibault.cohen@savoirfairelinux.com
# Francois Mikus, fmikus@acktomic.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This class is a common one for service/host. Here you
will find all scheduling related functions, like the schedule
or the consume_check. It's a very important class!
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import random
import time
import traceback
from shinken.objects.item import Item
from shinken.check import Check
from shinken.notification import Notification
from shinken.macroresolver import MacroResolver
from shinken.eventhandler import EventHandler
from shinken.dependencynode import DependencyNodeFactory
from shinken.log import logger
# on system time change just reevaluate the following attributes:
on_time_change_update = (
'last_notification',
'last_state_change',
'last_hard_state_change',
'last_maintenance_state_change'
)
class SchedulingItem(Item):
# global counters used for [current|last]_[host|service]_[event|problem]_id
current_event_id = 0
current_problem_id = 0
check_variants = ("state", "maintenance")
default_check_variant = "state"
maintenance_downtime = None
# Called by serialization to data-ify the host.
# We build a dict because lists are too fragile for retention
# saves and co :( even if it's more verbose.
# __setstate__ does the inverse.
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
for prop in cls.running_properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverse function of __getstate__
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
for prop in cls.running_properties:
if prop in state:
setattr(self, prop, state[prop])
def set_initial_state(self, mapping):
"""
Sets the object's initial state, state_id, and output attributes if
initial other than default values are wanted.
The allowed states have to be given in the mapping dictionary,
following the pattern below:
{
"o": {
"state": "OK",
"state_id": 0
},
...
}
:param mapping: The mapping describing the allowed states
"""
# Enforced initial state
init_state = getattr(self, "initial_state", "")
if init_state:
if init_state in mapping:
self.state = mapping[init_state]["state"]
self.state_id = mapping[init_state]["state_id"]
else:
err = "invalid initial_state: %s, should be one of %s" % (
init_state, ", ".join(sorted(mapping.keys())))
self.configuration_errors.append(err)
# Enforced check output
output = getattr(self, "initial_output", "")
if output:
self.output = self.long_output = output
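# Illustrative mapping argument (hypothetical; the concrete mapping is
# supplied by the host/service subclass):
#   {
#       'o': {'state': 'UP', 'state_id': 0},
#       'd': {'state': 'DOWN', 'state_id': 1},
#       'u': {'state': 'UNREACHABLE', 'state_id': 2},
#   }
# With initial_state 'd', the object would start as DOWN/1 instead of
# the default.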
# Register the son in my child_dependencies, and
# myself in its parent_dependencies
def register_son_in_parent_child_dependencies(self, son):
# So we register it in our list
self.child_dependencies.add(son)
# and us to its parents
son.parent_dependencies.add(self)
# Add a flapping change, but no more than 20 states
# Then update the self.is_flapping bool by calling update_flapping
def add_flapping_change(self, b):
cls = self.__class__
# If this element is not under flapping check, or
# flap detection is globally disabled, bail out
if not self.flap_detection_enabled or not cls.enable_flap_detection:
return
self.flapping_changes.append(b)
# Keep just 20 changes (global flap_history value)
flap_history = cls.flap_history
if len(self.flapping_changes) > flap_history:
self.flapping_changes.pop(0)
# Now we add a value, we update the is_flapping prop
self.update_flapping()
# We update the is_flapping prop from the values in
# self.flapping_changes; old values have less weight than new ones
def update_flapping(self):
flap_history = self.__class__.flap_history
# We compute the flapping change in %
r = 0.0
i = 0
for b in self.flapping_changes:
i += 1
if b:
r += i * (1.2 - 0.8) / flap_history + 0.8
r = r / flap_history
r *= 100
# We can update our value
self.percent_state_change = r
# Check whether our history window is full, because if not
# the value is not accurate yet
is_full = len(self.flapping_changes) >= flap_history
# Now we get the low_flap_threshold and high_flap_threshold values
# They can be from self, or class
(low_flap_threshold, high_flap_threshold) = (self.low_flap_threshold,
self.high_flap_threshold)
if low_flap_threshold == -1:
cls = self.__class__
low_flap_threshold = cls.global_low_flap_threshold
if high_flap_threshold == -1:
cls = self.__class__
high_flap_threshold = cls.global_high_flap_threshold
# Now check for a flapping state change, but only if we have
# enough samples for the value to be accurate
if self.is_flapping and r < low_flap_threshold and is_full:
self.is_flapping = False
# We also raise a log entry
self.raise_flapping_stop_log_entry(r, low_flap_threshold)
# and a notification
self.remove_in_progress_notifications()
self.create_notifications('FLAPPINGSTOP')
# And update our status for modules
b = self.get_update_status_brok()
self.broks.append(b)
if not self.is_flapping and r >= high_flap_threshold and is_full:
self.is_flapping = True
# We also raise a log entry
self.raise_flapping_start_log_entry(r, high_flap_threshold)
# and a notification
self.remove_in_progress_notifications()
self.create_notifications('FLAPPINGSTART')
# And update our status for modules
b = self.get_update_status_brok()
self.broks.append(b)
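# Worked example of the weighting above (assuming flap_history == 20):
# a single state change at the newest slot (i == 20) weighs
# 20 * 0.4 / 20 + 0.8 == 1.2, giving a percent_state_change of
# 1.2 / 20 * 100 == 6.0%; the same change at the oldest slot (i == 1)
# weighs only 0.82, i.e. 4.1%.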
# Add an attempt but cannot be more than max_check_attempts
def add_attempt(self):
self.attempt += 1
self.attempt = min(self.attempt, self.max_check_attempts)
# Return True if attempt is at max
def is_max_attempts(self):
return self.attempt >= self.max_check_attempts
# Called by the scheduler: if check_freshness is set and the last
# state update is older than freshness_threshold, raise a check
# even when active checks are disabled
def do_check_freshness(self):
now = time.time()
# Before, check if class (host or service) have check_freshness OK
# Then check if item want freshness, then check freshness
cls = self.__class__
if not self.is_in_checking():
if cls.global_check_freshness:
if self.check_freshness and self.freshness_threshold != 0:
if self.last_state_update < now - (
self.freshness_threshold + cls.additional_freshness_latency
):
# Fred: Do not raise a check for passive
# only checked hosts when not in check period ...
if self.passive_checks_enabled and not self.active_checks_enabled:
if self.check_period is None or self.check_period.is_time_valid(now):
# Raise a log
self.raise_freshness_log_entry(
int(now - self.last_state_update),
int(now - self.freshness_threshold)
)
# And a new check
return self.launch_check(now)
else:
logger.debug(
"Should have checked freshness for passive only"
" checked host:%s, but host is not in check period.",
self.host_name
)
return None
def set_myself_as_problem(self, send_brok=True):
"""
Raise all impact from my error. I'm setting myself as a problem, and
I register myself as this in all hosts/services that depend_on_me.
So they are now my impacts.
This method may be called to correctly reinitialize the object state
after the retention data has been loaded. In such a situation, a
        brok should not be emitted if the state is modified. The send_brok
variable reflects this.
:param bool send_brok: Should a brok be emitted if the object state
is modified.
"""
now = time.time()
updated = False
if self.is_problem is False:
self.is_problem = True
updated = True
            # We should warn the potential impacts of our problem,
            # and they register themselves back, so we get
            # our impacts list
impacts = list(self.impacts)
for (impact, status, dep_type, tp, inh_par) in self.act_depend_of_me:
# Check if the status is ok for impact
for s in status:
if self.is_state(s):
# now check if we should bailout because of a
# not good timeperiod for dep
if tp is None or tp.is_time_valid(now):
new_impacts = impact.register_a_problem(self, send_brok)
impacts.extend(new_impacts)
# Only update impacts and create new brok if impacts changed.
s_impacts = set(impacts)
if s_impacts != set(self.impacts):
self.impacts = list(s_impacts)
# We can update our business_impact value now
self.update_business_impact_value()
updated = True
if send_brok is True and updated is True:
# And we register a new broks for update status
b = self.get_update_status_brok()
self.broks.append(b)
    # We update our 'business_impact' value with the max of our impacts'
    # business_impact if we have impacts, and save our 'configuration'
    # business_impact if we have not done it before.
    # If we have no impacts, we revert to our own value
def update_business_impact_value(self):
        # First save our business_impact if not already done
        if self.my_own_business_impact == -1:
            self.my_own_business_impact = self.business_impact
        # We look at our crit modulations. If one applies, we apply it
        # and we are done
in_modulation = False
for cm in self.business_impact_modulations:
now = time.time()
period = cm.modulation_period
if period is None or period.is_time_valid(now):
# print("My self", self.get_name(), "go from crit",)
# self.business_impact, "to crit", cm.business_impact
self.business_impact = cm.business_impact
in_modulation = True
# We apply the first available, that's all
break
        # If we truly have impacts, we take the max business_impact
        # if it is higher than our own
if len(self.impacts) != 0:
self.business_impact = max(
self.business_impact, max([e.business_impact for e in self.impacts])
)
return
        # If we are not a problem, we restore our own business_impact
        # if we are not in a modulation period
if self.my_own_business_impact != -1 and not in_modulation:
self.business_impact = self.my_own_business_impact
def no_more_a_problem(self, send_brok=True):
"""
Look for my impacts, and remove me from theirs problems list
This method may be called to correctly reinitialize the object state
after the retention data has been loaded. In such a situation, a
        brok should not be emitted if the state is modified. The send_brok
variable reflects this.
:param bool send_brok: Should a brok be emitted if the object state
is modified.
"""
was_pb = self.is_problem
if self.is_problem:
self.is_problem = False
# we warn impacts that we are no more a problem
for impact in self.impacts:
impact.deregister_a_problem(self, send_brok)
# we can just drop our impacts list
self.impacts = []
# We update our business_impact value, it's not a huge thing :)
self.update_business_impact_value()
# If we were a problem, we say to everyone
# our new status, with good business_impact value
if send_brok is True and was_pb:
# And we register a new broks for update status
b = self.get_update_status_brok()
self.broks.append(b)
def register_a_problem(self, pb, send_brok=True):
"""
        Called recursively by potential impacts so they update their
        source_problems list. But do not propagate further if the problem is
        not a real one for me, e.g. if I have multiple parents.
This method may be called to correctly reinitialize the object state
after the retention data has been loaded. In such a situation, a
        brok should not be emitted if the state is modified. The send_brok
variable reflects this.
:param Item pb: The source problem
:param bool send_brok: Should a brok be emitted if the object state
is modified.
"""
# Maybe we already have this problem? If so, bailout too
if pb in self.source_problems:
return []
now = time.time()
was_an_impact = self.is_impact
        # Our parent already checked whether it impacts us. So if we are
        # here, we really are impacted
self.is_impact = True
impacts = []
# Ok, if we are impacted, we can add it in our
# problem list
# Maybe I was a problem myself, now I can say: not my fault!
if self.is_problem:
self.no_more_a_problem()
        # OK, we are now an impact; we should take the proper state,
        # but only when we just entered the impact state
if not was_an_impact:
self.set_impact_state()
# Ok now we can be a simple impact
impacts.append(self)
if pb not in self.source_problems:
self.source_problems.append(pb)
        # we should send this problem to all potential impacts that
        # depend on us
for (impact, status, dep_type, tp, inh_par) in self.act_depend_of_me:
# Check if the status is ok for impact
for s in status:
if self.is_state(s):
# now check if we should bailout because of a
# not good timeperiod for dep
if tp is None or tp.is_time_valid(now):
new_impacts = impact.register_a_problem(pb)
impacts.extend(new_impacts)
if send_brok is True:
# And we register a new broks for update status
b = self.get_update_status_brok()
self.broks.append(b)
# now we return all impacts (can be void of course)
return impacts
def deregister_a_problem(self, pb, send_brok=True):
"""
        Just remove the problem from our problems list
        and check if we are still 'impacted'. It is not recursive because the
        problem keeps the list of all its impacts
This method may be called to correctly reinitialize the object state
after the retention data has been loaded. In such a situation, a
        brok should not be emitted if the state is modified. The send_brok
variable reflects this.
:param bool send_brok: Should a brok be emitted if the object state
is modified.
"""
self.source_problems.remove(pb)
        # To know if we are still an impact: our dependencies may not be
        # aware yet of the impact state removal because it is not ordered,
        # so just check whether we still have problems in our list
if len(self.source_problems) == 0:
self.is_impact = False
# No more an impact, we can unset the impact state
self.unset_impact_state()
# And we register a new broks for update status
b = self.get_update_status_brok()
self.broks.append(b)
    # When all deps are resolved, this function says whether an
    # action can be raised or not, by looking at the dep statuses:
    # network_dep: all parents have to raise for the action to be blocked
    # logic_dep: just one raising is enough
    def is_no_action_dependent(self):
        # Used to know if a notification is raised or not
        parent_is_down = []
        # If one logic dep raises, we are dependent;
        # if one network parent is OK, we are not dependent;
        # otherwise (all network parents down), the action is blocked
for (dep, status, type, tp, inh_par) in self.act_depend_of:
            # For logic_dep, a single matching state blocks the action
if type == 'logic_dep':
for s in status:
if dep.is_state(s):
return True
            # network_dep is more complicated: the parent is down
            # only if one of the states matches
else:
p_is_down = False
dep_match = [dep.is_state(s) for s in status]
                # check if the parent matches a case, meaning it is down
if True in dep_match:
p_is_down = True
parent_is_down.append(p_is_down)
        # if at least one parent is not down, the deps cannot explain the problem
if False in parent_is_down:
return False
        else:  # all parents are down, so... it's not my fault, unless I want to know about it anyway :)
if hasattr(self, 'notification_options'):
if 'u' in self.notification_options:
return False
else:
return True
else:
return True
    # We check if we are blocked just because of our parents (or the host,
    # for a service)
# TODO: factorize with previous check?
def check_and_set_unreachability(self):
parent_is_down = []
# We must have all parents raised to be unreachable
for (dep, status, type, tp, inh_par) in self.act_depend_of:
            # Only network_dep parents are considered here
if type == 'network_dep':
p_is_down = False
dep_match = [dep.is_state(s) for s in status]
                if True in dep_match:  # the parent matches a case, so it is down
p_is_down = True
parent_is_down.append(p_is_down)
# if a parent is not down, no dep can explain the pb
# or if we don't have any parents
if len(parent_is_down) == 0 or False in parent_is_down:
return
        else:  # all parents are down, so... it's not my fault :)
self.set_unreachable()
return
    # Used to know if I raise a dependency for someone else (given status).
    # If I do not raise the dep myself, maybe one of my deps raises me; if so, I raise it too.
    # So it is a recursive function
def do_i_raise_dependency(self, status, inherit_parents):
# Do I raise dep?
for s in status:
if self.is_state(s):
return True
# If we do not inherit parent, we have no reason to be blocking
if not inherit_parents:
return False
# Ok, I do not raise dep, but my dep maybe raise me
now = time.time()
for (dep, status, type, tp, inh_parent) in self.chk_depend_of:
if dep.do_i_raise_dependency(status, inh_parent):
if tp is None or tp.is_time_valid(now):
return True
# No, I really do not raise...
return False
    # Used to know if my deps prevent me from being checked,
    # so check whether the chk_depend_of entries raise me
def is_no_check_dependent(self):
now = time.time()
for (dep, status, type, tp, inh_parent) in self.chk_depend_of:
if tp is None or tp.is_time_valid(now):
if dep.do_i_raise_dependency(status, inh_parent):
return True
return False
    # Called when consuming a bad check result, where the item sees it has
    # deps and so may not be the real culprit.
def raise_dependencies_check(self, ref_check):
now = time.time()
cls = self.__class__
checks = []
for (dep, status, type, tp, inh_par) in self.act_depend_of:
            # If the dep timeperiod is not valid, do not raise the dep
            # (None means always valid)
if tp is None or tp.is_time_valid(now):
# if the update is 'fresh', do not raise dep,
# cached_check_horizon = cached_service_check_horizon for service
if dep.last_state_update < now - cls.cached_check_horizon:
# Fred : passive only checked host dependency ...
i = dep.launch_check(now, ref_check, dependent=True)
# i = dep.launch_check(now, ref_check)
if i is not None:
checks.append(i)
# else:
# print("DBG: **************** The state is FRESH",)
# dep.host_name, time.asctime(time.localtime(dep.last_state_update))
return checks
# Main scheduling function
# If a check is in progress, or active check are disabled, do
# not schedule a check.
# The check interval change with HARD state or not:
# SOFT: retry_interval
# HARD: check_interval
# The first scheduling is evenly distributed, so all checks
# are not launched at the same time.
#
# TODO: should all check types be forced ?
def schedule(self, force=False, force_time=None):
self.schedule_state_check(force, force_time)
types = [t for t in self.check_variants if t != self.default_check_variant]
for check_variant in types:
self.schedule_standard_check(check_variant, force, force_time)
# Host/service state scheduling function
# If a check is in progress, or active check are disabled, do
# not schedule a check.
# The check interval change with HARD state or not:
# SOFT: retry_interval
# HARD: check_interval
# The first scheduling is evenly distributed, so all checks
# are not launched at the same time.
def schedule_state_check(self, force=False, force_time=None):
        # if last_chk == 0, schedule at a random time so all checks
        # do not run at the same time.
        # If next_chk is already set, do not change it,
        # unless we force the check or the time
if self.is_in_checking() and not (force or force_time):
return None
        cls = self.__class__
        # The interval depends on the state type:
        # HARD (or retry_interval == 0): check_interval
        # SOFT: retry_interval
        if self.state_type == 'HARD' or self.retry_interval == 0:
            interval = self.check_interval * cls.interval_length
        else:  # SOFT state: use retry_interval
            interval = self.retry_interval * cls.interval_length
        # if no active check and no force, no check
        if not (self.active_checks_enabled and cls.execute_checks or force):
            return None
        # If check_interval is 0, we should not schedule it for a service,
        # but assume a 5 min schedule for hosts
if interval == 0 and not force:
if cls.my_type == 'service':
return None
else: # host
self.check_interval = 300 / cls.interval_length
        # Compute the next check time within the check period
self.next_chk = self.get_next_check_time(
interval, self.next_chk, self.check_period, force, force_time)
# If next time is None, do not go
if self.next_chk is None:
            # Nagios does not raise it; I wonder if we should
return None
# Get the command to launch, and put it in queue
self.launch_check(self.next_chk, force=force)
    # Scheduling function for the non-default check variants (e.g. maintenance)
def schedule_standard_check(self, check_variant, force=False, force_time=None):
cls = self.__class__
        # if last_chk == 0, schedule at a random time so all checks
        # do not run at the same time
        # TODO: ensure the check interval is set
# If checks for variant are not enabled, do not schedule any check
checks_enabled = getattr(self, "%s_checks_enabled" % check_variant)
if not checks_enabled:
return None
# Checks if checks are enabled or forced
if not (self.active_checks_enabled and cls.execute_checks or force):
return None
# If already in checking, do not schedule it twice
in_checking = self.is_in_checking(check_variant)
if in_checking and not (force or force_time):
return None
# Defines the check interval depending on the current state
check_interval = getattr(self, "%s_check_interval" % check_variant)
retr_interval = getattr(self, "%s_retry_interval" % check_variant)
state_id = getattr(self, "%s_state_id" % check_variant)
check_period = getattr(self, "%s_check_period" % check_variant)
if state_id == 0 or retr_interval == 0:
interval = check_interval * cls.interval_length
else:
interval = retr_interval * cls.interval_length
# Defines next check timestamp
nchk_attr = "next_%s_chk" % check_variant
next_check = getattr(self, nchk_attr)
next_check = self.get_next_check_time(
interval, next_check, check_period, force, force_time)
setattr(self, nchk_attr, next_check)
# If next time is None, do not go
if next_check is None:
            # Nagios does not raise it; I wonder if we should
return None
# Get the command to launch, and put it in queue
self.launch_check(next_check, force=force, check_variant=check_variant)
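    # Attribute naming convention used above, illustrated for the
    # 'maintenance' variant (the only non-default variant visible in this
    # file): maintenance_checks_enabled, maintenance_check_interval,
    # maintenance_retry_interval, maintenance_state_id,
    # maintenance_check_period and next_maintenance_chk are all looked up
    # dynamically via getattr("%s_..." % check_variant).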
def get_next_check_time(self, interval, next_chk, chk_period=None,
force=False, force_time=None):
cls = self.__class__
now = time.time()
# Determine when a new check (randomize and distribute next check time)
# or recurring check should happen.
if next_chk == 0:
            # On the first scheduling, the interval cannot exceed cls.max_check_spread
            # (service_max_check_spread or host_max_check_spread in the config)
interval = min(interval, cls.max_check_spread *
cls.interval_length)
time_add = interval * random.uniform(0.0, 1.0)
else:
time_add = interval
# Do the actual Scheduling now
# If not force_time, try to schedule
if force_time is None:
# Do not calculate next_chk based on current time, but
# based on the last check execution time.
# Important for consistency of data for trending.
if next_chk == 0 or next_chk is None:
next_chk = now
            # If next_chk is already in the future, do not touch it.
            # But if it is 0, schedule it too
if next_chk <= now:
# maybe we do not have a check_period, if so, take always
# good (24x7)
if chk_period:
next_chk = chk_period.get_next_valid_time_from_t(
next_chk + time_add)
else:
next_chk = int(next_chk + time_add)
                # Maybe next_chk was loaded from retention and its value
                # is still in the past even after adding an interval
if next_chk < now:
interval = min(interval, cls.max_check_spread *
cls.interval_length)
time_add = interval * random.uniform(0.0, 1.0)
                    # if we got a check period, use it; if not, use now
if chk_period:
next_chk = chk_period.get_next_valid_time_from_t(
now + time_add)
else:
next_chk = int(now + time_add)
# else: keep the next_chk value in the future
else:
next_chk = int(force_time)
return next_chk
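    # Worked example (a sketch, assuming interval_length = 60,
    # check_interval = 5 and a max_check_spread that does not cap it):
    # interval is 300s. On the very first scheduling (next_chk == 0) a
    # random offset in [0, 300)s is added, spreading the initial checks;
    # afterwards the next check is simply the previous next_chk plus 300s,
    # snapped to the check period if one is defined.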
    # If we got a system time change, we need to compensate for it by
    # shifting all past values. For active ones like next_chk, the current
    # checks will give us the new value
def compensate_system_time_change(self, difference):
# We only need to change some value
for p in on_time_change_update:
val = getattr(self, p) # current value
# Do not go below 1970 :)
val = max(0, val + difference) # diff may be negative
setattr(self, p, val)
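    # A minimal illustration (hypothetical values): if the system clock
    # jumps back one hour, difference is -3600 and every timestamp named
    # in on_time_change_update (presumably values such as last_chk and
    # last_state_change) is moved back by 3600s, floored at 0 so we never
    # go below the epoch.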
    # To disable active checks, we need to set active_checks_enabled
    # to False, but also fake the current check attempts so the
    # effect is immediate.
def disable_active_checks(self):
self.active_checks_enabled = False
for c in self.checks_in_progress:
c.status = 'waitconsume'
c.exit_status = self.state_id
c.output = self.output
c.check_time = time.time()
c.execution_time = 0
c.perf_data = self.perf_data
def remove_in_progress_check(self, c):
# The check is consumed, update the in_checking properties
if c in self.checks_in_progress:
self.checks_in_progress.remove(c)
def is_in_checking(self, check_variant=None):
if check_variant is None:
check_variant = self.default_check_variant
return len(self.get_checks_in_progress(check_variant)) != 0
def get_checks_in_progress(self, check_variant=None):
if check_variant is None:
check_variant = self.default_check_variant
return [c for c in self.checks_in_progress
if c.check_variant == check_variant]
    # Remove a single notification that has returned
def remove_in_progress_notification(self, n):
if n.id in self.notifications_in_progress:
n.status = 'zombie'
del self.notifications_in_progress[n.id]
    # We do not need our current pending notifications,
    # so we zombify them and clean our list
def remove_in_progress_notifications(self):
for n in list(self.notifications_in_progress.values()):
self.remove_in_progress_notification(n)
    # Get an event handler if the item has an event handler
    # command. It must be enabled locally and globally
def get_event_handlers(self, externalcmd=False):
cls = self.__class__
# The external command always pass
# if not, only if we enable them (auto launch)
if (not self.event_handler_enabled or not cls.enable_event_handlers) and not externalcmd:
return
# If we do not force and we are in downtime, bailout
# if the no_event_handlers_during_downtimes is 1 in conf
if cls.no_event_handlers_during_downtimes and \
not externalcmd and self.in_scheduled_downtime:
return
if self.event_handler is not None:
event_handler = self.event_handler
elif cls.global_event_handler is not None:
event_handler = cls.global_event_handler
else:
return
m = MacroResolver()
data = self.get_data_for_event_handler()
cmd = m.resolve_command(event_handler, data)
rt = event_handler.reactionner_tag
e = EventHandler(cmd, timeout=cls.event_handler_timeout,
ref=self, reactionner_tag=rt)
# print("DBG: Event handler call created")
# print("DBG: ",e.__dict__)
self.raise_event_handler_log_entry(event_handler)
# ok we can put it in our temp action queue
self.actions.append(e)
    # Get an event handler from a snapshot command
def get_snapshot(self):
        # We need a snapshot_command, to be enabled, and of course
        # to be in the right time and state :D
if self.snapshot_command is None:
return
if not self.snapshot_enabled:
return
        # check whether one state matches the criteria
boolmap = [self.is_state(s) for s in self.snapshot_criteria]
if True not in boolmap:
return
# Time based checks now, we should be in the period and not too far
# from the last_snapshot
now = int(time.time())
cls = self.__class__
if self.last_snapshot > now - self.snapshot_interval * cls.interval_length: # too close
return
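        # Worked example (a sketch, assuming interval_length = 60 and
        # snapshot_interval = 5): snapshots are at least 300s apart, so a
        # snapshot taken at time t stays the last one until t + 300 even
        # if the criteria keep matching.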
# no period means 24x7 :)
if self.snapshot_period is not None and not self.snapshot_period.is_time_valid(now):
return
cls = self.__class__
m = MacroResolver()
data = self.get_data_for_event_handler()
cmd = m.resolve_command(self.snapshot_command, data)
rt = self.snapshot_command.reactionner_tag
e = EventHandler(cmd, timeout=cls.event_handler_timeout,
ref=self, reactionner_tag=rt, is_snapshot=True)
self.raise_snapshot_log_entry(self.snapshot_command)
# we save the time we launch the snap
self.last_snapshot = now
# ok we can put it in our temp action queue
self.actions.append(e)
def reprocess_state(self):
"""
Resets object state after retention has been reloaded
"""
# Processes the downtime depth from the currently active downtimes
self.reprocess_ack_and_downtimes_state()
# Enforces the problem/impact attributes processing if the feature is
# enabled
enable_problem_impact = getattr(
self,
"enable_problem_impacts_states_change",
False
)
reprocess_problem_impact = getattr(
self,
"enable_problem_impacts_states_reprocessing",
False
)
if enable_problem_impact is True and reprocess_problem_impact is True:
self.reprocess_problem_impact_state()
def reprocess_ack_and_downtimes_state(self):
"""
Force the evaluation of scheduled_downtime_depth and in_scheduled_downtime
attributes
"""
self.scheduled_downtime_depth = 0
for dt in self.downtimes:
if dt.in_scheduled_downtime():
self.scheduled_downtime_depth += 1
if self.scheduled_downtime_depth > 0:
self.in_scheduled_downtime = True
else:
self.in_scheduled_downtime = False
if getattr(self, "acknowledgement", None) is not None:
self.problem_has_been_acknowledged = True
else:
self.problem_has_been_acknowledged = False
def reprocess_problem_impact_state(self):
"""
        Resets the problem/impact related attributes, which are reprocessed
        from their default values after the retention data has been reloaded.
"""
no_action = self.is_no_action_dependent()
if not no_action and self.state_id != 0 and self.state_type == "HARD":
self.set_myself_as_problem(False)
# We recheck just for network_dep. Maybe we are just unreachable
# and we need to override the state_id
self.check_and_set_unreachability()
# Whenever a non-ok hard state is reached, we must check whether this
# host/service has a flexible downtime waiting to be activated
def check_for_flexible_downtime(self):
status_updated = False
for dt in self.downtimes:
# activate flexible downtimes (do not activate triggered downtimes)
if dt.fixed is False and dt.is_in_effect is False and \
dt.start_time <= self.last_chk and \
self.state_id != 0 and dt.trigger_id == 0:
n = dt.enter() # returns downtimestart notifications
if n is not None:
self.actions.append(n)
status_updated = True
if status_updated is True:
self.broks.append(self.get_update_status_brok())
    # UNKNOWN states during a HARD state are not so important, and they
    # should not raise notifications
def update_hard_unknown_phase_state(self):
self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase
# We do not care about SOFT state at all
# and we are sure we are no more in such a phase
if self.state_type != 'HARD' or self.last_state_type != 'HARD':
self.in_hard_unknown_reach_phase = False
        # So if we are not already in such a phase, we check for
        # a start. Here we are sure to be in a HARD/HARD state
        # sequence
if not self.in_hard_unknown_reach_phase:
if self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN' \
or self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE':
self.in_hard_unknown_reach_phase = True
                # We also back up the state we were in before entering this phase
self.state_before_hard_unknown_reach_phase = self.last_state
return
else:
# if we were already in such a phase, look for its end
if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE':
self.in_hard_unknown_reach_phase = False
        # If we just exited the phase, check whether we left with a different
        # state than we entered with. If so, pretend we were never in the
        # phase, because we then need to raise a new notification
if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase:
if self.state != self.state_before_hard_unknown_reach_phase:
self.was_in_hard_unknown_reach_phase = False
    # Consume a check result and emit actions in return.
    # This is the main reaction function for checks, e.g. raising notifications
def consume_result(self, c):
if c.check_variant == self.default_check_variant:
return self.consume_state_result(c)
elif c.check_variant == 'maintenance':
return self.consume_maintenance_result(c)
    # Consume a check result and emit actions in return.
    # This is the main reaction function for checks, e.g. raising notifications.
    # Special cases:
    # is_flapping: immediate notification on problems
    # in_scheduled_downtime: no notification
    # is_volatile: notify immediately (services only)
def consume_state_result(self, c):
OK_UP = self.__class__.ok_up # OK for service, UP for host
        # We check for stalking if necessary,
        # now that the check result is here
self.manage_stalking(c)
        # Latency can be < 0 if we get a check from the retention file,
        # so if < 0, set it to 0
try:
self.latency = max(0, c.check_time - c.t_to_go)
except TypeError:
pass
# Ok, the first check is done
self.has_been_checked = 1
# Now get data from check
self.execution_time = c.execution_time
self.u_time = c.u_time
self.s_time = c.s_time
self.last_chk = int(c.check_time)
        # Get the output, dropping bad UTF-8 values from plain str ones
        # (we can already get unicode from external commands)
self.output = c.output
self.long_output = c.long_output
# Set the check result type also in the host/service
# 0 = result came from an active check
# 1 = result came from a passive check
self.check_type = c.check_type
# Get the perf_data only if we want it in the configuration
if self.__class__.process_performance_data and self.process_perf_data:
self.last_perf_data = self.perf_data
self.perf_data = c.perf_data
# Before setting state, modulate them
for rm in self.resultmodulations:
if rm is not None:
c.exit_status = rm.module_return(c.exit_status)
        # By-design modulation: for a host we look at the
        # use_aggressive_host_checking flag to modulate a 1 (warning) return:
        # 1 & aggressive => DOWN/2
        # 1 & !aggressive => UP/0
cls = self.__class__
if c.exit_status == 1 and cls.my_type == 'host':
if cls.use_aggressive_host_checking:
c.exit_status = 2
else:
c.exit_status = 0
# If we got a bad result on a normal check, and we have dep,
# we raise dep checks
# put the actual check in waitdep and we return all new checks
if c.exit_status != 0 and c.status == 'waitconsume' and len(self.act_depend_of) != 0:
c.status = 'waitdep'
            # Make sure the check knows about its deps:
            # c is my check, and it wants dependencies
checks_id = self.raise_dependencies_check(c)
for check_id in checks_id:
# Get checks_id of dep
c.depend_on.append(check_id)
            # OK, no longer needed because checks are neither taken
            # by the host/service nor returned
        # remember how we were before this check
self.last_state_type = self.state_type
self.set_state_from_exit_status(c.exit_status)
# Set return_code to exit_status to fill the value in broks
self.return_code = c.exit_status
        # The state changed; whether or not we are in
        # impact mode, we can record it
self.state_changed_since_impact = True
# The check is consumed, update the in_checking properties
self.remove_in_progress_check(c)
        # c is a check and someone waits for it
if c.status == 'waitconsume' and c.depend_on_me != []:
c.status = 'havetoresolvedep'
        # when finished, the check needs to be set to a zombie state to be removed;
        # it can be changed if necessary before returning, e.g. for dependencies
if c.status == 'waitconsume' and c.depend_on_me == []:
c.status = 'zombie'
        # Used to know if a notification is raised or not
no_action = False
        # c was waitdep, but now all deps are resolved, so check the deps
if c.status == 'waitdep':
if c.depend_on_me != []:
c.status = 'havetoresolvedep'
else:
c.status = 'zombie'
# Check deps
no_action = self.is_no_action_dependent()
# We recheck just for network_dep. Maybe we are just unreachable
# and we need to override the state_id
self.check_and_set_unreachability()
# OK following a previous OK. perfect if we were not in SOFT
if c.exit_status == 0 and self.last_state in (OK_UP, 'PENDING'):
# print("Case 1 (OK following a previous OK):)
# code:%s last_state:%s" % (c.exit_status, self.last_state)
self.unacknowledge_problem()
# action in return can be notification or other checks (dependencies)
if (self.state_type == 'SOFT') and self.last_state != 'PENDING':
if self.is_max_attempts() and self.state_type == 'SOFT':
self.state_type = 'HARD'
else:
self.state_type = 'SOFT'
else:
self.attempt = 1
self.state_type = 'HARD'
# OK following a NON-OK.
elif c.exit_status == 0 and self.last_state not in (OK_UP, 'PENDING'):
self.unacknowledge_problem()
# print("Case 2 (OK following a NON-OK):)
# code:%s last_state:%s" % (c.exit_status, self.last_state)
if self.state_type == 'SOFT':
# OK following a NON-OK still in SOFT state
if not c.is_dependent():
self.add_attempt()
self.raise_alert_log_entry()
# Eventhandler gets OK;SOFT;++attempt, no notification needed
self.get_event_handlers()
# Internally it is a hard OK
self.state_type = 'HARD'
self.attempt = 1
elif self.state_type == 'HARD':
# OK following a HARD NON-OK
self.raise_alert_log_entry()
# Eventhandler and notifications get OK;HARD;maxattempts
# Ok, so current notifications are not needed, we 'zombie' them
self.remove_in_progress_notifications()
if not no_action:
self.create_notifications('RECOVERY')
self.get_event_handlers()
# Internally it is a hard OK
self.state_type = 'HARD'
self.attempt = 1
# self.update_hard_unknown_phase_state()
# I'm no more a problem if I was one
self.no_more_a_problem()
# Volatile part
# Only for service
elif c.exit_status != 0 and getattr(self, 'is_volatile', False):
# print("Case 3 (volatile only)")
# There are no repeated attempts, so the first non-ok results
# in a hard state
self.attempt = 1
self.state_type = 'HARD'
            # status != 0, so add a log entry (before actions that can also log;
            # it is smarter to log the error before the notification)
self.raise_alert_log_entry()
self.check_for_flexible_downtime()
self.remove_in_progress_notifications()
if not no_action:
self.create_notifications('PROBLEM')
# Ok, event handlers here too
self.get_event_handlers()
# PROBLEM/IMPACT
# I'm a problem only if I'm the root problem,
# so not no_action:
if not no_action:
self.set_myself_as_problem()
# NON-OK follows OK. Everything was fine, but now trouble is ahead
elif c.exit_status != 0 and self.last_state in (OK_UP, 'PENDING'):
# print("Case 4: NON-OK follows OK: code:%s last_state:%s" %)
# (c.exit_status, self.last_state)
if self.is_max_attempts():
# if max_attempts == 1 we're already in deep trouble
self.state_type = 'HARD'
self.raise_alert_log_entry()
self.remove_in_progress_notifications()
self.check_for_flexible_downtime()
if not no_action:
self.create_notifications('PROBLEM')
                # Oh? This is the typical case for an event handler :)
self.get_event_handlers()
# PROBLEM/IMPACT
# I'm a problem only if I'm the root problem,
# so not no_action:
if not no_action:
self.set_myself_as_problem()
else:
# This is the first NON-OK result. Initiate the SOFT-sequence
# Also launch the event handler, he might fix it.
self.attempt = 1
self.state_type = 'SOFT'
self.raise_alert_log_entry()
self.get_event_handlers()
        # NON-OK following a NON-OK: if HARD, stay HARD; if SOFT,
        # check against self.max_check_attempts;
        # when we go HARD, we send a notification
elif c.exit_status != 0 and self.last_state != OK_UP:
# print("Case 5 (no OK in a no OK): code:%s last_state:%s state_type:%s" %)
# (c.exit_status, self.last_state,self.state_type)
if self.state_type == 'SOFT':
if not c.is_dependent():
self.add_attempt()
if self.is_max_attempts():
# Ok here is when we just go to the hard state
self.state_type = 'HARD'
self.raise_alert_log_entry()
self.remove_in_progress_notifications()
# There is a request in the Nagios trac to enter downtimes
# on soft states which does make sense. If this becomes
# the default behavior, just move the following line
# into the else-branch below.
self.check_for_flexible_downtime()
if not no_action:
self.create_notifications('PROBLEM')
# So event handlers here too
self.get_event_handlers()
# PROBLEM/IMPACT
# I'm a problem only if I'm the root problem,
# so not no_action:
if not no_action:
self.set_myself_as_problem()
else:
self.raise_alert_log_entry()
# eventhandler is launched each time during the soft state
self.get_event_handlers()
else:
# Send notifications whenever the state has changed. (W -> C)
# but not if the current state is UNKNOWN (hard C-> hard U -> hard C should
# not restart notifications)
if self.state != self.last_state:
self.update_hard_unknown_phase_state()
# print(self.last_state, self.last_state_type, self.state_type, self.state)
if not self.in_hard_unknown_reach_phase and not \
self.was_in_hard_unknown_reach_phase:
self.unacknowledge_problem_if_not_sticky()
self.raise_alert_log_entry()
self.remove_in_progress_notifications()
if not no_action:
self.create_notifications('PROBLEM')
elif self.in_scheduled_downtime_during_last_check is True:
                # During the last check I was in a downtime, but now
                # the status is still critical and notifications
                # are possible again: send an alert immediately
self.remove_in_progress_notifications()
if not no_action:
self.create_notifications('PROBLEM')
# PROBLEM/IMPACT
            # Forces problem/impact registration even if no state change
            # was detected, as we may have a non-OK state restored from
            # retention data. This way, we rebuild the problem/impact hierarchy.
# I'm a problem only if I'm the root problem,
# so not no_action:
if not no_action:
self.set_myself_as_problem()
self.update_hard_unknown_phase_state()
# Reset this flag. If it was true, actions were already taken
self.in_scheduled_downtime_during_last_check = False
# now is the time to update state_type_id
# and our last_hard_state
if self.state_type == 'HARD':
self.state_type_id = 1
self.last_hard_state = self.state
self.last_hard_state_id = self.state_id
else:
self.state_type_id = 0
        # Set last_hard_state_change to now
        # if we just changed from SOFT->HARD, or
        # if in HARD we changed state (WARNING->CRITICAL, CRITICAL->OK, etc.)
if self.state_type == 'HARD' and \
(self.last_state_type == 'SOFT' or self.last_state != self.state):
self.last_hard_state_change = int(time.time())
# update event/problem-counters
self.update_event_and_problem_id()
        # Now launch triggers if needed. If the check was raised by a trigger,
        # do not raise a new one
if not c.from_trigger:
self.eval_triggers()
if c.from_trigger or not c.from_trigger and \
len([t for t in self.triggers if t.trigger_broker_raise_enabled]) == 0:
self.broks.append(self.get_check_result_brok())
self.get_obsessive_compulsive_processor_command()
self.get_perfdata_command()
        # Also snapshot if needed :)
self.get_snapshot()
def consume_maintenance_result(self, c):
# The check is consumed, update the in_checking properties
self.remove_in_progress_check(c)
# Get data from check
self.last_maintenance_chk = int(c.check_time)
self.maintenance_check_output = c.output
        if c.in_timeout is True:
            logger.warning(
                "[%s] maintenance check went in timeout, result ignored" %
                self.get_full_name())
        elif c.exit_status == 0:
            # Item is in production
            self.maintenance_state = "PRODUCTION"
            self.maintenance_state_id = 0
elif c.exit_status == 2:
self.maintenance_state = "MAINTENANCE"
self.maintenance_state_id = 1
else:
logger.error(
"[%s] got an invalid return code (%s) from maintenance check "
"command %s, defaulting to production state" % (
self.get_full_name(), c.exit_status,
self.maintenance_last_check_command))
self.maintenance_state = "PRODUCTION"
self.maintenance_state_id = 0
if self.maintenance_state_id != self.last_maintenance_state_id:
self.last_maintenance_state_change = time.time()
self.raise_alert_log_entry("maintenance")
self.last_maintenance_state = self.maintenance_state
self.last_maintenance_state_id = self.maintenance_state_id
if c.status == 'waitconsume':
c.status = 'zombie'
def update_event_and_problem_id(self):
OK_UP = self.__class__.ok_up # OK for service, UP for host
if (self.state != self.last_state and self.last_state != 'PENDING' or
self.state != OK_UP and self.last_state == 'PENDING'):
SchedulingItem.current_event_id += 1
self.last_event_id = self.current_event_id
self.current_event_id = SchedulingItem.current_event_id
# now the problem_id
if self.state != OK_UP and self.last_state == 'PENDING':
# broken ever since i can remember
SchedulingItem.current_problem_id += 1
self.last_problem_id = self.current_problem_id
self.current_problem_id = SchedulingItem.current_problem_id
elif self.state != OK_UP and self.last_state != OK_UP:
# State transitions between non-OK states
# (e.g. WARNING to CRITICAL) do not cause
# this problem id to increase.
pass
elif self.state == OK_UP:
# If the service is currently in an OK state,
# this macro will be set to zero (0).
self.last_problem_id = self.current_problem_id
self.current_problem_id = 0
else:
# Every time a service (or host) transitions from
# an OK or UP state to a problem state, a global
# problem ID number is incremented by one (1).
SchedulingItem.current_problem_id += 1
self.last_problem_id = self.current_problem_id
self.current_problem_id = SchedulingItem.current_problem_id
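    # Worked example (a sketch): for the sequence OK -> WARNING -> CRITICAL
    # -> OK, the WARNING transition increments the global problem id and
    # assigns it to current_problem_id; the WARNING -> CRITICAL transition
    # keeps it unchanged; and the recovery to OK stores it in
    # last_problem_id and resets current_problem_id to 0.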
    # Called by the scheduler when a notification is
    # ready to be sent (fully prepared for the
    # reactionner). Here we update the command with the
    # current status, add the contact to the set of
    # contacts we notified, and raise the log entry
def prepare_notification_for_sending(self, n):
if n.status == 'inpoller':
self.update_notification_command(n)
self.notified_contacts.add(n.contact)
self.raise_notification_log_entry(n)
    # Just update the notification command by resolving macros.
    # And because we are just launching the notification, we can say
    # that this contact has been notified
def update_notification_command(self, n):
cls = self.__class__
m = MacroResolver()
data = self.get_data_for_notifications(n.contact, n)
n.command = m.resolve_command(n.command_call, data)
if cls.enable_environment_macros or n.enable_environment_macros:
n.env = m.get_env_macros(data)
# See if an escalation is eligible at t and notif nb=n
def is_escalable(self, n):
cls = self.__class__
        # We compute for how long we have been in notification, for
        # escalations that are time based
in_notif_time = time.time() - n.creation_time
        # Check if an escalation matches the current_notification_number
for es in self.escalations:
if es.is_eligible(n.t_to_go, self.state, n.notif_nb,
in_notif_time, cls.interval_length):
return True
return False
    # Give the next notification time for a notification,
    # taking the standard notification_interval or asking our
    # escalations if one of them needs a smaller value to escalate
def get_next_notification_time(self, n):
res = None
now = time.time()
cls = self.__class__
# Look at the minimum notification interval
notification_interval = self.notification_interval
        # then look at currently eligible escalations, and take their
        # notification_interval if set and smaller than our own value
in_notif_time = time.time() - n.creation_time
for es in self.escalations:
if es.is_eligible(n.t_to_go, self.state, n.notif_nb,
in_notif_time, cls.interval_length):
if es.notification_interval != -1 and \
es.notification_interval < notification_interval:
notification_interval = es.notification_interval
        # So take the default time
std_time = n.t_to_go + notification_interval * cls.interval_length
        # Maybe the notification comes from retention data and the
        # next notification time is in the past;
        # if so, use the now value instead
if std_time < now:
std_time = now + notification_interval * cls.interval_length
# standard time is a good one
res = std_time
creation_time = n.creation_time
in_notif_time = now - n.creation_time
for es in self.escalations:
# If the escalation was already raised, we do not look for a new "early start"
if es.get_name() not in n.already_start_escalations:
r = es.get_next_notif_time(std_time, self.state, creation_time, cls.interval_length)
# If we got a real result (time base escalation), we add it
if r is not None and now < r < res:
res = r
        # And we take the minimum of the results: either the standard time or one an escalation asked for
return res
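    # Worked example (a sketch, assuming interval_length = 60 and
    # notification_interval = 30): the next notification is due 1800s after
    # n.t_to_go; if an eligible escalation defines notification_interval = 10,
    # the delay shrinks to 600s.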
# Get all contacts (uniq) from eligible escalations
def get_escalable_contacts(self, n):
cls = self.__class__
        # We compute for how long we have been in notification, for
        # escalations that are time based
in_notif_time = time.time() - n.creation_time
contacts = set()
for es in self.escalations:
if es.is_eligible(n.t_to_go, self.state, n.notif_nb,
in_notif_time, cls.interval_length):
contacts.update(es.contacts)
# And we tag this escalations as started now
n.already_start_escalations.add(es.get_name())
return list(contacts)
# Create a "master" notification here, which will later
# (immediately before the reactionner gets it) be split up
# in many "child" notifications, one for each contact.
def create_notifications(self, type, t_wished=None):
cls = self.__class__
# t_wished==None for the first notification launch after consume
# here we must look at the self.notification_period
if t_wished is None:
now = time.time()
t_wished = now
# if first notification, we must add first_notification_delay
if self.current_notification_number == 0 and type == 'PROBLEM':
last_time_non_ok_or_up = self.last_time_non_ok_or_up()
if last_time_non_ok_or_up == 0:
# this happens at initial
t_wished = now + self.first_notification_delay * cls.interval_length
else:
t_wished = last_time_non_ok_or_up + \
self.first_notification_delay * cls.interval_length
if self.notification_period is None:
t = int(now)
else:
t = self.notification_period.get_next_valid_time_from_t(t_wished)
else:
# We follow our order
t = t_wished
if self.notification_is_blocked_by_item(type, t_wished) and \
self.first_notification_delay == 0 and self.notification_interval == 0:
# If notifications are blocked on the host/service level somehow
# and repeated notifications are not configured,
# we can silently drop this one
return
if type == 'PROBLEM':
# Create the notification with an incremented notification_number.
# The current_notification_number of the item itself will only
# be incremented when this notification (or its children)
            # have actually been sent.
next_notif_nb = self.current_notification_number + 1
elif type == 'RECOVERY':
# Recovery resets the notification counter to zero
self.current_notification_number = 0
next_notif_nb = self.current_notification_number
else:
# downtime/flap/etc do not change the notification number
next_notif_nb = self.current_notification_number
n = Notification(type, 'scheduled', 'VOID', None, self, None, t,
timeout=cls.notification_timeout,
notif_nb=next_notif_nb)
# Keep a trace in our notifications queue
self.notifications_in_progress[n.id] = n
# and put it in the temp queue for scheduler
self.actions.append(n)
# In create_notifications we created a notification "template". When it's
# time to hand it over to the reactionner, this master notification needs
    # to be split into several child notifications, one for each contact.
    # To be more exact, one for each contact who is willing to accept
    # notifications of this type and at this time
def scatter_notification(self, n):
cls = self.__class__
childnotifications = []
escalated = False
if n.contact:
# only master notifications can be split up
return []
if n.type == 'RECOVERY':
if self.first_notification_delay != 0 and len(self.notified_contacts) == 0:
# Recovered during first_notification_delay. No notifications
# have been sent yet, so we keep quiet
contacts = []
else:
# The old way. Only send recover notifications to those contacts
# who also got problem notifications
contacts = list(self.notified_contacts)
self.notified_contacts.clear()
else:
            # Check if an escalation matches. If yes, get all contacts from escalations
if self.is_escalable(n):
contacts = self.get_escalable_contacts(n)
escalated = True
# else take normal contacts
else:
contacts = self.contacts
for contact in contacts:
            # We do not want to notify again a contact with
            # notification_interval == 0 that has already been
            # notified. Can happen when a service exits a downtime
            # and is still CRITICAL/WARNING (and not acknowledged)
if n.type == "PROBLEM" and \
self.notification_interval == 0 \
and contact in self.notified_contacts:
continue
# Get the property name for notif commands, like
# service_notification_commands for service
notif_commands = contact.get_notification_commands(cls.my_type)
for cmd in notif_commands:
rt = cmd.reactionner_tag
child_n = Notification(n.type, 'scheduled', 'VOID', cmd, self,
contact, n.t_to_go, escalated=escalated,
timeout=cls.notification_timeout,
notif_nb=n.notif_nb, reactionner_tag=rt,
module_type=cmd.module_type,
enable_environment_macros=cmd.enable_environment_macros)
if not self.notification_is_blocked_by_contact(child_n, contact):
# Update the notification with fresh status information
# of the item. Example: during the notification_delay
# the status of a service may have changed from WARNING to CRITICAL
self.update_notification_command(child_n)
self.raise_notification_log_entry(child_n)
self.notifications_in_progress[child_n.id] = child_n
childnotifications.append(child_n)
if n.type == 'PROBLEM':
# Remember the contacts. We might need them later in the
# recovery code some lines above
self.notified_contacts.add(contact)
return childnotifications
    # Launch a check for the host/service
    # and return the id of the check.
    # Fred: passive-only checked host dependency
def launch_check(self, t, ref_check=None, force=False, dependent=False,
check_variant=None):
if check_variant is None:
check_variant = self.default_check_variant
c = None
cls = self.__class__
# the check is being forced, so we just replace next_chk time by now
in_checking = self.is_in_checking(check_variant)
if force and in_checking:
now = time.time()
c_in_progress = self.get_checks_in_progress(check_variant)[0]
c_in_progress.t_to_go = now
return c_in_progress.id
        # If I'm already in checking, why launch a new check?
        # If ref_check is not None, this is a dependency check;
        # if None, it might be a forced check, so OK, launch a new one.
        # For a dependency check we have to create a new check that will be launched only once (now).
        # Otherwise it would delay the next real check, which can lead to an infinite SOFT state.
if not force and in_checking and ref_check is not None:
c_in_progress = self.get_checks_in_progress(check_variant)[0] # 0 is OK because in_checking is True
            # c_in_progress has almost everything we need, but we can't copy.deepcopy() it;
            # we need another c.id
command_line = c_in_progress.command
timeout = c_in_progress.timeout
poller_tag = c_in_progress.poller_tag
env = c_in_progress.env
module_type = c_in_progress.module_type
check_variant = c_in_progress.check_variant
priority = c_in_progress.priority
c = Check('scheduled', command_line, self, t, ref_check,
timeout=timeout,
poller_tag=poller_tag,
env=env,
module_type=module_type,
check_variant=check_variant,
priority=priority,
dependency_check=True)
self.actions.append(c)
# print("Creating new check with new id : %d, old id : %d" % (c.id, c_in_progress.id))
return c.id
if force or check_variant != self.default_check_variant or \
not self.is_no_check_dependent():
# Fred : passive only checked host dependency
if dependent and self.my_type == 'host' and \
self.passive_checks_enabled and not self.active_checks_enabled:
logger.debug("Host check is for a host that is only passively "
"checked (%s), do not launch the check !", self.host_name)
return None
# By default we will use our default check_command
if check_variant == self.default_check_variant:
check_command = self.check_command
modulations = self.checkmodulations
else:
check_command = getattr(self, "%s_check_command" % check_variant)
modulations = []
            # But if a check modulation is available, use its command instead.
            # Take the first available
for cw in modulations:
c_cw = cw.get_check_command(t)
if c_cw:
check_command = c_cw
break
# Get the command to launch
m = MacroResolver()
data = self.get_data_for_checks()
command_line = m.resolve_command(check_command, data)
            # remember it, for pure debugging purposes
if check_variant == self.default_check_variant:
self.last_check_command = command_line
else:
setattr(self, "%s_last_check_command" % check_variant, command_line)
# By default env is void
env = {}
# And get all environment variables only if needed
if cls.enable_environment_macros or check_command.enable_environment_macros:
env = m.get_env_macros(data)
            # By default we take the global timeout, but we use the command's
            # if it defines one (by default it's -1)
timeout = cls.check_timeout
if check_command.timeout != -1:
timeout = check_command.timeout
priority = check_command.priority
# Make the Check object and put the service in checking
# Make the check inherit poller_tag from the command
# And reactionner_tag too
c = Check('scheduled', command_line, self, t, ref_check,
timeout=timeout, poller_tag=check_command.poller_tag,
env=env, module_type=check_command.module_type,
check_variant=check_variant, priority=priority)
            # We keep a trace of all checks in progress
            # to know whether we are in checking or not
self.checks_in_progress.append(c)
# We need to put this new check in our actions queue
# so scheduler can take it
if c is not None:
self.actions.append(c)
return c.id
        # None means I already took it into account
return None
# returns either 0 or a positive number
# 0 == don't check for orphans
# non-zero == number of secs that can pass before
# marking the check an orphan.
def get_time_to_orphanage(self):
# if disabled program-wide, disable it
if not self.check_for_orphaned:
return 0
# otherwise, check what my local conf says
if self.time_to_orphanage <= 0:
return 0
return self.time_to_orphanage
    # Get the perfdata command with macros resolved for this item
def get_perfdata_command(self):
cls = self.__class__
if not cls.process_performance_data or not self.process_perf_data:
return
if cls.perfdata_command is not None:
m = MacroResolver()
data = self.get_data_for_event_handler()
cmd = m.resolve_command(cls.perfdata_command, data)
reactionner_tag = cls.perfdata_command.reactionner_tag
e = EventHandler(cmd, timeout=cls.perfdata_timeout,
ref=self, reactionner_tag=reactionner_tag)
# ok we can put it in our temp action queue
self.actions.append(e)
# Create the whole business rule tree
# if we need it
def create_business_rules(self, hosts, services, running=False):
cmdCall = getattr(self, 'check_command', None)
# If we do not have a command, we bailout
if cmdCall is None:
return
        # we get our base command, like
        # check_tcp!80 -> check_tcp
cmd = cmdCall.call
elts = cmd.split('!')
base_cmd = elts[0]
# If it's bp_rule, we got a rule :)
if base_cmd == 'bp_rule':
# print("Got rule", elts, cmd)
self.got_business_rule = True
rule = ''
if len(elts) >= 2:
rule = '!'.join(elts[1:])
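            # A hypothetical illustration: a check_command such as
            #     bp_rule!host1,srv1 & host2,srv2
            # yields base_cmd == 'bp_rule' and rule == 'host1,srv1 & host2,srv2'
            # (the join restores any '!' that appeared inside the rule itself).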
# Only (re-)evaluate the business rule if it has never been
# evaluated before, or it contains a macro.
if re.match(r"\$[\w\d_-]+\$", rule) or self.business_rule is None:
data = self.get_data_for_checks()
m = MacroResolver()
rule = m.resolve_simple_macros_in_string(rule, data)
prev = getattr(self, "processed_business_rule", "")
if rule == prev:
# Business rule did not change (no macro was modulated)
return
fact = DependencyNodeFactory(self)
node = fact.eval_cor_pattern(rule, hosts, services, running)
# print("got node", node)
self.processed_business_rule = rule
self.business_rule = node
def get_business_rule_output(self):
"""
        Returns a status string for business rule based items, formatted
        using the business_rule_output_template attribute as a template.
        The template may embed output formatting for itself, and for its
        child (dependent) items. The child format string is expanded between
        $( and )$, using the string between the brackets as the format string.
        Any business rule based item or child macro may be used. In addition,
        the $STATUS$, $SHORTSTATUS$ and $FULLNAME$ macros, whose names are
        common to hosts and services, may be used to ease template writing.
        Caution: only children in a non-OK state are displayed.
        Example:
        A business rule with a format string looking like
        "$STATUS$ [ $($STATUS$: $HOSTNAME$,$SERVICEDESC$ )$ ]"
        would return
        "CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2 ]"
"""
got_business_rule = getattr(self, 'got_business_rule', False)
# Checks that the service is a business rule.
if got_business_rule is False or self.business_rule is None:
return ""
# Checks that the business rule has a format specified.
output_template = self.business_rule_output_template
if not output_template:
return ""
m = MacroResolver()
# Extracts children template strings
elts = re.findall(r"\$\((.*)\)\$", output_template)
if not len(elts):
child_template_string = ""
else:
child_template_string = elts[0]
# Processes child services output
children_output = ""
ok_count = 0
# Expands child items format string macros.
items = self.business_rule.list_all_elements()
for item in items:
# Do not display children in OK state
if item.last_hard_state_id == 0:
ok_count += 1
continue
data = item.get_data_for_checks()
children_output += m.resolve_simple_macros_in_string(child_template_string, data)
if ok_count == len(items):
children_output = "all checks were successful."
# Replaces children output string
        template_string = re.sub(r"\$\(.*\)\$", children_output, output_template)
data = self.get_data_for_checks()
output = m.resolve_simple_macros_in_string(template_string, data)
return output.strip()
    # Processes business rule notification behaviour. If all problems have
    # been acknowledged, no notification should be sent while the state is
    # not OK. By default, downtimes are ignored, unless explicitly told to
    # treat them as acknowledgements via the business_rule_downtime_as_ack option.
def business_rule_notification_is_blocked(self):
        # Walk through the problems to check whether all non-OK items are
        # acknowledged or in a downtime period.
acknowledged = 0
for s in self.source_problems:
if s.last_hard_state_id != 0:
if s.problem_has_been_acknowledged:
                    # Problem has been acknowledged
acknowledged += 1
                # Only check problems under downtime if we are
                # explicitly told to do so.
elif self.business_rule_downtime_as_ack is True:
if s.scheduled_downtime_depth > 0:
                        # Problem is under downtime, and downtimes should be
                        # treated as acknowledgements
acknowledged += 1
elif hasattr(s, "host") and s.host.scheduled_downtime_depth > 0:
                        # Host is under downtime, and downtimes should be
                        # treated as acknowledgements
acknowledged += 1
if acknowledged == len(self.source_problems):
return True
else:
return False
    # We are asked to manage our own internal check,
    # like a business rule based one
def manage_internal_check(self, hosts, services, c):
# print("DBG, ask me to manage a check!")
if c.command.startswith('bp_'):
try:
                # Re-evaluate the business rule to take macro
                # modulation into account.
                # Caution: we consider that the macro modulation did not
                # change the business rule dependency tree. Only Xof: values
                # should be modified by modulation.
self.create_business_rules(hosts, services, running=True)
state = self.business_rule.get_state()
c.output = self.get_business_rule_output()
except Exception as e:
# Notifies the error, and return an UNKNOWN state.
c.output = "Error while re-evaluating business rule: %s" % e
logger.debug("[%s] Error while re-evaluating business rule:\n%s",
self.get_name(), traceback.format_exc())
state = 3
# _internal_host_up is for putting host as UP
elif c.command == '_internal_host_up':
state = 0
c.execution_time = 0
c.output = 'Host assumed to be UP'
# Echo is just putting the same state again
elif c.command == '_echo':
state = self.state
c.execution_time = 0
c.output = self.output
c.long_output = c.output
c.check_time = time.time()
c.exit_status = state
# print("DBG, setting state", state)
    # If I'm a business rule service/host, I register myself with the
    # elements I depend on, so they will have ME as an impact
def create_business_rules_dependencies(self):
if self.got_business_rule:
# print("DBG: ask me to register me in my dependencies", self.get_name())
elts = self.business_rule.list_all_elements()
# I will register myself in this
for e in elts:
# print("I register to the element", e.get_name())
# all states, every timeperiod, and inherit parents
e.add_business_rule_act_dependency(self, ['d', 'u', 's', 'f', 'c', 'w'], None, True)
            # Enforce child host/service notification options if told to
            # do so (business_rule_(host|service)_notification_options
            # set).
if e.my_type == "host" and self.business_rule_host_notification_options:
e.notification_options = self.business_rule_host_notification_options
if e.my_type == "service" and self.business_rule_service_notification_options:
e.notification_options = self.business_rule_service_notification_options
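    # Net effect (sketch): if this item is the business rule "ERP" (a
    # hypothetical name used for illustration), every element appearing in
    # its rule expression gets ("ERP", [statuses], 'business_dep', None, True)
    # appended to its act_depend_of_me list, so a state change on any element
    # marks "ERP" as impacted.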
def rebuild_ref(self):
""" Rebuild the possible reference a schedulingitem can have """
for g in self.comments, self.downtimes:
for o in g:
o.ref = self
# Go launch all our triggers
def eval_triggers(self):
for t in self.triggers:
try:
t.eval(self)
            except Exception:
                logger.error(
                    "We got an exception from a trigger on %s: %s",
                    self.get_full_name(), traceback.format_exc()
                )
# =====================================================================
# File: shinken-solutions_shinken/shinken/objects/contactgroup.py
# =====================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Contactgroups are groups for contacts.
# They are only used at configuration read time, then exploded into elements.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.itemgroup import Itemgroup, Itemgroups
from shinken.property import IntegerProp, StringProp
from shinken.log import logger
class Contactgroup(Itemgroup):
id = 1
my_type = 'contactgroup'
properties = Itemgroup.properties.copy()
properties.update({
'id': IntegerProp(default=0, fill_brok=['full_status']),
'contactgroup_name': StringProp(fill_brok=['full_status']),
'contactgroup_members': StringProp(fill_brok=['full_status']),
'alias': StringProp(fill_brok=['full_status']),
})
macros = {
'CONTACTGROUPALIAS': 'alias',
'CONTACTGROUPMEMBERS': 'get_members'
}
def get_contacts(self):
if getattr(self, 'members', None) is not None:
return [m for m in self.members]
else:
return []
def get_name(self):
return getattr(self, 'contactgroup_name', 'UNNAMED-CONTACTGROUP')
def get_contactgroup_members(self):
if self.has('contactgroup_members'):
return [m.strip() for m in self.contactgroup_members.split(',')]
else:
return []
    # We fulfill properties with template ones if needed.
    # Because a contactgroup we call may not have its members yet,
    # we call get_contacts_by_explosion on it
def get_contacts_by_explosion(self, contactgroups):
        # First we tag the group so it will not be exploded
        # again if a son of it already called it
        self.already_explode = True
        # Now the recursive part.
        # rec_tag is set to False on every CG we explode,
        # so if it is True here, there must be a loop in the
        # CG calls... not GOOD!
if self.rec_tag:
logger.error("[contactgroup::%s] got a loop in contactgroup definition",
self.get_name())
if self.has('members'):
return self.members
else:
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
cg_mbrs = self.get_contactgroup_members()
for cg_mbr in cg_mbrs:
cg = contactgroups.find_by_name(cg_mbr.strip())
if cg is not None:
value = cg.get_contacts_by_explosion(contactgroups)
if value is not None:
self.add_string_member(value)
if self.has('members'):
return self.members
else:
return ''
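    # Example for the explosion above (assumed configuration, illustration
    # only): a contactgroup "admins" with contactgroup_members
    # "linux-admins,db-admins" ends up with the members of both sub-groups
    # merged into its own members; nesting is resolved recursively, and
    # rec_tag guards against definition loops such as A -> B -> A.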
class Contactgroups(Itemgroups):
name_property = "contactgroup_name" # is used for finding contactgroup
inner_class = Contactgroup
def get_members_by_name(self, cgname):
cg = self.find_by_name(cgname)
if cg is None:
return []
return cg.get_contacts()
def add_contactgroup(self, cg):
self.add_item(cg)
def linkify(self, contacts):
self.linkify_cg_by_cont(contacts)
    # We just search for each contact name the matching contact
    # object and replace the name by the object
def linkify_cg_by_cont(self, contacts):
for cg in self:
mbrs = cg.get_contacts()
            # The new member list, as objects
new_mbrs = []
for mbr in mbrs:
                mbr = mbr.strip()  # protect with strip at the beginning so we don't care about spaces
if mbr == '': # void entry, skip this
continue
m = contacts.find_by_name(mbr)
# Maybe the contact is missing, if so, must be put in unknown_members
if m is not None:
new_mbrs.append(m)
else:
cg.add_string_unknown_member(mbr)
            # Make members unique
            new_mbrs = list(set(new_mbrs))
            # We found the objects, we replace the names
cg.replace_members(new_mbrs)
    # Add a contact string to a contactgroup member list;
    # if the contact group does not exist, create it
def add_member(self, cname, cgname):
cg = self.find_by_name(cgname)
        # if the group does not exist, create the cg
if cg is None:
cg = Contactgroup({'contactgroup_name': cgname, 'alias': cgname, 'members': cname})
self.add_contactgroup(cg)
else:
cg.add_string_member(cname)
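    # Usage sketch for add_member above (hypothetical names):
    #   contactgroups.add_member('jdoe', 'admins')
    # appends contact 'jdoe' to group 'admins', creating the group on the
    # fly (with alias == name) when it does not exist yet.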
    # Used to fill members with contactgroup_members
def explode(self):
        # We do not want the same cg to be exploded again and again,
        # so we tag it
for tmp_cg in self.items.values():
tmp_cg.already_explode = False
for cg in self.items.values():
if cg.has('contactgroup_members') and not cg.already_explode:
                # get_contacts_by_explosion is a recursive
                # function, so we must tag each cg so we do not loop
for tmp_cg in self.items.values():
tmp_cg.rec_tag = False
cg.get_contacts_by_explosion(self)
# We clean the tags
for tmp_cg in self.items.values():
if hasattr(tmp_cg, 'rec_tag'):
del tmp_cg.rec_tag
del tmp_cg.already_explode
# =====================================================================
# File: shinken-solutions_shinken/shinken/objects/servicegroup.py
# =====================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.property import StringProp, IntegerProp
from shinken.log import logger
from .itemgroup import Itemgroup, Itemgroups
from .service import Service
class Servicegroup(Itemgroup):
id = 1 # zero is always a little bit special... like in database
my_type = 'servicegroup'
properties = Itemgroup.properties.copy()
properties.update({
'id': IntegerProp(default=0, fill_brok=['full_status']),
'servicegroup_name': StringProp(fill_brok=['full_status']),
'servicegroup_members': StringProp(fill_brok=['full_status']),
'alias': StringProp(fill_brok=['full_status']),
'notes': StringProp(default='', fill_brok=['full_status']),
'notes_url': StringProp(default='', fill_brok=['full_status']),
'action_url': StringProp(default='', fill_brok=['full_status']),
})
macros = {
'SERVICEGROUPALIAS': 'alias',
'SERVICEGROUPMEMBERS': 'members',
'SERVICEGROUPNOTES': 'notes',
'SERVICEGROUPNOTESURL': 'notes_url',
'SERVICEGROUPACTIONURL': 'action_url'
}
def get_services(self):
if getattr(self, 'members', None) is not None:
return self.members
else:
return []
def get_name(self):
return self.servicegroup_name
def get_servicegroup_members(self):
if self.has('servicegroup_members'):
return [m.strip() for m in self.servicegroup_members.split(',')]
else:
return []
    # We fulfill properties with template ones if needed.
    # Because a servicegroup we call may not have its members yet,
    # we call get_services_by_explosion on it
def get_services_by_explosion(self, servicegroups):
        # First we tag the group so it will not be exploded
        # again if a son of it already called it
        self.already_explode = True
        # Now the recursive part.
        # rec_tag is set to False on every SG we explode,
        # so if it is True here, there must be a loop in the
        # SG calls... not GOOD!
if self.rec_tag:
logger.error("[servicegroup::%s] got a loop in servicegroup definition",
self.get_name())
if self.has('members'):
return self.members
else:
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
sg_mbrs = self.get_servicegroup_members()
for sg_mbr in sg_mbrs:
sg = servicegroups.find_by_name(sg_mbr.strip())
if sg is not None:
value = sg.get_services_by_explosion(servicegroups)
if value is not None:
self.add_string_member(value)
if self.has('members'):
return self.members
else:
return ''
class Servicegroups(Itemgroups):
name_property = "servicegroup_name" # is used for finding servicegroup
inner_class = Servicegroup
def linkify(self, hosts, services):
self.linkify_sg_by_srv(hosts, services)
    # We just search for each (host, service) pair the matching
    # service object and replace the names by the object
    # TODO: very slow for a high number of services, so search with
    # the host list, not the service one
def linkify_sg_by_srv(self, hosts, services):
for sg in self:
mbrs = sg.get_services()
            # The new member list, as objects
new_mbrs = []
seek = 0
host_name = ''
if len(mbrs) == 1 and mbrs[0] != '':
sg.add_string_unknown_member('%s' % mbrs[0])
for mbr in mbrs:
if seek % 2 == 0:
host_name = mbr.strip()
else:
service_desc = mbr.strip()
find = services.find_srv_by_name_and_hostname(host_name, service_desc)
if find is not None:
new_mbrs.append(find)
else:
host = hosts.find_by_name(host_name)
if not (host and host.is_excluded_for_sdesc(service_desc)):
sg.add_string_unknown_member('%s,%s' % (host_name, service_desc))
elif host:
self.configuration_warnings.append(
'servicegroup %r : %s is excluded from the services of the host %s'
% (sg, service_desc, host_name)
)
seek += 1
            # Make members unique
            new_mbrs = list(set(new_mbrs))
            # We found the objects, we replace the names
sg.replace_members(new_mbrs)
for s in sg.members:
s.servicegroups.append(sg)
                # and make this unique
s.servicegroups = list(set(s.servicegroups))
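    # Note on the members format implied by the seek % 2 logic above:
    # servicegroup members alternate host name and service description,
    # e.g. "srv1,Load,srv1,Disk" (hypothetical values) pairs srv1/Load
    # and srv1/Disk; a single lone entry is flagged as an unknown member.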
    # Add a service string to a servicegroup member list;
    # if the service group does not exist, create it
def add_member(self, cname, sgname):
sg = self.find_by_name(sgname)
        # if the group does not exist, create the sg
if sg is None:
sg = Servicegroup({'servicegroup_name': sgname, 'alias': sgname, 'members': cname})
self.add(sg)
else:
sg.add_string_member(cname)
    # Used to fill members with servicegroup_members
def explode(self):
        # We do not want the same sg to be exploded again and again,
        # so we tag it
for sg in self:
sg.already_explode = False
for sg in self:
if sg.has('servicegroup_members') and not sg.already_explode:
                # get_services_by_explosion is a recursive
                # function, so we must tag each sg so we do not loop
for sg2 in self:
sg2.rec_tag = False
sg.get_services_by_explosion(self)
# We clean the tags
for sg in self:
try:
del sg.rec_tag
except AttributeError:
pass
del sg.already_explode
# =====================================================================
# File: shinken-solutions_shinken/shinken/objects/arbiterlink.py
# =====================================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import socket
from shinken.objects.satellitelink import SatelliteLink, SatelliteLinks
from shinken.property import IntegerProp, StringProp
from shinken.http_client import HTTPException
from shinken.log import logger
""" TODO: Add some comment about this class for the doc"""
class ArbiterLink(SatelliteLink):
id = 0
my_type = 'arbiter'
properties = SatelliteLink.properties.copy()
properties.update({
'arbiter_name': StringProp(),
'host_name': StringProp(default=six.u(socket.gethostname())),
'port': IntegerProp(default=7770),
})
def get_name(self):
return self.arbiter_name
def get_config(self):
return self.con.get('get_config')
    # Look for ourselves as an arbiter. If we search for a specific
    # arbiter name, go for it. If not, look for our fqdn name or,
    # failing that, our hostname
def is_me(self, lookup_name):
logger.info("And arbiter is launched with the hostname:%s "
"from an arbiter point of view of addr:%s",
self.host_name, socket.getfqdn())
if lookup_name:
return lookup_name == self.get_name()
else:
return self.host_name in (socket.getfqdn(), socket.gethostname())
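    # Example for is_me above: if this link's arbiter_name is
    # "arbiter-master" (hypothetical), is_me("arbiter-master") returns True;
    # with no lookup name, the match falls back to the local fqdn/hostname.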
def give_satellite_cfg(self):
return {'port': self.port, 'address': self.address, 'name': self.arbiter_name,
'use_ssl': self.use_ssl, 'hard_ssl_name_check': self.hard_ssl_name_check}
def do_not_run(self):
if self.con is None:
self.create_connection()
try:
self.con.get('do_not_run')
return True
except HTTPException as exp:
self.con = None
return False
def get_satellite_list(self, daemon_type):
if self.con is None:
self.create_connection()
try:
r = self.con.get_satellite_list(daemon_type)
return r
except HTTPException as exp:
self.con = None
return []
def get_satellite_status(self, daemon_type, name):
if self.con is None:
self.create_connection()
try:
r = self.con.get_satellite_status(daemon_type, name)
return r
except HTTPException as exp:
self.con = None
return {}
def get_all_states(self):
if self.con is None:
self.create_connection()
try:
r = self.con.get('get_all_states')
return r
except HTTPException as exp:
self.con = None
return None
def get_objects_properties(self, table, properties=[]):
if self.con is None:
self.create_connection()
try:
r = self.con.get('get_objects_properties', {'table': table, 'properties': properties})
return r
except HTTPException as exp:
self.con = None
return None
class ArbiterLinks(SatelliteLinks):
name_property = "arbiter_name"
inner_class = ArbiterLink
# We must have a realm property, so we find our realm
def linkify(self, modules):
self.linkify_s_by_plug(modules)
# =====================================================================
# File: shinken-solutions_shinken/shinken/objects/hostescalation.py
# =====================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.objects.escalation import Escalation
from shinken.property import IntegerProp, StringProp, ListProp
class Hostescalation(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'hostescalation'
properties = Item.properties.copy()
properties.update({
'host_name': StringProp(),
'hostgroup_name': StringProp(),
'first_notification': IntegerProp(),
'last_notification': IntegerProp(),
'notification_interval': IntegerProp(default=30), # like Nagios value
'escalation_period': StringProp(default=''),
'escalation_options': ListProp(default=['d', 'u', 'r', 'w', 'c']),
'contacts': StringProp(),
'contact_groups': StringProp(),
'first_notification_time': IntegerProp(),
'last_notification_time': IntegerProp(),
})
# For debugging purpose only (nice name)
def get_name(self):
return ''
class Hostescalations(Items):
name_property = ""
inner_class = Hostescalation
    # We look for the contacts property in contacts and contact_groups
def explode(self, escalations):
        # Now we explode all host escalations (host_name, hostgroup_name) into generic escalations
for es in self:
properties = es.__class__.properties
name = getattr(es, 'host_name', getattr(es, 'hostgroup_name', ''))
creation_dict = {'escalation_name': 'Generated-Hostescalation-%d-%s' % (es.id, name)}
for prop in properties:
if hasattr(es, prop):
creation_dict[prop] = getattr(es, prop)
s = Escalation(creation_dict)
escalations.add_escalation(s)
# print("All escalations")
# for es in escalations:
# print(es)
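    # Explosion sketch for the method above: a Hostescalation with id 1 and
    # host_name "srv1" (hypothetical) becomes an Escalation named
    # "Generated-Hostescalation-1-srv1", carrying over every property set
    # on the original definition.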
# =====================================================================
# File: shinken-solutions_shinken/shinken/objects/service.py
# =====================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This Class is the service one, s it manage all service specific thing.
If you look at the scheduling part, look at the scheduling item class"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import itertools
import time
import uuid
import re
try:
from ClusterShell.NodeSet import NodeSet, NodeSetParseRangeError
except ImportError:
NodeSet = None
from shinken.objects.item import Items
from shinken.objects.schedulingitem import SchedulingItem
from shinken.autoslots import AutoSlots
from shinken.util import strip_and_uniq, format_t_into_dhms_format, to_svc_hst_distinct_lists, \
get_key_value_sequence, GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX, GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT, \
GET_KEY_VALUE_SEQUENCE_ERROR_NODE, to_list_string_of_names, to_list_of_names, to_name_if_possible, \
is_complex_expr
from shinken.property import BoolProp, IntegerProp, FloatProp,\
CharProp, StringProp, ListProp, DictProp
from shinken.macroresolver import MacroResolver
from shinken.eventhandler import EventHandler
from shinken.log import logger, naglog_result
from shinken.util import filter_service_by_regex_name
from shinken.util import filter_service_by_host_name
class Service(six.with_metaclass(AutoSlots, SchedulingItem)):
    # Every service has a unique ID, and 0 is always special in
    # database and co...
id = 1
    # The host and service do not share the same label for state 0: here it is 'OK' (for a host it is 'UP')
ok_up = 'OK'
# used by item class for format specific value like for Broks
my_type = 'service'
# properties defined by configuration
# required: is required in conf
# default: default value if no set in conf
# pythonize: function to call when transforming string to python object
# fill_brok: if set, send to broker. there are two categories:
# full_status for initial and update status, check_result for check results
# no_slots: do not take this property for __slots__
properties = SchedulingItem.properties.copy()
properties.update({
'host_name':
StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
'hostgroup_name':
StringProp(default='', fill_brok=['full_status'], merging='join'),
'service_description':
StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
'display_name':
StringProp(default='', fill_brok=['full_status'], no_slots=True),
'servicegroups':
ListProp(default=[], fill_brok=['full_status'],
brok_transformation=to_list_string_of_names, merging='join'),
'is_volatile':
BoolProp(default=False, fill_brok=['full_status']),
'check_command':
StringProp(fill_brok=['full_status']),
'initial_state':
CharProp(default='', fill_brok=['full_status']),
'initial_output':
StringProp(default='', fill_brok=['full_status']),
'max_check_attempts':
IntegerProp(default=1, fill_brok=['full_status']),
'check_interval':
IntegerProp(fill_brok=['full_status', 'check_result']),
'retry_interval':
IntegerProp(fill_brok=['full_status', 'check_result']),
'active_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'passive_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'check_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'obsess_over_service':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'check_freshness':
BoolProp(default=False, fill_brok=['full_status']),
'freshness_threshold':
IntegerProp(default=0, fill_brok=['full_status']),
'event_handler':
StringProp(default='', fill_brok=['full_status']),
'event_handler_enabled':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'low_flap_threshold':
IntegerProp(default=-1, fill_brok=['full_status']),
'high_flap_threshold':
IntegerProp(default=-1, fill_brok=['full_status']),
'flap_detection_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'flap_detection_options':
ListProp(default=['o', 'w', 'c', 'u'], fill_brok=['full_status'], split_on_coma=True),
'process_perf_data':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'retain_status_information':
BoolProp(default=True, fill_brok=['full_status']),
'retain_nonstatus_information':
BoolProp(default=True, fill_brok=['full_status']),
'notification_interval':
IntegerProp(default=60, fill_brok=['full_status']),
'first_notification_delay':
IntegerProp(default=0, fill_brok=['full_status']),
'notification_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'notification_options':
ListProp(default=['w', 'u', 'c', 'r', 'f', 's'],
fill_brok=['full_status'], split_on_coma=True),
'notifications_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'contacts':
ListProp(default=[], brok_transformation=to_list_of_names,
fill_brok=['full_status'], merging='join'),
'contact_groups':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'stalking_options':
ListProp(default=[''], fill_brok=['full_status'], merging='join'),
'notes':
StringProp(default='', fill_brok=['full_status']),
'notes_url':
StringProp(default='', fill_brok=['full_status']),
'action_url':
StringProp(default='', fill_brok=['full_status']),
'icon_image':
StringProp(default='', fill_brok=['full_status']),
'icon_image_alt':
StringProp(default='', fill_brok=['full_status']),
'icon_set':
StringProp(default='', fill_brok=['full_status']),
'failure_prediction_enabled':
BoolProp(default=False, fill_brok=['full_status']),
'parallelize_check':
BoolProp(default=True, fill_brok=['full_status']),
# Shinken specific
'poller_tag':
StringProp(default='None'),
'reactionner_tag':
StringProp(default='None'),
'resultmodulations':
ListProp(default=[], merging='join'),
'business_impact_modulations':
ListProp(default=[], merging='join'),
'escalations':
ListProp(default=[], fill_brok=['full_status'], merging='join', split_on_coma=True),
'maintenance_period':
StringProp(default='',
brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'time_to_orphanage':
IntegerProp(default=300, fill_brok=['full_status']),
'merge_host_contacts':
BoolProp(default=False, fill_brok=['full_status']),
'labels':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'host_dependency_enabled':
BoolProp(default=True, fill_brok=['full_status']),
# BUSINESS CORRELATOR PART
# Business rules output format template
'business_rule_output_template':
StringProp(default='', fill_brok=['full_status']),
# Business rules notifications mode
'business_rule_smart_notifications':
BoolProp(default=False, fill_brok=['full_status']),
# Treat downtimes as acknowledgements in smart notifications
'business_rule_downtime_as_ack':
BoolProp(default=False, fill_brok=['full_status']),
# Enforces child nodes notification options
'business_rule_host_notification_options':
ListProp(default=[], fill_brok=['full_status'], split_on_coma=True),
'business_rule_service_notification_options':
ListProp(default=[], fill_brok=['full_status'], split_on_coma=True),
# Easy Service dep definition
'service_dependencies': # TODO: find a way to brok it?
ListProp(default=None, merging='join', split_on_coma=True),
# service generator
'duplicate_foreach':
StringProp(default=''),
'default_value':
StringProp(default=''),
# Business_Impact value
'business_impact':
IntegerProp(default=2, fill_brok=['full_status']),
# Load some triggers
'trigger':
StringProp(default=''),
'trigger_name':
StringProp(default=''),
'trigger_broker_raise_enabled':
BoolProp(default=False),
# Trending
'trending_policies':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
        # Our check ways. By default void, but will be filled by an inner one if needed
'checkmodulations':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'macromodulations':
ListProp(default=[], merging='join'),
# Custom views
'custom_views':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
# UI aggregation
'aggregation':
StringProp(default='', fill_brok=['full_status']),
# Snapshot part
'snapshot_enabled':
BoolProp(default=False),
'snapshot_command':
StringProp(default=''),
'snapshot_period':
StringProp(default=''),
'snapshot_criteria':
ListProp(default=['w', 'c', 'u'], fill_brok=['full_status'], merging='join'),
'snapshot_interval':
IntegerProp(default=5),
# Maintenance part
'maintenance_check_command':
StringProp(default='', fill_brok=['full_status']),
'maintenance_period':
StringProp(default='', brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'maintenance_checks_enabled':
BoolProp(default=False, fill_brok=['full_status']),
'maintenance_check_period':
StringProp(default='', brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'maintenance_check_interval':
IntegerProp(default=0, fill_brok=['full_status', 'check_result']),
'maintenance_retry_interval':
IntegerProp(default=0, fill_brok=['full_status', 'check_result']),
# Check/notification priority
'priority':
IntegerProp(default=100, fill_brok=['full_status']),
})
# properties used in the running state
running_properties = SchedulingItem.running_properties.copy()
running_properties.update({
'modified_attributes':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'last_chk':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'next_chk':
IntegerProp(default=0, fill_brok=['full_status', 'next_schedule'], retention=True),
'in_maintenance':
IntegerProp(default=None, fill_brok=['full_status'], retention=True),
'latency':
FloatProp(default=0, fill_brok=['full_status', 'check_result'], retention=True,),
'attempt':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'state':
StringProp(default='PENDING',
fill_brok=['full_status', 'check_result'], retention=True),
'state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_event_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_event_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state':
StringProp(default='PENDING',
fill_brok=['full_status', 'check_result'], retention=True),
'last_state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'last_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state':
StringProp(default='PENDING', fill_brok=['full_status'], retention=True),
'last_hard_state_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'last_time_ok':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_warning':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_critical':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_unknown':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'duration_sec':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'state_type_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'long_output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'is_flapping':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
# dependencies for actions like notif of event handler,
# so AFTER check return
'act_depend_of':
ListProp(default=[]),
# dependencies for checks raise, so BEFORE checks
'chk_depend_of':
ListProp(default=[]),
# elements that depend of me, so the reverse than just upper
'act_depend_of_me':
ListProp(default=[]),
# elements that depend of me
'chk_depend_of_me':
ListProp(default=[]),
'last_state_update':
FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
# no brok because checks are too linked
'checks_in_progress':
ListProp(default=[]),
# no broks because notifications are too linked
'notifications_in_progress': DictProp(default={}, retention=True),
'downtimes':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'comments':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'flapping_changes':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'flapping_comment_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'percent_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'problem_has_been_acknowledged':
BoolProp(default=False, fill_brok=['full_status', 'check_result']),
'acknowledgement':
StringProp(default=None, retention=True),
'acknowledgement_type':
IntegerProp(default=1, fill_brok=['full_status', 'check_result'], retention=True),
'check_type':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'has_been_checked':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'should_be_scheduled':
IntegerProp(default=1, fill_brok=['full_status'], retention=True),
'last_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'execution_time':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'u_time':
FloatProp(default=0.0),
's_time':
FloatProp(default=0.0),
'last_notification':
FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
'current_notification_number':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'current_notification_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'check_flapping_recovery_notification':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'scheduled_downtime_depth':
IntegerProp(default=0, fill_brok=['full_status']),
'pending_flex_downtime':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'start_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'end_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'early_timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'return_code':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'perf_data':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'last_perf_data':
StringProp(default='', retention=True),
'host':
StringProp(default=None),
'customs':
DictProp(default={}, fill_brok=['full_status']),
# Warning: for the notified_contacts retention save,
# we save only the names of the contacts, and we should RELINK
# them when we load it.
# use for having all contacts we have notified
'notified_contacts': ListProp(default=set(),
retention=True,
retention_preparation=to_list_of_names),
'in_scheduled_downtime': BoolProp(
default=False, fill_brok=['full_status', 'check_result']),
'in_scheduled_downtime_during_last_check': BoolProp(default=False, retention=True),
'actions': ListProp(default=[]), # put here checks and notif raised
'broks': ListProp(default=[]), # and here broks raised
# Problem/impact part
'is_problem': BoolProp(default=False, fill_brok=['full_status']),
'is_impact': BoolProp(default=False, fill_brok=['full_status']),
# the save value of our business_impact for "problems"
'my_own_business_impact': IntegerProp(default=-1, fill_brok=['full_status']),
# list of problems that make us an impact
'source_problems': ListProp(default=[],
fill_brok=['full_status'],
brok_transformation=to_svc_hst_distinct_lists),
# list of the impact I'm the cause of
'impacts': ListProp(default=[],
fill_brok=['full_status'],
brok_transformation=to_svc_hst_distinct_lists),
# keep a trace of the old state before being an impact
'state_before_impact': StringProp(default='PENDING'),
# keep a trace of the old state id before being an impact
'state_id_before_impact': IntegerProp(default=0),
# if the state change, we know so we do not revert it
'state_changed_since_impact': BoolProp(default=False),
# BUSINESS CORRELATOR PART
# Say if we are business based rule or not
'got_business_rule': BoolProp(default=False, fill_brok=['full_status']),
# Previously processed business rule (with macro expanded)
'processed_business_rule': StringProp(default="", fill_brok=['full_status']),
# Our Dependency node for the business rule
'business_rule': StringProp(default=None),
# Here it's the elements we are depending on
# so our parents as network relation, or a host
# we are depending in a hostdependency
# or even if we are business based.
'parent_dependencies': StringProp(default=set(),
brok_transformation=to_svc_hst_distinct_lists,
fill_brok=['full_status']),
# Here it's the guys that depend on us. So it's the total
# opposite of the parent_dependencies
'child_dependencies': StringProp(brok_transformation=to_svc_hst_distinct_lists,
default=set(), fill_brok=['full_status']),
# Manage the unknown/unreach during hard state
'in_hard_unknown_reach_phase': BoolProp(default=False, retention=True),
'was_in_hard_unknown_reach_phase': BoolProp(default=False, retention=True),
'state_before_hard_unknown_reach_phase': StringProp(default='OK', retention=True),
# Set if the element just change its father/son topology
'topology_change': BoolProp(default=False, fill_brok=['full_status']),
# Trigger list
'triggers': ListProp(default=[]),
# snapshots part
'last_snapshot': IntegerProp(default=0, fill_brok=['full_status'], retention=True),
# Keep the string of the last command launched for this element
'last_check_command': StringProp(default=''),
# Maintenance states: PRODUCTION (0), MAINTENANCE (1), UNKNOWN (2)
'last_maintenance_chk':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'next_maintenance_chk':
IntegerProp(default=0, fill_brok=['full_status', 'next_schedule'], retention=True),
'maintenance_check_output':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'maintenance_state':
StringProp(default='PENDING', fill_brok=['full_status', 'check_result'], retention=True),
'maintenance_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_maintenance_state':
StringProp(default='PENDING', fill_brok=['full_status', 'check_result'], retention=True),
'last_maintenance_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_maintenance_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
})
# Mapping between Macros and properties (can be prop or a function)
macros = {
'SERVICEDESC': 'service_description',
'SERVICEDISPLAYNAME': 'display_name',
'SERVICESTATE': 'state',
'SERVICESTATEID': 'state_id',
'LASTSERVICESTATE': 'last_state',
'LASTSERVICESTATEID': 'last_state_id',
'SERVICESTATETYPE': 'state_type',
'SERVICEATTEMPT': 'attempt',
'MAXSERVICEATTEMPTS': 'max_check_attempts',
'SERVICEISVOLATILE': 'is_volatile',
'SERVICEEVENTID': 'current_event_id',
'LASTSERVICEEVENTID': 'last_event_id',
'SERVICEPROBLEMID': 'current_problem_id',
'LASTSERVICEPROBLEMID': 'last_problem_id',
'SERVICELATENCY': 'latency',
'SERVICEEXECUTIONTIME': 'execution_time',
'SERVICEDURATION': 'get_duration',
'SERVICEDURATIONSEC': 'get_duration_sec',
'SERVICEDOWNTIME': 'get_downtime',
'SERVICEPERCENTCHANGE': 'percent_state_change',
'SERVICEGROUPNAME': 'get_groupname',
'SERVICEGROUPNAMES': 'get_groupnames',
'LASTSERVICECHECK': 'last_chk',
'LASTSERVICESTATECHANGE': 'last_state_change',
'LASTSERVICEOK': 'last_time_ok',
'LASTSERVICEWARNING': 'last_time_warning',
'LASTSERVICEUNKNOWN': 'last_time_unknown',
'LASTSERVICECRITICAL': 'last_time_critical',
'SERVICEOUTPUT': 'output',
'LONGSERVICEOUTPUT': 'long_output',
'SERVICEPERFDATA': 'perf_data',
'LASTSERVICEPERFDATA': 'last_perf_data',
'SERVICECHECKCOMMAND': 'get_check_command',
'SERVICEACKAUTHOR': 'get_ack_author_name',
'SERVICEACKAUTHORNAME': 'get_ack_author_name',
'SERVICEACKAUTHORALIAS': 'get_ack_author_name',
'SERVICEACKCOMMENT': 'get_ack_comment',
'SERVICEACTIONURL': 'action_url',
'SERVICENOTESURL': 'notes_url',
'SERVICENOTES': 'notes',
'SERVICEBUSINESSIMPACT': 'business_impact',
# Business rules output formatting related macros
'STATUS': 'get_status',
'SHORTSTATUS': 'get_short_status',
'FULLNAME': 'get_full_name',
}
    # This table is used to transform old parameter names into new ones,
    # so from the Nagios2 format to the Nagios3 one,
    # or from Shinken deprecated names like criticity
old_properties = {
'normal_check_interval': 'check_interval',
'retry_check_interval': 'retry_interval',
'criticity': 'business_impact',
'hostgroup': 'hostgroup_name',
'hostgroups': 'hostgroup_name',
# 'criticitymodulations': 'business_impact_modulations',
}
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
def get_newid(self):
cls = self.__class__
value = uuid.uuid1().hex
cls.id += 1
return value
def __repr__(self):
return '<Service host_name=%r desc=%r name=%r use=%r />' % (
getattr(self, 'host_name', None),
getattr(self, 'service_description', None),
getattr(self, 'name', None),
getattr(self, 'use', None)
)
__str__ = __repr__
@property
def unique_key(self): # actually only used for (un)indexitem() via name_property..
return (self.host_name, self.service_description)
@property
def display_name(self):
display_name = getattr(self, '_display_name', None)
if not display_name:
return self.service_description
return display_name
@display_name.setter
def display_name(self, display_name):
self._display_name = display_name
# Give a nice name output
def get_name(self):
if hasattr(self, 'service_description'):
return self.service_description
if hasattr(self, 'name'):
return self.name
return 'SERVICE-DESCRIPTION-MISSING'
# Get the servicegroups names
def get_groupnames(self):
return ','.join([sg.get_name() for sg in self.servicegroups])
# Need the whole name for debugging purpose
def get_dbg_name(self):
return "%s/%s" % (self.host.host_name, self.service_description)
def get_full_name(self):
if self.host and hasattr(self.host, 'host_name') and hasattr(self, 'service_description'):
return "%s/%s" % (self.host.host_name, self.service_description)
return 'UNKNOWN-SERVICE'
# Get our realm, so in fact our host one
def get_realm(self):
if self.host is None:
return None
return self.host.get_realm()
def get_hostgroups(self):
return self.host.hostgroups
def get_host_tags(self):
return self.host.tags
def get_service_tags(self):
return self.tags
def is_duplicate(self):
"""
Indicates if a service holds a duplicate_foreach statement
"""
if getattr(self, "duplicate_foreach", None):
return True
else:
return False
def set_initial_state(self):
mapping = {
"o": {
"state": "OK",
"state_id": 0
},
"w": {
"state": "WARNING",
"state_id": 1
},
"c": {
"state": "CRITICAL",
"state_id": 2
},
"u": {
"state": "UNKNOWN",
"state_id": 3
},
}
SchedulingItem.set_initial_state(self, mapping)
    # Check if required props are set:
    # templates are always correct;
    # contacts OR contact_groups is needed
def is_correct(self):
state = True
cls = self.__class__
source = getattr(self, 'imported_from', 'unknown')
desc = getattr(self, 'service_description', 'unnamed')
hname = getattr(self, 'host_name', 'unnamed')
special_properties = ('check_period', 'notification_interval', 'host_name',
'hostgroup_name', 'notification_period')
for prop, entry in cls.properties.items():
if prop not in special_properties:
if not hasattr(self, prop) and entry.required:
logger.error("The service %s on host '%s' does not have %s", desc, hname, prop)
state = False # Bad boy...
        # Then look if we have some errors in the conf.
        # Just print warnings, but raise errors
for err in self.configuration_warnings:
logger.warning("[service::%s] %s", desc, err)
        # Raise all previously seen errors, like unknown contacts and co
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[service::%s] %s", self.get_full_name(), err)
        # If no notif period, set it to None, meaning 24x7
if not hasattr(self, 'notification_period'):
self.notification_period = None
# Ok now we manage special cases...
if self.notifications_enabled and self.contacts == []:
logger.warning("The service '%s' in the host '%s' does not have "
"contacts nor contact_groups in '%s'", desc, hname, source)
        # Set display_name if needed
if getattr(self, 'display_name', '') == '':
self.display_name = getattr(self, 'service_description', '')
# If we got an event handler, it should be valid
if getattr(self, 'event_handler', None) and not self.event_handler.is_valid():
logger.error("%s: my event_handler %s is invalid",
self.get_name(), self.event_handler.command)
state = False
if not hasattr(self, 'check_command'):
logger.error("%s: I've got no check_command", self.get_name())
state = False
# Ok got a command, but maybe it's invalid
else:
if not self.check_command.is_valid():
logger.error("%s: my check_command %s is invalid",
self.get_name(), self.check_command.command)
state = False
if self.got_business_rule:
if not self.business_rule.is_valid():
logger.error("%s: my business rule is invalid", self.get_name(),)
for bperror in self.business_rule.configuration_errors:
logger.error("%s: %s", self.get_name(), bperror)
state = False
if not hasattr(self, 'notification_interval') \
and self.notifications_enabled is True:
logger.error("%s: I've got no notification_interval but "
"I've got notifications enabled", self.get_name())
state = False
if not self.host_name:
logger.error("The service '%s' is not bound do any host.", desc)
state = False
elif self.host is None:
logger.error("The service '%s' got an unknown host_name '%s'.", desc, self.host_name)
state = False
if not hasattr(self, 'check_period'):
self.check_period = None
if hasattr(self, 'service_description'):
for c in cls.illegal_object_name_chars:
if c in self.service_description:
logger.error("%s: My service_description got the "
"character %s that is not allowed.", self.get_name(), c)
state = False
return state
    # The service depends on its father dep (its host).
    # Must be called AFTER linkify.
    # TODO: implement "not host dependent" feature.
def fill_daddy_dependency(self):
# Depend of host, all status, is a networkdep
# and do not have timeperiod, and follow parents dep
if self.host is not None and self.host_dependency_enabled:
# I add the dep in MY list
self.act_depend_of.append(
(self.host, ['d', 'u', 's', 'f'], 'network_dep', None, True)
)
# I add the dep in Daddy list
self.host.act_depend_of_me.append(
(self, ['d', 'u', 's', 'f'], 'network_dep', None, True)
)
# And the parent/child dep lists too
self.host.register_son_in_parent_child_dependencies(self)
# Register the dependency between 2 service for action (notification etc)
def add_service_act_dependency(self, srv, status, timeperiod, inherits_parent):
        # first I add the other one I depend on to MY list
self.act_depend_of.append((srv, status, 'logic_dep', timeperiod, inherits_parent))
# then I register myself in the other service dep list
srv.act_depend_of_me.append((self, status, 'logic_dep', timeperiod, inherits_parent))
# And the parent/child dep lists too
srv.register_son_in_parent_child_dependencies(self)
# Register the dependency between 2 service for action (notification etc)
# but based on a BUSINESS rule, so on fact:
# ERP depend on database, so we fill just database.act_depend_of_me
# because we will want ERP mails to go on! So call this
# on the database service with the srv=ERP service
def add_business_rule_act_dependency(self, srv, status, timeperiod, inherits_parent):
        # I only register so it knows that I WILL be an impact
self.act_depend_of_me.append((srv, status, 'business_dep',
timeperiod, inherits_parent))
# And the parent/child dep lists too
self.register_son_in_parent_child_dependencies(srv)
# Register the dependency between 2 service for checks
def add_service_chk_dependency(self, srv, status, timeperiod, inherits_parent):
        # first I add the other one I depend on to MY list
self.chk_depend_of.append((srv, status, 'logic_dep', timeperiod, inherits_parent))
# then I register myself in the other service dep list
srv.chk_depend_of_me.append(
(self, status, 'logic_dep', timeperiod, inherits_parent)
)
# And the parent/child dep lists too
srv.register_son_in_parent_child_dependencies(self)
def duplicate(self, host):
        ''' For a given host, look for all copies we must create for the duplicate_foreach property
:type host: shinken.objects.host.Host
:return Service
'''
# In macro, it's all in UPPER case
prop = self.duplicate_foreach.strip().upper()
if prop not in host.customs: # If I do not have the property, we bail out
return []
duplicates = []
# Get the list entry, and the not one if there is one
entry = host.customs[prop]
# Look at the list of the key we do NOT want maybe,
# for _disks it will be _!disks
not_entry = host.customs.get('_' + '!' + prop[1:], '').split(',')
not_keys = strip_and_uniq(not_entry)
default_value = getattr(self, 'default_value', '')
# Transform the generator string to a list
# Missing values are filled with the default value
(key_values, errcode) = get_key_value_sequence(entry, default_value)
if key_values:
for key_value in key_values:
key = key_value['KEY']
# Maybe this key is in the NOT list, if so, skip it
if key in not_keys:
continue
value = key_value['VALUE']
new_s = self.copy()
# The copied service is not a duplicate_foreach, but a final
# object
new_s.duplicate_foreach = ""
new_s.host_name = host.get_name()
if self.is_tpl(): # if template, the new one is not
new_s.register = 1
for key in key_value:
if key == 'KEY':
if hasattr(self, 'service_description'):
# We want to change all illegal chars to a _ sign.
# We can't use class.illegal_obj_char
# because in the "explode" phase, we do not have access to this data! :(
safe_key_value = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_',
key_value[key])
new_s.service_description = self.service_description.replace(
'$' + key + '$', safe_key_value
)
# Here is a list of property where we will expand the $KEY$ by the value
_the_expandables = ['check_command',
'display_name',
'aggregation',
'event_handler']
for prop in _the_expandables:
if hasattr(self, prop):
# here we can replace VALUE, VALUE1, VALUE2,...
setattr(new_s, prop, getattr(new_s, prop).replace('$' + key + '$',
key_value[key]))
if hasattr(self, 'service_dependencies'):
for i, sd in enumerate(new_s.service_dependencies):
new_s.service_dependencies[i] = sd.replace(
'$' + key + '$', key_value[key]
)
# And then add in our list this new service
duplicates.append(new_s)
else:
            # If error, we should link the error to the host, because self is
            # a template, and so won't be checked nor printed!
if errcode == GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX:
err = "The custom property '%s' of the host '%s' is not a valid entry %s for a service generator" % \
(self.duplicate_foreach.strip(), host.get_name(), entry)
logger.warning(err)
host.configuration_errors.append(err)
elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT:
err = "The custom property '%s 'of the host '%s' has empty " \
"values %s but the service %s has no default_value" % \
(self.duplicate_foreach.strip(),
host.get_name(), entry, self.service_description)
logger.warning(err)
host.configuration_errors.append(err)
elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODE:
err = "The custom property '%s' of the host '%s' has an invalid node range %s" % \
(self.duplicate_foreach.strip(), host.get_name(), entry)
logger.warning(err)
host.configuration_errors.append(err)
return duplicates
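    # Illustrative example for duplicate() above (assumed host custom macro,
    # echoing the _disks case mentioned in the comments): a host defining
    #   _disks  /,/var
    # combined with a service template using
    #   service_description  Disk-$KEY$
    #   duplicate_foreach    _disks
    # yields two concrete services on that host, "Disk-/" and "Disk-/var";
    # keys listed in a "_!disks" custom would be skipped.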
#####
# _
# (_)
# _ __ _ _ _ __ _ __ _ _ __ __ _
# | '__| | | | '_ \| '_ \| | '_ \ / _` |
# | | | |_| | | | | | | | | | | | (_| |
# |_| \__,_|_| |_|_| |_|_|_| |_|\__, |
# __/ |
# |___/
####
    # Set unreachable: our host is DOWN, but it means nothing for a service
def set_unreachable(self):
pass
    # We just got an impact, so we go unreachable,
    # but only if it's enabled in the configuration
def set_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change:
            # Keep a trace of the old state (in case the problem
            # goes away before new checks)
self.state_before_impact = self.state
self.state_id_before_impact = self.state_id
# this flag will know if we override the impact state
self.state_changed_since_impact = False
self.state = 'UNKNOWN' # exit code UNDETERMINED
self.state_id = 3
    # Ok, we are no more an impact. If no new checks
    # override the impact state, we come back to the old
    # states.
    # And only if we enabled the state change for impacts
def unset_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact:
self.state = self.state_before_impact
self.state_id = self.state_id_before_impact
# Set state with status return by the check
# and update flapping state
def set_state_from_exit_status(self, status):
now = time.time()
self.last_state_update = now
        # We should put the right last state in last_state:
        # if the state was not just changed by a problem/impact,
        # we can take the current state. But if it was, the
        # real old state is self.state_before_impact (the TRUE
        # state in fact),
        # but only if the global conf has enabled the impact state change
cls = self.__class__
if cls.enable_problem_impacts_states_change \
and self.is_impact \
and not self.state_changed_since_impact:
self.last_state = self.state_before_impact
else: # standard case
self.last_state = self.state
if status == 0:
self.state = 'OK'
self.state_id = 0
self.last_time_ok = int(self.last_state_update)
state_code = 'o'
elif status == 1:
self.state = 'WARNING'
self.state_id = 1
self.last_time_warning = int(self.last_state_update)
state_code = 'w'
elif status == 2:
self.state = 'CRITICAL'
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
state_code = 'c'
elif status == 3:
self.state = 'UNKNOWN'
self.state_id = 3
self.last_time_unknown = int(self.last_state_update)
state_code = 'u'
else:
self.state = 'CRITICAL' # exit code UNDETERMINED
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
state_code = 'c'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
if self.state != self.last_state:
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change
# Return True if status is the state (like OK) or small form like 'o'
def is_state(self, status):
if status == self.state:
return True
# Now low status
elif status == 'o' and self.state == 'OK':
return True
elif status == 'c' and self.state == 'CRITICAL':
return True
elif status == 'w' and self.state == 'WARNING':
return True
elif status == 'u' and self.state == 'UNKNOWN':
return True
return False
# The last time when the state was not OK
def last_time_non_ok_or_up(self):
non_ok_times = list(filter(
lambda x: x > self.last_time_ok,
[self.last_time_warning, self.last_time_critical, self.last_time_unknown]
))
if len(non_ok_times) == 0:
last_time_non_ok = 0 # program_start would be better
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok
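    # Worked example for the method above: with last_time_ok=100,
    # last_time_warning=150 and last_time_critical=last_time_unknown=0,
    # only 150 is newer than the last OK, so 150 is returned.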
# Add a log entry with a SERVICE ALERT like:
# SERVICE ALERT: server;Load;UNKNOWN;HARD;1;I don't know what to say...
def raise_alert_log_entry(self, check_variant=None):
if check_variant is None:
naglog_result('critical', 'SERVICE ALERT: %s;%s;%s;%s;%d;%s' % (
self.host.get_name(), self.get_name(), self.state,
self.state_type, self.attempt, self.output))
elif check_variant == "maintenance":
naglog_result('critical', 'SERVICE MAINTENANCE ALERT: %s;%s;%s;%s' % (
self.host.get_name(), self.get_name(), self.maintenance_state,
self.maintenance_check_output))
# If the configuration allow it, raise an initial log like
# CURRENT SERVICE STATE: server;Load;UNKNOWN;HARD;1;I don't know what to say...
def raise_initial_state(self):
if self.__class__.log_initial_states:
naglog_result('info', 'CURRENT SERVICE STATE: %s;%s;%s;%s;%d;%s'
% (self.host.get_name(), self.get_name(),
self.state, self.state_type, self.attempt, self.output))
# Add a log entry with a Freshness alert like:
# Warning: The results of host 'Server' are stale by 0d 0h 0m 58s (threshold=0d 1h 0m 0s).
# I'm forcing an immediate check of the host.
def raise_freshness_log_entry(self, t_stale_by, t_threshold):
logger.warning("The results of service '%s' on host '%s' are stale "
"by %s (threshold=%s). I'm forcing an immediate check "
"of the service.",
self.get_name(), self.host.get_name(),
format_t_into_dhms_format(t_stale_by),
format_t_into_dhms_format(t_threshold))
# Raise a log entry with a Notification alert like
# SERVICE NOTIFICATION: superadmin;server;Load;OK;notify-by-rss;no output
def raise_notification_log_entry(self, n):
contact = n.contact
command = n.command_call
if n.type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED',
'CUSTOM', 'ACKNOWLEDGEMENT', 'FLAPPINGSTART',
'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
state = '%s (%s)' % (n.type, self.state)
else:
state = self.state
if self.__class__.log_notifications:
naglog_result('critical', "SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s"
% (contact.get_name(),
self.host.get_name(), self.get_name(), state,
command.get_name(), self.output))
    # Raise a log entry with an event handler alert like
# SERVICE EVENT HANDLER: test_host_0;test_ok_0;OK;SOFT;4;eventhandler
def raise_event_handler_log_entry(self, command):
if self.__class__.log_event_handlers:
naglog_result('critical', "SERVICE EVENT HANDLER: %s;%s;%s;%s;%s;%s"
% (self.host.get_name(), self.get_name(),
self.state, self.state_type,
self.attempt, command.get_name()))
    # Raise a log entry with a snapshot alert like
    # SERVICE SNAPSHOT: test_host_0;test_ok_0;OK;SOFT;4;eventhandler
def raise_snapshot_log_entry(self, command):
if self.__class__.log_event_handlers:
naglog_result('critical', "SERVICE SNAPSHOT: %s;%s;%s;%s;%s;%s"
% (self.host.get_name(), self.get_name(),
self.state, self.state_type, self.attempt, command.get_name()))
# Raise a log entry with FLAPPING START alert like
# SERVICE FLAPPING ALERT: server;LOAD;STARTED;
# Service appears to have started flapping (50.6% change >= 50.0% threshold)
def raise_flapping_start_log_entry(self, change_ratio, threshold):
naglog_result('critical', "SERVICE FLAPPING ALERT: %s;%s;STARTED; "
"Service appears to have started flapping "
"(%.1f%% change >= %.1f%% threshold)"
% (self.host.get_name(), self.get_name(),
change_ratio, threshold))
# Raise a log entry with FLAPPING STOP alert like
# SERVICE FLAPPING ALERT: server;LOAD;STOPPED;
# Service appears to have stopped flapping (23.0% change < 25.0% threshold)
def raise_flapping_stop_log_entry(self, change_ratio, threshold):
naglog_result('critical', "SERVICE FLAPPING ALERT: %s;%s;STOPPED; "
"Service appears to have stopped flapping "
"(%.1f%% change < %.1f%% threshold)"
% (self.host.get_name(), self.get_name(),
change_ratio, threshold))
# If there is no valid time for next check, raise a log entry
def raise_no_next_check_log_entry(self):
logger.warning("I cannot schedule the check for the service '%s' on "
"host '%s' because there is not future valid time",
self.get_name(), self.host.get_name())
# Raise a log entry when a downtime begins
# SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STARTED;
# Service has entered a period of scheduled downtime
def raise_enter_downtime_log_entry(self):
naglog_result('critical', "SERVICE DOWNTIME ALERT: %s;%s;STARTED; "
"Service has entered a period of scheduled "
"downtime" % (self.host.get_name(), self.get_name()))
# Raise a log entry when a downtime has finished
# SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STOPPED;
# Service has exited from a period of scheduled downtime
def raise_exit_downtime_log_entry(self):
naglog_result('critical', "SERVICE DOWNTIME ALERT: %s;%s;STOPPED; Service "
"has exited from a period of scheduled downtime"
% (self.host.get_name(), self.get_name()))
# Raise a log entry when a downtime prematurely ends
# SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;CANCELLED;
    # Scheduled downtime for service has been cancelled.
def raise_cancel_downtime_log_entry(self):
naglog_result(
'critical', "SERVICE DOWNTIME ALERT: %s;%s;CANCELLED; "
"Scheduled downtime for service has been cancelled."
% (self.host.get_name(), self.get_name()))
    # Handle result stalking:
    # log the check output if the check is in 'waitconsume' (first pass)
    # and its exit status is in self.stalking_options
def manage_stalking(self, c):
need_stalk = False
if c.status == 'waitconsume':
if c.exit_status == 0 and 'o' in self.stalking_options:
need_stalk = True
elif c.exit_status == 1 and 'w' in self.stalking_options:
need_stalk = True
elif c.exit_status == 2 and 'c' in self.stalking_options:
need_stalk = True
elif c.exit_status == 3 and 'u' in self.stalking_options:
need_stalk = True
if c.output == self.output:
need_stalk = False
if need_stalk:
logger.info("Stalking %s: %s", self.get_name(), c.output)
    # Give data for checks' macros
def get_data_for_checks(self):
return [self.host, self]
    # Give data for event handlers' macros
def get_data_for_event_handler(self):
return [self.host, self]
    # Give data for notifications' macros
def get_data_for_notifications(self, contact, n):
return [self.host, self, contact, n]
# See if the notification is launchable (time is OK and contact is OK too)
def notification_is_blocked_by_contact(self, n, contact):
return not contact.want_service_notification(self.last_chk, self.state,
n.type, self.business_impact, n.command_call)
def get_duration_sec(self):
return str(int(self.duration_sec))
def get_duration(self):
m, s = divmod(self.duration_sec, 60)
h, m = divmod(m, 60)
return "%02dh %02dm %02ds" % (h, m, s)
def get_ack_author_name(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.author
def get_ack_comment(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.comment
def get_check_command(self):
return self.check_command.get_name()
# Check if a notification for this service is suppressed at this time
def notification_is_blocked_by_item(self, type, t_wished=None):
if t_wished is None:
t_wished = time.time()
# TODO
# forced notification
# pass if this is a custom notification
# Block if notifications are program-wide disabled
if not self.enable_notifications:
return True
# Does the notification period allow sending out this notification?
if self.notification_period is not None \
and not self.notification_period.is_time_valid(t_wished):
return True
# Block if notifications are disabled for this service
if not self.notifications_enabled:
return True
        # Block everything if notification_options contains 'n' (none);
        # otherwise the current status must be covered by w,u,c,r,f,s below
if 'n' in self.notification_options:
return True
if type in ('PROBLEM', 'RECOVERY'):
if self.state == 'UNKNOWN' and 'u' not in self.notification_options:
return True
if self.state == 'WARNING' and 'w' not in self.notification_options:
return True
if self.state == 'CRITICAL' and 'c' not in self.notification_options:
return True
if self.state == 'OK' and 'r' not in self.notification_options:
return True
if (type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED') and
'f' not in self.notification_options):
return True
if (type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED') and
's' not in self.notification_options):
return True
# Acknowledgements make no sense when the status is ok/up
if type == 'ACKNOWLEDGEMENT':
if self.state == self.ok_up:
return True
# When in downtime, only allow end-of-downtime notifications
if self.scheduled_downtime_depth > 1 and type not in ('DOWNTIMEEND', 'DOWNTIMECANCELLED'):
return True
# Block if host is in a scheduled downtime
if self.host.scheduled_downtime_depth > 0:
return True
# Block if in a scheduled downtime and a problem arises, or flapping event
if self.scheduled_downtime_depth > 0 and type in \
('PROBLEM', 'RECOVERY', 'FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return True
# Block if the status is SOFT
if self.state_type == 'SOFT' and type == 'PROBLEM':
return True
# Block if the problem has already been acknowledged
if self.problem_has_been_acknowledged and type != 'ACKNOWLEDGEMENT':
return True
# Block if flapping
if self.is_flapping and type not in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return True
# Block if host is down
if self.host.state != self.host.ok_up:
return True
        # Block if business rule smart notifications are enabled and all of
        # its children have been acknowledged or are under downtime.
if self.got_business_rule is True \
and self.business_rule_smart_notifications is True \
and self.business_rule_notification_is_blocked() is True \
and type == 'PROBLEM':
return True
return False
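    # Illustrative example (hypothetical state): a 'PROBLEM' notification
    # is blocked while state_type is 'SOFT', and a WARNING 'PROBLEM' is
    # blocked when 'w' is absent from notification_options.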
    # Get an oc*p command if the item has obsess_over_* set.
    # It must be enabled both locally and globally
def get_obsessive_compulsive_processor_command(self):
cls = self.__class__
if not cls.obsess_over or not self.obsess_over_service:
return
m = MacroResolver()
data = self.get_data_for_event_handler()
cmd = m.resolve_command(cls.ocsp_command, data)
e = EventHandler(cmd, timeout=cls.ocsp_timeout)
# ok we can put it in our temp action queue
self.actions.append(e)
def get_short_status(self):
mapping = {
0: "O",
1: "W",
2: "C",
3: "U",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return mapping.get(self.state_id, "n/a")
def get_status(self):
if self.got_business_rule:
mapping = {
0: "OK",
1: "WARNING",
2: "CRITICAL",
3: "UNKNOWN",
}
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return self.state
def get_downtime(self):
return str(self.scheduled_downtime_depth)
# Class for a list of services. It's mainly used for the configuration part
class Services(Items):
name_property = 'unique_key' # only used by (un)indexitem (via 'name_property')
    inner_class = Service  # used to know what is in items
def add_template(self, tpl):
"""
Adds and index a template into the `templates` container.
This implementation takes into account that a service has two naming
attribute: `host_name` and `service_description`.
:param tpl: The template to add
"""
objcls = self.inner_class.my_type
name = getattr(tpl, 'name', '')
hname = getattr(tpl, 'host_name', '')
if not name and not hname:
mesg = "a %s template has been defined without name nor " \
"host_name%s" % (objcls, self.get_source(tpl))
tpl.configuration_errors.append(mesg)
elif name:
tpl = self.index_template(tpl)
self.templates[tpl.id] = tpl
def add_item(self, item, index=True):
"""
Adds and index an item into the `items` container.
This implementation takes into account that a service has two naming
attribute: `host_name` and `service_description`.
:param item: The item to add
:param index: Flag indicating if the item should be indexed
"""
objcls = self.inner_class.my_type
hname = getattr(item, 'host_name', '')
hgname = getattr(item, 'hostgroup_name', '')
sdesc = getattr(item, 'service_description', '')
source = getattr(item, 'imported_from', 'unknown')
if source:
in_file = " in %s" % source
else:
in_file = ""
if not hname and not hgname:
mesg = "a %s has been defined without host_name nor " \
"hostgroups%s" % (objcls, in_file)
item.configuration_errors.append(mesg)
# Do not index `duplicate_foreach` services, they have to be expanded
# during `explode` phase, similarly to what's done with templates
if index is True and not item.is_duplicate():
if hname and sdesc:
item = self.index_item(item)
else:
mesg = "a %s has been defined without host_name nor " \
"service_description%s" % (objcls, in_file)
item.configuration_errors.append(mesg)
return
self.items[item.id] = item
# Inheritance for just a property
def apply_partial_inheritance(self, prop):
for i in itertools.chain(self.items.values(),
self.templates.values()):
i.get_property_by_inheritance(prop, 0)
# If a "null" attribute was inherited, delete it
try:
if getattr(i, prop) == 'null':
delattr(i, prop)
except AttributeError:
pass
def apply_inheritance(self):
""" For all items and templates inherite properties and custom
variables.
"""
# We check for all Class properties if the host has it
# if not, it check all host templates for a value
cls = self.inner_class
for prop in cls.properties:
self.apply_partial_inheritance(prop)
for i in itertools.chain(self.items.values(),
self.templates.values()):
i.get_customs_properties_by_inheritance(0)
def linkify_templates(self):
# First we create a list of all templates
for i in itertools.chain(self.items.values(),
self.templates.values()):
self.linkify_item_templates(i)
# Then we set the tags issued from the built templates
# for i in self:
for i in itertools.chain(self.items.values(),
self.templates.values()):
i.tags = self.get_all_tags(i)
# Search for all of the services in a host
def find_srvs_by_hostname(self, host_name):
if hasattr(self, 'hosts'):
h = self.hosts.find_by_name(host_name)
if h is None:
return None
return h.get_services()
return None
    # Search a service by its service_description and host_name
def find_srv_by_name_and_hostname(self, host_name, sdescr):
key = (host_name, sdescr)
return self.name_to_item.get(key, None)
# Make link between elements:
# service -> host
# service -> command
# service -> timeperiods
# service -> contacts
def linkify(self, hosts, commands, timeperiods, contacts,
resultmodulations, businessimpactmodulations, escalations,
servicegroups, triggers, checkmodulations, macromodulations):
self.linkify_with_timeperiods(timeperiods, 'notification_period')
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
self.linkify_with_timeperiods(timeperiods, 'snapshot_period')
self.linkify_with_timeperiods(timeperiods, 'maintenance_check_period')
self.linkify_s_by_hst(hosts)
self.linkify_s_by_sg(servicegroups)
self.linkify_one_command_with_commands(commands, 'check_command')
self.linkify_one_command_with_commands(commands, 'event_handler')
self.linkify_one_command_with_commands(commands, 'snapshot_command')
self.linkify_one_command_with_commands(commands, 'maintenance_check_command')
self.linkify_with_contacts(contacts)
self.linkify_with_resultmodulations(resultmodulations)
self.linkify_with_business_impact_modulations(businessimpactmodulations)
        # WARNING: not all escalations are linked here
        # (only plain escalations, not serviceescalations or hostescalations).
        # Those are linked in the escalations linkify.
self.linkify_with_escalations(escalations)
self.linkify_with_triggers(triggers)
self.linkify_with_checkmodulations(checkmodulations)
self.linkify_with_macromodulations(macromodulations)
def override_properties(self, hosts):
ovr_re = re.compile(r'^([^,]+),([^\s]+)\s+(.*)$')
ovr_hosts = [h for h in hosts if getattr(h, 'service_overrides', None)]
for host in ovr_hosts:
# We're only looking for hosts having service overrides defined
if isinstance(host.service_overrides, list):
service_overrides = host.service_overrides
else:
service_overrides = [host.service_overrides]
for ovr in service_overrides:
# Checks service override syntax
match = ovr_re.search(ovr)
if match is None:
err = "Error: invalid service override syntax: %s" % ovr
host.configuration_errors.append(err)
continue
sdescr, prop, value = match.groups()
# Checks if override is allowed
excludes = ['host_name', 'service_description', 'use',
'servicegroups', 'trigger', 'trigger_name']
if prop in excludes:
err = "Error: trying to override '%s', a forbidden property for service '%s'" % \
(prop, sdescr)
host.configuration_errors.append(err)
continue
# Looks for corresponding services
services = self.get_ovr_services_from_expression(host, sdescr)
if not services:
err = "Warn: trying to override property '%s' on " \
"service identified by '%s' " \
"but it's unknown for this host" % (prop, sdescr)
host.configuration_warnings.append(err)
continue
                # Pythonize the value, because here it is still a str
                value = Service.properties[prop].pythonize(value)
                for service in services:
                    setattr(service, prop, value)
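    # Illustrative example (hypothetical host definition): an entry like
    #   service_overrides  Load,notification_interval 15
    # is parsed by the regex above as ('Load', 'notification_interval', '15')
    # and sets notification_interval = 15 on this host's 'Load' service.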
def get_ovr_services_from_expression(self, host, sdesc):
hostname = getattr(host, "host_name", "")
if sdesc == "*":
filters = [filter_service_by_host_name(hostname)]
return self.find_by_filter(filters)
elif sdesc.startswith("r:"):
pattern = sdesc[2:]
filters = [
filter_service_by_host_name(hostname),
filter_service_by_regex_name(pattern)
]
return self.find_by_filter(filters)
else:
svc = self.find_srv_by_name_and_hostname(hostname, sdesc)
if svc is not None:
return [svc]
else:
return []
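    # Illustrative examples (hypothetical descriptions): sdesc == "*"
    # selects every service of the host, "r:^disk_" selects the services
    # whose description matches the regex '^disk_', and any other value
    # is looked up as an exact service_description.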
# We can link services with hosts so
# We can search in O(hosts) instead
# of O(services) for common cases
def optimize_service_search(self, hosts):
self.hosts = hosts
    # For each service, we look up its host by name, link them,
    # and inform the host that we are one of its services
def linkify_s_by_hst(self, hosts):
for s in self:
# If we do not have a host_name, we set it as
# a template element to delete. (like Nagios)
if not hasattr(s, 'host_name'):
s.host = None
continue
try:
hst_name = s.host_name
                # Find the host object from its name
hst = hosts.find_by_name(hst_name)
s.host = hst
# Let the host know we are his service
if s.host is not None:
hst.add_service_link(s)
                else:  # Ok, the host does not exist!
                    err = "Warning: the service '%s' got an invalid host_name '%s'" % \
                          (s.get_name(), hst_name)
                    s.configuration_warnings.append(err)
                    continue
            except AttributeError as exp:
                pass  # Will be caught at the is_correct step
# We look for servicegroups property in services and
# link them
def linkify_s_by_sg(self, servicegroups):
for s in self:
new_servicegroups = []
if hasattr(s, 'servicegroups') and s.servicegroups != '':
for sg_name in s.servicegroups:
sg_name = sg_name.strip()
sg = servicegroups.find_by_name(sg_name)
if sg is not None:
new_servicegroups.append(sg)
else:
err = "Error: the servicegroup '%s' of the service '%s' is unknown" %\
(sg_name, s.get_dbg_name())
s.configuration_errors.append(err)
s.servicegroups = new_servicegroups
# In the scheduler we need to relink the commandCall with
# the real commands
def late_linkify_s_by_commands(self, commands):
props = ['check_command', 'maintenance_check_command',
'event_handler', 'snapshot_command']
for s in self:
for prop in props:
cc = getattr(s, prop, None)
if cc:
cc.late_linkify_with_command(commands)
# Delete services by ids
def delete_services_by_id(self, ids):
for id in ids:
del self[id]
    # Apply implicit inheritance for special properties:
    # contact_groups, notification_interval, notification_period...
    # so the service takes the info from its host if necessary
def apply_implicit_inheritance(self, hosts):
for prop in ('contacts', 'contact_groups', 'notification_interval',
'notification_period', 'resultmodulations', 'business_impact_modulations',
'escalations', 'poller_tag', 'reactionner_tag', 'check_period',
'business_impact', 'maintenance_period', 'priority'):
for s in self:
if not hasattr(s, prop) and hasattr(s, 'host_name'):
h = hosts.find_by_name(s.host_name)
if h is not None and hasattr(h, prop):
setattr(s, prop, getattr(h, prop))
# Create dependencies for services (daddy ones)
def apply_dependencies(self):
for s in self:
s.fill_daddy_dependency()
def set_initial_state(self):
"""
Sets services initial state if required in configuration
"""
for s in self:
s.set_initial_state()
    # For services, the main clean is about services with bad hosts
def clean(self):
to_del = []
for s in self:
if not s.host:
to_del.append(s.id)
for sid in to_del:
del self.items[sid]
def explode_services_from_hosts(self, hosts, s, hnames):
"""
Explodes a service based on a lis of hosts.
:param hosts: The hosts container
:param s: The base service to explode
:param hnames: The host_name list to exlode sevice on
"""
duplicate_for_hosts = [] # get the list of our host_names if more than 1
not_hosts = [] # the list of !host_name so we remove them after
for hname in hnames:
hname = hname.strip()
# If the name begin with a !, we put it in
# the not list
if hname.startswith('!'):
not_hosts.append(hname[1:])
else: # the standard list
duplicate_for_hosts.append(hname)
# remove duplicate items from duplicate_for_hosts:
duplicate_for_hosts = list(set(duplicate_for_hosts))
        # Ok, now we clean duplicate_for_hosts of all the hosts
        # in the not list
        for hname in not_hosts:
            try:
                duplicate_for_hosts.remove(hname)
            except ValueError:
                # list.remove raises ValueError when the item is absent
                pass
# Now we duplicate the service for all host_names
for hname in duplicate_for_hosts:
h = hosts.find_by_name(hname)
if h is None:
err = 'Error: The hostname %s is unknown for the ' \
'service %s!' % (hname, s.get_name())
s.configuration_errors.append(err)
continue
if h.is_excluded_for(s):
continue
new_s = s.copy()
new_s.host_name = hname
self.add_item(new_s)
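    # Illustrative example (hypothetical names): with
    # hnames == ['web1', 'web2', '!web2'], 'web2' goes into not_hosts and
    # is then removed, so the service is only duplicated for 'web1'.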
def _local_create_service(self, hosts, host_name, service):
'''Create a new service based on a host_name and service instance.
:param hosts: The hosts items instance.
:type hosts: shinken.objects.host.Hosts
:param host_name: The host_name to create a new service.
:param service: The service to be used as template.
:type service: Service
:return: The new service created.
:rtype: Service
'''
h = hosts.find_by_name(host_name.strip())
if h.is_excluded_for(service):
return
# Creates concrete instance
new_s = service.copy()
new_s.host_name = host_name
new_s.register = 1
self.add_item(new_s)
return new_s
def explode_services_from_templates(self, hosts, service):
"""
Explodes services from templates. All hosts holding the specified
        templates are bound to the service.
:param hosts: The hosts container.
:type hosts: shinken.objects.host.Hosts
:param service: The service to explode.
:type service: Service
"""
hname = getattr(service, "host_name", None)
if not hname:
return
# Now really create the services
if is_complex_expr(hname):
hnames = self.evaluate_hostgroup_expression(
hname.strip(), hosts, hosts.templates, look_in='templates')
for name in hnames:
self._local_create_service(hosts, name, service)
else:
hnames = [n.strip() for n in hname.split(',') if n.strip()]
for hname in hnames:
for name in hosts.find_hosts_that_use_template(hname):
self._local_create_service(hosts, name, service)
def explode_services_duplicates(self, hosts, s):
"""
Explodes services holding a `duplicate_foreach` clause.
:param hosts: The hosts container
:param s: The service to explode
:type s: Service
"""
hname = getattr(s, "host_name", None)
if hname is None:
return
# the generator case, we must create several new services
# we must find our host, and get all key:value we need
h = hosts.find_by_name(hname.strip())
if h is None:
err = 'Error: The hostname %s is unknown for the ' \
'service %s!' % (hname, s.get_name())
s.configuration_errors.append(err)
return
# Duplicate services
for new_s in s.duplicate(h):
if h.is_excluded_for(new_s):
continue
# Adds concrete instance
self.add_item(new_s)
def register_service_into_servicegroups(self, s, servicegroups):
"""
Registers a service into the service groups declared in its
`servicegroups` attribute.
:param s: The service to register
:param servicegroups: The servicegroups container
"""
if hasattr(s, 'service_description'):
sname = s.service_description
shname = getattr(s, 'host_name', '')
if hasattr(s, 'servicegroups'):
# Todo: See if we can remove this if
if isinstance(s.servicegroups, list):
sgs = s.servicegroups
else:
sgs = s.servicegroups.split(',')
for sg in sgs:
servicegroups.add_member([shname, sname], sg.strip())
def register_service_dependencies(self, s, servicedependencies):
"""
Registers a service dependencies.
:param s: The service to register
:param servicedependencies: The servicedependencies container
"""
# We explode service_dependencies into Servicedependency
        # We just create the service dependencies with good values (as STRINGs!);
        # the linking pass is done afterwards
sdeps = [d.strip() for d in
getattr(s, "service_dependencies", [])]
        # even indexes (i % 2 == 0) are host names, odd ones are service descriptions
i = 0
hname = ''
for elt in sdeps:
if i % 2 == 0: # host
hname = elt
else: # description
desc = elt
# we can register it (s) (depend on) -> (hname, desc)
# If we do not have enough data for s, it's no use
if hasattr(s, 'service_description') and hasattr(s, 'host_name'):
if hname == '':
hname = s.host_name
servicedependencies.add_service_dependency(
s.host_name, s.service_description, hname, desc)
i += 1
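    # Illustrative example (hypothetical values): with
    # service_dependencies == ['dbhost', 'MySQL', '', 'HTTP'], the pairs
    # become (dbhost, MySQL) and (s.host_name, HTTP), since an empty host
    # name falls back to the service's own host_name.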
    # We create new services if necessary (host groups and co)
def explode(self, hosts, hostgroups, contactgroups,
servicegroups, servicedependencies, triggers):
"""
Explodes services, from host_name, hostgroup_name, and from templetes.
:param hosts: The hosts container
:param hostgroups: The hostgoups container
:param contactgroups: The concactgoups container
:param servicegroups: The servicegoups container
:param servicedependencies: The servicedependencies container
:param triggers: The triggers container
"""
# items::explode_trigger_string_into_triggers
self.explode_trigger_string_into_triggers(triggers)
for t in self.templates.values():
self.explode_contact_groups_into_contacts(t, contactgroups)
self.explode_services_from_templates(hosts, t)
# Then for every host create a copy of the service with just the host
# because we are adding services, we can't just loop in it
for s in list(self.items.values()):
# items::explode_host_groups_into_hosts
# take all hosts from our hostgroup_name into our host_name property
self.explode_host_groups_into_hosts(s, hosts, hostgroups)
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(s, contactgroups)
hnames = getattr(s, "host_name", '')
hnames = list(set([n.strip() for n in hnames.split(',') if n.strip()]))
# hnames = strip_and_uniq(hnames)
# We will duplicate if we have multiple host_name
# or if we are a template (so a clean service)
if len(hnames) == 1:
if not s.is_duplicate():
self.index_item(s)
else:
if len(hnames) >= 2:
self.explode_services_from_hosts(hosts, s, hnames)
# Delete expanded source service
if not s.configuration_errors:
self.remove_item(s)
# Explode services that have a duplicate_foreach clause
duplicates = [s.id for s in self if s.is_duplicate()]
for id in duplicates:
s = self.items[id]
self.explode_services_duplicates(hosts, s)
if not s.configuration_errors:
self.remove_item(s)
to_remove = []
for service in self:
host = hosts.find_by_name(service.host_name)
if host and host.is_excluded_for(service):
to_remove.append(service)
for service in to_remove:
self.remove_item(service)
        # The servicegroups property must be filled to get the information,
        # then we just register the service into its service groups
for s in self:
self.register_service_into_servicegroups(s, servicegroups)
self.register_service_dependencies(s, servicedependencies)
    # Will create all business rule trees for the services
def create_business_rules(self, hosts, services):
for s in self:
s.create_business_rules(hosts, services)
    # Will link all business services/hosts with their
    # dependencies for the problem/impact links
def create_business_rules_dependencies(self):
for s in self:
s.create_business_rules_dependencies()
| 82,867 | Python | .py | 1,681 | 37.660916 | 117 | 0.582088 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,516 | macromodulation.py | shinken-solutions_shinken/shinken/objects/macromodulation.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken.objects.item import Item, Items
from shinken.property import StringProp
from shinken.util import to_name_if_possible
from shinken.log import logger
class MacroModulation(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'macromodulation'
properties = Item.properties.copy()
properties.update({
'macromodulation_name': StringProp(fill_brok=['full_status']),
'modulation_period': StringProp(brok_transformation=to_name_if_possible,
fill_brok=['full_status']),
})
running_properties = Item.running_properties.copy()
_special_properties = ('modulation_period',)
macros = {}
# For debugging purpose only (nice name)
def get_name(self):
return self.macromodulation_name
# Will say if we are active or not
def is_active(self):
now = int(time.time())
if not self.modulation_period or self.modulation_period.is_time_valid(now):
return True
return False
    # Should have all required properties, or a void modulation_period
def is_correct(self):
state = True
cls = self.__class__
        # Raise all previously seen errors, like unknown commands or timeperiods
        if self.configuration_errors:
state = False
for err in self.configuration_errors:
logger.error("[item::%s] %s", self.get_name(), err)
for prop, entry in cls.properties.items():
if prop not in cls._special_properties:
if not hasattr(self, prop) and entry.required:
logger.warning(
"[macromodulation::%s] %s property not set", self.get_name(), prop
)
state = False # Bad boy...
# Ok just put None as modulation_period, means 24x7
if not hasattr(self, 'modulation_period'):
self.modulation_period = None
return state
class MacroModulations(Items):
name_property = "macromodulation_name"
inner_class = MacroModulation
def linkify(self, timeperiods):
self.linkify_with_timeperiods(timeperiods, 'modulation_period')
| 3,233 | Python | .py | 74 | 36.945946 | 90 | 0.676864 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,517 | escalation.py | shinken-solutions_shinken/shinken/objects/escalation.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item, Items
from shinken.util import strip_and_uniq
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
from shinken.log import logger
_special_properties = ('contacts', 'contact_groups',
'first_notification_time', 'last_notification_time')
_special_properties_time_based = ('contacts', 'contact_groups',
'first_notification', 'last_notification')
class Escalation(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'escalation'
properties = Item.properties.copy()
properties.update({
'escalation_name': StringProp(),
'first_notification': IntegerProp(),
'last_notification': IntegerProp(),
'first_notification_time': IntegerProp(),
'last_notification_time': IntegerProp(),
# by default don't use the notification_interval defined in
# the escalation, but the one defined by the object
'notification_interval': IntegerProp(default=-1),
'escalation_period': StringProp(default=''),
'escalation_options': ListProp(default=['d', 'u', 'r', 'w', 'c'], split_on_coma=True),
'contacts': ListProp(default=[], split_on_coma=True),
'contact_groups': ListProp(default=[], split_on_coma=True),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'time_based': BoolProp(default=False),
})
# For debugging purpose only (nice name)
def get_name(self):
return self.escalation_name
    # Return True if:
    # * the time is in escalation_period, or we do not have an escalation_period
    # * the status is in escalation_options
    # * the notification number is in our interval
    #   [first_notification .. last_notification] if we are a classic escalation
    # * if we are time based, we check that the time spent in notification
    #   is in our time interval
def is_eligible(self, t, status, notif_number, in_notif_time, interval):
small_states = {
'WARNING': 'w', 'UNKNOWN': 'u', 'CRITICAL': 'c',
'RECOVERY': 'r', 'FLAPPING': 'f', 'DOWNTIME': 's',
'DOWN': 'd', 'UNREACHABLE': 'u', 'OK': 'o', 'UP': 'o'
}
# If we are not time based, we check notification numbers:
if not self.time_based:
# Begin with the easy cases
if notif_number < self.first_notification:
return False
            # self.last_notification = 0 means no end
if self.last_notification != 0 and notif_number > self.last_notification:
return False
        # Else we are time based: we must check the elapsed notification time
else:
# Begin with the easy cases
if in_notif_time < self.first_notification_time * interval:
return False
            # self.last_notification_time = 0 means no end
if self.last_notification_time != 0 and \
in_notif_time > self.last_notification_time * interval:
return False
# If our status is not good, we bail out too
if status in small_states and small_states[status] not in self.escalation_options:
return False
# Maybe the time is not in our escalation_period
if self.escalation_period is not None and not self.escalation_period.is_time_valid(t):
return False
        # Ok, I see no reason not to escalate. So it's True :)
return True
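    # Illustrative example (hypothetical escalation): with time_based
    # False, first_notification=3, last_notification=5 and 'c' in
    # escalation_options, a CRITICAL notification number 4 inside the
    # escalation_period is eligible; number 6 is not.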
# t = the reference time
def get_next_notif_time(self, t_wished, status, creation_time, interval):
small_states = {'WARNING': 'w', 'UNKNOWN': 'u', 'CRITICAL': 'c',
'RECOVERY': 'r', 'FLAPPING': 'f', 'DOWNTIME': 's',
'DOWN': 'd', 'UNREACHABLE': 'u', 'OK': 'o', 'UP': 'o'}
# If we are not time based, we bail out!
if not self.time_based:
return None
# Check if we are valid
if status in small_states and small_states[status] not in self.escalation_options:
return None
# Look for the min of our future validity
start = self.first_notification_time * interval + creation_time
# If we are after the classic next time, we are not asking for a smaller interval
if start > t_wished:
return None
# Maybe the time we found is not a valid one....
if self.escalation_period is not None and not self.escalation_period.is_time_valid(start):
return None
# Ok so I ask for my start as a possibility for the next notification time
return start
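    # Illustrative example (hypothetical values): with
    # first_notification_time=30, interval=60 and creation_time=1000,
    # start == 30 * 60 + 1000 == 2800; it is returned only if it does not
    # exceed t_wished and is valid in the escalation_period.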
    # Check if the required properties are set:
    # templates are always correct
    # contacts OR contact_groups is needed
def is_correct(self):
state = True
cls = self.__class__
        # If we got the _time parameters, we are time based. Otherwise, we are not :)
if hasattr(self, 'first_notification_time') or hasattr(self, 'last_notification_time'):
self.time_based = True
special_properties = _special_properties_time_based
else: # classic ones
special_properties = _special_properties
for prop, entry in cls.properties.items():
if prop not in special_properties:
if not hasattr(self, prop) and entry.required:
logger.info('%s: I do not have %s', self.get_name(), prop)
state = False # Bad boy...
        # Raise all previously seen errors, like unknown contacts and co
        if self.configuration_errors:
state = False
for err in self.configuration_errors:
logger.info(err)
# Ok now we manage special cases...
if not hasattr(self, 'contacts') and not hasattr(self, 'contact_groups'):
logger.info('%s: I do not have contacts nor contact_groups', self.get_name())
state = False
        # Depending on time_based, we do not check the same properties
if self.time_based:
if not hasattr(self, 'first_notification_time'):
logger.info('%s: I do not have first_notification_time', self.get_name())
state = False
if not hasattr(self, 'last_notification_time'):
logger.info('%s: I do not have last_notification_time', self.get_name())
state = False
else: # we check classical properties
if not hasattr(self, 'first_notification'):
logger.info('%s: I do not have first_notification', self.get_name())
state = False
if not hasattr(self, 'last_notification'):
logger.info('%s: I do not have last_notification', self.get_name())
state = False
return state
class Escalations(Items):
name_property = "escalation_name"
inner_class = Escalation
def linkify(self, timeperiods, contacts, services, hosts):
self.linkify_with_timeperiods(timeperiods, 'escalation_period')
self.linkify_with_contacts(contacts)
self.linkify_es_by_s(services)
self.linkify_es_by_h(hosts)
def add_escalation(self, es):
self.add_item(es)
# Will register escalations into service.escalations
def linkify_es_by_s(self, services):
for es in self:
# If no host, no hope of having a service
if not (hasattr(es, 'host_name') and hasattr(es, 'service_description')):
continue
es_hname, sdesc = es.host_name, es.service_description
if '' in (es_hname.strip(), sdesc.strip()):
continue
for hname in strip_and_uniq(es_hname.split(',')):
if sdesc.strip() == '*':
slist = services.find_srvs_by_hostname(hname)
if slist is not None:
for s in slist:
s.escalations.append(es)
else:
for sname in strip_and_uniq(sdesc.split(',')):
s = services.find_srv_by_name_and_hostname(hname, sname)
if s is not None:
# print("Linking service", s.get_name(), 'with me', es.get_name())
s.escalations.append(es)
# print("Now service", s.get_name(), 'have', s.escalations)
# Will register escalations into host.escalations
def linkify_es_by_h(self, hosts):
for es in self:
# If no host, no hope of having a service
if (not hasattr(es, 'host_name') or es.host_name.strip() == '' or
(hasattr(es, 'service_description') and
es.service_description.strip() != '')):
continue
            # I must NOT be an escalation for a service
for hname in strip_and_uniq(es.host_name.split(',')):
h = hosts.find_by_name(hname)
if h is not None:
# print("Linking host", h.get_name(), 'with me', es.get_name())
h.escalations.append(es)
# print("Now host", h.get_name(), 'have', h.escalations)
    # Explode hostgroups into hosts and contact_groups into contacts
def explode(self, hosts, hostgroups, contactgroups):
for i in self:
# items::explode_host_groups_into_hosts
# take all hosts from our hostgroup_name into our host_name property
self.explode_host_groups_into_hosts(i, hosts, hostgroups)
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(i, contactgroups)
| 10,864 | Python | .py | 213 | 40.41784 | 98 | 0.609048 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,518 | hostgroup.py | shinken-solutions_shinken/shinken/objects/hostgroup.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.itemgroup import Itemgroup, Itemgroups
from shinken.util import get_obj_name
from shinken.property import StringProp, IntegerProp
from shinken.log import logger
class Hostgroup(Itemgroup):
id = 1 # zero is always a little bit special... like in database
my_type = 'hostgroup'
properties = Itemgroup.properties.copy()
properties.update({
'id': IntegerProp(default=0, fill_brok=['full_status']),
'hostgroup_name': StringProp(fill_brok=['full_status']),
'hostgroup_members': StringProp(fill_brok=['full_status']),
'alias': StringProp(fill_brok=['full_status']),
'notes': StringProp(default='', fill_brok=['full_status']),
'notes_url': StringProp(default='', fill_brok=['full_status']),
'action_url': StringProp(default='', fill_brok=['full_status']),
'realm': StringProp(default='', fill_brok=['full_status'], conf_send_preparation=get_obj_name),
})
macros = {
'HOSTGROUPALIAS': 'alias',
'HOSTGROUPMEMBERS': 'members',
'HOSTGROUPNOTES': 'notes',
'HOSTGROUPNOTESURL': 'notes_url',
'HOSTGROUPACTIONURL': 'action_url'
}
def get_name(self):
return self.hostgroup_name
def get_hosts(self):
if getattr(self, 'members', None) is not None:
return self.members
else:
return []
def get_hostgroup_members(self):
if self.has('hostgroup_members'):
return [m.strip() for m in self.hostgroup_members.split(',')]
else:
return []
    # We fulfill properties with template ones if needed.
    # Because the hostgroup we call may not have its members yet,
    # we call get_hosts_by_explosion on it
def get_hosts_by_explosion(self, hostgroups):
        # First we tag the hg so it will not be exploded
        # again if one of its sons calls it back
        self.already_explode = True
        # Now the recursive part:
        # rec_tag is set to False for every HG we explode,
        # so if it is True here, there must be a loop in the
        # HG calls... not GOOD!
if self.rec_tag:
logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
return self.get_hosts()
# Ok, not a loop, we tag it and continue
self.rec_tag = True
hg_mbrs = self.get_hostgroup_members()
for hg_mbr in hg_mbrs:
hg = hostgroups.find_by_name(hg_mbr.strip())
if hg is not None:
value = hg.get_hosts_by_explosion(hostgroups)
if value is not None:
self.add_string_member(value)
return self.get_hosts()
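    # Illustrative example (hypothetical groups): if group 'all' lists
    # hostgroup_members 'linux', the hosts of 'linux' get merged into
    # 'all'; a cycle like 'all' -> 'linux' -> 'all' is caught by rec_tag
    # and logged as a hostgroup definition loop.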
class Hostgroups(Itemgroups):
name_property = "hostgroup_name" # is used for finding hostgroups
inner_class = Hostgroup
def get_members_by_name(self, hgname):
hg = self.find_by_name(hgname)
if hg is None:
return []
return hg.get_hosts()
def linkify(self, hosts=None, realms=None):
self.linkify_hg_by_hst(hosts)
self.linkify_hg_by_realms(realms)
# We just search for each hostgroup the id of the hosts
# and replace the name by the id
def linkify_hg_by_hst(self, hosts):
for hg in self:
mbrs = hg.get_hosts()
# The new member list, in id
new_mbrs = []
for mbr in mbrs:
mbr = mbr.strip() # protect with strip at the begining so don't care about spaces
if mbr == '': # void entry, skip this
continue
elif mbr == '*':
new_mbrs.extend(hosts)
else:
h = hosts.find_by_name(mbr)
if h is not None:
new_mbrs.append(h)
else:
hg.add_string_unknown_member(mbr)
# Make members uniq
new_mbrs = list(set(new_mbrs))
# We find the id, we replace the names
hg.replace_members(new_mbrs)
# Now register us in our members
for h in hg.members:
h.hostgroups.append(hg)
# and be sure we are uniq in it
h.hostgroups = list(set(h.hostgroups))
    # More than an explode function: we need to already have the
    # members, so the real linkify happens just after.
    # We explode the realm into our members, but do not override
    # a host realm value if it's already set
def linkify_hg_by_realms(self, realms):
# Now we explode the realm value if we've got one
# The group realm must not override a host one (warning?)
for hg in self:
if not hasattr(hg, 'realm'):
continue
# Maybe the value is void?
if not hg.realm.strip():
continue
r = realms.find_by_name(hg.realm.strip())
if r is not None:
hg.realm = r
logger.debug("[hostgroups] %s is in %s realm", hg.get_name(), r.get_name())
else:
err = "the hostgroup %s got an unknown realm '%s'" % (hg.get_name(), hg.realm)
hg.configuration_errors.append(err)
hg.realm = None
continue
for h in hg:
if h is None:
continue
            if h.realm is None or h.got_default_realm:  # host still carries the default realm
logger.debug("[hostgroups] apply a realm %s to host %s from a hostgroup "
"rule (%s)", hg.realm.get_name(), h.get_name(), hg.get_name())
h.realm = hg.realm
else:
if h.realm != hg.realm:
logger.warning("[hostgroups] host %s it not in the same realm than it's "
"hostgroup %s", h.get_name(), hg.get_name())
    # Add a host string to a hostgroup's members;
    # if the hostgroup does not exist, create it
def add_member(self, hname, hgname):
hg = self.find_by_name(hgname)
        # if the hostgroup does not exist, create it
if hg is None:
hg = Hostgroup({'hostgroup_name': hgname, 'alias': hgname, 'members': hname})
self.add(hg)
else:
hg.add_string_member(hname)
    # Used to fill members with hostgroup_members
def explode(self):
        # We do not want the same hg to be exploded again and again,
        # so we tag it
for tmp_hg in self.items.values():
tmp_hg.already_explode = False
for hg in self.items.values():
if hg.has('hostgroup_members') and not hg.already_explode:
# get_hosts_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for tmp_hg in self.items.values():
tmp_hg.rec_tag = False
hg.get_hosts_by_explosion(self)
# We clean the tags
for tmp_hg in self.items.values():
if hasattr(tmp_hg, 'rec_tag'):
del tmp_hg.rec_tag
del tmp_hg.already_explode
| 8,274 | Python | .py | 184 | 34.630435 | 118 | 0.582795 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,519 | receiverlink.py | shinken-solutions_shinken/shinken/objects/receiverlink.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.satellitelink import SatelliteLink, SatelliteLinks
from shinken.property import BoolProp, IntegerProp, StringProp
from shinken.log import logger
from shinken.http_client import HTTPException
from shinken.serializer import serialize
class ReceiverLink(SatelliteLink):
"""Please Add a Docstring to describe the class here"""
id = 0
my_type = 'receiver'
properties = SatelliteLink.properties.copy()
properties.update({
'receiver_name': StringProp(fill_brok=['full_status'], to_send=True),
'port': IntegerProp(default=7772, fill_brok=['full_status']),
'manage_sub_realms': BoolProp(default=True, fill_brok=['full_status']),
'manage_arbiters': BoolProp(default=False, fill_brok=['full_status'], to_send=True),
'direct_routing': BoolProp(default=False, fill_brok=['full_status'], to_send=True),
'accept_passive_unknown_check_results': BoolProp(default=False,
fill_brok=['full_status'], to_send=True),
'harakiri_threshold': StringProp(default=None, fill_brok=['full_status'], to_send=True),
})
def get_name(self):
return self.receiver_name
def register_to_my_realm(self):
self.realm.receivers.append(self)
def push_host_names(self, data):
sched_id = data["sched_id"]
hnames = data["hnames"]
try:
if self.con is None:
self.create_connection()
logger.info(" (%s)", self.uri)
# If the connection failed to initialize, bail out
if self.con is None:
self.add_failed_check_attempt()
return
# r = self.con.push_host_names(sched_id, hnames)
self.con.get('ping')
self.con.put(
'push_host_names',
serialize({'sched_id': sched_id, 'hnames': hnames}),
wait='long'
)
except HTTPException as exp:
self.add_failed_check_attempt(reason=str(exp))
class ReceiverLinks(SatelliteLinks):
"""Please Add a Docstring to describe the class here"""
name_property = "receiver_name"
inner_class = ReceiverLink
| 3,259 | Python | .py | 71 | 38.873239 | 98 | 0.663725 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,520 | matchingitem.py | shinken-solutions_shinken/shinken/objects/matchingitem.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
'''
This is a utility class for factorizing matching functions for
discovery runners and rules.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.item import Item
import re
class MatchingItem(Item):
    # Try to see if the key/value pair matches one of
    # our rules. If the value contains ',' we must check each value.
    # If one matches, we quit.
    # We can look in matches or not_matches
def is_matching(self, key, value, look_in='matches'):
if look_in == 'matches':
d = self.matches
else:
d = self.not_matches
        # If we do not even have the key, we bail out
if not key.strip() in d:
return False
# Get my matching pattern
m = d[key]
if ',' in m:
matchings = [mt.strip() for mt in m.split(',')]
else:
matchings = [m]
# Split the value by , too
values = value.split(',')
for m in matchings:
for v in values:
print("Try to match", m, v)
# Maybe m is a list, if so should check one values
if isinstance(m, list):
for _m in m:
if re.search(_m, v):
return True
else:
if re.search(m, v):
return True
return False
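    # Illustrative example (hypothetical rule): with
    # self.matches == {'os': 'linux,bsd'}, is_matching('os', 'linux')
    # returns True because the pattern 'linux' matches one of the
    # comma-separated values.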
    # Look if we match all the discovery data or not.
    # Discovery data looks like a dict of (key, values)
    def is_matching_disco_datas(self, datas):
        # If we got no data, there is no way we can match
if len(datas) == 0:
return False
# First we look if it's possible to match
# we must match All self.matches things
for m in self.matches:
# print("Compare to", m)
match_one = False
for (k, v) in datas.items():
# We found at least one of our match key
if m == k:
if self.is_matching(k, v):
# print("Got matching with", m, k, v)
match_one = True
continue
if not match_one:
# It match none
# print("Match none, False")
return False
# print("It's possible to be OK")
        # And now look if ANY of the not_matches is reached. If so,
        # it's False
for m in self.not_matches:
# print("Compare to NOT", m)
match_one = False
for (k, v) in datas.items():
# print("K,V", k,v)
# We found at least one of our match key
if m == k:
# print("Go loop")
if self.is_matching(k, v, look_in='not_matches'):
# print("Got matching with", m, k, v)
match_one = True
continue
if match_one:
# print("I match one, I quit")
return False
# Ok we match ALL rules in self.matches
# and NONE of self.not_matches, we can go :)
return True
| 4,092 | Python | .py | 105 | 28.790476 | 82 | 0.558017 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,521 | serviceextinfo.py | shinken-solutions_shinken/shinken/objects/serviceextinfo.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This is the main class for the Service ext info. In fact it's mainly
about the configuration part. Parameters are merged into Service, so it is
of no use in the running part
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from shinken.objects.item import Item, Items
from shinken.autoslots import AutoSlots
from shinken.property import StringProp, ListProp
class ServiceExtInfo(six.with_metaclass(AutoSlots, Item)):
id = 1 # zero is reserved for host (primary node for parents)
my_type = 'serviceextinfo'
# properties defined by configuration
# *required: is required in conf
# *default: default value if no set in conf
# *pythonize: function to call when transforming string to python object
# *fill_brok: if set, send to broker. there are two categories:
# full_status for initial and update status, check_result for check results
# *no_slots: do not take this property for __slots__
# Only for the initial call
# conf_send_preparation: if set, will pass the property to this function. It's used to "flatten"
# some dangerous properties like realms that are too 'linked' to be send like that.
# brok_transformation: if set, will call the function with the value of the property
# the major times it will be to flatten the data (like realm_name instead of the realm object).
properties = Item.properties.copy()
properties.update({
'host_name': StringProp(),
'service_description': StringProp(),
'notes': StringProp(default=''),
'notes_url': StringProp(default=''),
'icon_image': StringProp(default=''),
'icon_image_alt': StringProp(default=''),
})
# Hosts macros and prop that give the information
# the prop can be callable or not
macros = {
'SERVICEDESC': 'service_description',
'SERVICEACTIONURL': 'action_url',
'SERVICENOTESURL': 'notes_url',
'SERVICENOTES': 'notes'
}
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
    # Check if the required properties are set:
    # host_name is needed
def is_correct(self):
state = True
cls = self.__class__
return state
    # Get a nice name
def get_name(self):
if not self.is_tpl():
try:
return self.host_name
            except AttributeError:  # ouch, no host_name
return 'UNNAMEDHOST'
else:
try:
return self.name
            except AttributeError:  # ouch, no name for this template
return 'UNNAMEDHOSTTEMPLATE'
# For debugging purpose only
def get_dbg_name(self):
return self.host_name
# Same but for clean call, no debug
def get_full_name(self):
return self.host_name
# Class for the service ext info lists. It's mainly for the
# configuration part
class ServicesExtInfo(Items):
name_property = "host_name" # use for the search by name
inner_class = ServiceExtInfo # use for know what is in items
    # Merge extended service information into services
def merge(self, services):
for ei in self:
if hasattr(ei, 'register') and not getattr(ei, 'register'):
# We don't have to merge template
continue
hosts_names = ei.get_name().split(",")
for host_name in hosts_names:
s = services.find_srv_by_name_and_hostname(host_name, ei.service_description)
if s is not None:
# FUUUUUUUUUUsion
self.merge_extinfo(s, ei)
def merge_extinfo(self, service, extinfo):
properties = ['notes', 'notes_url', 'icon_image', 'icon_image_alt']
# service properties have precedence over serviceextinfo properties
for p in properties:
if getattr(service, p) == '' and getattr(extinfo, p) != '':
setattr(service, p, getattr(extinfo, p))
| 5,355 | Python | .py | 120 | 38.55 | 100 | 0.604143 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,522 | timeperiod.py | shinken-solutions_shinken/shinken/objects/timeperiod.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Calendar date
# -------------
# r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 8 => CALENDAR_DATE
#
# r'(\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 5 => CALENDAR_DATE
#
# r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 7 => CALENDAR_DATE
#
# r'(\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 4 => CALENDAR_DATE
#
# Month week day
# --------------
# r'([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) / (\d+) ([0-9:, -]+)'
# => len = 8 => MONTH WEEK DAY
# e.g.: wednesday 1 january - thursday 2 july / 3
#
# r'([a-z]*) (\d+) - ([a-z]*) (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - march 15 / 3 => MONTH DATE
# e.g.: monday 2 - thursday 3 / 2 => WEEK DAY
# e.g.: day 2 - day 6 / 3 => MONTH DAY
#
# '([a-z]*) (\d+) - (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - 15 / 3 => MONTH DATE
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: day 1 - 4 => MONTH DAY
#
# r'([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 7
# e.g.: wednesday 1 january - thursday 2 july => MONTH WEEK DAY
#
# r'([a-z]*) (\d+) - (\d+) ([0-9:, -]+)' => len = 4
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: february 1 - 15 => MONTH DATE
# e.g.: day 1 - 4 => MONTH DAY
#
# r'([a-z]*) (\d+) - ([a-z]*) (\d+) ([0-9:, -]+)' => len = 5
# e.g.: february 1 - march 15 => MONTH DATE
# e.g.: monday 2 - thursday 3 => WEEK DAY
# e.g.: day 2 - day 6 => MONTH DAY
#
# r'([a-z]*) (\d+) ([0-9:, -]+)' => len = 3
# e.g.: february 3 => MONTH DATE
# e.g.: thursday 2 => WEEK DAY
# e.g.: day 3 => MONTH DAY
#
# r'([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 4
# e.g.: thursday 3 february => MONTH WEEK DAY
#
# r'([a-z]*) ([0-9:, -]+)' => len = 2
# e.g.: thursday => normal values
#
# Types: CALENDAR_DATE
# MONTH WEEK DAY
# WEEK DAY
# MONTH DATE
# MONTH DAY
#
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import re
from shinken.objects.item import Item, Items
from shinken.daterange import Daterange, CalendarDaterange
from shinken.daterange import StandardDaterange, MonthWeekDayDaterange
from shinken.daterange import MonthDateDaterange, WeekDayDaterange
from shinken.daterange import MonthDayDaterange
from shinken.brok import Brok
from shinken.property import IntegerProp, StringProp, ListProp, BoolProp
from shinken.log import logger, naglog_result
class Timeperiod(Item):
id = 1
my_type = 'timeperiod'
properties = Item.properties.copy()
properties.update({
'timeperiod_name': StringProp(fill_brok=['full_status']),
'alias': StringProp(default='', fill_brok=['full_status']),
'use': StringProp(default=None),
'register': IntegerProp(default=1),
# These are needed if a broker module calls methods on timeperiod objects
'dateranges': ListProp(fill_brok=['full_status'], default=[]),
'exclude': ListProp(fill_brok=['full_status'], default=[]),
'is_active': BoolProp(default=False)
})
running_properties = Item.running_properties.copy()
def __init__(self, params={}):
self.id = Timeperiod.id
Timeperiod.id = Timeperiod.id + 1
self.unresolved = []
self.dateranges = []
self.exclude = ''
self.customs = {}
self.plus = {}
self.invalid_entries = []
for key in params:
            # timeperiod objects are too complicated to support multi-valued
            # attributes; we do as usual, the last set value wins.
if isinstance(params[key], list):
if params[key]:
params[key] = params[key][-1]
else:
params[key] = ''
if key in [
'name', 'alias', 'timeperiod_name', 'exclude',
'use', 'register', 'imported_from', 'is_active', 'dateranges']:
setattr(self, key, self.properties[key].pythonize(params[key]))
elif key.startswith('_'):
self.customs[key.upper()] = params[key]
else:
self.unresolved.append(key + ' ' + params[key])
        self.cache = {}  # For tuning purposes only
        self.invalid_cache = {}  # same, but for invalid searches
self.configuration_errors = []
self.configuration_warnings = []
        # By default is_active is None, so we know we just started
self.is_active = None
self.tags = set()
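    # For illustration only (hypothetical config, not part of this file):
    # a classic Nagios-style definition this class parses looks like
    #   define timeperiod {
    #       timeperiod_name  workhours
    #       alias            Normal Work Hours
    #       monday           09:00-17:00
    #       tuesday          09:00-17:00
    #       exclude          holidays
    #   }
    # The 'monday 09:00-17:00' lines land in self.unresolved and are later
    # turned into Daterange objects by resolve_daterange().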
def get_name(self):
return getattr(self, 'timeperiod_name', 'unknown_timeperiod')
    # We fill properties with the template ones if needed,
    # for the unresolved values (like 'sunday ...')
def get_unresolved_properties_by_inheritance(self, items):
# Ok, I do not have prop, Maybe my templates do?
# Same story for plus
for i in self.templates:
self.unresolved.extend(i.unresolved)
    # Timeperiods are a bit different from classic items, because we do not
    # have a real list of our raw properties (like 'february 1 - 15 / 3')
def get_raw_import_values(self):
properties = ['timeperiod_name', 'alias', 'use', 'register']
r = {}
for prop in properties:
if hasattr(self, prop):
v = getattr(self, prop)
print(prop, ":", v)
r[prop] = v
        # Now the unresolved ones. The only way to get rid of duplicate keys
        # is to use the full value directly as the key
for other in self.unresolved:
r[other] = ''
return r
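    # A time t is valid if it falls inside at least one of our dateranges
    # and inside none of the excluded timeperiods.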
def is_time_valid(self, t):
if self.has('exclude'):
for dr in self.exclude:
if dr.is_time_valid(t):
return False
for dr in self.dateranges:
if dr.is_time_valid(t):
return True
return False
# will give the first time > t which is valid
def get_min_from_t(self, t):
mins_incl = []
for dr in self.dateranges:
mins_incl.append(dr.get_min_from_t(t))
return min(mins_incl)
# will give the first time > t which is not valid
def get_not_in_min_from_t(self, f):
pass
def find_next_valid_time_from_cache(self, t):
try:
return self.cache[t]
except KeyError:
return None
def find_next_invalid_time_from_cache(self, t):
try:
return self.invalid_cache[t]
except KeyError:
return None
    # Look for an active/inactive change, and log it:
# [1327392000] TIMEPERIOD TRANSITION: <name>;<from>;<to>
# from is -1 on startup. to is 1 if the timeperiod starts
# and 0 if it ends.
def check_and_log_activation_change(self):
now = int(time.time())
was_active = self.is_active
self.is_active = self.is_time_valid(now)
# If we got a change, log it!
if self.is_active != was_active:
_from = 0
_to = 0
# If it's the start, get a special value for was
if was_active is None:
_from = -1
if was_active:
_from = 1
if self.is_active:
_to = 1
# Now raise the log
naglog_result(
'info', 'TIMEPERIOD TRANSITION: %s;%d;%d'
% (self.get_name(), _from, _to)
)
    # Clean the get_next_valid_time_from_t cache.
    # The entries are a dict keyed on t, and entries with t < now are useless
    # because we do not care about the past anymore.
    # If we drop too many, it's not important: it's just a cache after all :)
def clean_cache(self):
now = int(time.time())
t_to_del = []
for t in self.cache:
if t < now:
t_to_del.append(t)
for t in t_to_del:
del self.cache[t]
# same for the invalid cache
t_to_del = []
for t in self.invalid_cache:
if t < now:
t_to_del.append(t)
for t in t_to_del:
del self.invalid_cache[t]
def get_next_valid_time_from_t(self, t):
# first find from cache
t = int(t)
original_t = t
# logger.debug("[%s] Check valid time for %s" %
# ( self.get_name(), time.asctime(time.localtime(t)))
res_from_cache = self.find_next_valid_time_from_cache(t)
if res_from_cache is not None:
return res_from_cache
still_loop = True
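        # Overall idea (comment added for clarity): ask every daterange for
        # its next valid time >= t, walk those candidates in ascending order
        # and keep the first one the excludes do not mask; if everything is
        # excluded, jump t forward to the end of the exclusion and retry,
        # giving up after one year.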
# Loop for all minutes...
while still_loop:
local_min = None
# Ok, not in cache...
dr_mins = []
s_dr_mins = []
for dr in self.dateranges:
dr_mins.append(dr.get_next_valid_time_from_t(t))
s_dr_mins = sorted([d for d in dr_mins if d is not None])
for t1 in s_dr_mins:
if not self.exclude and still_loop is True:
# No Exclude so we are good
local_min = t1
still_loop = False
else:
for tp in self.exclude:
if not tp.is_time_valid(t1) and still_loop is True:
# OK we found a date that is not valid in any exclude timeperiod
local_min = t1
still_loop = False
if local_min is None:
# print("Looking for next valid date")
exc_mins = []
if s_dr_mins != []:
for tp in self.exclude:
exc_mins.append(tp.get_next_invalid_time_from_t(s_dr_mins[0]))
s_exc_mins = sorted([d for d in exc_mins if d is not None])
if s_exc_mins != []:
local_min = s_exc_mins[0]
if local_min is None:
still_loop = False
else:
t = local_min
# No loop more than one year
if t > original_t + 3600 * 24 * 366 + 1:
still_loop = False
local_min = None
# Ok, we update the cache...
self.cache[original_t] = local_min
return local_min
def get_next_invalid_time_from_t(self, t):
# print('\n\n', self.get_name(), 'Search for next invalid from',)
# time.asctime(time.localtime(t)), t
t = int(t)
original_t = t
still_loop = True
# First try to find in cache
res_from_cache = self.find_next_invalid_time_from_cache(t)
if res_from_cache is not None:
return res_from_cache
# Then look, maybe t is already invalid
if not self.is_time_valid(t):
return t
local_min = t
res = None
# Loop for all minutes...
while still_loop:
# print("Invalid loop with", time.asctime(time.localtime(local_min)))
dr_mins = []
# val_valids = []
# val_inval = []
# But maybe we can find a better solution with next invalid of standard dateranges
# print(self.get_name(),)
# "After valid of exclude, local_min =", time.asctime(time.localtime(local_min))
for dr in self.dateranges:
# print(self.get_name(),)
# "Search a next invalid from DR", time.asctime(time.localtime(local_min))
# print(dr.__dict__)
m = dr.get_next_invalid_time_from_t(local_min)
# print(self.get_name(), "Dr", dr.__dict__,)
# "give me next invalid", time.asctime(time.localtime(m))
if m is not None:
# But maybe it's invalid for this dr, but valid for other ones.
# if not self.is_time_valid(m):
# print("Final: Got a next invalid at", time.asctime(time.localtime(m)))
dr_mins.append(m)
# if not self.is_time_valid(m):
# val_inval.append(m)
# else:
# val_valids.append(m)
# print("Add a m", time.asctime(time.localtime(m)))
# else:
# print(dr.__dict__)
# print("FUCK bad result\n\n\n")
# print("Inval")
# for v in val_inval:
# print("\t", time.asctime(time.localtime(v)))
# print("Valid")
# for v in val_valids:
# print("\t", time.asctime(time.localtime(v)))
if dr_mins != []:
local_min = min(dr_mins)
# Take the minimum valid as lower for next search
# local_min_valid = 0
# if val_valids != []:
# local_min_valid = min(val_valids)
# if local_min_valid != 0:
# local_min = local_min_valid
# else:
# local_min = min(dr_mins)
# print("UPDATE After dr: found invalid local min:",)
# time.asctime(time.localtime(local_min)),
# "is valid", self.is_time_valid(local_min)
# print(self.get_name(),)
# 'Invalid: local min', local_min #time.asctime(time.localtime(local_min))
# We do not loop unless the local_min is not valid
if not self.is_time_valid(local_min):
still_loop = False
else: # continue until we reach too far..., in one minute
# After one month, go quicker...
if local_min > original_t + 3600 * 24 * 30:
local_min += 3600
else: # else search for 1min precision
local_min += 60
# after one year, stop.
if local_min > original_t + 3600 * 24 * 366 + 1: # 60*24*366 + 1:
still_loop = False
# print("Loop?", still_loop)
# if we've got a real value, we check it with the exclude
if local_min is not None:
# Now check if local_min is not valid
for tp in self.exclude:
# print(self.get_name(),)
# "we check for invalid",
# time.asctime(time.localtime(local_min)), 'with tp', tp.name
if tp.is_time_valid(local_min):
still_loop = True
# local_min + 60
local_min = tp.get_next_invalid_time_from_t(local_min + 60)
                        # No loop for more than one year
                        # (3600 * 24 * 366 seconds, consistent with the checks above)
                        if local_min > original_t + 3600 * 24 * 366 + 1:
still_loop = False
res = None
            if not still_loop:  # We found a possible value
                # We take the minimal possible value as the result
if res is None or local_min < res:
res = local_min
# print("Finished Return the next invalid", time.asctime(time.localtime(local_min)))
# Ok, we update the cache...
self.invalid_cache[original_t] = local_min
return local_min
def has(self, prop):
return hasattr(self, prop)
    # We are correct only if our dateranges are,
    # and if we have no unmatched entries
def is_correct(self):
b = True
for dr in self.dateranges:
d = dr.is_correct()
if not d:
logger.error("[timeperiod::%s] invalid daterange ", self.get_name())
b &= d
# Warn about non correct entries
for e in self.invalid_entries:
logger.warning("[timeperiod::%s] invalid entry '%s'", self.get_name(), e)
return b
def __str__(self):
s = ''
s += str(self.__dict__) + '\n'
for elt in self.dateranges:
s += str(elt)
(start, end) = elt.get_start_and_end_time()
start = time.asctime(time.localtime(start))
end = time.asctime(time.localtime(end))
s += "\nStart and end:" + str((start, end))
s += '\nExclude'
for elt in self.exclude:
s += str(elt)
return s
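    # Note (comment added for clarity): resolve_daterange tries the patterns
    # below from the most specific to the most generic; the first regex that
    # matches wins and decides which Daterange subclass is built.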
def resolve_daterange(self, dateranges, entry):
# print("Trying to resolve ", entry)
res = re.search(
r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry
)
if res is not None:
# print("Good catch 1")
(syear, smon, smday, eyear, emon, emday, skip_interval, other) = res.groups()
dateranges.append(
CalendarDaterange(
syear, smon, smday, 0, 0, eyear, emon,
emday, 0, 0, skip_interval, other
)
)
return
res = re.search(r'(\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 2")
(syear, smon, smday, skip_interval, other) = res.groups()
eyear = syear
emon = smon
emday = smday
dateranges.append(
CalendarDaterange(syear, smon, smday, 0, 0, eyear,
emon, emday, 0, 0, skip_interval, other)
)
return
res = re.search(
r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry
)
if res is not None:
# print("Good catch 3")
(syear, smon, smday, eyear, emon, emday, other) = res.groups()
dateranges.append(
CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, 0, other)
)
return
res = re.search(r'(\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 4")
(syear, smon, smday, other) = res.groups()
eyear = syear
emon = smon
emday = smday
dateranges.append(
CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, 0, other)
)
return
res = re.search(
r'([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) / (\d+)[\s\t]*([0-9:, -]+)',
entry
)
if res is not None:
# print("Good catch 5")
(swday, swday_offset, smon, ewday,
ewday_offset, emon, skip_interval, other) = res.groups()
dateranges.append(
MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0,
emon, 0, ewday, ewday_offset, skip_interval, other)
)
return
res = re.search(r'([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 6")
(t0, smday, t1, emday, skip_interval, other) = res.groups()
if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
swday = t0
ewday = t1
swday_offset = smday
ewday_offset = emday
dateranges.append(
WeekDayDaterange(0, 0, 0, swday, swday_offset,
0, 0, 0, ewday, ewday_offset, skip_interval, other)
)
return
elif t0 in Daterange.months and t1 in Daterange.months:
smon = t0
emon = t1
dateranges.append(
MonthDateDaterange(0, smon, smday, 0, 0, 0,
emon, emday, 0, 0, skip_interval, other)
)
return
elif t0 == 'day' and t1 == 'day':
dateranges.append(
MonthDayDaterange(0, 0, smday, 0, 0, 0, 0,
emday, 0, 0, skip_interval, other)
)
return
res = re.search(r'([a-z]*) ([\d-]+) - ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 7")
(t0, smday, emday, skip_interval, other) = res.groups()
if t0 in Daterange.weekdays:
swday = t0
swday_offset = smday
ewday = swday
ewday_offset = emday
dateranges.append(
WeekDayDaterange(0, 0, 0, swday, swday_offset,
0, 0, 0, ewday, ewday_offset, skip_interval, other)
)
return
elif t0 in Daterange.months:
smon = t0
emon = smon
dateranges.append(
MonthDateDaterange(0, smon, smday, 0, 0, 0, emon,
emday, 0, 0, skip_interval, other)
)
return
elif t0 == 'day':
dateranges.append(
MonthDayDaterange(0, 0, smday, 0, 0, 0, 0,
emday, 0, 0, skip_interval, other)
)
return
res = re.search(
r'([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) [\s\t]*([0-9:, -]+)', entry
)
if res is not None:
# print("Good catch 8")
(swday, swday_offset, smon, ewday, ewday_offset, emon, other) = res.groups()
# print("Debug:", (swday, swday_offset, smon, ewday, ewday_offset, emon, other))
dateranges.append(
MonthWeekDayDaterange(0, smon, 0, swday, swday_offset,
0, emon, 0, ewday, ewday_offset, 0, other)
)
return
res = re.search(r'([a-z]*) ([\d-]+) - ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 9")
(t0, smday, emday, other) = res.groups()
if t0 in Daterange.weekdays:
swday = t0
swday_offset = smday
ewday = swday
ewday_offset = emday
dateranges.append(
WeekDayDaterange(
0, 0, 0, swday, swday_offset, 0, 0, 0,
ewday, ewday_offset, 0, other)
)
return
elif t0 in Daterange.months:
smon = t0
emon = smon
dateranges.append(
MonthDateDaterange(0, smon, smday, 0, 0, 0,
emon, emday, 0, 0, 0, other)
)
return
elif t0 == 'day':
dateranges.append(
MonthDayDaterange(0, 0, smday, 0, 0, 0, 0,
emday, 0, 0, 0, other)
)
return
res = re.search(r'([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 10")
(t0, smday, t1, emday, other) = res.groups()
if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
swday = t0
ewday = t1
swday_offset = smday
ewday_offset = emday
dateranges.append(
WeekDayDaterange(0, 0, 0, swday, swday_offset, 0,
0, 0, ewday, ewday_offset, 0, other)
)
return
elif t0 in Daterange.months and t1 in Daterange.months:
smon = t0
emon = t1
dateranges.append(
MonthDateDaterange(0, smon, smday, 0, 0,
0, emon, emday, 0, 0, 0, other)
)
return
elif t0 == 'day' and t1 == 'day':
dateranges.append(
MonthDayDaterange(0, 0, smday, 0, 0, 0,
0, emday, 0, 0, 0, other)
)
return
res = re.search(r'([a-z]*) ([\d-]+) ([a-z]*)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 11")
(t0, swday_offset, t1, other) = res.groups()
if t0 in Daterange.weekdays and t1 in Daterange.months:
swday = t0
smon = t1
emon = smon
ewday = swday
ewday_offset = swday_offset
dateranges.append(
MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon,
0, ewday, ewday_offset, 0, other)
)
return
res = re.search(r'([a-z]*) ([\d-]+)[\s\t]+([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 12")
(t0, smday, other) = res.groups()
if t0 in Daterange.weekdays:
swday = t0
swday_offset = smday
ewday = swday
ewday_offset = swday_offset
dateranges.append(
WeekDayDaterange(0, 0, 0, swday, swday_offset, 0,
0, 0, ewday, ewday_offset, 0, other)
)
return
if t0 in Daterange.months:
smon = t0
emon = smon
emday = smday
dateranges.append(
MonthDateDaterange(
0, smon, smday, 0, 0, 0, emon, emday, 0, 0, 0, other)
)
return
if t0 == 'day':
emday = smday
dateranges.append(
MonthDayDaterange(0, 0, smday, 0, 0, 0,
0, emday, 0, 0, 0, other)
)
return
res = re.search(r'([a-z]*)[\s\t]+([0-9:, -]+)', entry)
if res is not None:
# print("Good catch 13")
(t0, other) = res.groups()
if t0 in Daterange.weekdays:
day = t0
dateranges.append(StandardDaterange(day, other))
return
logger.info("[timeentry::%s] no match for %s", self.get_name(), entry)
self.invalid_entries.append(entry)
def apply_inheritance(self):
pass
    # Create dateranges from the unresolved params
    def explode(self, timeperiods):
        for entry in self.unresolved:
            # print("Resolving entry", entry)
self.resolve_daterange(self.dateranges, entry)
self.unresolved = []
    # Replace the timeperiod names in exclude with the matching timeperiod objects
def linkify(self, timeperiods):
new_exclude = []
if self.has('exclude') and self.exclude != []:
logger.debug("[timeentry::%s] have excluded %s", self.get_name(), self.exclude)
excluded_tps = self.exclude
# print("I will exclude from:", excluded_tps)
for tp_name in excluded_tps:
tp = timeperiods.find_by_name(tp_name.strip())
if tp is not None:
new_exclude.append(tp)
else:
logger.error("[timeentry::%s] unknown %s timeperiod", self.get_name(), tp_name)
self.exclude = new_exclude
def check_exclude_rec(self):
if self.rec_tag:
logger.error("[timeentry::%s] is in a loop in exclude parameter", self.get_name())
return False
self.rec_tag = True
for tp in self.exclude:
tp.check_exclude_rec()
return True
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
# Is this property intended for broking?
# if 'fill_brok' in entry:
if brok_type in entry.fill_brok:
if hasattr(self, prop):
data[prop] = getattr(self, prop)
elif entry.has_default:
data[prop] = entry.default
class Timeperiods(Items):
name_property = "timeperiod_name"
inner_class = Timeperiod
def explode(self):
for id in self.items:
tp = self.items[id]
tp.explode(self)
def linkify(self):
for id in self.items:
tp = self.items[id]
tp.linkify(self)
def apply_inheritance(self):
# The only interesting property to inherit is exclude
self.apply_partial_inheritance('exclude')
for i in self:
i.get_customs_properties_by_inheritance(0)
# And now apply inheritance for unresolved properties
# like the dateranges in fact
for tp in self:
tp.get_unresolved_properties_by_inheritance(self.items)
# check for loop in definition
def is_correct(self):
r = True
        # We do not want the same tp to be checked again and again,
        # so we tag it
for tp in self.items.values():
tp.rec_tag = False
for tp in self.items.values():
for tmp_tp in self.items.values():
tmp_tp.rec_tag = False
r &= tp.check_exclude_rec()
# We clean the tags
for tp in self.items.values():
del tp.rec_tag
        # And check that all timeperiods are correct (e.g. that entries
        # like a misspelled 'sunday' were flagged as invalid)
for tp in self:
r &= tp.is_correct()
return r
if __name__ == '__main__':
t = Timeperiod()
test = ['1999-01-28 00:00-24:00',
'monday 3 00:00-24:00 ',
'day 2 00:00-24:00',
'february 10 00:00-24:00',
'february -1 00:00-24:00',
'friday -2 00:00-24:00',
'thursday -1 november 00:00-24:00',
'2007-01-01 - 2008-02-01 00:00-24:00',
'monday 3 - thursday 4 00:00-24:00',
'day 1 - 15 00:00-24:00',
'day 20 - -1 00:00-24:00',
'july -10 - -1 00:00-24:00',
'april 10 - may 15 00:00-24:00',
'tuesday 1 april - friday 2 may 00:00-24:00',
'2007-01-01 - 2008-02-01 / 3 00:00-24:00',
'2008-04-01 / 7 00:00-24:00',
'day 1 - 15 / 5 00:00-24:00',
'july 10 - 15 / 2 00:00-24:00',
'tuesday 1 april - friday 2 may / 6 00:00-24:00',
'tuesday 1 october - friday 2 may / 6 00:00-24:00',
'monday 3 - thursday 4 / 2 00:00-24:00',
'monday 4 - thursday 3 / 2 00:00-24:00',
'day -1 - 15 / 5 01:00-24:00,00:30-05:60',
'tuesday 00:00-24:00',
'sunday 00:00-24:00',
'saturday 03:00-24:00,00:32-01:02',
'wednesday 09:00-15:46,00:00-21:00',
'may 7 - february 2 00:00-10:00',
'day -1 - 5 00:00-10:00',
'tuesday 1 february - friday 1 may 01:00-24:00,00:30-05:60',
'december 2 - may -15 00:00-24:00',
]
for entry in test:
print("**********************")
print(entry)
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, entry)
# t.exclude = []
# t.resolve_daterange(t.exclude, 'monday 00:00-19:00')
# t.check_valid_for_today()
now = time.time()
# print("Is valid NOW?", t.is_time_valid(now))
t_next = t.get_next_valid_time_from_t(now + 5 * 60)
if t_next is not None:
print("Get next valid for now + 5 min ==>", time.asctime(time.localtime(t_next)), "<==")
else:
print("===> No future time!!!")
# print("End date:", t.get_end_time())
# print("Next valid", time.asctime(time.localtime(t.get_next_valid_time())))
print(str(t) + '\n\n')
print("*************************************************************")
t3 = Timeperiod()
t3.timeperiod_name = 't3'
t3.resolve_daterange(t3.dateranges, 'day 1 - 10 10:30-15:00')
t3.exclude = []
t2 = Timeperiod()
t2.timeperiod_name = 't2'
t2.resolve_daterange(t2.dateranges, 'day 1 - 10 12:00-17:00')
t2.exclude = [t3]
t = Timeperiod()
t.timeperiod_name = 't'
t.resolve_daterange(t.dateranges, 'day 1 - 10 14:00-15:00')
t.exclude = [t2]
print("Mon T", str(t) + '\n\n')
t_next = t.get_next_valid_time_from_t(now)
t_no_next = t.get_next_invalid_time_from_t(now)
print("Get next valid for now ==>", time.asctime(time.localtime(t_next)), "<==")
print("Get next invalid for now ==>", time.asctime(time.localtime(t_no_next)), "<==")
| 33,918
|
Python
|
.py
| 805
| 29.945342
| 100
| 0.492201
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,523
|
brokerlink.py
|
shinken-solutions_shinken/shinken/objects/brokerlink.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from .satellitelink import SatelliteLink, SatelliteLinks
from shinken.property import IntegerProp, StringProp
class BrokerLink(SatelliteLink):
"""TODO: Add some comment about this class for the doc"""
id = 0
my_type = 'broker'
properties = SatelliteLink.properties.copy()
properties.update({
'broker_name': StringProp(fill_brok=['full_status'], to_send=True),
'port': IntegerProp(default=7772, fill_brok=['full_status']),
'broks_batch': IntegerProp(default=0, fill_brok=['full_status'], to_send=True),
'harakiri_threshold': StringProp(default=None, fill_brok=['full_status'], to_send=True),
})
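    # Note (comment added for clarity, semantics partly assumed): these
    # properties describe how the arbiter reaches a broker daemon;
    # 'broks_batch' presumably caps how many broks are sent per exchange
    # (0 meaning no batching limit).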
def get_name(self):
return self.broker_name
def register_to_my_realm(self):
self.realm.brokers.append(self)
class BrokerLinks(SatelliteLinks):
"""TODO: Add some comment about this class for the doc"""
name_property = "broker_name"
inner_class = BrokerLink
| 1,955
|
Python
|
.py
| 44
| 41.136364
| 96
| 0.732387
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,524
|
discoveryrule.py
|
shinken-solutions_shinken/shinken/objects/discoveryrule.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import copy
from shinken.objects.item import Item, Items
from shinken.objects.matchingitem import MatchingItem
from shinken.objects.service import Service
from shinken.objects.host import Host
from shinken.property import StringProp, ListProp, IntegerProp
class Discoveryrule(MatchingItem):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'discoveryrule'
properties = Item.properties.copy()
properties.update({
'discoveryrule_name': StringProp(),
'creation_type': StringProp(default='service'),
'discoveryrule_order': IntegerProp(default=0),
# 'check_command': StringProp (),
# 'service_description': StringProp (),
# 'use': StringProp(),
})
running_properties = {
'configuration_warnings': ListProp(default=[]),
'configuration_errors': ListProp(default=[]),
}
macros = {}
    # The init of a discoveryrule will set the attributes listed in
    # Discoveryrule.properties with setattr, but all the others go into
    # dedicated dicts, because we need to keep all the names
    # and not lose them in __dict__
def __init__(self, params={}):
cls = self.__class__
# We have our own id of My Class type :)
        # use setattr so the value goes into the slots
        # instead of __dict__ :)
setattr(self, 'id', cls.id)
cls.id += 1
self.matches = {} # for matching rules
self.not_matches = {} # for rules that should NOT match
self.writing_properties = {}
for key in params:
# delistify attributes if there is only one value
params[key] = self.compact_unique_attr_value(params[key])
# Get the properties of the Class we want
if 'creation_type' not in params:
params['creation_type'] = 'service'
map = {'service': Service, 'host': Host}
t = params['creation_type']
if t not in map:
return
tcls = map[t]
# In my own property:
# -> in __dict__
# In the properties of the 'creation_type' Class:
# -> in self.writing_properties
# if not, in matches or not match (if key starts
# with a !, it's a not rule)
# -> in self.matches or self.not_matches
# in writing properties if start with + (means 'add this')
# in writing properties if start with - (means 'del this')
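        # For illustration only (hypothetical rule, not part of this file):
        #   define discoveryrule {
        #       discoveryrule_name  web
        #       creation_type       service
        #       openports           ^80$
        #       +use                http-service
        #       service_description Http
        #   }
        # 'openports' ends up in self.matches, while '+use' and
        # 'service_description' end up in self.writing_properties.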
for key in params:
            # Some keys are quite special
if key in cls.properties:
setattr(self, key, params[key])
elif (key in ['use'] or
key.startswith('+') or
key.startswith('-') or
key in tcls.properties or
key.startswith('_')):
self.writing_properties[key] = params[key]
else:
if key.startswith('!'):
key = key.split('!')[1]
self.not_matches[key] = params['!' + key]
else:
self.matches[key] = params[key]
# Then running prop :)
cls = self.__class__
# adding running properties like latency, dependency list, etc
for prop, entry in cls.running_properties.items():
            # Copying is slow, so we check the type first:
            # types with __iter__ are list, dict or tuple.
            # Each item needs its own list, so we copy
val = entry.default
if hasattr(val, '__iter__'):
setattr(self, prop, copy(val))
else:
setattr(self, prop, val)
            # each instance gets its own running properties!
# Output name
def get_name(self):
try:
return self.discoveryrule_name
except AttributeError:
return "UnnamedDiscoveryRule"
class Discoveryrules(Items):
name_property = "discoveryrule_name"
inner_class = Discoveryrule
| 4,981
|
Python
|
.py
| 118
| 34.067797
| 82
| 0.615909
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,525
|
__init__.py
|
shinken-solutions_shinken/shinken/bin/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This file is to be imported by every Shinken service component:
Arbiter, Scheduler, etc. It just checks for the main requirement of
Shinken.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
VERSION = "3.0.0-RC1"
# Make sure people are using Python 2.7 or higher
# This is the canonical python version check
if sys.version_info < (2, 7):
sys.exit("Shinken requires as a minimum Python 2.7.x, sorry")
| 1,385
|
Python
|
.py
| 34
| 39.382353
| 82
| 0.760238
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,526
|
discoverymanager.py
|
shinken-solutions_shinken/shinken/discovery/discoverymanager.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import sys
import os
import re
import time
import copy
import random
import string
# Always initialize random...
random.seed(time.time())
try:
import uuid
except ImportError:
uuid = None
try:
from pymongo.connection import Connection
except ImportError:
Connection = None
from shinken.log import logger
from shinken.objects import *
from shinken.objects.config import Config
from shinken.macroresolver import MacroResolver
from shinken.modulesmanager import ModulesManager
def get_uuid():
    if uuid:
        return uuid.uuid1().hex
    # Ok, for old pythons like 2.4 (no uuid module), we will lie here :)
    return int(random.random() * sys.maxint)
# Look if the name is an IPv4 address or not
def is_ipv4_addr(name):
p = r"^([01]?\d\d?|2[0-4]\d|25[0-5])" \
r"\.([01]?\d\d?|2[0-4]\d|25[0-5])" \
r"\.([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])$"
return (re.match(p, name) is not None)
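# e.g. is_ipv4_addr('192.168.0.1') -> True
#      is_ipv4_addr('srv1.example.com') -> False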
def by_order(r1, r2):
if r1.discoveryrule_order == r2.discoveryrule_order:
return 0
if r1.discoveryrule_order > r2.discoveryrule_order:
return 1
if r1.discoveryrule_order < r2.discoveryrule_order:
return -1
class DiscoveredHost(object):
my_type = 'host' # we fake our type for the macro resolving
macros = {
'HOSTNAME': 'name',
}
def __init__(self, name, rules, runners, merge=False, first_level_only=False):
self.name = name
self.data = {}
self.rules = rules
self.runners = runners
self.merge = merge
self.matched_rules = []
self.launched_runners = []
self.in_progress_runners = []
self.properties = {}
self.customs = {}
self.first_level_only = first_level_only
    # In the final phase, we keep only the '_' properties and
    # the rule-based ones
def update_properties(self, final_phase=False):
d = {}
if final_phase:
for (k, v) in self.data.items():
if k.startswith('_'):
d[k] = v
else:
d = copy.copy(self.data)
d['host_name'] = self.name
# Set address directive if an ip exists
if 'ip' in self.data:
d['address'] = self.data['ip']
        # Sort by rule order (key= works on both Python 2 and 3, unlike
        # passing the cmp-style by_order function positionally)
        self.matched_rules.sort(key=lambda r: r.discoveryrule_order)
for r in self.matched_rules:
for k, v in r.writing_properties.items():
# If it's a + (add) property, append
if k.startswith('+'):
kprop = k[1:]
                    # If d does not already have this prop,
                    # create the list
if kprop not in d:
print('New prop', kprop)
d[kprop] = []
elif not k.startswith('-'):
kprop = k
if kprop not in d:
print('New prop', kprop)
else:
print('Prop', kprop, 'reset with new value')
d[kprop] = []
                for prop in v.split(','):
prop = prop.strip()
# checks that prop does not already exist and adds
if prop not in d[kprop]:
if len(d[kprop]) > 0:
print('Already got', ','.join(d[kprop]), 'add', prop)
else:
print('Add', prop)
d[kprop].append(prop)
# Now look for - (rem) property
for k, v in r.writing_properties.items():
if k.startswith('-'):
kprop = k[1:]
if kprop in d:
                        for prop in v.split(','):
prop = prop.strip()
if prop in d[kprop]:
print('Already got', ','.join(d[kprop]), 'rem', prop)
d[kprop].remove(prop)
# Change join prop list in string with a ',' separator
for (k, v) in d.items():
if isinstance(d[k], list):
d[k] = ','.join(d[k])
self.properties = d
print('Update our properties', self.name, d)
# For macro-resolving, we should have our macros too
self.customs = {}
for (k, v) in self.properties.items():
self.customs['_' + k.upper()] = v
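    # For illustration only (hypothetical values): with matched rules
    # carrying {'+use': 'linux, ssh'} and {'-use': 'ssh'}, the code above
    # leaves 'use' == 'linux': '+' appends unique values, '-' removes them,
    # and the lists are joined back with ','.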
    # The manager asks us for our properties for the configuration, so
    # we keep only the rule properties and the '_' ones
def get_final_properties(self):
self.update_properties(final_phase=True)
return self.properties
def get_to_run(self):
self.in_progress_runners = []
if self.first_level_only:
return
for r in self.runners:
# If we already launched it, we don't want it :)
if r in self.launched_runners:
print('Sorry', r.get_name(), 'was already launched')
continue
# First level discovery are for large scan, so not for here
if r.is_first_level():
print('Sorry', r.get_name(), 'is first level')
continue
# And of course it must match our data
print('Is ', r.get_name(), 'matching??', r.is_matching_disco_datas(self.properties))
if r.is_matching_disco_datas(self.properties):
self.in_progress_runners.append(r)
def need_to_run(self):
return len(self.in_progress_runners) != 0
# Now we try to match all our hosts with the rules
def match_rules(self):
print('And our data?', self.data)
for r in self.rules:
            # If the rule was already successfully applied for this host, skip it
if r in self.matched_rules:
print('We already apply the rule', r.get_name(), 'for the host', self.name)
continue
print('Looking for match with a new rule', r.get_name(), 'for the host', self.name)
if r.is_matching_disco_datas(self.data):
self.matched_rules.append(r)
print("Generating a new rule", self.name, r.writing_properties)
self.update_properties()
def read_disco_buf(self, buf):
print('Read buf in', self.name)
for l in buf.split('\n'):
# print("")
# If it's not a disco line, bypass it
if not re.search('::', l):
continue
# print("line", l)
elts = l.split('::', 1)
if len(elts) <= 1:
# print("Bad discovery data")
continue
name = elts[0].strip()
            # We can choose to keep only the basename
            # of the name id, so strip the fqdn,
            # but not if it's a plain IPv4 addr
            # TODO: gate this with self.conf.strip_idname_fqdn
if not is_ipv4_addr(name):
name = name.split('.', 1)[0]
data = '::'.join(elts[1:])
# Maybe it's not me?
if name != self.name:
if not self.merge:
print('Bad data for me? I bail out data!')
data = ''
else:
print('Bad data for me? Let\'s switch !')
self.name = name
# Now get key,values
if '=' not in data:
continue
elts = data.split('=', 1)
if len(elts) <= 1:
continue
key = elts[0].strip()
value = elts[1].strip()
print("INNER -->", name, key, value)
self.data[key] = value
def launch_runners(self):
for r in self.in_progress_runners:
print("I", self.name, " is launching", r.get_name(), "with a %d seconds timeout" % 3600)
r.launch(timeout=3600, ctx=[self])
self.launched_runners.append(r)
def wait_for_runners_ends(self):
all_ok = False
while not all_ok:
print('Loop wait runner for', self.name)
all_ok = True
for r in self.in_progress_runners:
if not r.is_finished():
# print("Check finished of", r.get_name())
r.check_finished()
b = r.is_finished()
if not b:
# print(r.get_name(), "is not finished")
all_ok = False
time.sleep(0.1)
def get_runners_outputs(self):
for r in self.in_progress_runners:
if r.is_finished():
print('Get output', self.name, r.discoveryrun_name, r.current_launch)
if r.current_launch.exit_status != 0:
print("Error on run")
raw_disco_data = '\n'.join(r.get_output() for r in self.in_progress_runners
if r.is_finished())
if len(raw_disco_data) != 0:
print("Got Raw disco data", raw_disco_data)
else:
print("Got no data!")
for r in self.in_progress_runners:
print("DBG", r.current_launch)
# Now get the data for me :)
self.read_disco_buf(raw_disco_data)
class DiscoveryManager(object):
def __init__(self, path, macros, overwrite, runners, output_dir=None,
dbmod='', db_direct_insert=False, only_new_hosts=False,
backend=None, modules_path='', merge=False, conf=None, first_level_only=False):
# i am arbiter-like
self.log = logger
self.overwrite = overwrite
self.runners = runners
self.output_dir = output_dir
self.dbmod = dbmod
self.db_direct_insert = db_direct_insert
self.only_new_hosts = only_new_hosts
self.log.load_obj(self)
self.merge = merge
self.config_files = [path]
# For specific backend, to override the classic file/db behavior
self.backend = backend
self.modules_path = modules_path
self.first_level_only = first_level_only
if not conf:
self.conf = Config()
buf = self.conf.read_config(self.config_files)
# Add macros on the end of the buf so they will
# overwrite the resource.cfg ones
for (m, v) in macros:
buf += '\n$%s$=%s\n' % (m, v)
raw_objects = self.conf.read_config_buf(buf)
self.conf.create_objects_for_type(raw_objects, 'arbiter')
self.conf.create_objects_for_type(raw_objects, 'module')
self.conf.early_arbiter_linking()
self.conf.create_objects(raw_objects)
self.conf.linkify_templates()
self.conf.apply_inheritance()
self.conf.explode()
self.conf.apply_implicit_inheritance()
self.conf.fill_default()
self.conf.remove_templates()
self.conf.linkify()
self.conf.apply_dependencies()
self.conf.is_correct()
else:
self.conf = conf
self.discoveryrules = self.conf.discoveryrules
self.discoveryruns = self.conf.discoveryruns
m = MacroResolver()
m.init(self.conf)
# Hash = name, and in it (key, value)
self.disco_data = {}
# Hash = name, and in it rules that apply
self.disco_matches = {}
self.init_database()
self.init_backend()
def add(self, obj):
pass
# We try to init the database connection
def init_database(self):
self.dbconnection = None
self.db = None
if self.dbmod == '':
return
for mod in self.conf.modules:
if getattr(mod, 'module_name', '') == self.dbmod:
if Connection is None:
print("ERROR : cannot use Mongodb database : please install the pymongo library")
break
# Now try to connect
try:
uri = mod.uri
database = mod.database
self.dbconnection = Connection(uri)
self.db = getattr(self.dbconnection, database)
print("Connection to Mongodb:%s:%s is OK" % (uri, database))
except Exception as exp:
logger.error('Database init : %s', exp)
# We try to init the backend if we got one
def init_backend(self):
if not self.backend or not isinstance(self.backend, six.string_types):
return
print("Doing backend init")
for mod in self.conf.modules:
if getattr(mod, 'module_name', '') == self.backend:
print("We found our backend", mod.get_name())
self.backend = mod
if not self.backend:
print("ERROR : cannot find the module %s" % self.backend)
sys.exit(2)
self.modules_manager = ModulesManager('discovery', self.modules_path, [])
self.modules_manager.set_modules([mod])
self.modules_manager.load_and_init()
self.backend = self.modules_manager.instances[0]
print("We got our backend!", self.backend)
def loop_discovery(self):
still_loop = True
i = 0
while still_loop:
i += 1
print('\n')
print('LOOP' * 10, i)
still_loop = False
for (name, dh) in self.disco_data.items():
dh.update_properties()
                dh.get_to_run()  # fills dh.in_progress_runners, returns nothing
                print('Still to run for', name, dh.in_progress_runners)
if dh.need_to_run():
still_loop = True
dh.launch_runners()
dh.wait_for_runners_ends()
dh.get_runners_outputs()
dh.match_rules()
def read_disco_buf(self):
buf = self.raw_disco_data
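        # Discovery output lines look like 'name::key=value', e.g.
        # 'srv1::openports=22,80' (hypothetical example); lines without
        # '::' are ignored.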
for l in buf.split('\n'):
# print("")
# If it's not a disco line, bypass it
if not re.search('::', l):
continue
# print("line", l)
elts = l.split('::', 1)
if len(elts) <= 1:
# print("Bad discovery data")
continue
name = elts[0].strip()
# We can choose to keep only the basename
# of the nameid, so strip the fqdn
# But not if it's a plain ipv4 addr
if self.conf.strip_idname_fqdn:
if not is_ipv4_addr(name):
name = name.split('.', 1)[0]
data = '::'.join(elts[1:])
# Register the name
if name not in self.disco_data:
self.disco_data[name] = DiscoveredHost(name,
self.discoveryrules,
self.discoveryruns,
merge=self.merge,
first_level_only=self.first_level_only)
# Now get key,values
if '=' not in data:
continue
elts = data.split('=', 1)
if len(elts) <= 1:
continue
dh = self.disco_data[name]
key = elts[0].strip()
value = elts[1].strip()
print("-->", name, key, value)
dh.data[key] = value
# Now we try to match all our hosts with the rules
def match_rules(self):
for (name, dh) in self.disco_data.items():
for r in self.discoveryrules:
                # If the rule was already successfully applied for this host, skip it
if r in dh.matched_rules:
print('We already apply the rule', r.get_name(), 'for the host', name)
continue
if r.is_matching_disco_datas(dh.data):
dh.matched_rules.append(r)
if name not in self.disco_matches:
self.disco_matches[name] = []
self.disco_matches[name].append(r)
print("Generating", name, r.writing_properties)
dh.update_properties()
def is_allowing_runners(self, name):
name = name.strip()
# If we got no value, it's * by default
if '*' in self.runners:
return True
# print(self.runners)
# If we match the name, ok
for r in self.runners:
r_name = r.strip()
# print("Look", r_name, name)
if r_name == name:
return True
# Not good, so not run this!
return False
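    # e.g. runners == ['*'] allows every runner, while runners == ['nmap']
    # (hypothetical name) only allows the run named 'nmap'.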
def allowed_runners(self):
return [r for r in self.discoveryruns if self.is_allowing_runners(r.get_name())]
def launch_runners(self):
allowed_runners = self.allowed_runners()
if len(allowed_runners) == 0:
print("ERROR : there is no matching runners selected!")
return
for r in allowed_runners:
print(
"I'm launching %s with a %d seconds timeout" %
(r.get_name(), self.conf.runners_timeout)
)
r.launch(timeout=self.conf.runners_timeout)
def wait_for_runners_ends(self):
all_ok = False
while not all_ok:
'''
all_ok = True
for r in self.allowed_runners():
if not r.is_finished():
#print("Check finished of", r.get_name())
r.check_finished()
b = r.is_finished()
if not b:
#print(r.get_name(), "is not finished")
all_ok = False
'''
all_ok = self.is_all_ok()
time.sleep(0.1)
def is_all_ok(self):
all_ok = True
for r in self.allowed_runners():
if not r.is_finished():
# print("Check finished of", r.get_name())
r.check_finished()
b = r.is_finished()
if not b:
# print(r.get_name(), "is not finished")
all_ok = False
return all_ok
def get_runners_outputs(self):
for r in self.allowed_runners():
if r.is_finished():
print(r.discoveryrun_name, r.current_launch)
if r.current_launch.exit_status != 0:
print("Error on run")
self.raw_disco_data = '\n'.join(r.get_output() for r in self.allowed_runners()
if r.is_finished())
if len(self.raw_disco_data) != 0:
print("Got Raw disco data", self.raw_disco_data)
else:
print("Got no data!")
for r in self.allowed_runners():
print("DBG", r.current_launch)
# Write all configuration we've got
def write_config(self):
        # Store the hosts to delete in a separate list, so we can remove
        # them after iterating over the items
items_to_del = []
still_duplicate_items = True
managed_element = True
while still_duplicate_items:
# If we didn't work in the last loop, bail out
if not managed_element:
still_duplicate_items = False
print("LOOP")
managed_element = False
for name in self.disco_data:
if name in items_to_del:
continue
managed_element = True
print('Search same host to merge.')
dha = self.disco_data[name]
# Searching same host and update host macros
for oname in self.disco_data:
dhb = self.disco_data[oname]
# When same host but different properties are detected
if dha.name == dhb.name and dha.properties != dhb.properties:
for (k, v) in dhb.properties.items():
# Merge host macros if their properties are different
if k.startswith('_') and \
k in dha.properties and dha.properties[k] != dhb.properties[k]:
dha.data[k] = dha.properties[k] + ',' + v
print('Merged host macro:', k, dha.properties[k])
items_to_del.append(oname)
print('Merged ' + oname + ' in ' + name)
dha.update_properties()
else:
still_duplicate_items = False
# Removing merged element
for item in items_to_del:
print('Deleting ' + item)
del self.disco_data[item]
        # New loop to reflect the changes in self.disco_data, since it isn't
        # possible to modify a dict object while iterating over it.
for name in self.disco_data:
print("Writing", name, "configuration")
self.write_host_config(name)
self.write_service_config(name)
# We search for all rules of type host, and we merge them
def write_host_config(self, host):
dh = self.disco_data[host]
d = dh.get_final_properties()
final_host = dh.name
print("Will generate a host", d)
        # Maybe we do not have a directory output,
        # but a database one.
if self.output_dir:
self.write_host_config_to_file(final_host, d)
# Maybe we want a database insert
if self.db:
self.write_host_config_to_db(final_host, d)
if self.backend:
self.backend.write_host_config_to_db(final_host, d)
    # Write all properties/values of d for the host
    # to the file
def write_host_config_to_file(self, host, d):
p = os.path.join(self.output_dir, host)
print("Want to create host path", p)
try:
os.mkdir(p)
except OSError as exp:
            # If the directory already exists (errno 17, EEXIST),
            # it's not a problem; any other error is fatal
            if exp.errno != 17:
print("Cannot create the directory '%s' : '%s'" % (p, exp))
return
cfg_p = os.path.join(p, host + '.cfg')
if os.path.exists(cfg_p) and not self.overwrite:
print("The file '%s' already exists" % cfg_p)
return
buf = self.get_cfg_bufer(d, 'host')
# Ok, we create it so (or overwrite)
try:
fd = open(cfg_p, 'w')
fd.write(buf)
fd.close()
except OSError as exp:
print("Cannot create the file '%s' : '%s'" % (cfg_p, exp))
return
    # Generate all services for a host
def write_service_config(self, host):
srv_rules = {}
dh = self.disco_data[host]
for r in dh.matched_rules:
if r.creation_type == 'service':
if 'service_description' in r.writing_properties:
desc = r.writing_properties['service_description']
if desc not in srv_rules:
srv_rules[desc] = []
srv_rules[desc].append(r)
# print("Generate services for", host)
# print(srv_rules)
for (desc, rules) in srv_rules.items():
d = {'service_description': desc, 'host_name': host}
for r in rules:
d.update(r.writing_properties)
print("Generating", desc, d)
            # Maybe we do not have a directory output,
            # but a database one.
if self.output_dir:
self.write_service_config_to_file(host, desc, d)
    # Write all properties/values of d for the service
    # to the file
def write_service_config_to_file(self, host, desc, d):
p = os.path.join(self.output_dir, host)
# The host conf should already exist
cfg_host_p = os.path.join(p, host + '.cfg')
if not os.path.exists(cfg_host_p):
print("No host configuration available, I bail out")
return
cfg_p = os.path.join(p, desc + '.cfg')
if os.path.exists(cfg_p) and not self.overwrite:
print("The file '%s' already exists" % cfg_p)
return
buf = self.get_cfg_bufer(d, 'service')
# Ok, we create it so (or overwrite)
try:
fd = open(cfg_p, 'w')
fd.write(buf)
fd.close()
except OSError as exp:
print("Cannot create the file '%s' : '%s'" % (cfg_p, exp))
return
# Create a define t { } with data in d
def get_cfg_bufer(self, d, t):
tab = ['define %s {' % t]
for (key, value) in d.items():
tab.append(' %s %s' % (key, value))
tab.append('}\n')
return '\n'.join(tab)
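    # For illustration only (hypothetical values):
    #   get_cfg_bufer({'host_name': 'srv1', 'address': '10.0.0.1'}, 'host')
    # returns, dict ordering aside:
    #   define host {
    #       host_name srv1
    #       address 10.0.0.1
    #   }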
    # Write all properties/values of d for the host
    # to the database
def write_host_config_to_db(self, host, d):
table = None
# Maybe we directly insert/enable the hosts,
# or in the SkonfUI we want to go with an intermediate
# table to select/enable only some
if self.db_direct_insert:
table = self.db.hosts
else:
table = self.db.discovered_hosts
cur = table.find({'host_name': host})
exists = cur.count() > 0
if exists and not self.overwrite:
print("The host '%s' already exists in the database table %s" % (host, table))
return
# It can be the same check if db_direct_insert but whatever
if self.only_new_hosts:
            for t in [self.db.hosts, self.db.discovered_hosts]:
                r = t.find({'_id': host})
                if r.count() > 0:
                    print("This is not a new host on", t)
                    return
return
print("Saving in database", d)
d['_id'] = host
d['_discovery_state'] = 'discovered'
table.save(d)
print("saved")
del d['_id']
| 26,993
|
Python
|
.py
| 646
| 29.24613
| 101
| 0.528691
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,527
|
__init__.py
|
shinken-solutions_shinken/shinken/discovery/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
| 923
|
Python
|
.py
| 22
| 40.863636
| 77
| 0.758621
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,528
|
bottlecore.py
|
shinken-solutions_shinken/shinken/webui/bottlecore.py
|
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE.txt for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.10.dev'
__license__ = 'MIT'
import base64
import cgi
import email.utils
import functools
import hmac
import httplib
import imp
import itertools
import mimetypes
import os
import re
import subprocess
import sys
import tempfile
import thread
import threading
import time
import warnings
from Cookie import SimpleCookie
from tempfile import TemporaryFile
from traceback import format_exc
from urllib import urlencode, quote as urlquote
from urlparse import urljoin, SplitResult as UrlSplitResult
try:
from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
from UserDict import DictMixin
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
py3k = sys.version_info >= (3, 0, 0)
NCTextIOWrapper = None
if py3k: # pragma: no cover
json_loads = lambda s: json_lds(touni(s))
# See Request.POST
from io import BytesIO
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return str(x, enc, err) if isinstance(x, bytes) else str(x)
if sys.version_info < (3, 2, 0):
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
''' Garbage collecting an io.TextIOWrapper(buffer) instance closes
the wrapped buffer. This subclass keeps it open. '''
def close(self):
pass
else:
json_loads = json_lds
from StringIO import StringIO as BytesIO
bytes = str
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return x if isinstance(x, unicode) else unicode(str(x), enc, err)
def tob(data, enc='utf8'):
""" Convert anything to bytes """
return data.encode(enc) if isinstance(data, unicode) else bytes(data)
# Convert strings and unicode to native strings
if py3k:
tonat = touni
else:
tonat = tob
tonat.__doc__ = """ Convert anything to native strings """
# Backward compatibility
def depr(message, critical=False):
if critical:
raise DeprecationWarning(message)
warnings.warn(message, DeprecationWarning, stacklevel=3)
# Small helpers
def makelist(data):
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None:
return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage:
storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only:
raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
def cached_property(func):
''' A property that, if accessed, replaces itself with the computed
value. Subsequent accesses won't call the getter again. '''
return DictProperty('__dict__')(func)
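# For illustration only (hypothetical class, not part of this file): with
#   class Page(object):
#       @cached_property
#       def body(self): return expensive_render()
# the first access to page.body stores the result in page.__dict__, so
# later accesses skip the getter entirely.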
class lazy_attribute(object): # Does not need configuration -> lower-case name
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.reader, self.writer, self.default = name, reader, writer, default
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.headers.get(self.name)
return self.reader(value) if (value and self.reader) else (value or self.default)
def __set__(self, obj, value):
if self.writer:
value = self.writer(value)
obj.headers[self.name] = value
def __delete__(self, obj):
if self.name in obj.headers:
del obj.headers[self.name]
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
class HTTPResponse(BottleException):
""" Used to break execution and immediately finish the response """
def __init__(self, output='', status=200, header=None):
super(BottleException, self).__init__("HTTP Response %d" % status)
self.status = int(status)
self.output = output
self.headers = HeaderDict(header) if header else None
def apply(self, response):
if self.headers:
for key, value in self.headers.iterallitems():
response.headers[key] = value
response.status = self.status
class HTTPError(HTTPResponse):
""" Used to generate an error page """
def __init__(self, code=500, output='Unknown Error', exception=None,
traceback=None, header=None):
super(HTTPError, self).__init__(output, code, header)
self.exception = exception
self.traceback = traceback
def __repr__(self):
return template(ERROR_PAGE_TEMPLATE, e=self)
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been built """
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and an HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/:page`). By default, wildcards
consume characters up to the next slash (`/`). To change that, you may
add a regular expression pattern (e.g. `/wiki/:page#[a-z]+#`).
For performance reasons, static routes (rules without wildcards) are
checked first. Dynamic routes are searched in order. Try to avoid
ambiguous or overlapping rules.
The HTTP method string matches only on equality, with two exceptions:
* `GET` routes also match `HEAD` requests if there is no appropriate
`HEAD` route installed.
* `ANY` routes do match if there is no other suitable route installed.
An optional ``name`` parameter is used by :meth:`build` to identify
routes.
'''
default = '[^/]+'
@lazy_attribute
def syntax(cls):
return re.compile(r'(?<!\\):([a-zA-Z_][a-zA-Z_0-9]*)?(?:#(.*?)#)?')
def __init__(self):
self.routes = {} # A {rule: {method: target}} mapping
self.rules = [] # An ordered list of rules
self.named = {} # A name->(rule, build_info) mapping
self.static = {} # Cache for static routes: {path: {method: target}}
self.dynamic = [] # Cache for dynamic routes. See _compile()
def add(self, rule, method, target, name=None):
''' Add a new route or replace the target for an existing route. '''
if rule in self.routes:
self.routes[rule][method.upper()] = target
else:
self.routes[rule] = {method.upper(): target}
self.rules.append(rule)
if self.static or self.dynamic: # Clear precompiler cache.
self.static, self.dynamic = {}, {}
if name:
self.named[name] = (rule, None)
def build(self, _name, *anon, **args):
''' Return a string that matches a named route. Use keyword arguments
to fill out named wildcards. Remaining arguments are appended as a
query string. Raises RouteBuildError or KeyError.'''
if _name not in self.named:
raise RouteBuildError("No route with that name.", _name)
rule, pairs = self.named[_name]
if not pairs:
token = self.syntax.split(rule)
parts = [p.replace('\\:', ':') for p in token[::3]]
names = token[1::3]
if len(parts) > len(names):
names.append(None)
pairs = zip(parts, names)
self.named[_name] = (rule, pairs)
try:
anon = list(anon)
url = [s if k is None
else s + str(args.pop(k)) if k else s + str(anon.pop())
for s, k in pairs]
except IndexError:
msg = "Not enough arguments to fill out anonymous wildcards."
raise RouteBuildError(msg)
except KeyError as e:
raise RouteBuildError(*e.args)
if args:
url += ['?', urlencode(args)]
return ''.join(url)
def match(self, environ):
''' Return a (target, url_args) tuple or raise HTTPError(404/405). '''
targets, urlargs = self._match_path(environ)
if not targets:
raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
method = environ['REQUEST_METHOD'].upper()
if method in targets:
return targets[method], urlargs
if method == 'HEAD' and 'GET' in targets:
return targets['GET'], urlargs
if 'ANY' in targets:
return targets['ANY'], urlargs
allowed = [verb for verb in targets if verb != 'ANY']
if 'GET' in allowed and 'HEAD' not in allowed:
allowed.append('HEAD')
raise HTTPError(405, "Method not allowed.",
header=[('Allow', ",".join(allowed))])
def _match_path(self, environ):
''' Optimized PATH_INFO matcher. '''
path = environ['PATH_INFO'] or '/'
# Assume we are in a warm state. Search compiled rules first.
match = self.static.get(path)
if match:
return match, {}
for combined, rules in self.dynamic:
match = combined.match(path)
if not match:
continue
gpat, match = rules[match.lastindex - 1]
return match, gpat(path).groupdict() if gpat else {}
# Lazy-check if we are really in a warm state. If yes, stop here.
if self.static or self.dynamic or not self.routes:
return None, {}
# Cold state: We have not compiled any rules yet. Do so and try again.
if not environ.get('wsgi.run_once'):
self._compile()
return self._match_path(environ)
# For run_once (CGI) environments, don't compile. Just check one by one.
epath = path.replace(':', '\\:') # Turn path into its own static rule.
match = self.routes.get(epath) # This returns static rule only.
if match:
return match, {}
for rule in self.rules:
#: Skip static routes (every colon escaped) to reduce re.compile() calls.
if rule.count(':') == rule.count('\\:'):
continue
match = self._compile_pattern(rule).match(path)
if match:
return self.routes[rule], match.groupdict()
return None, {}
def _compile(self):
''' Prepare static and dynamic search structures. '''
self.static = {}
self.dynamic = []
def fpat_sub(m):
return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
for rule in self.rules:
target = self.routes[rule]
if not self.syntax.search(rule):
self.static[rule.replace('\\:', ':')] = target
continue
gpat = self._compile_pattern(rule)
fpat = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, gpat.pattern)
gpat = gpat.match if gpat.groupindex else None
try:
combined = '%s|(%s)' % (self.dynamic[-1][0].pattern, fpat)
self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
self.dynamic[-1][1].append((gpat, target))
except (AssertionError, IndexError) as e: # AssertionError: Too many groups
self.dynamic.append((re.compile('(^%s$)' % fpat),
[(gpat, target)]))
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
def _compile_pattern(self, rule):
''' Return a regular expression with named groups for each wildcard. '''
out = ''
for i, part in enumerate(self.syntax.split(rule)):
if i % 3 == 0:
out += re.escape(part.replace('\\:', ':'))
elif i % 3 == 1:
out += '(?P<%s>' % part if part else '(?:'
else:
out += '%s)' % (part or '[^/]+')
return re.compile('^%s$' % out)
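# Illustrative sketch of the Router API above (the rule string and the
# minimal fake environ are assumptions made for the example):
#     router = Router()
#     router.add('/hello/:name', 'GET', target='hello', name='hello')
#     target, args = router.match({'PATH_INFO': '/hello/world',
#                                  'REQUEST_METHOD': 'GET'})
#     # target == 'hello', args == {'name': 'world'}
#     router.build('hello', name='world')  # -> '/hello/world'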
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" WSGI application """
def __init__(self, catchall=True, autojson=True, config=None):
""" Create a new bottle instance.
You usually don't do that. Use `bottle.app.push()` instead.
"""
self.routes = [] # List of installed routes including metadata.
self.router = Router() # Maps requests to self.route indices.
self.ccache = {} # Cache for callbacks with plugins applied.
self.plugins = [] # List of installed plugins.
self.mounts = {}
self.error_handler = {}
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
self.config = config or {}
self.serve = True
# Default plugins
self.hooks = self.install(HooksPlugin())
if autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def mount(self, app, prefix, **options):
''' Mount an application to a specific URL prefix. The prefix is added
to SCRIPT_NAME and removed from PATH_INFO before the sub-application
is called.
:param app: an instance of :class:`Bottle`.
:param prefix: path prefix used as a mount-point.
All other parameters are passed to the underlying :meth:`route` call.
'''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
prefix = '/'.join(filter(None, prefix.split('/')))
if not prefix:
raise TypeError('Empty prefix. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(prefix):
raise TypeError('Conflict with existing mount: %s' % other)
path_depth = prefix.count('/') + 1
options.setdefault('method', 'ANY')
options.setdefault('skip', True)
self.mounts[prefix] = app
@self.route('/%s/:#.*#' % prefix, **options)
def mountpoint():
request.path_shift(path_depth)
return app._handle(request.environ)
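# Illustrative sketch of `mount` (both applications and the '/api' prefix
# are assumptions): requests below /api/ are routed to the sub-app with
# the prefix shifted from PATH_INFO to SCRIPT_NAME.
#     api, root = Bottle(), Bottle()
#     root.mount(api, '/api')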
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'):
plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin.
Pass a type object to remove all plugins that match that type.
Subclasses are not removed. Pass a string to remove all plugins with
a matching ``name`` attribute. Pass ``True`` to remove all plugins.
The list of affected plugins is returned. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'):
plugin.close()
if removed:
self.reset()
return removed
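# Illustrative sketch of install/uninstall (the decorator-style plugin
# below is an assumption; any callable or object with .apply() works):
#     def stopwatch(callback):
#         def wrapper(*a, **ka):
#             start = time.time()
#             rv = callback(*a, **ka)
#             response.headers['X-Exec-Time'] = str(time.time() - start)
#             return rv
#         return wrapper
#     app.install(stopwatch)    # applied to all routes of `app`
#     app.uninstall(stopwatch)  # removed again; route caches are reset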
def reset(self, id=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID is given, only that specific route is affected. '''
if id is None:
self.ccache.clear()
else:
self.ccache.pop(id, None)
if DEBUG:
for route in self.routes:
if route['id'] not in self.ccache:
self.ccache[route['id']] = self._build_callback(route)
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'):
plugin.close()
self.stopped = True
def match(self, environ):
""" (deprecated) Search for a matching route and return a
(callback, urlargs) tuple.
The first element is the associated route callback with plugins
applied. The second value is a dictionary with parameters extracted
from the URL. The :class:`Router` raises :exc:`HTTPError` (404/405)
on a non-match."""
depr("This method will change semantics in 0.10.")
return self._match(environ)
def _match(self, environ):
handle, args = self.router.match(environ)
environ['route.handle'] = handle # TODO move to router?
environ['route.url_args'] = args
try:
return self.ccache[handle], args
except KeyError:
config = self.routes[handle]
callback = self.ccache[handle] = self._build_callback(config)
return callback, args
def _build_callback(self, config):
''' Apply plugins to a route and return a new callable. '''
wrapped = config['callback']
plugins = self.plugins + config['apply']
skip = config['skip']
try:
for plugin in reversed(plugins):
if True in skip:
break
if plugin in skip or type(plugin) in skip:
continue
if getattr(plugin, 'name', True) in skip:
continue
if hasattr(plugin, 'apply'):
wrapped = plugin.apply(wrapped, config)
else:
wrapped = plugin(wrapped)
if not wrapped:
break
functools.update_wrapper(wrapped, config['callback'])
return wrapped
except RouteReset: # A plugin may have changed the config dict inplace.
return self._build_callback(config) # Apply all plugins again.
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or
a list of methods to listen to. (default: `GET`):param callback: An optional shortcut
to avoid the decorator syntax.
``route(..., callback=func)`` equals ``route(...)(func)``:param name: The name for
this route. (default: None):param apply: A decorator or plugin or a list of plugins.
These are applied to the route callback in addition to installed plugins.:param skip:
A list of plugins, plugin classes or names. Matching plugins are not installed to this
route.
``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path):
path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
cfg = dict(rule=rule, method=verb, callback=callback,
name=name, app=self, config=config,
apply=plugins, skip=skiplist)
self.routes.append(cfg)
cfg['id'] = self.routes.index(cfg)
self.router.add(rule, verb, cfg['id'], name=name)
if DEBUG:
self.ccache[cfg['id']] = self._build_callback(cfg)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. """
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
the result. :exc:`HTTPResponse` exceptions are caught and returned.
If :attr:`Bottle.catchall` is true, other exceptions are caught as
well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def _handle(self, environ):
if not self.serve:
depr("Bottle.serve will be removed in 0.10.")
return HTTPError(503, "Server stopped")
try:
callback, args = self._match(environ)
return callback(**args)
except HTTPResponse as r:
return r
except RouteReset: # Route reset requested by the callback or a plugin.
del self.ccache[environ['route.handle']]
return self._handle(environ) # Try again.
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as e:
if not self.catchall:
raise
stacktrace = format_exc(10)
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", e, stacktrace)
def _cast(self, out, request, response, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status, repr)(out)
if isinstance(out, HTTPResponse):
depr('Error handlers must not return :exc:`HTTPResponse`.') # 0.9
return self._cast(out, request, response)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output, request, response)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = out.next()
while not first:
first = out.next()
except StopIteration:
return self._cast('', request, response)
except HTTPResponse as e:
first = e
except Exception as e:
first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
or not self.catchall:
raise
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first, request, response)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return itertools.imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'
% type(first)), request, response)
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
out = self._cast(self._handle(environ), request, response)
# rfc2616 section 4.3
if response.status_code in (100, 101, 204, 304)\
or request.method == 'HEAD':
if hasattr(out, 'close'):
out.close()
out = []
start_response(response.status_line, list(response.iter_headers()))
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as e:
if not self.catchall:
raise
err = '<h1>Critical error while processing request: %s</h1>' \
% environ.get('PATH_INFO', '/')
if DEBUG:
err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e)
err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10)
environ['wsgi.errors'].write(err) # TODO: wsgi.error should not get html
start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
return [tob(err)]
def __call__(self, environ, start_response):
return self.wsgi(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(DictMixin):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only."""
#: Maximum size of memory buffer for :attr:`body` in bytes.
# SHINKEN: *1000
MEMFILE_MAX = 1024000000
def __init__(self, environ):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = environ
environ['bottle.request'] = self
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a dictionary. Signed cookies are NOT decoded.
Use :meth:`get_cookie` if you expect signed cookies. """
raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE', ''))
cookies = {}
for cookie in raw_dict.itervalues():
cookies[cookie.key] = cookie.value
return cookies
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
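# Illustrative sketch (the cookie name and secret are assumptions): a
# value stored with ``response.set_cookie('user', ..., secret='s3cret')``
# is read back with the same secret, else `default` is returned.
#     username = request.get_cookie('user', secret='s3cret')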
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`MultiDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
they should not be confused with the "URL wildcards" provided by the
:class:`Router`. '''
data = parse_qs(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = MultiDict()
for key, values in data.iteritems():
for value in values:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`MultiDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = MultiDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`MultiDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = MultiDict()
for key, value in self.query.iterallitems():
params[key] = value
for key, value in self.forms.iterallitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files.
"""
files = MultiDict()
for name, item in self.POST.iterallitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if self.environ.get('CONTENT_TYPE') == 'application/json' \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part:
break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`MultiDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = MultiDict()
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ:
safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This property returns an empty string, or a path with
leading and trailing slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME', '/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
''' The request body length as an integer. The client is responsible
for setting this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def is_xhr(self):
''' True if the request was triggered by an XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic:
return basic
ruser = self.environ.get('REMOTE_USER')
if ruser:
return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy:
return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del self.environ[key]
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
class LocalRequest(BaseRequest, threading.local):
''' A thread-local subclass of :class:`BaseRequest`. '''
def __init__(self):
pass
bind = BaseRequest.__init__
Request = LocalRequest
def _hkey(s):
return s.title().replace('_', '-')
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
#: Header blacklist for specific response codes
#: (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, **headers):
#: The HTTP status code as an integer (e.g. 404).
#: Do not change it directly, see :attr:`status`.
self.status_code = None
#: The HTTP status line as a string (e.g. "404 Not Found").
#: Do not change it directly, see :attr:`status`.
self.status_line = None
#: The response body as one of the supported data types.
self.body = body
self._cookies = None
self._headers = {'Content-Type': [self.default_content_type]}
self.status = status or self.default_status
if headers:
for name, value in headers.items():
self[name] = value
def copy(self):
''' Returns a copy of self. '''
copy = Response()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self.status_code = code
self.status_line = status or ('%d Unknown' % code)
status = property(lambda self: self.status_code, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_line` are updates accordingly. The return value is
always a numeric code. ''')
del _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
self.__dict__['headers'] = hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value, append=False):
''' Create a new response header, replacing any previously defined
headers with the same name. This equals ``response[name] = value``.
:param append: Do not delete previously defined headers. This can
result in two (or more) headers having the same name. '''
if append:
self._headers.setdefault(_hkey(name), []).append(str(value))
else:
self._headers[_hkey(name)] = [str(value)]
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
headers = self._headers.iteritems()
bad_headers = self.bad_headers.get(self.status_code)
if bad_headers:
headers = (h for h in headers if h[0] not in bad_headers)
for name, values in headers:
for value in values:
yield name, value
if self._cookies:
for c in self._cookies.values():
yield 'Set-Cookie', c.OutputString()
def wsgiheader(self):
depr('The wsgiheader method is deprecated. See headerlist.') # 0.10
return self.headerlist
@property
def headerlist(self):
''' WSGI conformant list of (header, value) tuples. '''
return list(self.iter_headers())
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
@property
def charset(self):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. """
depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
if not self._cookies:
self._cookies = SimpleCookie()
return self._cookies
def set_cookie(self, key, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param key: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: ``/``)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript from reading this
cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information at client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((key, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
self._cookies[key] = value
for k, v in options.iteritems():
self._cookies[key][k.replace('_', '-')] = v
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
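# Illustrative sketch of the cookie helpers above (names, values and the
# secret are assumptions; a non-string value requires `secret`):
#     response.set_cookie('user', {'id': 42}, secret='s3cret',
#                         max_age=3600, path='/')
#     response.delete_cookie('user', path='/')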
class LocalResponse(BaseResponse, threading.local):
''' A thread-local subclass of :class:`BaseResponse`. '''
bind = BaseResponse.__init__
Response = LocalResponse
###############################################################################
# Plugins ######################################################################
###############################################################################
class JSONPlugin(object):
name = 'json'
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, context):
dumps = self.json_dumps
if not dumps:
return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, dict):
response.content_type = 'application/json'
return dumps(rv)
return rv
return wrapper
class HooksPlugin(object):
name = 'hooks'
def __init__(self):
self.hooks = {'before_request': [], 'after_request': []}
self.app = None
def _empty(self):
return not (self.hooks['before_request'] or self.hooks['after_request'])
def setup(self, app):
self.app = app
def add(self, name, func):
''' Attach a callback to a hook. '''
if name not in self.hooks:
raise ValueError("Unknown hook name %s" % name)
was_empty = self._empty()
self.hooks[name].append(func)
if self.app and was_empty and not self._empty():
self.app.reset()
def remove(self, name, func):
''' Remove a callback from a hook. '''
if name not in self.hooks:
raise ValueError("Unknown hook name %s" % name)
was_empty = self._empty()
self.hooks[name].remove(func)
if self.app and not was_empty and self._empty():
self.app.reset()
def apply(self, callback, context):
if self._empty():
return callback
before_request = self.hooks['before_request']
after_request = self.hooks['after_request']
def wrapper(*a, **ka):
for hook in before_request:
hook()
rv = callback(*a, **ka)
for hook in after_request[::-1]:
hook()
return rv
return wrapper
class TypeFilterPlugin(object):
def __init__(self):
self.filter = []
self.app = None
def setup(self, app):
self.app = app
def add(self, ftype, func):
if not isinstance(ftype, type):
raise TypeError("Expected type object, got %s" % type(ftype))
self.filter = [(t, f) for (t, f) in self.filter if t != ftype]
self.filter.append((ftype, func))
if len(self.filter) == 1 and self.app:
self.app.reset()
def apply(self, callback, context):
filter = self.filter
if not filter:
return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
for testtype, filterfunc in filter:
if isinstance(rv, testtype):
rv = filterfunc(rv)
return rv
return wrapper
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
def apply(self, callback, context):
conf = context['config'].get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str) and 'template_opts' in context['config']:
depr('The `template_opts` parameter is deprecated.') # 0.9
return view(conf, **context['config']['template_opts'])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': '<virtual>', '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname:
return
packname, modname = fullname.rsplit('.', 1)
if packname != self.name:
return
return self
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
packname, modname = fullname.rsplit('.', 1)
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for k, v in dict(*a, **k).iteritems())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for (k, v) in self.dict.iteritems())
def iterallitems(self):
for key, values in self.dict.iteritems():
for value in values:
yield key, value
# 2to3 is not able to fix these automatically.
keys = iterkeys if py3k else lambda self: list(self.iterkeys())
values = itervalues if py3k else lambda self: list(self.itervalues())
items = iteritems if py3k else lambda self: list(self.iteritems())
allitems = iterallitems if py3k else lambda self: list(self.iterallitems())
def get(self, key, default=None, index=-1):
''' Return the current value for a key. The third `index` parameter
defaults to -1 (last value). '''
if key in self.dict or default is KeyError:
return self.dict[key][index]
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
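# Illustrative sketch of MultiDict (keys and values are assumptions):
#     md = MultiDict(a=1)
#     md['a'] = 2        # appends: plain access returns the newest value
#     md['a']            # -> 2
#     md.getall('a')     # -> [1, 2]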
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka:
self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in map(_hkey, names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a 'HTTP_' prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self):
return list(self)
def __len__(self):
return len(list(self))
def __contains__(self, key):
return self._ekey(key) in self.environ
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines'):
if hasattr(fp, attr):
setattr(self, attr, getattr(fp, attr))
def __iter__(self):
read, buff = self.fp.read, self.buffer_size
while True:
part = read(buff)
if not part:
break
yield part
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=303):
""" Aborts execution and causes a 303 redirect. """
location = urljoin(request.url, url)
raise HTTPResponse("", status=code, header=dict(Location=location))
def static_file(filename, root, mimetype='auto', download=False):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 401 or 404. Set Content-Type, Content-Encoding,
Content-Length and Last-Modified header. Obey If-Modified-Since header
and HEAD requests.
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
header = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if mimetype:
header['Content-Type'] = mimetype
if encoding:
header['Content-Encoding'] = encoding
elif mimetype:
header['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
header['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
header['Content-Length'] = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
header['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, header=header)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
return HTTPResponse(body, header=header)
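# Illustrative sketch of `static_file` in a route (the URL rule and the
# './static' root are assumptions; the regex wildcard allows slashes):
#     @route('/static/:filename#.*#')
#     def serve_static(filename):
#         return static_file(filename, root='./static')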
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
DEBUG = bool(mode)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass)
tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
# TODO: Add 2to3 save base64[encode/decode] functions.
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def _lscmp(a, b):
''' Compare two strings in a cryptographically safe way:
runtime is not affected by the length of a common prefix. '''
return not sum(0 if x == y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(key, msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(key, msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
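# Illustrative round trip for the cookie helpers above (the signature
# key is an assumption):
#     token = cookie_encode(('user', 42), 's3cret')
#     cookie_is_encoded(token)        # -> True
#     cookie_decode(token, 's3cret')  # -> ('user', 42)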
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/:x/:y'
c(x, y=5) -> '/c/:x' and '/c/:x/:y'
d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
"""
import inspect # Expensive module. Only import if necessary.
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0:
return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '':
pathlist = []
if scriptlist and scriptlist[0] == '':
scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist:
new_path_info += '/'
return new_script_name, new_path_info
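# Illustrative sketch with concrete values (the paths are assumptions):
#     path_shift('/a', '/b/c', shift=1)   # -> ('/a/b', '/c')
#     path_shift('/a/b', '/c', shift=-1)  # -> ('/a', '/b/c')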
# Decorators
# TODO: Replace default_app() with app()
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
def decorator(func):
def wrapper(**kargs):
for key, value in vkargs.iteritems():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(**kargs)
return wrapper
return decorator
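# Illustrative sketch of `validate` (the route and parameter are
# assumptions): the wildcard value is coerced to int, or a 403 is raised
# for a missing or malformed parameter.
#     @route('/object/:id')
#     @validate(id=int)
#     def get_object(id):
#         return 'object %d' % id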
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return HTTPError(401, text)
return func(*a, **ka)
return wrapper
return decorator
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
for name in '''route get post put delete error mount
hook install uninstall'''.split():
globals()[name] = make_default_app_wrapper(name)
url = make_default_app_wrapper('get_url')
del name
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **config):
self.options = config
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
kwargs = {'bindAddress': (self.host, self.port)}
kwargs.update(self.options) # allow to override bindAddress and others
flup.server.fcgi.WSGIServer(handler, **kwargs).run()
class FlupSCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.scgi
kwargs = {'bindAddress': (self.host, self.port)}
kwargs.update(self.options) # allow to override bindAddress and others
flup.server.scgi.WSGIServer(handler, **kwargs).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
print("Launching Swsgi backend")
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
# Shinken: add WSGIRefServerSelect
class WSGIRefServerSelect(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
# srv.serve_forever()
return srv
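# Usage sketch (illustrative): with this Shinken adapter, run() returns the
# wsgiref server instead of blocking, so the caller can drive the accept
# loop itself ('should_stop' is a hypothetical flag):
#
#     srv = run(app, server='wsgirefselect', host='0.0.0.0', port=7767)
#     while not should_stop:
#         srv.handle_request()  # serve one request per iteration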
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
print("Launching CherryPy backend")
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
try:
server.start()
finally:
server.stop()
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
print("Launching Paste backend")
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
print("WARNING: Auto-reloading does not work with Fapws3.")
print(" (Fapws3 breaks python thread support)")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `monkey` (default: True) fixes the stdlib to use greenthreads.
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
"""
def run(self, handler):
from gevent import wsgi as wsgi_fast, pywsgi, monkey
if self.options.get('monkey', True):
monkey.patch_all()
wsgi = wsgi_fast if self.options.get('fast') else pywsgi
wsgi.WSGIServer((self.host, self.port), handler).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config
handler.cfg = Config({'bind': "%s:%d" % (self.host, self.port), 'workers': 4})
arbiter = Arbiter(handler)
arbiter.run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. As requested in issue 63
https://github.com/defnull/bottle/issues/#issue/63 """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Screamingly fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [PasteServer, CherryPyServer, TwistedServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
# Shinken: add 'wsgirefselect': WSGIRefServerSelect,
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'flupscgi': FlupSCGIServer,
'wsgiref': WSGIRefServer,
'wsgirefselect': WSGIRefServerSelect,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'auto': AutoServer,
}
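# This mapping is how run() resolves a server adapter by name. A direct
# lookup works the same way (illustrative sketch):
#
#     adapter_cls = server_names.get('cherrypy')         # -> CherryPyServer
#     adapter = adapter_cls(host='127.0.0.1', port=8080)
#     # adapter.run(wsgi_app) would then block until the server stops.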
###############################################################################
# Application Control ##########################################################
###############################################################################
def _load(target, **vars):
""" Fetch something from a module. The exact behavior depends on the
target string:
If the target is a valid python import path (e.g. `package.module`),
the rightmost part is returned as a module object.
If the target contains a colon (e.g. `package.module:var`) the module
variable specified after the colon is returned.
If the part after the colon contains any non-alphanumeric characters
(e.g. `package.module:func(var)`) the result of the expression
is returned. The expression has access to keyword arguments supplied
to this function.
Example::
>>> _load('bottle')
<module 'bottle' from 'bottle.py'>
>>> _load('bottle:Bottle')
<class 'bottle.Bottle'>
>>> _load('bottle:cookie_encode(v, secret)', v='foo', secret='bar')
'!F+hN4dQxaDJ4QxxaZ+Z3jw==?gAJVA2Zvb3EBLg=='
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules:
__import__(module)
if not target:
return sys.modules[module]
if target.isalnum():
return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
vars[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), vars)
def load_app(target):
""" Load a bottle application based on a target string and return the
application object.
If the target is an import path (e.g. package.module), the application
stack is used to isolate the routes defined in that module.
If the target contains a colon (e.g. package.module:myapp) the
module variable specified after the colon is returned instead.
"""
tmp = app.push() # Create a new "default application"
rv = _load(target) # Import the target module
app.remove(tmp) # Remove the temporary added default application
return rv if isinstance(rv, Bottle) else tmp
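# Usage sketch ('mypackage.views' and 'myapp' are hypothetical names):
#
#     application = load_app('mypackage.views:myapp')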
# Shinken: add the return of the server
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, **kargs):
""" Start a server instance. This method blocks until the server terminates.:param app:
WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`):param server: Server adapter to use.
See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`):param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
all interfaces including the external one. (default: 127.0.0.1):param port: Server port
to bind to. Values below 1024 require root privileges.
(default: 8080):param reloader: Start auto-reloading server?
(default: False):param interval: Auto-reloader interval in seconds (default:
1):param quiet: Suppress output to stdout and stderr? (default: False):param options:
Options passed to the server adapter.
"""
# Shinken
res = None
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if isinstance(server, basestring):
server = server_names.get(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise RuntimeError("Server must be a subclass of ServerAdapter")
server.quiet = server.quiet or quiet
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print("Bottle server starting up (using %s)..." % repr(server))
print("Listening on http://%s:%d/" % (server.host, server.port))
print("Use Ctrl-C to quit.")
print("")
try:
if reloader:
interval = min(interval, 1)
if os.environ.get('BOTTLE_CHILD'):
_reloader_child(server, app, interval)
else:
_reloader_observer(server, app, interval)
else:
# Shinken
res = server.run(app)
except KeyboardInterrupt:
pass
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print("Shutting down...")
# Shinken
return res
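# Usage sketch (illustrative, assuming a module-level route):
#
#     @route('/hello')
#     def hello():
#         return 'Hello World!'
#
#     run(host='localhost', port=8080)  # blocks until Ctrl-C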
class FileCheckerThread(threading.Thread):
''' Thread that periodically checks for changed module files. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
# 1: lockfile too old; 2: lockfile missing
# 3: module file changed; 5: external exit
self.status = 0
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in sys.modules.values():
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'):
path = path[:-1]
if path and exists(path):
files[path] = mtime(path)
while not self.status:
for path, lmtime in files.iteritems():
if not exists(path) or mtime(path) > lmtime:
self.status = 3
if not exists(self.lockfile):
self.status = 2
elif mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 1
if not self.status:
time.sleep(self.interval)
if self.status != 5:
thread.interrupt_main()
def _reloader_child(server, app, interval):
''' Start the server and check for modified files in a background thread.
As soon as an update is detected, KeyboardInterrupt is thrown in
the main thread to exit the server loop. The process exits with status
code 3 to request a reload by the observer process. If the lockfile
is not modified within 2*interval seconds or is missing, we assume
that the observer process died and exit with status code 1 or 2.
'''
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
try:
bgcheck.start()
server.run(app)
except KeyboardInterrupt:
pass
bgcheck.status, status = 5, bgcheck.status
bgcheck.join() # bgcheck.status == 5 --> silent exit
if status:
sys.exit(status)
def _reloader_observer(server, app, interval):
''' Start a child process with identical commandline arguments and restart
it as long as it exits with status code 3. Also create a lockfile and
touch it (update mtime) every interval seconds.
'''
fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
try:
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile):
os.unlink(lockfile)
sys.exit(p.poll())
elif not server.quiet:
print("Reloading server...")
except KeyboardInterrupt:
pass
if os.path.exists(lockfile):
os.unlink(lockfile)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extentions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} # used in prepare()
defaults = {} # used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = map(os.path.abspath, lookup)
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name):
return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extentions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (*args)
or directly, as keywords (**kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix'
'instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters:
self.env.filters.update(filters)
if tests:
self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if fname:
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
''' Untested! '''
def prepare(self, **options):
from simpletal import simpleTAL
# TODO: add option to load METAL files during render
if self.source:
self.tpl = simpleTAL.compileHTMLTemplate(self.source)
else:
with open(self.filename, 'rb') as fp:
self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))
def render(self, *args, **kwargs):
from simpletal import simpleTALES
from StringIO import StringIO
for dictarg in args:
kwargs.update(dictarg)
# TODO: maybe reuse a context instead of always creating one
context = simpleTALES.Context()
for k, v in self.defaults.items():
context.addGlobal(k, v)
for k, v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
return output.getvalue()
class SimpleTemplate(BaseTemplate):
blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
'with', 'def', 'class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
@lazy_attribute
def re_pytokens(cls):
''' This matches comments and all kinds of quoted strings but does
NOT match comments (#...) within quoted strings. (trust me) '''
return re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def prepare(self, escape_func=cgi.escape, noescape=False):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code:
return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0] == '#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
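# Behaviour sketch (illustrative): '#' inside quoted strings survives,
# real comments are stripped.
#
#     >>> SimpleTemplate.split_comment("x = '#no comment'  # a comment")
#     "x = '#no comment'  "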
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
multiline = dedent = oneline = False
template = self.source if self.source else open(self.filename).read()
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'):
yield 'RAW', part[1:]
else:
yield 'CMD', part
else:
yield 'TXT', part
def flush(): # Flush the ptrbuffer
if not ptrbuffer:
return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT':
cline += repr(value)
elif token == 'RAW':
cline += '_str(%s)' % value
elif token == 'CMD':
cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = line if isinstance(line, unicode)\
else unicode(line, encoding=self.encoding)
if lineno <= 2:
m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
if m:
self.encoding = m.group(1)
if m:
line = line.replace('coding', 'coding (removed)')
if line.strip()[:2].count('%') == 1:
line = line.split('%', 1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush() # encoding (TODO: why?)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, kwargs)
def execute(self, _stdout, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
env = self.defaults.copy()
env.update({'_stdout': _stdout,
'_printlist': _stdout.extend,
'_include': self.subtemplate,
'_str': self._str,
'_escape': self._escape})
env.update(kwargs)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
subtpl = self.__class__(name=subtpl, lookup=self.lookup)
rargs['_base'] = _stdout[:] # copy stdout
del _stdout[:] # clear stdout
return subtpl.execute(_stdout, rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
for dictarg in args:
kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
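# Usage sketch (illustrative addition, not part of the original source):
#
#     >>> SimpleTemplate('Hello {{name}}!').render(name='World')
#     'Hello World!'
#
# {{...}} expressions are escaped with cgi.escape by default; prefix the
# expression with '!' (e.g. {{!raw_html}}) to skip escaping.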
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings:
TEMPLATES[tpl].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]:
kwargs.update(dictarg)
return TEMPLATES[tpl].render(kwargs)
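# Usage sketch (illustrative): an inline template string is detected by the
# presence of '{', '%', '$' or a newline; a bare name such as 'index' would
# be searched in TEMPLATE_PATH instead.
#
#     >>> template('Hello {{name}}!', name='World')
#     'Hello World!'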
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
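# Usage sketch ('hello_template' is a hypothetical template file expected
# somewhere in TEMPLATE_PATH):
#
#     @route('/hello/:name')
#     @view('hello_template')
#     def hello(name):
#         return dict(name=name)  # dict keys become template variables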
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v)) for (k, v) in HTTP_CODES.iteritems())
#: The default template used for error pages. Override with @error()
# SHINKEN MOD: change from bottle import DEBUG to from shinken.webui.bottle import DEBUG,...
ERROR_PAGE_TEMPLATE = """
%try:
%from shinken.webui.bottlecore import DEBUG, HTTP_CODES, request, touni
%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.output}}</pre>
%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%end
%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%end
</body>
</html>
%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%end
"""
#: A thread-safe instance of :class:`Request` representing the `current` request.
request = Request()
#: A thread-safe instance of :class:`Response` used to build the HTTP response.
response = Response()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect(__name__ + '.ext', 'bottle_%s').module
# ---- File: shinken/webui/__init__.py ----
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# ---- File: shinken/webui/bottlewebui.py ----
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE.txt for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.10.dev'
__license__ = 'MIT'
import base64
import cgi
import email.utils
import functools
import hmac
import httplib
import imp
import itertools
import mimetypes
import os
import re
import subprocess
import sys
import tempfile
import thread
import threading
import time
import warnings
from Cookie import SimpleCookie
from tempfile import TemporaryFile
from traceback import format_exc
from urllib import urlencode, quote as urlquote
from urlparse import urljoin, SplitResult as UrlSplitResult
try:
from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
from UserDict import DictMixin
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
py3k = sys.version_info >= (3, 0, 0)
NCTextIOWrapper = None
if py3k: # pragma: no cover
json_loads = lambda s: json_lds(touni(s))
# See Request.POST
from io import BytesIO
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return str(x, enc, err) if isinstance(x, bytes) else str(x)
if sys.version_info < (3, 2, 0):
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
''' Garbage collecting an io.TextIOWrapper(buffer) instance closes
the wrapped buffer. This subclass keeps it open. '''
def close(self):
pass
else:
json_loads = json_lds
from StringIO import StringIO as BytesIO
bytes = str
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return x if isinstance(x, unicode) else unicode(str(x), enc, err)
def tob(data, enc='utf8'):
""" Convert anything to bytes """
return data.encode(enc) if isinstance(data, unicode) else bytes(data)
# Convert strings and unicode to native strings
if py3k:
tonat = touni
else:
tonat = tob
tonat.__doc__ = """ Convert anything to native strings """
# Backward compatibility
def depr(message, critical=False):
if critical:
raise DeprecationWarning(message)
warnings.warn(message, DeprecationWarning, stacklevel=3)
# Small helpers
def makelist(data):
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None:
return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage:
storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only:
raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
def cached_property(func):
''' A property that, if accessed, replaces itself with the computed
value. Subsequent accesses won't call the getter again. '''
return DictProperty('__dict__')(func)
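# Usage sketch (illustrative):
#
#     class Example(object):
#         @cached_property
#         def answer(self):
#             print("computing...")  # runs only on the first access
#             return 42
#
#     e = Example()
#     e.answer  # computes and caches the value in e.__dict__
#     e.answer  # cached; the getter is not called again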
class lazy_attribute(object): # Does not need configuration -> lower-case name
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.reader, self.writer, self.default = name, reader, writer, default
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.headers.get(self.name)
return self.reader(value) if (value and self.reader) else (value or self.default)
def __set__(self, obj, value):
if self.writer:
value = self.writer(value)
obj.headers[self.name] = value
def __delete__(self, obj):
if self.name in obj.headers:
del obj.headers[self.name]
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
class HTTPResponse(BottleException):
""" Used to break execution and immediately finish the response """
def __init__(self, output='', status=200, header=None):
super(BottleException, self).__init__("HTTP Response %d" % status)
self.status = int(status)
self.output = output
self.headers = HeaderDict(header) if header else None
def apply(self, response):
if self.headers:
for key, value in self.headers.iterallitems():
response.headers[key] = value
response.status = self.status
class HTTPError(HTTPResponse):
""" Used to generate an error page """
def __init__(self, code=500, output='Unknown Error', exception=None,
traceback=None, header=None):
super(HTTPError, self).__init__(output, code, header)
self.exception = exception
self.traceback = traceback
def __repr__(self):
return template(ERROR_PAGE_TEMPLATE, e=self)
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been built """
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/:page`). By default, wildcards
consume characters up to the next slash (`/`). To change that, you may
add a regular expression pattern (e.g. `/wiki/:page#[a-z]+#`).
For performance reasons, static routes (rules without wildcards) are
checked first. Dynamic routes are searched in order. Try to avoid
ambiguous or overlapping rules.
The HTTP method string matches only on equality, with two exceptions:
* `GET` routes also match `HEAD` requests if there is no appropriate
`HEAD` route installed.
* `ANY` routes do match if there is no other suitable route installed.
An optional ``name`` parameter is used by :meth:`build` to identify
routes.
'''
default = '[^/]+'
@lazy_attribute
def syntax(cls):
return re.compile(r'(?<!\\):([a-zA-Z_][a-zA-Z_0-9]*)?(?:#(.*?)#)?')
def __init__(self):
self.routes = {} # A {rule: {method: target}} mapping
self.rules = [] # An ordered list of rules
self.named = {} # A name->(rule, build_info) mapping
self.static = {} # Cache for static routes: {path: {method: target}}
self.dynamic = [] # Cache for dynamic routes. See _compile()
def add(self, rule, method, target, name=None):
''' Add a new route or replace the target for an existing route. '''
if rule in self.routes:
self.routes[rule][method.upper()] = target
else:
self.routes[rule] = {method.upper(): target}
self.rules.append(rule)
if self.static or self.dynamic: # Clear precompiler cache.
self.static, self.dynamic = {}, {}
if name:
self.named[name] = (rule, None)
def build(self, _name, *anon, **args):
''' Return a string that matches a named route. Use keyword arguments
to fill out named wildcards. Remaining arguments are appended as a
query string. Raises RouteBuildError or KeyError.'''
if _name not in self.named:
raise RouteBuildError("No route with that name.", _name)
rule, pairs = self.named[_name]
if not pairs:
token = self.syntax.split(rule)
parts = [p.replace('\\:', ':') for p in token[::3]]
names = token[1::3]
if len(parts) > len(names):
names.append(None)
pairs = zip(parts, names)
self.named[_name] = (rule, pairs)
try:
anon = list(anon)
url = [s if k is None
else s + str(args.pop(k)) if k else s + str(anon.pop())
for s, k in pairs]
except IndexError:
msg = "Not enough arguments to fill out anonymous wildcards."
raise RouteBuildError(msg)
except KeyError as e:
raise RouteBuildError(*e.args)
if args:
url += ['?', urlencode(args)]
return ''.join(url)
def match(self, environ):
''' Return a (target, url_args) tuple or raise HTTPError(404/405). '''
targets, urlargs = self._match_path(environ)
if not targets:
raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
method = environ['REQUEST_METHOD'].upper()
if method in targets:
return targets[method], urlargs
if method == 'HEAD' and 'GET' in targets:
return targets['GET'], urlargs
if 'ANY' in targets:
return targets['ANY'], urlargs
allowed = [verb for verb in targets if verb != 'ANY']
if 'GET' in allowed and 'HEAD' not in allowed:
allowed.append('HEAD')
raise HTTPError(405, "Method not allowed.",
header=[('Allow', ",".join(allowed))])
def _match_path(self, environ):
''' Optimized PATH_INFO matcher. '''
path = environ['PATH_INFO'] or '/'
# Assume we are in a warm state. Search compiled rules first.
match = self.static.get(path)
if match:
return match, {}
for combined, rules in self.dynamic:
match = combined.match(path)
if not match:
continue
gpat, match = rules[match.lastindex - 1]
return match, gpat(path).groupdict() if gpat else {}
# Lazy-check if we are really in a warm state. If yes, stop here.
if self.static or self.dynamic or not self.routes:
return None, {}
# Cold state: We have not compiled any rules yet. Do so and try again.
if not environ.get('wsgi.run_once'):
self._compile()
return self._match_path(environ)
# For run_once (CGI) environments, don't compile. Just check one by one.
epath = path.replace(':', '\\:') # Turn path into its own static rule.
match = self.routes.get(epath) # This returns static rule only.
if match:
return match, {}
for rule in self.rules:
#: Skip static routes to reduce re.compile() calls.
if rule.count(':') < rule.count('\\:'):
continue
match = self._compile_pattern(rule).match(path)
if match:
return self.routes[rule], match.groupdict()
return None, {}
def _compile(self):
''' Prepare static and dynamic search structures. '''
self.static = {}
self.dynamic = []
def fpat_sub(m):
return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
for rule in self.rules:
target = self.routes[rule]
if not self.syntax.search(rule):
self.static[rule.replace('\\:', ':')] = target
continue
gpat = self._compile_pattern(rule)
fpat = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, gpat.pattern)
gpat = gpat.match if gpat.groupindex else None
try:
combined = '%s|(%s)' % (self.dynamic[-1][0].pattern, fpat)
self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
self.dynamic[-1][1].append((gpat, target))
except (AssertionError, IndexError) as e: # AssertionError: Too many groups
self.dynamic.append((re.compile('(^%s$)' % fpat),
[(gpat, target)]))
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
def _compile_pattern(self, rule):
''' Return a regular expression with named groups for each wildcard. '''
out = ''
for i, part in enumerate(self.syntax.split(rule)):
if i % 3 == 0:
out += re.escape(part.replace('\\:', ':'))
elif i % 3 == 1:
out += '(?P<%s>' % part if part else '(?:'
else:
out += '%s)' % (part or '[^/]+')
return re.compile('^%s$' % out)
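# Usage sketch (illustrative, standalone use of the Router):
#
#     router = Router()
#     router.add('/wiki/:page', 'GET', 'wiki-target')
#     target, args = router.match({'PATH_INFO': '/wiki/Home',
#                                  'REQUEST_METHOD': 'GET'})
#     # -> target == 'wiki-target', args == {'page': 'Home'}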
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" WSGI application """
def __init__(self, catchall=True, autojson=True, config=None):
""" Create a new bottle instance.
You usually don't do that. Use `bottle.app.push()` instead.
"""
self.routes = [] # List of installed routes including metadata.
self.router = Router() # Maps requests to self.route indices.
self.ccache = {} # Cache for callbacks with plugins applied.
self.plugins = [] # List of installed plugins.
self.mounts = {}
self.error_handler = {}
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
self.config = config or {}
self.serve = True
# Default plugins
self.hooks = self.install(HooksPlugin())
if autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def mount(self, app, prefix, **options):
''' Mount an application to a specific URL prefix. The prefix is added
to SCRIPT_NAME and removed from PATH_INFO before the sub-application
is called.

:param app: an instance of :class:`Bottle`.
:param prefix: path prefix used as a mount-point.

All other parameters are passed to the underlying :meth:`route` call.
'''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
prefix = '/'.join(filter(None, prefix.split('/')))
if not prefix:
raise TypeError('Empty prefix. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(prefix):
raise TypeError('Conflict with existing mount: %s' % other)
path_depth = prefix.count('/') + 1
options.setdefault('method', 'ANY')
options.setdefault('skip', True)
self.mounts[prefix] = app
@self.route('/%s/:#.*#' % prefix, **options)
def mountpoint():
request.path_shift(path_depth)
return app._handle(request.environ)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'):
plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin.
Pass a type object to remove all plugins that match that type.
Subclasses are not removed. Pass a string to remove all plugins with
a matching ``name`` attribute. Pass ``True`` to remove all plugins.
The list of affected plugins is returned. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'):
plugin.close()
if removed:
self.reset()
return removed
def reset(self, id=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID is given, only that specific route is affected. '''
if id is None:
self.ccache.clear()
else:
self.ccache.pop(id, None)
if DEBUG:
for route in self.routes:
if route['id'] not in self.ccache:
self.ccache[route['id']] = self._build_callback(route)
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'):
plugin.close()
self.stopped = True
def match(self, environ):
""" (deprecated) Search for a matching route and return a
(callback, urlargs) tuple.
The first element is the associated route callback with plugins
applied. The second value is a dictionary with parameters extracted
from the URL. The :class:`Router` raises :exc:`HTTPError` (404/405)
on a non-match."""
depr("This method will change semantics in 0.10.")
return self._match(environ)
def _match(self, environ):
handle, args = self.router.match(environ)
environ['route.handle'] = handle # TODO move to router?
environ['route.url_args'] = args
try:
return self.ccache[handle], args
except KeyError:
config = self.routes[handle]
callback = self.ccache[handle] = self._build_callback(config)
return callback, args
def _build_callback(self, config):
''' Apply plugins to a route and return a new callable. '''
wrapped = config['callback']
plugins = self.plugins + config['apply']
skip = config['skip']
try:
for plugin in reversed(plugins):
if True in skip:
break
if plugin in skip or type(plugin) in skip:
continue
if getattr(plugin, 'name', True) in skip:
continue
if hasattr(plugin, 'apply'):
wrapped = plugin.apply(wrapped, config)
else:
wrapped = plugin(wrapped)
if not wrapped:
break
functools.update_wrapper(wrapped, config['callback'])
return wrapped
except RouteReset: # A plugin may have changed the config dict inplace.
return self._build_callback(config) # Apply all plugins again.
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.

:param path: Request path or a list of paths to listen to. If no
    path is specified, it is automatically generated from the
    signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
    methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
    syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
    applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
    plugins are not installed to this route. ``True`` skips all.

Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path):
path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
cfg = dict(rule=rule, method=verb, callback=callback,
name=name, app=self, config=config,
apply=plugins, skip=skiplist)
self.routes.append(cfg)
cfg['id'] = self.routes.index(cfg)
self.router.add(rule, verb, cfg['id'], name=name)
if DEBUG:
self.ccache[cfg['id']] = self._build_callback(cfg)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. """
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
the result. :exc:`HTTPResponse` exceptions are caught and returned.
If :attr:`Bottle.catchall` is true, other exceptions are caught as
well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def _handle(self, environ):
if not self.serve:
depr("Bottle.serve will be removed in 0.10.")
return HTTPError(503, "Server stopped")
try:
callback, args = self._match(environ)
return callback(**args)
except HTTPResponse as r:
return r
except RouteReset: # Route reset requested by the callback or a plugin.
del self.ccache[environ['route.handle']]
return self._handle(environ) # Try again.
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as e:
if not self.catchall:
raise
stacktrace = format_exc(10)
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", e, stacktrace)
def _cast(self, out, request, response, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status, repr)(out)
if isinstance(out, HTTPResponse):
depr('Error handlers must not return :exc:`HTTPResponse`.') # 0.9
return self._cast(out, request, response)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output, request, response)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = out.next()
while not first:
first = out.next()
except StopIteration:
return self._cast('', request, response)
except HTTPResponse as e:
first = e
except Exception as e:
first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
or not self.catchall:
raise
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first, request, response)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return itertools.imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'
% type(first)), request, response)
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
environ['bottle.app'] = self
if 'HTTP_X_FORWARDED_PROTO' in environ:
environ['wsgi.url_scheme'] = environ['HTTP_X_FORWARDED_PROTO']
request.bind(environ)
response.bind()
out = self._cast(self._handle(environ), request, response)
# rfc2616 section 4.3
if response.status_code in (100, 101, 204, 304)\
or request.method == 'HEAD':
if hasattr(out, 'close'):
out.close()
out = []
start_response(response.status_line, list(response.iter_headers()))
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as e:
if not self.catchall:
raise
err = '<h1>Critical error while processing request: %s</h1>' \
% environ.get('PATH_INFO', '/')
if DEBUG:
err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e)
err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10)
environ['wsgi.errors'].write(err) # TODO: wsgi.error should not get html
start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
return [tob(err)]
def __call__(self, environ, start_response):
return self.wsgi(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(DictMixin):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only."""
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = environ
environ['bottle.request'] = self
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a dictionary. Signed cookies are NOT decoded.
Use :meth:`get_cookie` if you expect signed cookies. """
raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE', ''))
cookies = {}
for cookie in raw_dict.itervalues():
cookies[cookie.key] = cookie.value
return cookies
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
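    # Illustrative sketch (comment only, so the module is unchanged): reading
    # a signed cookie from the thread-local `request` inside a hypothetical
    # route handler. The cookie name and secret are made up.
    #
    #     @route('/restricted')
    #     def restricted():
    #         account = request.get_cookie('account', secret='my-secret')
    #         if not account:
    #             return 'Access denied'
    #         return 'Hello %s!' % account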
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
        ''' The :attr:`query_string` parsed into a :class:`MultiDict`. These
            values are sometimes called "URL arguments" or "GET parameters";
            they should not be confused with "URL wildcards", which are
            provided by the :class:`Router`. '''
data = parse_qs(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = MultiDict()
for key, values in data.iteritems():
for value in values:
get[key] = value
return get
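    # Sketch: for a request to ``/search?q=bottle&page=2`` the property above
    # behaves like this (values are always strings):
    #
    #     request.query['q']              # -> 'bottle'
    #     request.query.get('page', '1')  # -> '2'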
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`MultiDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = MultiDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`MultiDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = MultiDict()
for key, value in self.query.iterallitems():
params[key] = value
for key, value in self.forms.iterallitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
            filename
                The client-side filename, if specified; otherwise None. This
                is *not* the name of the temporary file the data is stored in.
            file
                The file(-like) object from which you can read the data.
            value
                The value as a *string*; for file uploads, this transparently
                reads the whole file every time you request the value. Do not
                do this on big files.
"""
files = MultiDict()
for name, item in self.POST.iterallitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if self.environ.get('CONTENT_TYPE') == 'application/json' \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part:
break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`MultiDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = MultiDict()
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ:
safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This property returns '/' if no script name is set, or
            the script path with leading and trailing slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
        ''' Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.

            :param shift: The number of path segments to shift. May be
                          negative to change the shift direction. (default: 1)
        '''
script = self.environ.get('SCRIPT_NAME', '/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
        ''' The request body length as an integer. The client is responsible
            for setting this header. Otherwise, the real length of the body
            is unknown and -1 is returned. In this case, :attr:`body` will be
            empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic:
return basic
ruser = self.environ.get('REMOTE_USER')
if ruser:
return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy:
return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del(self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
class LocalRequest(BaseRequest, threading.local):
''' A thread-local subclass of :class:`BaseRequest`. '''
def __init__(self):
pass
bind = BaseRequest.__init__
Request = LocalRequest
def _hkey(s):
return s.title().replace('_', '-')
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
#: Header blacklist for specific response codes
#: (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, **headers):
#: The HTTP status code as an integer (e.g. 404).
#: Do not change it directly, see :attr:`status`.
self.status_code = None
#: The HTTP status line as a string (e.g. "404 Not Found").
#: Do not change it directly, see :attr:`status`.
self.status_line = None
#: The response body as one of the supported data types.
self.body = body
self._cookies = None
self._headers = {'Content-Type': [self.default_content_type]}
self.status = status or self.default_status
if headers:
for name, value in headers.items():
self[name] = value
def copy(self):
''' Returns a copy of self. '''
copy = Response()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self.status_code = code
self.status_line = status or ('%d Unknown' % code)
status = property(lambda self: self.status_code, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_line` are updates accordingly. The return value is
always a numeric code. ''')
del _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
self.__dict__['headers'] = hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value, append=False):
        ''' Create a new response header, replacing any previously defined
            headers with the same name. This equals ``response[name] = value``.

            :param append: Do not delete previously defined headers. This can
                           result in two (or more) headers having the same
                           name. '''
if append:
self._headers.setdefault(_hkey(name), []).append(str(value))
else:
self._headers[_hkey(name)] = [str(value)]
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
headers = self._headers.iteritems()
bad_headers = self.bad_headers.get(self.status_code)
if bad_headers:
headers = (h for h in headers if h[0] not in bad_headers)
for name, values in headers:
for value in values:
yield name, value
if self._cookies:
for c in self._cookies.values():
yield 'Set-Cookie', c.OutputString()
def wsgiheader(self):
depr('The wsgiheader method is deprecated. See headerlist.') # 0.10
return self.headerlist
@property
def headerlist(self):
''' WSGI conform list of (header, value) tuples. '''
return list(self.iter_headers())
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
@property
def charset(self):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. """
depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
if not self._cookies:
self._cookies = SimpleCookie()
return self._cookies
def set_cookie(self, key, value, secret=None, **options):
        ''' Create a new cookie or replace an old one. If the `secret`
            parameter is set, create a `Signed Cookie` (described below).

            :param key: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.

            Additionally, this method accepts all RFC 2109 attributes that
            are supported by :class:`cookie.Morsel`, including:

            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp.
                            (default: None)
            :param domain: the domain that is allowed to read the cookie.
                           (default: current domain)
            :param path: limits the cookie to a given path (default: ``/``)
            :param secure: limit the cookie to HTTPS connections
                           (default: off).
            :param httponly: prevents client-side javascript from reading
                             this cookie (default: off, requires Python 2.6
                             or newer).

            If neither `expires` nor `max_age` is set (default), the cookie
            will expire at the end of the browser session (as soon as the
            browser window is closed).

            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind
            that cookies are limited to 4kb in most browsers.

            Warning: Signed cookies are not encrypted (the client can still
            see the content) and not copy-protected (the client can restore
            an old cookie). The main intention is to make pickling and
            unpickling safe, not to store secret information at the client
            side.
        '''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((key, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
self._cookies[key] = value
for k, v in options.iteritems():
self._cookies[key][k.replace('_', '-')] = v
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
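    # Illustrative sketch (hypothetical handlers): setting and deleting a
    # cookie on the thread-local `response` object.
    #
    #     @route('/login')
    #     def login():
    #         response.set_cookie('account', 'alice', secret='my-secret',
    #                             max_age=3600, path='/')
    #         return 'Logged in'
    #
    #     @route('/logout')
    #     def logout():
    #         response.delete_cookie('account', path='/')
    #         return 'Logged out'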
class LocalResponse(BaseResponse, threading.local):
''' A thread-local subclass of :class:`BaseResponse`. '''
bind = BaseResponse.__init__
Response = LocalResponse
###############################################################################
# Plugins ######################################################################
###############################################################################
class JSONPlugin(object):
name = 'json'
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, context):
dumps = self.json_dumps
if not dumps:
return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, dict):
response.content_type = 'application/json'
return dumps(rv)
return rv
return wrapper
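# Sketch of what JSONPlugin enables (comment only; the route is made up):
# a callback that returns a dict is serialized to JSON automatically
# wherever this plugin is installed.
#
#     @route('/api/status')
#     def status():
#         return {'state': 'ok', 'uptime': 42}   # sent as application/json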
class HooksPlugin(object):
name = 'hooks'
def __init__(self):
self.hooks = {'before_request': [], 'after_request': []}
self.app = None
def _empty(self):
return not (self.hooks['before_request'] or self.hooks['after_request'])
def setup(self, app):
self.app = app
def add(self, name, func):
''' Attach a callback to a hook. '''
if name not in self.hooks:
raise ValueError("Unknown hook name %s" % name)
was_empty = self._empty()
self.hooks[name].append(func)
if self.app and was_empty and not self._empty():
self.app.reset()
def remove(self, name, func):
''' Remove a callback from a hook. '''
if name not in self.hooks:
raise ValueError("Unknown hook name %s" % name)
was_empty = self._empty()
self.hooks[name].remove(func)
if self.app and not was_empty and self._empty():
self.app.reset()
def apply(self, callback, context):
if self._empty():
return callback
before_request = self.hooks['before_request']
after_request = self.hooks['after_request']
def wrapper(*a, **ka):
for hook in before_request:
hook()
rv = callback(*a, **ka)
for hook in after_request[::-1]:
hook()
return rv
return wrapper
class TypeFilterPlugin(object):
def __init__(self):
self.filter = []
self.app = None
def setup(self, app):
self.app = app
def add(self, ftype, func):
if not isinstance(ftype, type):
raise TypeError("Expected type object, got %s" % type(ftype))
self.filter = [(t, f) for (t, f) in self.filter if t != ftype]
self.filter.append((ftype, func))
if len(self.filter) == 1 and self.app:
self.app.reset()
def apply(self, callback, context):
filter = self.filter
if not filter:
return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
for testtype, filterfunc in filter:
if isinstance(rv, testtype):
rv = filterfunc(rv)
return rv
return wrapper
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
def apply(self, callback, context):
conf = context['config'].get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str) and 'template_opts' in context['config']:
depr('The `template_opts` parameter is deprecated.') # 0.9
return view(conf, **context['config']['template_opts'])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': '<virtual>', '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname:
return
packname, modname = fullname.rsplit('.', 1)
if packname != self.name:
return
return self
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
packname, modname = fullname.rsplit('.', 1)
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for k, v in dict(*a, **k).iteritems())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for (k, v) in self.dict.iteritems())
def iterallitems(self):
for key, values in self.dict.iteritems():
for value in values:
yield key, value
# 2to3 is not able to fix these automatically.
keys = iterkeys if py3k else lambda self: list(self.iterkeys())
values = itervalues if py3k else lambda self: list(self.itervalues())
items = iteritems if py3k else lambda self: list(self.iteritems())
allitems = iterallitems if py3k else lambda self: list(self.iterallitems())
def get(self, key, default=None, index=-1):
        ''' Return the most recent value for a key. The `index` parameter
            selects another value from the list (default: -1, the last one
            added). If `default` is the :exc:`KeyError` class, a missing key
            raises :exc:`KeyError` instead of returning a default. '''
if key in self.dict or default is KeyError:
return self.dict[key][index]
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
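# Illustrative MultiDict semantics (comment only):
#
#     md = MultiDict(a=1)
#     md['a'] = 2        # __setitem__ appends instead of replacing
#     md['a']            # -> 2 (plain access returns the newest value)
#     md.getall('a')     # -> [1, 2]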
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka:
self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in map(_hkey, names):
if name in self.dict:
del self.dict[name]
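# Illustrative HeaderDict semantics (comment only): keys are matched
# case-insensitively and plain assignment replaces old values.
#
#     hd = HeaderDict()
#     hd['content-type'] = 'text/plain'
#     hd['Content-Type']              # -> 'text/plain'
#     hd.append('Set-Cookie', 'a=1')  # append() keeps multiple values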
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a 'HTTP_' prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self):
return list(self)
def __len__(self):
return len(list(self))
def __contains__(self, key):
return self._ekey(key) in self.environ
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines'):
if hasattr(fp, attr):
setattr(self, attr, getattr(fp, attr))
def __iter__(self):
read, buff = self.fp.read, self.buffer_size
while True:
part = read(buff)
if not part:
break
yield part
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=303):
""" Aborts execution and causes a 303 redirect. """
location = urljoin(request.url, url)
raise HTTPResponse("", status=code, header=dict(Location=location))
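# Sketch (hypothetical handlers): both helpers raise, so nothing after
# them is executed.
#
#     @route('/old')
#     def old():
#         redirect('/new')                    # raises HTTPResponse (303)
#
#     @route('/admin')
#     def admin():
#         abort(401, 'Sorry, access denied.') # raises HTTPError (401)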
def static_file(filename, root, mimetype='auto', download=False):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 401 or 404. Set Content-Type, Content-Encoding,
Content-Length and Last-Modified header. Obey If-Modified-Since header
and HEAD requests.
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
header = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if mimetype:
header['Content-Type'] = mimetype
if encoding:
header['Content-Encoding'] = encoding
elif mimetype:
header['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
header['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
header['Content-Length'] = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
header['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, header=header)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
return HTTPResponse(body, header=header)
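# Sketch (hypothetical route and root path): serving files with
# static_file(); the `root` argument confines access to that directory.
#
#     @route('/static/:filename')
#     def serve(filename):
#         return static_file(filename, root='/var/www/static')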
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
DEBUG = bool(mode)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and
return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
# TODO: Add 2to3 save base64[encode/decode] functions.
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
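# Sketch: 'bmFtZTpwYXNz' is base64('name:pass'), so
# parse_auth('Basic bmFtZTpwYXNz') returns ('name', 'pass');
# malformed or non-basic headers yield None.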
def _lscmp(a, b):
    ''' Compares two strings in a cryptographically safe way:
        Runtime is not affected by the length of a common prefix. '''
return not sum(0 if x == y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(key, msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(key, msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
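# Illustrative round trip for the three helpers above (comment only):
#
#     raw = cookie_encode(('user', 'alice'), 'my-secret')
#     cookie_is_encoded(raw)               # -> True
#     cookie_decode(raw, 'my-secret')      # -> ('user', 'alice')
#     cookie_decode(raw, 'wrong-secret')   # -> None (signature mismatch)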
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/:x/:y'
c(x, y=5) -> '/c/:x' and '/c/:x/:y'
d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
"""
import inspect # Expensive module. Only import if necessary.
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative
                      to change the shift direction. (default: 1)
        :return: The modified paths.
    '''
if shift == 0:
return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '':
pathlist = []
if scriptlist and scriptlist[0] == '':
scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist:
new_path_info += '/'
return new_script_name, new_path_info
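# Illustrative shifts (comment only):
#
#     path_shift('/a', '/b/c', shift=1)    # -> ('/a/b', '/c')
#     path_shift('/a/b', '/c', shift=-1)   # -> ('/a', '/b/c')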
# Decorators
# TODO: Replace default_app() with app()
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
def decorator(func):
def wrapper(**kargs):
for key, value in vkargs.iteritems():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(**kargs)
return wrapper
return decorator
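# Sketch (hypothetical route): coercing wildcard parameters with validate().
#
#     @route('/add/:a/:b')
#     @validate(a=int, b=int)
#     def add(a, b):
#         return str(a + b)   # a and b arrive as ints, or a 403 is raised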
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return HTTPError(401, text)
return func(*a, **ka)
return wrapper
return decorator
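# Sketch (hypothetical check function): protecting a route with basic auth.
#
#     def check(user, password):
#         return user == 'admin' and password == 's3cret'
#
#     @route('/private')
#     @auth_basic(check)
#     def private():
#         return 'For your eyes only'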
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
for name in '''route get post put delete error mount
hook install uninstall'''.split():
globals()[name] = make_default_app_wrapper(name)
url = make_default_app_wrapper('get_url')
del name
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **config):
self.options = config
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
kwargs = {'bindAddress': (self.host, self.port)}
kwargs.update(self.options) # allow to override bindAddress and others
flup.server.fcgi.WSGIServer(handler, **kwargs).run()
class FlupSCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.scgi
kwargs = {'bindAddress': (self.host, self.port)}
kwargs.update(self.options) # allow to override bindAddress and others
flup.server.scgi.WSGIServer(handler, **kwargs).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
print("Launching Swsgi backend")
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
# Shinken: add WSGIRefServerSelect
class WSGIRefServerSelect(ServerAdapter):
def run(self, handler): # pragma: no cover
print("Call the Select version")
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
# srv.serve_forever()
return srv
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
print("Launching CherryPy backend")
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
try:
server.start()
finally:
server.stop()
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
print("Launching Paste backend")
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
print("WARNING: Auto-reloading does not work with Fapws3.")
print(" (Fapws3 breaks python thread support)")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `monkey` (default: True) fixes the stdlib to use greenthreads.
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
"""
def run(self, handler):
from gevent import wsgi as wsgi_fast, pywsgi, monkey
if self.options.get('monkey', True):
monkey.patch_all()
wsgi = wsgi_fast if self.options.get('fast') else pywsgi
wsgi.WSGIServer((self.host, self.port), handler).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config
handler.cfg = Config({'bind': "%s:%d" % (self.host, self.port), 'workers': 4})
arbiter = Arbiter(handler)
arbiter.run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. As requested in issue 63
https://github.com/defnull/bottle/issues/#issue/63 """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Screamingly fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [PasteServer, CherryPyServer, TwistedServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
# Shinken: add 'wsgirefselect': WSGIRefServerSelect,
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'flupscgi': FlupSCGIServer,
'wsgiref': WSGIRefServer,
'wsgirefselect': WSGIRefServerSelect,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def _load(target, **vars):
""" Fetch something from a module. The exact behavior depends on the
target string:
If the target is a valid python import path (e.g. `package.module`),
the rightmost part is returned as a module object.
If the target contains a colon (e.g. `package.module:var`) the module
variable specified after the colon is returned.
If the part after the colon contains any non-alphanumeric characters
(e.g. `package.module:func(var)`) the result of the expression
is returned. The expression has access to keyword arguments supplied
to this function.
Example::
>>> _load('bottle')
<module 'bottle' from 'bottle.py'>
>>> _load('bottle:Bottle')
<class 'bottle.Bottle'>
>>> _load('bottle:cookie_encode(v, secret)', v='foo', secret='bar')
'!F+hN4dQxaDJ4QxxaZ+Z3jw==?gAJVA2Zvb3EBLg=='
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules:
__import__(module)
if not target:
return sys.modules[module]
if target.isalnum():
return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
vars[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), vars)
def load_app(target):
""" Load a bottle application based on a target string and return the
application object.
If the target is an import path (e.g. package.module), the application
stack is used to isolate the routes defined in that module.
If the target contains a colon (e.g. package.module:myapp) the
module variable specified after the colon is returned instead.
"""
tmp = app.push() # Create a new "default application"
rv = _load(target) # Import the target module
app.remove(tmp) # Remove the temporary added default application
return rv if isinstance(rv, Bottle) else tmp
# Shinken: add the return of the server
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`):param server:
Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`):param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
all interfaces including the external one. (default: 127.0.0.1):param port: Server port to
bind to. Values below 1024 require root privileges. (default: 8080):param reloader:
Start auto-reloading server? (default: False):param interval: Auto-reloader interval
in seconds (default: 1):param quiet: Suppress output to stdout and stderr?
(default: False):param options: Options passed to the server adapter.
"""
# Shinken
res = None
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if isinstance(server, basestring):
server = server_names.get(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise RuntimeError("Server must be a subclass of ServerAdapter")
server.quiet = server.quiet or quiet
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print("Bottle server starting up (using %s)..." % repr(server))
print("Listening on http://%s:%d/" % (server.host, server.port))
print("Use Ctrl-C to quit.")
        print("")
try:
if reloader:
interval = min(interval, 1)
if os.environ.get('BOTTLE_CHILD'):
_reloader_child(server, app, interval)
else:
_reloader_observer(server, app, interval)
else:
# Shinken
res = server.run(app)
except KeyboardInterrupt:
pass
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print("Shutting down...")
# Shinken
return res
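# Minimal startup sketch (comment only; `wsgiref` is the pure-stdlib
# default adapter):
#
#     app = Bottle()
#
#     @app.route('/hello')
#     def hello():
#         return 'Hello World!'
#
#     run(app, server='wsgiref', host='127.0.0.1', port=8080)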
class FileCheckerThread(threading.Thread):
''' Thread that periodically checks for changed module files. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
        # 1: lockfile too old; 2: lockfile missing
        # 3: module file changed; 5: external exit
self.status = 0
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in sys.modules.values():
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'):
path = path[:-1]
if path and exists(path):
files[path] = mtime(path)
while not self.status:
for path, lmtime in files.iteritems():
if not exists(path) or mtime(path) > lmtime:
self.status = 3
if not exists(self.lockfile):
self.status = 2
elif mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 1
if not self.status:
time.sleep(self.interval)
if self.status != 5:
thread.interrupt_main()
def _reloader_child(server, app, interval):
    ''' Start the server and check for modified files in a background thread.
        As soon as an update is detected, KeyboardInterrupt is thrown in
        the main thread to exit the server loop. The process exits with
        status code 3 to request a reload by the observer process. If the
        lockfile is missing or not modified within 2*interval seconds, we
        assume that the observer process died and exit with status code
        1 or 2.
    '''
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
try:
bgcheck.start()
server.run(app)
except KeyboardInterrupt:
pass
bgcheck.status, status = 5, bgcheck.status
bgcheck.join() # bgcheck.status == 5 --> silent exit
if status:
sys.exit(status)
def _reloader_observer(server, app, interval):
    ''' Start a child process with identical commandline arguments and
        restart it as long as it exits with status code 3. Also create a
        lockfile and touch it (update mtime) every interval seconds.
    '''
fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
try:
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile):
os.unlink(lockfile)
sys.exit(p.poll())
elif not server.quiet:
print("Reloading server...")
except KeyboardInterrupt:
pass
if os.path.exists(lockfile):
os.unlink(lockfile)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extentions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} # used in prepare()
defaults = {} # used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = map(os.path.abspath, lookup)
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name):
return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extentions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (*args)
or directly, as keywords (**kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
                               'Use the full jinja2 environment name '
                               'line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters:
self.env.filters.update(filters)
if tests:
self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if fname:
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
''' Untested! '''
def prepare(self, **options):
from simpletal import simpleTAL
# TODO: add option to load METAL files during render
if self.source:
self.tpl = simpleTAL.compileHTMLTemplate(self.source)
else:
with open(self.filename, 'rb') as fp:
self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))
def render(self, *args, **kwargs):
from simpletal import simpleTALES
from StringIO import StringIO
for dictarg in args:
kwargs.update(dictarg)
# TODO: maybe reuse a context instead of always creating one
context = simpleTALES.Context()
for k, v in self.defaults.items():
context.addGlobal(k, v)
for k, v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
return output.getvalue()
class SimpleTemplate(BaseTemplate):
blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
'with', 'def', 'class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
@lazy_attribute
def re_pytokens(cls):
''' This matches comments and all kinds of quoted strings but does
NOT match comments (#...) within quoted strings. (trust me) '''
return re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def prepare(self, escape_func=cgi.escape, noescape=False):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code:
return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0] == '#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
multiline = dedent = oneline = False
template = self.source if self.source else open(self.filename).read()
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'):
yield 'RAW', part[1:]
else:
yield 'CMD', part
else:
yield 'TXT', part
def flush(): # Flush the ptrbuffer
if not ptrbuffer:
return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT':
cline += repr(value)
elif token == 'RAW':
cline += '_str(%s)' % value
elif token == 'CMD':
cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = line if isinstance(line, unicode) else unicode(line, encoding=self.encoding)
if lineno <= 2:
m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
if m:
self.encoding = m.group(1)
if m:
line = line.replace('coding', 'coding (removed)')
if line.strip()[:2].count('%') == 1:
line = line.split('%', 1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush() # encoding (TODO: why?)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, kwargs)
def execute(self, _stdout, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
env = self.defaults.copy()
env.update({'_stdout': _stdout,
'_printlist': _stdout.extend,
'_include': self.subtemplate,
'_str': self._str,
'_escape': self._escape})
env.update(kwargs)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
subtpl = self.__class__(name=subtpl, lookup=self.lookup)
rargs['_base'] = _stdout[:] # copy stdout
del _stdout[:] # clear stdout
return subtpl.execute(_stdout, rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
for dictarg in args:
kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings:
TEMPLATES[tpl].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]:
kwargs.update(dictarg)
return TEMPLATES[tpl].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v)) for (k, v) in HTTP_CODES.iteritems())
#: The default template used for error pages. Override with @error()
# SHINKEN MOD: change from bottle import DEBUG to from shinken.webui.bottle import DEBUG,...
ERROR_PAGE_TEMPLATE = """
%try:
%from shinken.webui.bottlewebui import DEBUG, HTTP_CODES, request, touni
%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.output}}</pre>
%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%end
%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%end
</body>
</html>
%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%end
"""
#: A thread-safe instance of :class:`Request` representing the `current` request.
request = Request()
#: A thread-safe instance of :class:`Response` used to build the HTTP response.
response = Response()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect(__name__ + '.ext', 'bottle_%s').module
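if __name__ == '__main__':
    # A minimal usage sketch, added for illustration (not part of the
    # original module): an inline source string containing '{' is compiled
    # as a SimpleTemplate by template() and rendered with keyword args.
    print(template('Hello {{name}}!', name='World'))  # -> Hello World!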
| 110,046
|
Python
|
.py
| 2,388
| 36.690955
| 98
| 0.589966
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,531
|
_importlib.py
|
shinken-solutions_shinken/shinken/misc/_importlib.py
|
"""Backport of importlib.import_module from 3.x."""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
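if __name__ == '__main__':
    # A minimal sketch, added for illustration: absolute names go straight
    # through __import__; relative names ('.foo') need the 'package' anchor.
    print(import_module('os.path').__name__)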
| 1,378
|
Python
|
.py
| 33
| 34.969697
| 82
| 0.652466
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,532
|
termcolor.py
|
shinken-solutions_shinken/shinken/misc/termcolor.py
|
# coding: utf-8
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <konstantin.lepa@gmail.com>
"""ANSII Color formatting for output in terminal."""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
__all__ = ['colored', 'cprint']
VERSION = (1, 1, 0)
ATTRIBUTES = dict(
list(
zip(
[
'bold',
'dark',
'',
'underline',
'blink',
'',
'reverse',
'concealed'
],
list(range(1, 9))
)
)
)
del ATTRIBUTES['']
HIGHLIGHTS = dict(
list(
zip(
[
'on_grey',
'on_red',
'on_green',
'on_yellow',
'on_blue',
'on_magenta',
'on_cyan',
'on_white'
],
list(range(40, 48))
)
)
)
COLORS = dict(
list(
zip(
[
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white',
],
list(range(90, 98))
)
)
)
RESET = '\033[0m'
def colored(text, color=None, on_color=None, attrs=None):
"""Colorize text.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
"""
if os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
# Shinken mod
if color is not None:
text += RESET
return text
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
"""Print colorize text.
It accepts arguments of print(function.)
"""
print((colored(text, color, on_color, attrs)), **kwargs)
if __name__ == '__main__':
print('Current terminal type: %s' % os.getenv('TERM'))
print('Test basic colors:')
cprint('Grey color', 'grey')
cprint('Red color', 'red')
cprint('Green color', 'green')
cprint('Yellow color', 'yellow')
cprint('Blue color', 'blue')
cprint('Magenta color', 'magenta')
cprint('Cyan color', 'cyan')
cprint('White color', 'white')
print(('-' * 78))
print('Test highlights:')
cprint('On grey color', on_color='on_grey')
cprint('On red color', on_color='on_red')
cprint('On green color', on_color='on_green')
cprint('On yellow color', on_color='on_yellow')
cprint('On blue color', on_color='on_blue')
cprint('On magenta color', on_color='on_magenta')
cprint('On cyan color', on_color='on_cyan')
cprint('On white color', color='grey', on_color='on_white')
print('-' * 78)
print('Test attributes:')
cprint('Bold grey color', 'grey', attrs=['bold'])
cprint('Dark red color', 'red', attrs=['dark'])
cprint('Underline green color', 'green', attrs=['underline'])
cprint('Blink yellow color', 'yellow', attrs=['blink'])
cprint('Reversed blue color', 'blue', attrs=['reverse'])
cprint('Concealed Magenta color', 'magenta', attrs=['concealed'])
cprint('Bold underline reverse cyan color', 'cyan',
attrs=['bold', 'underline', 'reverse'])
cprint('Dark blink concealed white color', 'white',
attrs=['dark', 'blink', 'concealed'])
print(('-' * 78))
print('Test mixing:')
cprint('Underline red on grey color', 'red', 'on_grey',
['underline'])
cprint('Reversed green on red color', 'green', 'on_red', ['reverse'])
| 5,268
|
Python
|
.py
| 148
| 28.094595
| 82
| 0.588686
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,533
|
filter.py
|
shinken-solutions_shinken/shinken/misc/filter.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for some filtering, like user-based item filtering
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Get only the items relevant to the user
def only_related_to(lst, user):
# if the user is an admin, show all
if user.is_admin:
return lst
# Ok the user is a simple user, we should filter
r = set()
for i in lst:
# Maybe the user is a direct contact
if user in i.contacts:
r.add(i)
continue
# TODO: add a notified_contact pass
# Maybe it's a contact of a linked element (source problems or impacts)
is_find = False
for s in i.source_problems:
if user in s.contacts:
r.add(i)
is_find = True
# Ok skip this object now
if is_find:
continue
# Now impacts related maybe?
for imp in i.impacts:
if user in imp.contacts:
r.add(i)
return list(r)
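if __name__ == '__main__':
    # A minimal sketch with hypothetical stand-in objects, added for
    # illustration (real callers pass Shinken hosts/services and a
    # Contact-like user object).
    class _Item(object):
        def __init__(self, contacts):
            self.contacts = contacts
            self.source_problems = []
            self.impacts = []

    class _User(object):
        is_admin = False

    user = _User()
    mine, other = _Item([user]), _Item([])
    # Only the item that lists the user as a direct contact is kept
    print(len(only_related_to([mine, other], user)))  # -> 1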
| 1,933
|
Python
|
.py
| 53
| 31.150943
| 82
| 0.672368
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,534
|
md5crypt.py
|
shinken-solutions_shinken/shinken/misc/md5crypt.py
|
#########################################################
# md5crypt.py
#
# 0423.2000 by michal wallace http://www.sabren.com/
# based on perl's Crypt::PasswdMD5 by Luis Munoz (lem@cantv.net)
# based on /usr/src/libcrypt/crypt.c from FreeBSD 2.2.5-RELEASE
#
# MANY THANKS TO
#
# Carey Evans - http://home.clear.net.nz/pages/c.evans/
# Dennis Marti - http://users.starpower.net/marti1/
#
# For the patches that got this thing working!
#
#########################################################
"""md5crypt.py - Provides interoperable MD5-based crypt() function
SYNOPSIS
import md5crypt
cryptedpassword = md5crypt.md5crypt(password, salt);
DESCRIPTION
unix_md5_crypt() provides a crypt()-compatible interface to the
rather new MD5-based crypt() function found in modern operating systems.
It's based on the implementation found on FreeBSD 2.2.[56]-RELEASE and
contains the following license in it:
"THE BEER-WARE LICENSE" (Revision 42):
<phk@login.dknet.dk> wrote this file. As long as you retain this notice you
can do whatever you want with this stuff. If we meet some day, and you think
this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
apache_md5_crypt() provides a function compatible with Apache's
.htpasswd files. This was contributed by Bryan Hart <bryan@eai.com>.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
MAGIC = '$1$' # Magic string
ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
from hashlib import md5
def to64(v, n):
ret = ''
while n - 1 >= 0:
n = n - 1
ret = ret + ITOA64[v & 0x3f]
v = v >> 6
return ret
def apache_md5_crypt(pw, salt):
# change the Magic string to match the one used by Apache
return unix_md5_crypt(pw, salt, '$apr1$')
def unix_md5_crypt(pw, salt, magic=None):
if magic is None:
magic = MAGIC
# Take care of the magic string if present
if salt[:len(magic)] == magic:
salt = salt[len(magic):]
# salt can have up to 8 characters:
salt = salt.split('$', 1)[0]
salt = salt[:8]
ctx = pw + magic + salt
final = md5(pw + salt + pw).digest()
for pl in range(len(pw), 0, -16):
if pl > 16:
ctx = ctx + final[:16]
else:
ctx = ctx + final[:pl]
# Now the 'weird' xform (??)
i = len(pw)
while i:
if i & 1:
ctx = ctx + chr(0) # if ($i & 1) { $ctx->add(pack("C", 0)); }
else:
ctx = ctx + pw[0]
i = i >> 1
final = md5(ctx).digest()
# The following is supposed to make
# things run slower.
# my question: WTF???
for i in range(1000):
ctx1 = ''
if i & 1:
ctx1 = ctx1 + pw
else:
ctx1 = ctx1 + final[:16]
if i % 3:
ctx1 = ctx1 + salt
if i % 7:
ctx1 = ctx1 + pw
if i & 1:
ctx1 = ctx1 + final[:16]
else:
ctx1 = ctx1 + pw
final = md5(ctx1).digest()
# Final xform
passwd = ''
passwd = passwd + to64((int(ord(final[0])) << 16)
| (int(ord(final[6])) << 8)
| (int(ord(final[12]))), 4)
passwd = passwd + to64((int(ord(final[1])) << 16)
| (int(ord(final[7])) << 8)
| (int(ord(final[13]))), 4)
passwd = passwd + to64((int(ord(final[2])) << 16)
| (int(ord(final[8])) << 8)
| (int(ord(final[14]))), 4)
passwd = passwd + to64((int(ord(final[3])) << 16)
| (int(ord(final[9])) << 8)
| (int(ord(final[15]))), 4)
passwd = passwd + to64((int(ord(final[4])) << 16)
| (int(ord(final[10])) << 8)
| (int(ord(final[5]))), 4)
passwd = passwd + to64((int(ord(final[11]))), 2)
return magic + salt + '$' + passwd
# assign a wrapper function:
md5crypt = unix_md5_crypt
if __name__ == "__main__":
print(unix_md5_crypt("cat", "hat"))
| 4,158
|
Python
|
.py
| 112
| 29.767857
| 82
| 0.551698
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,535
|
perfdata.py
|
shinken-solutions_shinken/shinken/misc/perfdata.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from shinken.util import to_best_int_float
perfdata_split_pattern = re.compile(r'([^=]+=\S+)')
# TODO: Improve this regex to not match strings like this:
# 'metric=45+e-456.56unit;50;80;0;45+-e45e-'
metric_pattern = \
re.compile(
r'^([^=]+)=([\d\.\-\+eE]+)([\w\/%]*)'
r';?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE]+)?;?([\d\.\-\+eE]+)?;?\s*'
)
# Return an int or a float if we can, or None if we can't
def guess_int_or_float(val):
try:
return to_best_int_float(val)
except Exception as exp:
return None
# Class for one metric of a perf_data
class Metric(object):
def __init__(self, s):
self.name = self.value = self.uom = \
self.warning = self.critical = self.min = self.max = None
s = s.strip()
# print("Analysis string", s)
r = metric_pattern.match(s)
if r:
# Get the name but remove all ' in it
self.name = r.group(1).replace("'", "")
self.value = guess_int_or_float(r.group(2))
self.uom = r.group(3)
self.warning = guess_int_or_float(r.group(4))
self.critical = guess_int_or_float(r.group(5))
self.min = guess_int_or_float(r.group(6))
self.max = guess_int_or_float(r.group(7))
# print('Name', self.name)
# print("Value", self.value)
# print("Res", r)
# print(r.groups())
if self.uom == '%':
self.min = 0
self.max = 100
def __str__(self):
s = "%s=%s%s" % (self.name, self.value, self.uom)
if self.warning:
s = s + ";%s" % (self.warning)
if self.critical:
s = s + ";%s" % (self.critical)
return s
class PerfDatas(object):
def __init__(self, s):
s = s or ''
elts = perfdata_split_pattern.findall(s)
elts = [e for e in elts if e != '']
self.metrics = {}
for e in elts:
m = Metric(e)
if m.name is not None:
self.metrics[m.name] = m
def __iter__(self):
return iter(self.metrics.values())
def __len__(self):
return len(self.metrics)
def __getitem__(self, key):
return self.metrics[key]
def __contains__(self, key):
return key in self.metrics
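if __name__ == '__main__':
    # A minimal sketch, added for illustration: parse a Nagios-style
    # perfdata string; the '%' unit sets min=0 and max=100 (see Metric).
    p = PerfDatas("load=1.50;2;3;0;5 mem=80%")
    print(len(p), p['load'].value, p['mem'].min, p['mem'].max)  # 2 1.5 0 100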
| 3,358
|
Python
|
.py
| 89
| 31.11236
| 94
| 0.588687
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,536
|
__init__.py
|
shinken-solutions_shinken/shinken/misc/__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
| 919
|
Python
|
.py
| 22
| 40.681818
| 77
| 0.758659
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,537
|
sorter.py
|
shinken-solutions_shinken/shinken/misc/sorter.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for some sorting
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Sort hosts and services by impact, states and co
def hst_srv_sort(s1, s2):
if s1.business_impact > s2.business_impact:
return -1
if s2.business_impact > s1.business_impact:
return 1
# Ok, we compute an importance value so
# For host, the order is UP, UNREACH, DOWN
# For service: OK, UNKNOWN, WARNING, CRIT
# And DOWN is before CRITICAL (potential more impact)
tab = {'host': {0: 0, 1: 4, 2: 1},
'service': {0: 0, 1: 2, 2: 3, 3: 1}
}
state1 = tab[s1.__class__.my_type].get(s1.state_id, 0)
state2 = tab[s2.__class__.my_type].get(s2.state_id, 0)
# ok, here, same business_impact
# Compare warn and crit state
if state1 > state2:
return -1
if state2 > state1:
return 1
# Ok, so by name...
if s1.get_full_name() > s2.get_full_name():
return 1
else:
return -1
# Sort hosts and services by impact, states and co
def worse_first(s1, s2):
# Ok, we compute an importance value so
# For host, the order is UP, UNREACH, DOWN
# For service: OK, UNKNOWN, WARNING, CRIT
# And DOWN is before CRITICAL (potential more impact)
tab = {'host': {0: 0, 1: 4, 2: 1},
'service': {0: 0, 1: 2, 2: 3, 3: 1}
}
state1 = tab[s1.__class__.my_type].get(s1.state_id, 0)
state2 = tab[s2.__class__.my_type].get(s2.state_id, 0)
# ok, here, same business_impact
# Compare warn and crit state
if state1 > state2:
return -1
if state2 > state1:
return 1
# Same? ok by business impact
if s1.business_impact > s2.business_impact:
return -1
if s2.business_impact > s1.business_impact:
return 1
# Ok, so by name...
if s1.get_full_name() > s2.get_full_name():
return -1
else:
return 1
# Sort hosts and services by last_state_change time
def last_state_change_earlier(s1, s2):
# Compare by last_state_change: the most recent change comes first
if s1.last_state_change > s2.last_state_change:
return -1
if s1.last_state_change < s2.last_state_change:
return 1
return 0
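if __name__ == '__main__':
    # A minimal sketch with a hypothetical stand-in class, added for
    # illustration: under worse_first a DOWN host (state_id 1) sorts
    # before an UP one (state_id 0). Python 2 cmp-style sort, matching
    # the comparison functions above.
    class _Host(object):
        my_type = 'host'

        def __init__(self, name, state_id, business_impact=3):
            self.name = name
            self.state_id = state_id
            self.business_impact = business_impact

        def get_full_name(self):
            return self.name

    up, down = _Host('up', 0), _Host('down', 1)
    print([h.get_full_name() for h in sorted([up, down], cmp=worse_first)])
    # -> ['down', 'up']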
| 3,209
|
Python
|
.py
| 89
| 31.370787
| 82
| 0.657216
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,538
|
regenerator.py
|
shinken-solutions_shinken/shinken/misc/regenerator.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
# Import all objects we will need
from shinken.objects.host import Host, Hosts
from shinken.objects.hostgroup import Hostgroup, Hostgroups
from shinken.objects.service import Service, Services
from shinken.objects.servicegroup import Servicegroup, Servicegroups
from shinken.objects.contact import Contact, Contacts
from shinken.objects.contactgroup import Contactgroup, Contactgroups
from shinken.objects.notificationway import NotificationWay, NotificationWays
from shinken.objects.timeperiod import Timeperiod, Timeperiods
from shinken.objects.command import Command, Commands
from shinken.objects.config import Config
from shinken.objects.schedulerlink import SchedulerLink, SchedulerLinks
from shinken.objects.reactionnerlink import ReactionnerLink, ReactionnerLinks
from shinken.objects.pollerlink import PollerLink, PollerLinks
from shinken.objects.brokerlink import BrokerLink, BrokerLinks
from shinken.objects.receiverlink import ReceiverLink, ReceiverLinks
from shinken.util import safe_print
from shinken.message import Message
from shinken.log import logger
# Class for a Regenerator. It will get broks, and "regenerate" real objects
# from them :)
class Regenerator(object):
def __init__(self):
# Our Real datas
self.configs = {}
self.hosts = Hosts([])
self.services = Services([])
self.notificationways = NotificationWays([])
self.contacts = Contacts([])
self.hostgroups = Hostgroups([])
self.servicegroups = Servicegroups([])
self.contactgroups = Contactgroups([])
self.timeperiods = Timeperiods([])
self.commands = Commands([])
self.schedulers = SchedulerLinks([])
self.pollers = PollerLinks([])
self.reactionners = ReactionnerLinks([])
self.brokers = BrokerLinks([])
self.receivers = ReceiverLinks([])
# From now we only look for realms names
self.realms = set()
self.tags = {}
self.services_tags = {}
# And in progress one
self.inp_hosts = {}
self.inp_services = {}
self.inp_hostgroups = {}
self.inp_servicegroups = {}
self.inp_contactgroups = {}
# Do not ask for full data resent too much
self.last_need_data_send = time.time()
# Flag to say if our data came from the scheduler or not
# (so if we skip *initial* broks)
self.in_scheduler_mode = False
# The Queue where to launch message, will be fill from the broker
self.from_q = None
# Load an external queue for sending messages
def load_external_queue(self, from_q):
self.from_q = from_q
# If we are called from a scheduler itself, we load the data from it
def load_from_scheduler(self, sched):
# Ok, we are in a scheduler, so we will skip some useless
# steps
self.in_scheduler_mode = True
# Go with the data creation/load
c = sched.conf
# Simulate a drop conf
b = sched.get_program_status_brok()
#b.prepare()
self.manage_program_status_brok(b)
# Now we will lie and directly map our objects :)
logger.debug("Regenerator::load_from_scheduler")
self.hosts = c.hosts
self.services = c.services
self.notificationways = c.notificationways
self.contacts = c.contacts
self.hostgroups = c.hostgroups
self.servicegroups = c.servicegroups
self.contactgroups = c.contactgroups
self.timeperiods = c.timeperiods
self.commands = c.commands
# We also load the realm
for h in self.hosts:
self.realms.add(h.realm)
break
# If we are in a scheduler mode, some broks are dangerous, so
# we will skip them
def want_brok(self, brok):
if self.in_scheduler_mode:
return brok.type not in ['program_status', 'initial_host_status',
'initial_hostgroup_status', 'initial_service_status',
'initial_servicegroup_status', 'initial_contact_status',
'initial_contactgroup_status', 'initial_timeperiod_status',
'initial_command_status']
# Ok you are wondering why we don't add initial_broks_done?
# It's because the LiveStatus modules need this part to do internal things.
# But don't worry, the vanilla regenerator will just skip it in all_done_linking :D
# Not in the don't-want list? So we want it! :)
return True
def manage_brok(self, brok):
""" Look for a manager function for a brok, and call it """
manage = getattr(self, 'manage_' + brok.type + '_brok', None)
# If we can and want it, got for it :)
if manage and self.want_brok(brok):
if brok.type not in ('service_next_schedule', 'host_next_schedule', 'service_check_result',
'host_check_result',
'update_service_status', 'update_host_status', 'update_poller_status',
'update_broker_status',
'update_receiver_status',
'update_scheduler_status'):
logger.debug('REGEN: manage brok %s:%s' % (brok.type, brok.id))
return manage(brok)
def update_element(self, e, data):
for prop in data:
setattr(e, prop, data[prop])
# Now we get all data about an instance, link all this stuff :)
def all_done_linking(self, inst_id):
# In a scheduler we are already "linked" so we can skip this
if self.in_scheduler_mode:
logger.debug("Regenerator: We skip the all_done_linking phase because we are in a scheduler")
return
start = time.time()
logger.debug("In ALL Done linking phase for instance %s" % inst_id)
# check if the instance is really defined, so we got ALL the
# init phase
if inst_id not in self.configs.keys():
logger.debug("Warning: the instance %d is not fully given, bailout" % inst_id)
return
# Try to load the in progress list and make them available for
# finding
try:
inp_hosts = self.inp_hosts[inst_id]
inp_hostgroups = self.inp_hostgroups[inst_id]
inp_contactgroups = self.inp_contactgroups[inst_id]
inp_services = self.inp_services[inst_id]
inp_servicegroups = self.inp_servicegroups[inst_id]
except Exception as exp:
logger.error("[Regen] Warning all done: %s" % exp)
return
# Link HOSTGROUPS with hosts
for hg in inp_hostgroups:
new_members = []
for (i, hname) in hg.members:
h = inp_hosts.find_by_name(hname)
if h:
new_members.append(h)
hg.members = new_members
# Merge HOSTGROUPS with real ones
for inphg in inp_hostgroups:
hgname = inphg.hostgroup_name
hg = self.hostgroups.find_by_name(hgname)
# If the hostgroup already exists, just add the new
# hosts into it
if hg:
hg.members.extend(inphg.members)
else: # else take the new one
self.hostgroups.add_item(inphg)
# Now link HOSTS with hostgroups, and commands
for h in inp_hosts:
# print("Linking %s groups %s" % (h.get_name(), h.hostgroups))
new_hostgroups = []
for hgname in h.hostgroups.split(','):
hgname = hgname.strip()
hg = self.hostgroups.find_by_name(hgname)
if hg:
new_hostgroups.append(hg)
h.hostgroups = new_hostgroups
# Now link Command() objects
self.linkify_a_command(h, 'check_command')
self.linkify_a_command(h, 'maintenance_check_command')
self.linkify_a_command(h, 'event_handler')
# Now link timeperiods
self.linkify_a_timeperiod_by_name(h, 'notification_period')
self.linkify_a_timeperiod_by_name(h, 'check_period')
self.linkify_a_timeperiod_by_name(h, 'maintenance_check_period')
self.linkify_a_timeperiod_by_name(h, 'maintenance_period')
# And link contacts too
self.linkify_contacts(h, 'contacts')
# Linkify tags
for t in h.tags:
if t not in self.tags:
self.tags[t] = 0
self.tags[t] += 1
# We can really declare this host OK now
old_h = self.hosts.find_by_name(h.get_name())
if old_h is not None:
self.hosts.remove_item(old_h)
self.hosts.add_item(h)
# Link SERVICEGROUPS with services
for sg in inp_servicegroups:
new_members = []
for (i, sname) in sg.members:
if i not in inp_services:
continue
s = inp_services[i]
new_members.append(s)
sg.members = new_members
# Merge SERVICEGROUPS with real ones
for inpsg in inp_servicegroups:
sgname = inpsg.servicegroup_name
sg = self.servicegroups.find_by_name(sgname)
# If the servicegroup already exists, just add the new
# services into it
if sg:
sg.members.extend(inpsg.members)
else: # else take the new one
self.servicegroups.add_item(inpsg)
# Now link SERVICES with hosts, servicegroups, and commands
for s in inp_services:
new_servicegroups = []
for sgname in s.servicegroups.split(','):
sgname = sgname.strip()
sg = self.servicegroups.find_by_name(sgname)
if sg:
new_servicegroups.append(sg)
s.servicegroups = new_servicegroups
# Now link with host
hname = s.host_name
s.host = self.hosts.find_by_name(hname)
if s.host:
old_s = s.host.find_service_by_name(s.service_description)
if old_s is not None:
s.host.services.remove(old_s)
s.host.services.append(s)
# Now link Command() objects
self.linkify_a_command(s, 'check_command')
self.linkify_a_command(s, 'maintenance_check_command')
self.linkify_a_command(s, 'event_handler')
# Now link timeperiods
self.linkify_a_timeperiod_by_name(s, 'notification_period')
self.linkify_a_timeperiod_by_name(s, 'check_period')
self.linkify_a_timeperiod_by_name(s, 'maintenance_period')
self.linkify_a_timeperiod_by_name(s, 'maintenance_check_period')
# And link contacts too
self.linkify_contacts(s, 'contacts')
# Linkify services tags
for t in s.tags:
if t not in self.services_tags:
self.services_tags[t] = 0
self.services_tags[t] += 1
# We can really declare this service OK now
self.services.add_item(s, index=True)
# Add the realm of these hosts. Only the first one is useful
for h in inp_hosts:
self.realms.add(h.realm)
break
# Now we can link all impacts/source problem list
# but only for the new ones here of course
for h in inp_hosts:
self.linkify_dict_srv_and_hosts(h, 'impacts')
self.linkify_dict_srv_and_hosts(h, 'source_problems')
self.linkify_host_and_hosts(h, 'parents')
self.linkify_host_and_hosts(h, 'childs')
self.linkify_dict_srv_and_hosts(h, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(h, 'child_dependencies')
# Now services too
for s in inp_services:
self.linkify_dict_srv_and_hosts(s, 'impacts')
self.linkify_dict_srv_and_hosts(s, 'source_problems')
self.linkify_dict_srv_and_hosts(s, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(s, 'child_dependencies')
# Linking TIMEPERIOD exclude with real ones now
for tp in self.timeperiods:
new_exclude = []
for ex in tp.exclude:
exname = ex.timeperiod_name
t = self.timeperiods.find_by_name(exname)
if t:
new_exclude.append(t)
tp.exclude = new_exclude
# Link CONTACTGROUPS with contacts
for cg in inp_contactgroups:
new_members = []
for (i, cname) in cg.members:
c = self.contacts.find_by_name(cname)
if c:
new_members.append(c)
cg.members = new_members
# Merge contactgroups with real ones
for inpcg in inp_contactgroups:
cgname = inpcg.contactgroup_name
cg = self.contactgroups.find_by_name(cgname)
# If the contactgroup already exists, just add the new
# contacts into it
if cg:
cg.members.extend(inpcg.members)
cg.members = list(set(cg.members))
else: # else take the new one
self.contactgroups.add_item(inpcg)
logger.debug("[Regen] ALL LINKING TIME %s" % (time.time() - start))
# clean old objects
del self.inp_hosts[inst_id]
del self.inp_hostgroups[inst_id]
del self.inp_contactgroups[inst_id]
del self.inp_services[inst_id]
del self.inp_servicegroups[inst_id]
# We look for o.prop (CommandCall) and we link the inner
# Command() object with our real ones
def linkify_a_command(self, o, prop):
cc = getattr(o, prop, None)
# if the command call is void, bypass it
if not cc:
setattr(o, prop, None)
return
cmdname = cc.command
c = self.commands.find_by_name(cmdname)
cc.command = c
# We look at o.prop and for each command we relink it
def linkify_commands(self, o, prop):
v = getattr(o, prop, None)
if not v:
# If we do not have a command list, put a void list instead
setattr(o, prop, [])
return
for cc in v:
cmdname = cc.command
c = self.commands.find_by_name(cmdname)
cc.command = c
# We look at the timeperiod() object of o.prop
# and we replace it with our true one
def linkify_a_timeperiod(self, o, prop):
t = getattr(o, prop, None)
if not t:
setattr(o, prop, None)
return
tpname = t.timeperiod_name
tp = self.timeperiods.find_by_name(tpname)
setattr(o, prop, tp)
# Same as before, but the value is a string here
def linkify_a_timeperiod_by_name(self, o, prop):
tpname = getattr(o, prop, None)
if not tpname:
setattr(o, prop, None)
return
tp = self.timeperiods.find_by_name(tpname)
setattr(o, prop, tp)
# We look at o.prop and for each contact in it,
# we replace it with the true object in self.contacts
def linkify_contacts(self, o, prop):
v = getattr(o, prop)
if not v:
return
new_v = []
for cname in v:
c = self.contacts.find_by_name(cname)
if c:
new_v.append(c)
setattr(o, prop, new_v)
# We got a service/host dict, we want to get back to a
# flat list
def linkify_dict_srv_and_hosts(self, o, prop):
v = getattr(o, prop)
if not v:
setattr(o, prop, [])
new_v = []
# print("Linkify Dict SRV/Host", v, o.get_name(), prop)
for name in v['services']:
elts = name.split('/')
hname = elts[0]
sdesc = elts[1]
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if s:
new_v.append(s)
for hname in v['hosts']:
h = self.hosts.find_by_name(hname)
if h:
new_v.append(h)
setattr(o, prop, new_v)
def linkify_host_and_hosts(self, o, prop):
v = getattr(o, prop)
if not v:
setattr(o, prop, [])
new_v = []
for hname in v:
h = self.hosts.find_by_name(hname)
if h:
new_v.append(h)
setattr(o, prop, new_v)
###############
# Brok management part
###############
def before_after_hook(self, brok, obj):
"""
This can be used by derived classes to compare the data in the brok
with the object which will be updated by these data. For example,
it is possible to find out in this method whether the state of a
host or service has changed.
"""
pass
#######
# INITIAL PART
#######
def manage_program_status_brok(self, b):
data = b.data
c_id = data['instance_id']
logger.debug("[Regen] Creating config: %s" % c_id)
# We get a real Config object, and put our data in it
c = Config()
self.update_element(c, data)
# Clean all in-progress things.
self.inp_hosts[c_id] = Hosts([])
self.inp_services[c_id] = Services([])
self.inp_hostgroups[c_id] = Hostgroups([])
self.inp_servicegroups[c_id] = Servicegroups([])
self.inp_contactgroups[c_id] = Contactgroups([])
# And we save it
self.configs[c_id] = c
# Clean the old "hard" objects
# We should clean all previously added hosts and services
logger.debug("Clean hosts/service of %s" % c_id)
to_del_h = [h for h in self.hosts if h.instance_id == c_id]
to_del_srv = [s for s in self.services if s.instance_id == c_id]
logger.debug("Cleaning host:%d srv:%d" % (len(to_del_h), len(to_del_srv)))
# Clean hosts from hosts and hostgroups
for h in to_del_h:
logger.debug("Deleting %s" % h.get_name())
self.hosts.remove_item(h)
# Now clean all hostgroups too
for hg in self.hostgroups:
logger.debug("Cleaning hostgroup %s:%d" % (hg.get_name(), len(hg.members)))
# Exclude from members the hosts with this inst_id
hg.members = [h for h in hg.members if h.instance_id != c_id]
logger.debug("Len after clean %s" % len(hg.members))
for s in to_del_srv:
logger.debug("Deleting %s" % s.get_full_name())
self.services.remove_item(s)
# Now clean service groups
for sg in self.servicegroups:
sg.members = [s for s in sg.members if s.instance_id != c_id]
# Get a new host. Add it in the in-progress table
def manage_initial_host_status_brok(self, b):
data = b.data
hname = data['host_name']
inst_id = data['instance_id']
# Try to get the in-progress Hosts
try:
inp_hosts = self.inp_hosts[inst_id]
except Exception as exp: # not good. we will cry in the program update
logger.error("[Regen] manage_initial_host_status_brok:: Not good! %s" % exp)
return
# logger.debug("Creating a host: %s in instance %d" % (hname, inst_id))
h = Host({})
self.update_element(h, data)
# We need to rebuild Downtime and Comment relationship
for dtc in h.downtimes + h.comments:
dtc.ref = h
# Ok, put it in the in-progress hosts
inp_hosts[h.id] = h
# For now we only create a hostgroup in the in-progress
# part. We will link it at the end.
def manage_initial_hostgroup_status_brok(self, b):
data = b.data
hgname = data['hostgroup_name']
inst_id = data['instance_id']
# Try to get the in-progress Hostgroups
try:
inp_hostgroups = self.inp_hostgroups[inst_id]
except Exception as exp: # not good. we will cry in the program update
logger.error("[Regen] manage_initial_hostgroup_status_brok:: Not good! %s" % exp)
return
logger.debug("Creating a hostgroup: %s in instance %d" % (hgname, inst_id))
# With void members
hg = Hostgroup([])
# populate data
self.update_element(hg, data)
# We will link hosts into hostgroups later
# so now only save it
inp_hostgroups[hg.id] = hg
def manage_initial_service_status_brok(self, b):
data = b.data
hname = data['host_name']
sdesc = data['service_description']
inst_id = data['instance_id']
# Try to get the in-progress Services
try:
inp_services = self.inp_services[inst_id]
except Exception as exp: # not good. we will cry in the program update
logger.error("[Regen] manage_initial_service_status_brok:: Not good! %s" % exp)
return
# logger.debug("Creating a service: %s/%s in instance %d" % (hname, sdesc, inst_id))
s = Service({})
self.update_element(s, data)
# We need to rebuild Downtime and Comment relationship
for dtc in s.downtimes + s.comments:
dtc.ref = s
# Ok, put it in the in-progress services
inp_services[s.id] = s
# We create a servicegroup in our in-progress part;
# we will link it afterwards
def manage_initial_servicegroup_status_brok(self, b):
data = b.data
sgname = data['servicegroup_name']
inst_id = data['instance_id']
# Try to get the in-progress Servicegroups
try:
inp_servicegroups = self.inp_servicegroups[inst_id]
except Exception as exp: # not good. we will cry in the program update
logger.error("[Regen] manage_initial_servicegroup_status_brok:: Not good! %s" % exp)
return
logger.debug("Creating a servicegroup: %s in instance %d" % (sgname, inst_id))
# With void members
sg = Servicegroup([])
# populate data
self.update_element(sg, data)
# We will link services into servicegroups later,
# so for now only save it
inp_servicegroups[sg.id] = sg
# For Contacts, it's a global value, so 2 cases:
# We got it -> we update it
# We don't -> we create it
# In both cases we need to relink it
def manage_initial_contact_status_brok(self, b):
data = b.data
cname = data['contact_name']
c = self.contacts.find_by_name(cname)
if c:
self.update_element(c, data)
else:
c = Contact({})
self.update_element(c, data)
self.contacts.add_item(c)
# Delete some useless contact values
del c.host_notification_commands
del c.service_notification_commands
del c.host_notification_period
del c.service_notification_period
# Now manage notification ways too
# Same as for contacts. We create or
# update
nws = c.notificationways
new_notifways = []
for cnw in nws:
nwname = cnw.notificationway_name
nw = self.notificationways.find_by_name(nwname)
if not nw:
logger.debug("Creating notif way %s" % nwname)
nw = NotificationWay([])
self.notificationways.add_item(nw)
# Now update it
for prop in NotificationWay.properties:
if hasattr(cnw, prop):
setattr(nw, prop, getattr(cnw, prop))
new_notifways.append(nw)
# Linking the notification way
# With commands
self.linkify_commands(nw, 'host_notification_commands')
self.linkify_commands(nw, 'service_notification_commands')
# Now link timeperiods
self.linkify_a_timeperiod(nw, 'host_notification_period')
self.linkify_a_timeperiod(nw, 'service_notification_period')
c.notificationways = new_notifways
# For now we only create a contactgroup with unlinked data in the
# in-progress list. We will link all of them at the end.
def manage_initial_contactgroup_status_brok(self, b):
data = b.data
cgname = data['contactgroup_name']
inst_id = data['instance_id']
# Try to get the in-progress Contactgroups
try:
inp_contactgroups = self.inp_contactgroups[inst_id]
except Exception as exp: # not good. we will cry in the program update
logger.error("[Regen] manage_initial_contactgroup_status_brok Not good! %s" % exp)
return
logger.debug("Creating an contactgroup: %s in instance %d" % (cgname, inst_id))
# With void members
cg = Contactgroup([])
# populate data
self.update_element(cg, data)
# We will link contacts into contactgroups later
# so now only save it
inp_contactgroups[cg.id] = cg
# For Timeperiods we got 2 cases: we already have the timeperiod or not.
# If we have it: just update it.
# If not: create it and declare it in our main timeperiods.
def manage_initial_timeperiod_status_brok(self, b):
data = b.data
# print("Creating timeperiod", data)
tpname = data['timeperiod_name']
tp = self.timeperiods.find_by_name(tpname)
if tp:
# print("Already existing timeperiod", tpname)
self.update_element(tp, data)
else:
# print("Creating Timeperiod:", tpname)
tp = Timeperiod({})
self.update_element(tp, data)
self.timeperiods.add_item(tp)
# For Commands we got 2 cases: we already have the command or not.
# If we have it: just update it.
# If not: create it and declare it in our main commands.
def manage_initial_command_status_brok(self, b):
data = b.data
cname = data['command_name']
c = self.commands.find_by_name(cname)
if c:
# print("Already existing command", cname, "updating it")
self.update_element(c, data)
else:
# print("Creating a new command", cname)
c = Command({})
self.update_element(c, data)
self.commands.add_item(c)
def manage_initial_scheduler_status_brok(self, b):
data = b.data
scheduler_name = data['scheduler_name']
sched = SchedulerLink({})
self.update_element(sched, data)
self.schedulers[scheduler_name] = sched
def manage_initial_poller_status_brok(self, b):
data = b.data
poller_name = data['poller_name']
poller = PollerLink({})
self.update_element(poller, data)
self.pollers[poller_name] = poller
def manage_initial_reactionner_status_brok(self, b):
data = b.data
reactionner_name = data['reactionner_name']
reac = ReactionnerLink({})
self.update_element(reac, data)
self.reactionners[reactionner_name] = reac
def manage_initial_broker_status_brok(self, b):
data = b.data
broker_name = data['broker_name']
broker = BrokerLink({})
self.update_element(broker, data)
# print("CMD:", c)
self.brokers[broker_name] = broker
def manage_initial_receiver_status_brok(self, b):
data = b.data
receiver_name = data['receiver_name']
receiver = ReceiverLink({})
self.update_element(receiver, data)
self.receivers[receiver_name] = receiver
# This brok is here when the WHOLE initial phase is done.
# So we got all data, we can link all together :)
def manage_initial_broks_done_brok(self, b):
inst_id = b.data['instance_id']
self.all_done_linking(inst_id)
#################
# Status Update part
#################
# A scheduler send us a "I'm alive" brok. If we never
# heard about this one, we got some problem and we
# ask him some initial data :)
def manage_update_program_status_brok(self, b):
data = b.data
c_id = data['instance_id']
# If we got an update about an unknown instance, cry and ask for a full
# version!
if c_id not in self.configs.keys():
# Do not ask for data too quickly, it is very dangerous:
# at most once a minute
if time.time() - self.last_need_data_send > 60 and self.from_q is not None:
logger.debug("I ask the broker for instance id data: %s" % c_id)
msg = Message(id=0, type='NeedData', data={'full_instance_id': c_id})
self.from_q.put(msg)
self.last_need_data_send = time.time()
return
# Ok, good conf, we can update it
c = self.configs[c_id]
self.update_element(c, data)
# In fact, an update of a host is like a check return
def manage_update_host_status_brok(self, b):
# There are some properties that should not change and are already linked
# so just remove them
clean_prop = ['id', 'check_command', 'maintenance_check_command',
'hostgroups', 'contacts', 'notification_period',
'contact_groups', 'check_period', 'event_handler',
'maintenance_period', 'maintenance_check_period',
'realm', 'customs', 'escalations']
# some are only used when a topology change happened
toplogy_change = b.data['topology_change']
if not toplogy_change:
other_to_clean = ['childs', 'parents', 'child_dependencies', 'parent_dependencies']
clean_prop.extend(other_to_clean)
data = b.data
for prop in clean_prop:
del data[prop]
hname = data['host_name']
h = self.hosts.find_by_name(hname)
if h:
self.before_after_hook(b, h)
self.update_element(h, data)
# We can have some change in our impacts and source problems.
self.linkify_dict_srv_and_hosts(h, 'impacts')
self.linkify_dict_srv_and_hosts(h, 'source_problems')
# If the topology change, update it
if toplogy_change:
logger.debug("Topology change for %s %s" % (h.get_name(), h.parent_dependencies))
self.linkify_host_and_hosts(h, 'parents')
self.linkify_host_and_hosts(h, 'childs')
self.linkify_dict_srv_and_hosts(h, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(h, 'child_dependencies')
# Relink downtimes and comments
for dtc in h.downtimes + h.comments:
dtc.ref = h
# In fact, an update of a service is like a check return
def manage_update_service_status_brok(self, b):
# There are some properties that should not change and are already linked
# so just remove them
clean_prop = ['id', 'check_command', 'maintenance_check_command',
'servicegroups', 'contacts', 'notification_period',
'contact_groups', 'check_period', 'event_handler',
'maintenance_period', 'maintenance_check_period',
'customs', 'escalations']
# some are only used when a topology change happened
toplogy_change = b.data['topology_change']
if not toplogy_change:
other_to_clean = ['child_dependencies', 'parent_dependencies']
clean_prop.extend(other_to_clean)
data = b.data
for prop in clean_prop:
del data[prop]
hname = data['host_name']
sdesc = data['service_description']
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if s:
self.before_after_hook(b, s)
self.update_element(s, data)
# We can have some change in our impacts and source problems.
self.linkify_dict_srv_and_hosts(s, 'impacts')
self.linkify_dict_srv_and_hosts(s, 'source_problems')
# If the topology change, update it
if toplogy_change:
self.linkify_dict_srv_and_hosts(s, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(s, 'child_dependencies')
# Relink downtimes and comments with the service
for dtc in s.downtimes + s.comments:
dtc.ref = s
def manage_update_broker_status_brok(self, b):
data = b.data
broker_name = data['broker_name']
try:
s = self.brokers[broker_name]
self.update_element(s, data)
except Exception:
pass
def manage_update_receiver_status_brok(self, b):
data = b.data
receiver_name = data['receiver_name']
try:
s = self.receivers[receiver_name]
self.update_element(s, data)
except Exception:
pass
def manage_update_reactionner_status_brok(self, b):
data = b.data
reactionner_name = data['reactionner_name']
try:
s = self.reactionners[reactionner_name]
self.update_element(s, data)
except Exception:
pass
def manage_update_poller_status_brok(self, b):
data = b.data
poller_name = data['poller_name']
try:
s = self.pollers[poller_name]
self.update_element(s, data)
except Exception:
pass
def manage_update_scheduler_status_brok(self, b):
data = b.data
scheduler_name = data['scheduler_name']
try:
s = self.schedulers[scheduler_name]
self.update_element(s, data)
# print("S:", s)
except Exception:
pass
#################
# Check result and schedule part
#################
def manage_host_check_result_brok(self, b):
data = b.data
hname = data['host_name']
h = self.hosts.find_by_name(hname)
if h:
self.before_after_hook(b, h)
self.update_element(h, data)
# this brok should arrive within a second after the host_check_result_brok
def manage_host_next_schedule_brok(self, b):
self.manage_host_check_result_brok(b)
# A service check result has just arrived, we UPDATE the data with it
def manage_service_check_result_brok(self, b):
data = b.data
hname = data['host_name']
sdesc = data['service_description']
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if s:
self.before_after_hook(b, s)
self.update_element(s, data)
# A service check update has just arrived, we UPDATE the data with it
def manage_service_next_schedule_brok(self, b):
self.manage_service_check_result_brok(b)
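if __name__ == '__main__':
    # A minimal sketch with a hypothetical stand-in brok, added for
    # illustration: manage_brok() resolves handlers by name, so a
    # 'program_status' brok reaches manage_program_status_brok().
    class _FakeBrok(object):
        def __init__(self, _type, data):
            self.type = _type
            self.data = data
            self.id = 0

    reg = Regenerator()
    reg.manage_brok(_FakeBrok('program_status', {'instance_id': 0}))
    print(list(reg.configs.keys()))  # -> [0]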
| 36,047
|
Python
|
.py
| 820
| 33.489024
| 105
| 0.596574
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,539
|
logevent.py
|
shinken-solutions_shinken/shinken/misc/logevent.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 - Savoir-Faire Linux inc.
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
event_type_pattern = \
re.compile(
r'^\[[0-9]{10}] (?:HOST|SERVICE) (ALERT|NOTIFICATION|FLAPPING|DOWNTIME)(?: ALERT)?:.*'
)
event_types = {
'NOTIFICATION': {
# ex: "[1402515279] SERVICE NOTIFICATION:
# admin;localhost;check-ssh;CRITICAL;notify-service-by-email;Connection refused"
'pattern': r'\[([0-9]{10})\] (HOST|SERVICE) (NOTIFICATION): '
r'([^\;]*);([^\;]*);(?:([^\;]*);)?([^\;]*);([^\;]*);([^\;]*)',
'properties': [
'time',
'notification_type', # 'SERVICE' (or could be 'HOST')
'event_type', # 'NOTIFICATION'
'contact', # 'admin'
'hostname', # 'localhost'
'service_desc', # 'check-ssh' (or could be None)
'state', # 'CRITICAL'
'notification_method', # 'notify-service-by-email'
'output', # 'Connection refused'
]
},
'ALERT': {
# ex: "[1329144231] SERVICE ALERT:
# dfw01-is02-006;cpu load maui;WARNING;HARD;4;WARNING - load average: 5.04, 4.67, 5.04"
'pattern': r'^\[([0-9]{10})] (HOST|SERVICE) (ALERT): '
r'([^\;]*);(?:([^\;]*);)?([^\;]*);([^\;]*);([^\;]*);([^\;]*)',
'properties': [
'time',
'alert_type', # 'SERVICE' (or could be 'HOST')
'event_type', # 'ALERT'
'hostname', # 'localhost'
'service_desc', # 'cpu load maui' (or could be None)
'state', # 'WARNING'
'state_type', # 'HARD'
'attempts', # '4'
'output', # 'WARNING - load average: 5.04, 4.67, 5.04'
]
},
'DOWNTIME': {
# ex: "[1279250211] HOST DOWNTIME ALERT:
# maast64;STARTED; Host has entered a period of scheduled downtime"
'pattern': r'^\[([0-9]{10})\] (HOST|SERVICE) (DOWNTIME) ALERT: '
r'([^\;]*);(STARTED|STOPPED|CANCELLED);(.*)',
'properties': [
'time',
'downtime_type', # 'SERVICE' (or could be 'HOST')
'event_type', # 'DOWNTIME'
'hostname', # 'maast64'
'state', # 'STARTED'
'output', # 'Host has entered a period of scheduled downtime'
]
},
'FLAPPING': {
# service flapping ex: "[1375301662] SERVICE FLAPPING ALERT:
# testhost;check_ssh;STARTED;
# Service appears to have started flapping (24.2% change >= 20.0% threshold)"
# host flapping ex: "[1375301662] HOST FLAPPING ALERT:
# hostbw;STARTED; Host appears to have started flapping (20.1% change > 20.0% threshold)"
'pattern': r'^\[([0-9]{10})] (HOST|SERVICE) (FLAPPING) ALERT: '
r'([^\;]*);(?:([^\;]*);)?([^\;]*);([^\;]*)',
'properties': [
'time',
'alert_type', # 'SERVICE' or 'HOST'
'event_type', # 'FLAPPING'
'hostname', # The hostname
'service_desc', # The service description or None
'state', # 'STOPPED' or 'STARTED'
'output', # 'Service appears to have started flapping (24% change >= 20.0% threshold)'
]
}
}
# Class for parsing event logs
# Populates self.data with the log type's properties
class LogEvent(object):
def __init__(self, log):
self.data = {}
# Find the type of event
event_type_match = event_type_pattern.match(log)
if event_type_match:
            # parse it with its pattern
event_type = event_types[event_type_match.group(1)]
properties_match = re.match(event_type['pattern'], log)
if properties_match:
# Populate self.data with the event's properties
for i, p in enumerate(event_type['properties']):
self.data[p] = properties_match.group(i + 1)
# Convert the time to int
self.data['time'] = int(self.data['time'])
# Convert attempts to int
if 'attempts' in self.data:
self.data['attempts'] = int(self.data['attempts'])
def __iter__(self):
return iter(self.data.items())
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
def __contains__(self, key):
return key in self.data
def __str__(self):
return str(self.data)
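# A minimal, hedged usage sketch (not part of the original module), reusing
# the sample notification line from the comments above; runs only when the
# module is executed directly.
if __name__ == '__main__':
    sample = ('[1402515279] SERVICE NOTIFICATION: admin;localhost;'
              'check-ssh;CRITICAL;notify-service-by-email;Connection refused')
    event = LogEvent(sample)
    # Expected: event['event_type'] == 'NOTIFICATION',
    # event['state'] == 'CRITICAL', event['time'] == 1402515279
    print(dict(event))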
| 5,221
|
Python
|
.py
| 122
| 34.040984
| 99
| 0.553346
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,540
|
datamanager.py
|
shinken-solutions_shinken/shinken/misc/datamanager.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import cmp_to_key
from shinken.util import safe_print
from shinken.misc.sorter import hst_srv_sort, last_state_change_earlier
from shinken.misc.filter import only_related_to
class DataManager(object):
def __init__(self):
self.rg = None
def load(self, rg):
self.rg = rg
    # The UI hands us names as str while our rg may hold unicode,
    # so we must handle that here
def get_host(self, hname):
return self.rg.hosts.find_by_name(hname)
def get_service(self, hname, sdesc):
return self.rg.services.find_srv_by_name_and_hostname(hname, sdesc)
def get_all_hosts_and_services(self):
        res = []  # avoid shadowing the builtin all()
        res.extend(self.rg.hosts)
        res.extend(self.rg.services)
        return res
def get_contact(self, name):
return self.rg.contacts.find_by_name(name)
def get_contactgroup(self, name):
return self.rg.contactgroups.find_by_name(name)
def get_contacts(self):
return self.rg.contacts
def get_hostgroups(self):
return self.rg.hostgroups
def get_hostgroup(self, name):
return self.rg.hostgroups.find_by_name(name)
def get_servicegroups(self):
return self.rg.servicegroups
def get_servicegroup(self, name):
return self.rg.servicegroups.find_by_name(name)
# Get the hostgroups sorted by names, and zero size in the end
# if selected one, put it in the first place
def get_hostgroups_sorted(self, selected=''):
r = []
selected = selected.strip()
hg_names = [hg.get_name() for hg in self.rg.hostgroups
if len(hg.members) > 0 and hg.get_name() != selected]
hg_names.sort()
hgs = [self.rg.hostgroups.find_by_name(n) for n in hg_names]
hgvoid_names = [hg.get_name() for hg in self.rg.hostgroups
if len(hg.members) == 0 and hg.get_name() != selected]
hgvoid_names.sort()
hgvoids = [self.rg.hostgroups.find_by_name(n) for n in hgvoid_names]
if selected:
hg = self.rg.hostgroups.find_by_name(selected)
if hg:
r.append(hg)
r.extend(hgs)
r.extend(hgvoids)
return r
def get_hosts(self):
return self.rg.hosts
def get_services(self):
return self.rg.services
def get_schedulers(self):
return self.rg.schedulers
def get_pollers(self):
return self.rg.pollers
def get_brokers(self):
return self.rg.brokers
def get_receivers(self):
return self.rg.receivers
def get_reactionners(self):
return self.rg.reactionners
def get_program_start(self):
for c in self.rg.configs.values():
return c.program_start
return None
def get_realms(self):
return self.rg.realms
def get_realm(self, r):
if r in self.rg.realms:
return r
return None
# Get the hosts tags sorted by names, and zero size in the end
def get_host_tags_sorted(self):
r = []
        names = sorted(self.rg.tags.keys())
for n in names:
r.append((n, self.rg.tags[n]))
return r
# Get the hosts tagged with a specific tag
def get_hosts_tagged_with(self, tag):
r = []
for h in self.get_hosts():
if tag in h.get_host_tags():
r.append(h)
return r
# Get the services tags sorted by names, and zero size in the end
def get_service_tags_sorted(self):
r = []
        names = sorted(self.rg.services_tags.keys())
for n in names:
r.append((n, self.rg.services_tags[n]))
return r
def get_important_impacts(self):
res = []
for s in self.rg.services:
if s.is_impact and s.state not in ['OK', 'PENDING']:
if s.business_impact > 2:
res.append(s)
for h in self.rg.hosts:
if h.is_impact and h.state not in ['UP', 'PENDING']:
if h.business_impact > 2:
res.append(h)
return res
# Returns all problems
def get_all_problems(self, to_sort=True, get_acknowledged=False):
res = []
if not get_acknowledged:
res.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and
not s.is_impact and not s.problem_has_been_acknowledged and
not s.host.problem_has_been_acknowledged])
res.extend([h for h in self.rg.hosts
if h.state not in ['UP', 'PENDING'] and
not h.is_impact and not h.problem_has_been_acknowledged])
else:
res.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and not s.is_impact])
res.extend([h for h in self.rg.hosts
if h.state not in ['UP', 'PENDING'] and not h.is_impact])
        if to_sort:
            # hst_srv_sort is a cmp-style function, so wrap it for Python 3
            res.sort(key=cmp_to_key(hst_srv_sort))
return res
# returns problems, but the most recent before
def get_problems_time_sorted(self):
pbs = self.get_all_problems(to_sort=False)
        pbs.sort(key=cmp_to_key(last_state_change_earlier))
return pbs
# Return all non managed impacts
def get_all_impacts(self):
res = []
for s in self.rg.services:
if s.is_impact and s.state not in ['OK', 'PENDING']:
# If s is acked, pass
if s.problem_has_been_acknowledged:
continue
# We search for impacts that were NOT currently managed
if len([p for p in s.source_problems if not p.problem_has_been_acknowledged]) > 0:
res.append(s)
for h in self.rg.hosts:
if h.is_impact and h.state not in ['UP', 'PENDING']:
# If h is acked, pass
if h.problem_has_been_acknowledged:
continue
# We search for impacts that were NOT currently managed
if len([p for p in h.source_problems if not p.problem_has_been_acknowledged]) > 0:
res.append(h)
return res
# Return the number of problems
def get_nb_problems(self):
return len(self.get_all_problems(to_sort=False))
# Get the number of all problems, even the ack ones
def get_nb_all_problems(self, user):
res = []
res.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and not s.is_impact])
res.extend([h for h in self.rg.hosts
if h.state not in ['UP', 'PENDING'] and not h.is_impact])
return len(only_related_to(res, user))
# Return the number of impacts
def get_nb_impacts(self):
return len(self.get_all_impacts())
def get_nb_elements(self):
return len(self.rg.services) + len(self.rg.hosts)
def get_important_elements(self):
res = []
        # We want REALLY important things, so business_impact > 2, but not just
        # IT elements that are root problems: we also look at the config-defined
        # my_own_business_impact value
res.extend([s for s in self.rg.services
if (s.business_impact > 2 and not 0 <= s.my_own_business_impact <= 2)])
res.extend([h for h in self.rg.hosts
if (h.business_impact > 2 and not 0 <= h.my_own_business_impact <= 2)])
print("DUMP IMPORTANT")
for i in res:
safe_print(i.get_full_name(), i.business_impact, i.my_own_business_impact)
return res
    # Look at all business-impacting elements and give the worst state
    # among warning or critical
def get_overall_state(self):
h_states = [h.state_id for h in self.rg.hosts
if h.business_impact > 2 and h.is_impact and h.state_id in [1, 2]]
s_states = [s.state_id for s in self.rg.services
if s.business_impact > 2 and s.is_impact and s.state_id in [1, 2]]
print("get_overall_state:: hosts and services business problems", h_states, s_states)
if len(h_states) == 0:
h_state = 0
else:
h_state = max(h_states)
if len(s_states) == 0:
s_state = 0
else:
s_state = max(s_states)
# Ok, now return the max of hosts and services states
return max(h_state, s_state)
# Same but for pure IT problems
def get_overall_it_state(self):
h_states = [h.state_id for h in self.rg.hosts if h.is_problem and h.state_id in [1, 2]]
s_states = [s.state_id for s in self.rg.services if s.is_problem and s.state_id in [1, 2]]
if len(h_states) == 0:
h_state = 0
else:
h_state = max(h_states)
if len(s_states) == 0:
s_state = 0
else:
s_state = max(s_states)
# Ok, now return the max of hosts and services states
return max(h_state, s_state)
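    # Hedged worked example for the two methods above: one WARNING service
    # (state_id 1) plus one DOWN host (state_id 2) gives an overall state of
    # max(2, 1) == 2, i.e. critical-level.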
# Get percent of all Services
def get_per_service_state(self):
all_services = self.rg.services
problem_services = []
problem_services.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and not s.is_impact])
if len(all_services) == 0:
res = 0
else:
res = int(100 - (len(problem_services) * 100) / float(len(all_services)))
return res
# Get percent of all Hosts
def get_per_hosts_state(self):
all_hosts = self.rg.hosts
problem_hosts = []
problem_hosts.extend([s for s in self.rg.hosts
if s.state not in ['UP', 'PENDING'] and not s.is_impact])
if len(all_hosts) == 0:
res = 0
else:
res = int(100 - (len(problem_hosts) * 100) / float(len(all_hosts)))
return res
    # Look at all business-impacting elements and give the worst state
    # among warning or critical
def get_len_overall_state(self):
h_states = [h.state_id for h in self.rg.hosts
if h.business_impact > 2 and h.is_impact and h.state_id in [1, 2]]
s_states = [s.state_id for s in self.rg.services
if s.business_impact > 2 and s.is_impact and s.state_id in [1, 2]]
print("get_len_overall_state:: hosts and services business problems", h_states, s_states)
# Just return the number of impacting elements
return len(h_states) + len(s_states)
# Return a tree of {'elt': Host, 'fathers': [{}, {}]}
def get_business_parents(self, obj, levels=3):
res = {'node': obj, 'fathers': []}
# if levels == 0:
# return res
for i in obj.parent_dependencies:
# We want to get the levels deep for all elements, but
# go as far as we should for bad elements
if levels != 0 or i.state_id != 0:
par_elts = self.get_business_parents(i, levels=levels - 1)
res['fathers'].append(par_elts)
print("get_business_parents::Give elements", res)
return res
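    # Hedged shape example of the tree returned above (names invented):
    # {'node': <Host web-1>, 'fathers': [
    #     {'node': <Host core-switch>, 'fathers': []}]}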
    # Ok, we do not have true root problems, but we can try to guess them.
    # In fact we can just guess with the other services of the same host
def guess_root_problems(self, obj):
if obj.__class__.my_type != 'service':
return []
r = [s for s in obj.host.services if s.state_id != 0 and s != obj]
return r
datamgr = DataManager()
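# A minimal, hedged usage sketch (not part of the original module): the real
# "rg" is the broker's regenerator object; here we fake just enough of it to
# show the query flow.
if __name__ == '__main__':
    class _FakeHost(object):
        def __init__(self, name):
            self.host_name = name

    class _FakeHosts(list):
        def find_by_name(self, name):
            for h in self:
                if h.host_name == name:
                    return h
            return None

    class _FakeRg(object):
        hosts = _FakeHosts([_FakeHost('localhost')])

    datamgr.load(_FakeRg())
    print(datamgr.get_host('localhost').host_name)  # -> localhost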
| 12,561
|
Python
|
.py
| 291
| 33.584192
| 100
| 0.596513
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,541
|
common.py
|
shinken-solutions_shinken/shinken/misc/common.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
ModAttr = namedtuple('ModAttr', ['modattr', 'attribute', 'value'])
DICT_MODATTR = {
"MODATTR_NONE": ModAttr("MODATTR_NONE", "", 0),
"MODATTR_NOTIFICATIONS_ENABLED":
ModAttr("MODATTR_NOTIFICATIONS_ENABLED", "notifications_enabled", 1),
"notifications_enabled": ModAttr("MODATTR_NOTIFICATIONS_ENABLED", "notifications_enabled", 1),
"MODATTR_ACTIVE_CHECKS_ENABLED":
ModAttr("MODATTR_ACTIVE_CHECKS_ENABLED", "active_checks_enabled", 2),
"active_checks_enabled": ModAttr("MODATTR_ACTIVE_CHECKS_ENABLED", "active_checks_enabled", 2),
"MODATTR_PASSIVE_CHECKS_ENABLED":
ModAttr("MODATTR_PASSIVE_CHECKS_ENABLED", "passive_checks_enabled", 4),
"passive_checks_enabled":
ModAttr("MODATTR_PASSIVE_CHECKS_ENABLED", "passive_checks_enabled", 4),
"MODATTR_EVENT_HANDLER_ENABLED":
ModAttr("MODATTR_EVENT_HANDLER_ENABLED", "event_handler_enabled", 8),
"event_handler_enabled": ModAttr("MODATTR_EVENT_HANDLER_ENABLED", "event_handler_enabled", 8),
"MODATTR_FLAP_DETECTION_ENABLED":
ModAttr("MODATTR_FLAP_DETECTION_ENABLED", "flap_detection_enabled", 16),
"flap_detection_enabled":
ModAttr("MODATTR_FLAP_DETECTION_ENABLED", "flap_detection_enabled", 16),
"MODATTR_FAILURE_PREDICTION_ENABLED":
ModAttr("MODATTR_FAILURE_PREDICTION_ENABLED", "failure_prediction_enabled", 32),
"failure_prediction_enabled":
ModAttr("MODATTR_FAILURE_PREDICTION_ENABLED", "failure_prediction_enabled", 32),
"MODATTR_PERFORMANCE_DATA_ENABLED":
ModAttr("MODATTR_PERFORMANCE_DATA_ENABLED", "process_performance_data", 64),
"process_performance_data":
ModAttr("MODATTR_PERFORMANCE_DATA_ENABLED", "process_performance_data", 64),
"MODATTR_OBSESSIVE_HANDLER_ENABLED":
ModAttr("MODATTR_OBSESSIVE_HANDLER_ENABLED", "obsess_over_service", 128),
"obsess_over_service":
ModAttr("MODATTR_OBSESSIVE_HANDLER_ENABLED", "obsess_over_service", 128),
"MODATTR_EVENT_HANDLER_COMMAND": ModAttr("MODATTR_EVENT_HANDLER_COMMAND", "event_handler", 256),
"event_handler": ModAttr("MODATTR_EVENT_HANDLER_COMMAND", "event_handler", 256),
"MODATTR_CHECK_COMMAND": ModAttr("MODATTR_CHECK_COMMAND", "check_command", 512),
"check_command": ModAttr("MODATTR_CHECK_COMMAND", "check_command", 512),
"MODATTR_NORMAL_CHECK_INTERVAL":
ModAttr("MODATTR_NORMAL_CHECK_INTERVAL", "check_interval", 1024),
"check_interval": ModAttr("MODATTR_NORMAL_CHECK_INTERVAL", "check_interval", 1024),
"MODATTR_RETRY_CHECK_INTERVAL": ModAttr("MODATTR_RETRY_CHECK_INTERVAL", "retry_interval", 2048),
"retry_interval": ModAttr("MODATTR_RETRY_CHECK_INTERVAL", "retry_interval", 2048),
"MODATTR_MAX_CHECK_ATTEMPTS": ModAttr("MODATTR_MAX_CHECK_ATTEMPTS", "max_check_attempts", 4096),
"max_check_attempts": ModAttr("MODATTR_MAX_CHECK_ATTEMPTS", "max_check_attempts", 4096),
"MODATTR_FRESHNESS_CHECKS_ENABLED":
ModAttr("MODATTR_FRESHNESS_CHECKS_ENABLED", "check_freshness", 8192),
"check_freshness": ModAttr("MODATTR_FRESHNESS_CHECKS_ENABLED", "check_freshness", 8192),
"MODATTR_CHECK_TIMEPERIOD": ModAttr("MODATTR_CHECK_TIMEPERIOD", "check_period", 16384),
"check_period": ModAttr("MODATTR_CHECK_TIMEPERIOD", "check_period", 16384),
"MODATTR_CUSTOM_VARIABLE": ModAttr("MODATTR_CUSTOM_VARIABLE", "customs", 32768),
"custom_variable": ModAttr("MODATTR_CUSTOM_VARIABLE", "customs", 32768),
"MODATTR_NOTIFICATION_TIMEPERIOD":
ModAttr("MODATTR_NOTIFICATION_TIMEPERIOD", "notification_period", 65536),
"notification_period": ModAttr("MODATTR_NOTIFICATION_TIMEPERIOD", "notification_period", 65536),
}
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda s: None
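# A hedged usage sketch (not part of the original module): both the Nagios
# MODATTR_* name and the shinken attribute name resolve to the same ModAttr
# tuple, and the values OR together into the classic modified_attributes
# bitmask.
if __name__ == '__main__':
    mod = DICT_MODATTR['notifications_enabled']
    print(mod.modattr, mod.attribute, mod.value)
    mask = (DICT_MODATTR['active_checks_enabled'].value |
            DICT_MODATTR['passive_checks_enabled'].value)
    print(mask)  # -> 6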
| 4,684
|
Python
|
.py
| 79
| 54.683544
| 100
| 0.729983
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,542
|
schedulerdaemon.py
|
shinken-solutions_shinken/shinken/daemons/schedulerdaemon.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import signal
import time
import traceback
import zlib
import base64
import sys
from shinken.scheduler import Scheduler
from shinken.macroresolver import MacroResolver
from shinken.external_command import ExternalCommandManager
from shinken.daemon import Daemon
from shinken.property import PathProp, IntegerProp
from shinken.log import logger
from shinken.satellite import BaseSatellite, IForArbiter as IArb, Interface
from shinken.util import nighty_five_percent, parse_memory_expr, free_memory, to_bool
from shinken.stats import statsmgr
from shinken.serializer import serialize, deserialize
# Interface for Workers
class IChecks(Interface):
""" Interface for Workers:
They connect here and see if they are still OK with our running_id,
if not, they must drop their checks """
# poller or reactionner is asking us our running_id
# def get_running_id(self):
# return self.running_id
    # A poller or reactionner asks us for actions
def get_checks(self, do_checks=False, do_actions=False, poller_tags='None',
reactionner_tags='None', worker_name='none',
module_types='fork', max_actions=None):
# print("We ask us checks")
do_checks = to_bool(do_checks)
do_actions = to_bool(do_actions)
poller_tags = [t.strip() for t in poller_tags.split(",") if t.strip()]
reactionner_tags = [t.strip() for t in reactionner_tags.split(",") if t.strip()]
module_types = [t.strip() for t in module_types.split(",") if t.strip()]
if max_actions is not None:
try:
max_actions = int(max_actions)
except ValueError:
                logger.error("Invalid max_actions in get_checks, should be an "
                             "integer. Ignored.")
max_actions = None
res = self.app.get_to_run_checks(
do_checks,
do_actions,
poller_tags,
reactionner_tags,
worker_name,
module_types,
max_actions
)
# print("Sending %d checks" % len(res))
self.app.nb_checks_send += len(res)
return serialize(res)
get_checks.encode = 'raw'
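    # Hedged example call (all parameters arrive as strings over HTTP,
    # values invented here):
    # get_checks(do_checks='1', do_actions='0', poller_tags='dmz,linux',
    #            worker_name='poller-1', module_types='fork', max_actions='50')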
    # A poller or reactionner is pushing results to us
def put_results(self, results):
nb_received = len(results)
self.app.nb_check_received += nb_received
if nb_received != 0:
logger.debug("Received %d results", nb_received)
for result in results:
result.set_type_active()
with self.app.waiting_results_lock:
self.app.waiting_results.extend(results)
# for c in results:
# self.sched.put_results(c)
return serialize(True)
put_results.method = 'PUT'
put_results.need_lock = False
class IBroks(Interface):
""" Interface for Brokers:
They connect here and get all broks (data for brokers). Data must be ORDERED!
(initial status BEFORE update...) """
# A broker ask us broks
def get_broks(self, bname, broks_batch=0):
        # Maybe it was not registered as it should be;
        # if so, do it now
if bname not in self.app.brokers:
self.fill_initial_broks(bname)
if broks_batch:
try:
broks_batch = int(broks_batch)
except ValueError:
                logger.error("Invalid broks_batch in get_broks, should be an "
                             "integer. Ignored.")
broks_batch = 0
# Now get the broks for this specific broker
res = self.app.get_broks(bname, broks_batch)
# got only one global counter for broks
self.app.nb_broks_send += len(res)
        # we no longer have a full set of broks in the queue
self.app.brokers[bname]['has_full_broks'] = False
return serialize(res)
get_broks.encode = 'raw'
    # The broker is a new one: if we do not have a full set of broks
    # for it, we clean its broks and fill them
    # with all the initial values
def fill_initial_broks(self, bname):
if bname not in self.app.brokers:
logger.info("A new broker just connected : %s", bname)
self.app.brokers[bname] = {'broks': [], 'has_full_broks': False}
e = self.app.brokers[bname]
if not e['has_full_broks']:
del e['broks'][:]
self.app.fill_initial_broks(bname, with_logs=True)
class IStats(Interface):
"""
Interface for various stats about scheduler activity
"""
doc = '''Get raw stats from the daemon:
* nb_scheduled: number of scheduled checks (to launch in the future)
* nb_inpoller: number of checks taken by the pollers
* nb_zombies: number of zombie checks (should be close to zero)
* nb_notifications: number of notifications+event handlers
* latency: avg,min,max latency for the services (should be <10s)
'''
def get_raw_stats(self):
sched = self.app.sched
res = {}
res['nb_scheduled'] = len([c for c in sched.checks.values() if c.status == 'scheduled'])
res['nb_inpoller'] = len([c for c in sched.checks.values() if c.status == 'inpoller'])
res['nb_zombies'] = len([c for c in sched.checks.values() if c.status == 'zombie'])
res['nb_notifications'] = len(sched.actions)
        # Spare schedulers do not have such properties
        if hasattr(sched, 'services'):
            # Get an overview of the latencies with just
            # a 95th-percentile view, but also min/max values
latencies = [s.latency for s in sched.services]
lat_avg, lat_min, lat_max = nighty_five_percent(latencies)
res['latency'] = (0.0, 0.0, 0.0)
if lat_avg:
res['latency'] = (lat_avg, lat_min, lat_max)
return res
get_raw_stats.doc = doc
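    # Hedged example of the structure returned above (values invented):
    # {'nb_scheduled': 120, 'nb_inpoller': 4, 'nb_zombies': 0,
    #  'nb_notifications': 2, 'latency': (0.8, 0.1, 3.2)}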
class IForArbiter(IArb):
""" Interface for Arbiter. We ask him a for a conf and after that listen for instructions
from the arbiter. The arbiter is the interface to the administrator, so we must listen
carefully and give him the information he wants. Which could be for another scheduler """
    # The arbiter is sending us an external command.
    # It can send us global commands, or specific ones
def run_external_commands(self, cmds):
self.app.sched.run_external_commands(cmds)
run_external_commands.method = 'PUT'
def put_conf(self, conf):
self.app.sched.die()
super(IForArbiter, self).put_conf(conf)
put_conf.method = 'PUT'
    # Called by the arbiter if it thinks we are running but we must not (like
    # if I was a spare that took a conf but the master returned, I must die
    # and wait for a new conf)
# Us: No please...
# Arbiter: I don't care, hasta la vista baby!
# Us: ... <- Nothing! We are dead! you didn't follow or what??
def wait_new_conf(self):
logger.debug("Arbiter wants me to wait for a new configuration")
self.app.sched.die()
super(IForArbiter, self).wait_new_conf()
'''
class Injector(Interface):
# A broker ask us broks
def inject(self, bincode):
# first we need to get a real code object
import marshal
print("Calling Inject mode")
code = marshal.loads(bincode)
result = None
exec code
try:
return result
except NameError as exp:
return None
'''
# The main app class
class Shinken(BaseSatellite):
properties = BaseSatellite.properties.copy()
properties.update({
'pidfile': PathProp(default='schedulerd.pid'),
'port': IntegerProp(default=7768),
'local_log': PathProp(default='schedulerd.log'),
})
    # Create the shinken class:
    # Create a Pyro server (port = argv 1),
    # then create the interface for the arbiter.
    # Then, it waits for a first configuration
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file, profile=''):
BaseSatellite.__init__(self, 'scheduler', config_file, is_daemon, do_replace, debug,
debug_file)
self.interface = IForArbiter(self)
self.istats = IStats(self)
self.sched = Scheduler(self)
self.ichecks = None
self.ibroks = None
self.must_run = True
# Now the interface
self.uri = None
self.uri2 = None
# And possible links for satellites
# from now only pollers
self.pollers = {}
self.reactionners = {}
self.brokers = {}
def do_stop(self):
if self.http_daemon:
if self.ibroks:
self.http_daemon.unregister(self.ibroks)
if self.ichecks:
self.http_daemon.unregister(self.ichecks)
super(Shinken, self).do_stop()
def compensate_system_time_change(self, difference):
""" Compensate a system time change of difference for all hosts/services/checks/notifs """
logger.warning("A system time change of %d has been detected. Compensating...", difference)
# We only need to change some value
self.program_start = max(0, self.program_start + difference)
if not hasattr(self.sched, "conf"):
            # Race condition where the time changed before we got the conf
return
# Then we compensate all host/services
for h in self.sched.hosts:
h.compensate_system_time_change(difference)
for s in self.sched.services:
s.compensate_system_time_change(difference)
# Now all checks and actions
for c in self.sched.checks.values():
            # Already launched checks should not be touched
if c.status == 'scheduled' and c.t_to_go is not None:
t_to_go = c.t_to_go
ref = c.ref
new_t = max(0, t_to_go + difference)
if ref.check_period is not None:
                    # But it's not so simple, we must match the timeperiod
new_t = ref.check_period.get_next_valid_time_from_t(new_t)
                # But maybe there is no new valid time! Not good :(
                # Flag it as an error, with error output
if new_t is None:
c.state = 'waitconsume'
c.exit_status = 2
c.output = '(Error: there is no available check time after time change!)'
c.check_time = time.time()
c.execution_time = 0
else:
c.t_to_go = new_t
ref.next_chk = new_t
# Now all checks and actions
for c in self.sched.actions.values():
            # Already launched checks should not be touched
if c.status == 'scheduled':
t_to_go = c.t_to_go
                # Event handlers do not have a ref
ref = getattr(c, 'ref', None)
new_t = max(0, t_to_go + difference)
                # Notifications should be checked against the notification_period
if c.is_a == 'notification':
if ref.notification_period:
                        # But it's not so simple, we must match the timeperiod
new_t = ref.notification_period.get_next_valid_time_from_t(new_t)
                    # And shift the creation_time variable too
c.creation_time = c.creation_time + difference
                # But maybe there is no new valid time! Not good :(
                # Flag it as an error, with error output
if new_t is None:
c.state = 'waitconsume'
c.exit_status = 2
c.output = '(Error: there is no available check time after time change!)'
c.check_time = time.time()
c.execution_time = 0
else:
c.t_to_go = new_t
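    # Hedged worked example for the compensation above: a check with
    # t_to_go=1000 and a time jump of difference=+300 is moved to new_t=1300,
    # then snapped to the next valid time of its timeperiod; if the
    # timeperiod yields no valid time, the check is flagged as an error
    # instead of being rescheduled.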
def manage_signal(self, sig, frame):
logger.warning("Received a SIGNAL %s", sig)
# If we got USR1, just dump memory
if sig == signal.SIGUSR1:
self.sched.need_dump_memory = True
elif sig == signal.SIGUSR2: # usr2, dump objects
self.sched.need_objects_dump = True
else: # if not, die :)
self.sched.die()
self.must_run = False
Daemon.manage_signal(self, sig, frame)
def do_loop_turn(self):
# Ok, now the conf
self.wait_for_initial_conf()
if not self.new_conf:
return
logger.info("New configuration received")
self.setup_new_conf()
logger.info("New configuration loaded")
self.sched.run()
if self.new_conf and self.graceful_enabled:
self.switch_process()
def setup_new_conf(self):
pk = self.new_conf
conf_raw = pk['conf']
override_conf = pk['override_conf']
modules = pk['modules']
satellites = pk['satellites']
instance_name = pk['instance_name']
push_flavor = pk['push_flavor']
skip_initial_broks = pk['skip_initial_broks']
accept_passive_unknown_check_results = pk['accept_passive_unknown_check_results']
api_key = pk['api_key']
secret = pk['secret']
http_proxy = pk['http_proxy']
statsd_host = pk['statsd_host']
statsd_port = pk['statsd_port']
statsd_prefix = pk['statsd_prefix']
statsd_enabled = pk['statsd_enabled']
statsd_interval = pk['statsd_interval']
statsd_types = pk['statsd_types']
statsd_pattern = pk['statsd_pattern']
harakiri_threshold = parse_memory_expr(pk['harakiri_threshold'])
        # hooray, we got a name, we can set it in our stats objects
statsmgr.register(self.sched, instance_name, 'scheduler',
api_key=api_key,
secret=secret,
http_proxy=http_proxy,
statsd_host=statsd_host,
statsd_port=statsd_port,
statsd_prefix=statsd_prefix,
statsd_enabled=statsd_enabled,
statsd_interval=statsd_interval,
statsd_types=statsd_types,
statsd_pattern=statsd_pattern)
t0 = time.time()
conf = deserialize(conf_raw)
logger.debug("Conf received at %d. Unserialized in %d secs", t0, time.time() - t0)
if harakiri_threshold is not None:
self.raw_conf = self.new_conf
else:
self.raw_conf = None
self.new_conf = None
if self.aggressive_memory_management:
free_memory()
# Tag the conf with our data
self.conf = conf
self.conf.push_flavor = push_flavor
self.conf.instance_name = instance_name
self.conf.skip_initial_broks = skip_initial_broks
self.conf.accept_passive_unknown_check_results = accept_passive_unknown_check_results
self.cur_conf = conf
self.override_conf = override_conf
self.modules = modules
self.satellites = satellites
self.harakiri_threshold = harakiri_threshold
# self.pollers = self.app.pollers
if self.conf.human_timestamp_log:
logger.set_human_format()
# Now We create our pollers
for pol_id in satellites['pollers']:
# Must look if we already have it
already_got = pol_id in self.pollers
p = satellites['pollers'][pol_id]
self.pollers[pol_id] = p
if p['name'] in override_conf['satellitemap']:
p = dict(p) # make a copy
p.update(override_conf['satellitemap'][p['name']])
proto = 'http'
if p['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, p['address'], p['port'])
self.pollers[pol_id]['uri'] = uri
self.pollers[pol_id]['last_connection'] = 0
# Now We create our reactionners
for reac_id in satellites['reactionners']:
# Must look if we already have it
already_got = reac_id in self.reactionners
reac = satellites['reactionners'][reac_id]
self.reactionners[reac_id] = reac
if reac['name'] in override_conf['satellitemap']:
reac = dict(reac) # make a copy
reac.update(override_conf['satellitemap'][reac['name']])
proto = 'http'
            if reac['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, reac['address'], reac['port'])
self.reactionners[reac_id]['uri'] = uri
self.reactionners[reac_id]['last_connection'] = 0
# First mix conf and override_conf to have our definitive conf
for prop in self.override_conf:
# print("Overriding the property %s with value %s" % (prop, self.override_conf[prop]))
val = self.override_conf[prop]
setattr(self.conf, prop, val)
if self.conf.use_timezone != '':
logger.debug("Setting our timezone to %s", self.conf.use_timezone)
os.environ['TZ'] = self.conf.use_timezone
time.tzset()
if len(self.modules) != 0:
logger.debug("I've got %s modules", self.modules)
            # TODO: if the scheduler had previous modules instantiated, it must clean them!
self.modules_manager.set_modules(self.modules)
self.do_load_modules()
# give it an interface
# But first remove previous interface if exists
if self.ichecks is not None:
            logger.debug("Disconnecting previous Check Interface")
self.http_daemon.unregister(self.ichecks)
# Now create and connect it
self.ichecks = IChecks(self.sched)
self.http_daemon.register(self.ichecks)
logger.debug("The Scheduler Interface uri is: %s", self.uri)
# Same for Broks
if self.ibroks is not None:
            logger.debug("Disconnecting previous Broks Interface")
self.http_daemon.unregister(self.ibroks)
# Create and connect it
self.ibroks = IBroks(self.sched)
self.http_daemon.register(self.ibroks)
logger.info("Loading configuration.")
self.conf.explode_global_conf()
        # we give sched its conf
self.sched.reset()
self.sched.load_conf(self.conf)
self.sched.load_satellites(self.pollers, self.reactionners)
# We must update our Config dict macro with good value
# from the config parameters
self.sched.conf.fill_resource_macros_names_macros()
# print("DBG: got macros", self.sched.conf.macros)
# Creating the Macroresolver Class & unique instance
m = MacroResolver()
m.init(self.conf)
# self.conf.dump()
# self.conf.quick_debug()
        # Now create the external commander.
        # It's an applyer: its role is not to dispatch commands,
        # but to apply them
e = ExternalCommandManager(self.conf, 'applyer')
        # The scheduler needs to know about external commands to
        # activate them if necessary
self.sched.load_external_command(e)
        # The external command manager needs the sched because it can raise checks
e.load_scheduler(self.sched)
# We clear our schedulers managed (it's us :) )
# and set ourself in it
self.schedulers = {self.conf.instance_id: self.sched}
# Suicide service if memory threshold has been exceeded
def check_memory_usage(self):
super(Shinken, self).check_memory_usage()
if self.new_conf is not None:
self.sched.die()
# Give the arbiter the data about what I manage
# for me it's just my instance_id and my push flavor
def what_i_managed(self):
if hasattr(self, 'conf'):
return {self.conf.instance_id: self.conf.push_flavor}
else:
return {}
    # our main function, launched after the init
def main(self):
try:
self.load_config_file()
# Setting log level
logger.setLevel(self.log_level)
# Force the debug level if the daemon is said to start with such level
if self.debug:
logger.setLevel('DEBUG')
self.look_for_early_exit()
self.load_parent_config()
self.do_daemon_init_and_start()
self.load_modules_manager()
self.http_daemon.register(self.interface)
self.http_daemon.register(self.istats)
# self.inject = Injector(self.sched)
# self.http_daemon.register(self.inject)
self.http_daemon.unregister(self.interface)
self.uri = self.http_daemon.uri
logger.info("[scheduler] General interface is at: %s", self.uri)
self.do_mainloop()
except Exception as exp:
self.print_unrecoverable(traceback.format_exc())
raise
| 21,989
|
Python
|
.py
| 494
| 34.368421
| 99
| 0.60657
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,543
|
brokerdaemon.py
|
shinken-solutions_shinken/shinken/daemons/brokerdaemon.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import sys
import time
import traceback
import base64
import zlib
import threading
import copy
from multiprocessing import active_children
from collections import deque
import io
from shinken.satellite import BaseSatellite
from shinken.property import PathProp, IntegerProp
from shinken.util import sort_by_ids, get_memory, parse_memory_expr, free_memory
from shinken.serializer import serialize, deserialize, SerializeError
from shinken.log import logger
from shinken.stats import statsmgr
from shinken.external_command import ExternalCommand
from shinken.http_client import HTTPClient, HTTPException
from shinken.daemon import Daemon, Interface
class IStats(Interface):
"""
Interface for various stats about broker activity
"""
doc = 'Get raw stats from the daemon'
def get_raw_stats(self):
app = self.app
res = []
insts = [inst for inst in app.modules_manager.instances if inst.is_external]
for inst in insts:
try:
res.append({'module_name': inst.get_name(), 'queue_size': inst.to_q.qsize()})
except Exception as exp:
res.append({'module_name': inst.get_name(), 'queue_size': 0})
return res
get_raw_stats.doc = doc
# Our main APP class
class Broker(BaseSatellite):
properties = BaseSatellite.properties.copy()
properties.update({
'pidfile': PathProp(default='brokerd.pid'),
'port': IntegerProp(default=7772),
'local_log': PathProp(default='brokerd.log'),
})
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file, profile=''):
super(Broker, self).__init__('broker', config_file, is_daemon, do_replace, debug,
debug_file)
# Our arbiters
self.arbiters = {}
# Our pollers, reactionners and receivers
self.pollers = {}
self.reactionners = {}
self.receivers = {}
# Modules are load one time
self.have_modules = False
# Can have a queue of external_commands given by modules
# will be processed by arbiter
self.external_commands = []
# All broks to manage
self.broks = deque() # broks to manage
self.external_module_broks = deque() # broks during this loop to send to external modules
self.broks_lock = threading.RLock() # to manage lock when managing broks
# broks raised this turn and that needs to be put in self.broks
self.broks_internal_raised = []
# broks raised by the arbiters, we need a lock so the push can be in parallel
# to our current activities and won't lock the arbiter
self.arbiter_broks = []
self.arbiter_broks_lock = threading.RLock()
self.timeout = 1.0
self.istats = IStats(self)
# Schedulers have some queues. We can simplify the call by adding
# elements into the proper queue just by looking at their type
# Brok -> self.broks
# TODO: better tag ID?
# External commands -> self.external_commands
def add(self, elt):
cls_type = elt.__class__.my_type
if cls_type == 'brok':
            # For a brok, we TAG it with our instance_id
elt.instance_id = 0
self.broks_internal_raised.append(elt)
return
elif cls_type == 'externalcommand':
logger.debug("Enqueuing an external command '%s'", ExternalCommand.__dict__)
self.external_commands.append(elt)
        # Maybe we got a Message from the modules: it's a way to ask for
        # something, like full data from a scheduler for example.
elif cls_type == 'message':
# We got a message, great!
logger.debug(elt.__dict__)
if elt.get_type() == 'NeedData':
data = elt.get_data()
# Full instance id means: I got no data for this scheduler
# so give me all dumbass!
if 'full_instance_id' in data:
c_id = data['full_instance_id']
source = elt.source
logger.info('The module %s is asking me to get all initial data '
'from the scheduler %d',
source, c_id)
# so we just reset the connection and the running_id,
# it will just get all new things
try:
self.schedulers[c_id]['con'] = None
self.schedulers[c_id]['running_id'] = 0
except KeyError: # maybe this instance was not known, forget it
                    logger.warning("the module %s asked me for a full_instance_id "
                                   "for an unknown ID (%d)!", source, c_id)
        # Maybe a module tells me that it's dead; I must log its last words...
if elt.get_type() == 'ICrash':
data = elt.get_data()
            logger.error('the module %s just crashed! Please look at the traceback:',
data['name'])
logger.error(data['trace'])
# The module death will be looked for elsewhere and restarted.
# Get the good tabs for links by the kind. If unknown, return None
def get_links_from_type(self, d_type):
t = {'scheduler': self.schedulers,
'arbiter': self.arbiters,
'poller': self.pollers,
'reactionner': self.reactionners,
'receiver': self.receivers
}
if d_type in t:
return t[d_type]
return None
    # Check that we do not connect too often to this element
def is_connection_try_too_close(self, elt):
now = time.time()
last_connection = elt['last_connection']
if now - last_connection < 5:
return True
return False
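    # Hedged example: with the 5s window above, a retry 3s after the
    # previous attempt returns True and the caller skips this turn.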
    # Wrapper around the real do_pynag_con_init function,
    # just to time the connection
def pynag_con_init(self, id, type='scheduler'):
_t = time.time()
r = self.do_pynag_con_init(id, type)
statsmgr.timing('con-init.%s' % type, time.time() - _t, 'perf')
return r
# initialize or re-initialize connection with scheduler or
# arbiter if type == arbiter
def do_pynag_con_init(self, id, type='scheduler'):
# Get the good links tab for looping..
links = self.get_links_from_type(type)
if links is None:
logger.debug('Type unknown for connection! %s', type)
return
# default timeout for daemons like pollers/reactionners/...
timeout = 3
data_timeout = 120
if type == 'scheduler':
# If sched is not active, I do not try to init
# it is just useless
is_active = links[id]['active']
if not is_active:
return
# schedulers also got real timeout to respect
timeout = links[id]['timeout']
data_timeout = links[id]['data_timeout']
# If we try to connect too much, we slow down our tests
if self.is_connection_try_too_close(links[id]):
return
# Ok, we can now update it
links[id]['last_connection'] = time.time()
# DBG: print("Init connection with", links[id]['uri'])
running_id = links[id]['running_id']
# DBG: print("Running id before connection", running_id)
uri = links[id]['uri']
try:
con = links[id]['con'] = HTTPClient(uri=uri,
strong_ssl=links[id]['hard_ssl_name_check'],
timeout=timeout, data_timeout=data_timeout)
except HTTPException as exp:
            # Connection creation failed:
            # log it and mark the link as down
logger.info(
"Connection problem to the %s %s: %s", type, links[id]['name'],
exp
)
links[id]['con'] = None
return
try:
# initial ping must be quick
con.get('ping')
new_run_id = con.get('get_running_id')
new_run_id = float(new_run_id)
# data transfer can be longer
# The schedulers have been restarted: it has a new run_id.
# So we clear all verifs, they are obsolete now.
if new_run_id != running_id:
logger.debug("[%s] New running id for the %s %s: %s (was %s)",
self.name, type, links[id]['name'], new_run_id, running_id)
del links[id]['broks'][:]
# we must ask for a new full broks if
# it's a scheduler
if type == 'scheduler':
logger.debug("[%s] I ask for a broks generation to the scheduler %s",
self.name, links[id]['name'])
con.get('fill_initial_broks', {'bname': self.name}, wait='long')
# Ok all is done, we can save this new running id
links[id]['running_id'] = new_run_id
except HTTPException as exp:
logger.info(
"Connection problem to the %s %s: %s",
type, links[id]['name'], exp)
links[id]['con'] = None
return
except KeyError as exp:
logger.info(
"the %s '%s' is not initialized: %s",
type, links[id]['name'], exp)
links[id]['con'] = None
traceback.print_stack()
return
logger.info("Connection OK to the %s %s", type, links[id]['name'])
# Get a brok. Our role is to put it in the modules
# DO NOT CHANGE data of b!!!
# REF: doc/broker-modules.png (4-5)
def manage_brok(self, b):
# Call all modules if they catch the call
for mod in self.modules_manager.get_internal_instances():
try:
mod.manage_brok(b)
except Exception as exp:
logger.debug(exp.__dict__)
logger.warning(
"The mod %s raise an exception: %s, I'm tagging it to restart later",
mod.get_name(), exp)
logger.warning("Exception type: %s", type(exp))
logger.warning("Back trace of this kill: %s", traceback.format_exc())
self.modules_manager.set_to_restart(mod)
    # Add broks (a list) to the different queues for
    # internal and external modules
def add_broks_to_queue(self, broks):
statsmgr.incr('core.broker.broks.in', len(broks), 'queue')
# Ok now put in queue broks to be managed by
# internal modules
with self.broks_lock:
self.broks.extend(broks)
self.external_module_broks.extend(broks)
# Each turn we get all broks from
# self.broks_internal_raised and we put them in
# self.broks
def interger_internal_broks(self):
self.add_broks_to_queue(self.broks_internal_raised)
self.broks_internal_raised = []
# We will get in the broks list the broks from the arbiters,
# but as the arbiter_broks list can be push by arbiter without Global lock,
    # we must protect this with the list lock
def interger_arbiter_broks(self):
with self.arbiter_broks_lock:
self.add_broks_to_queue(self.arbiter_broks)
self.arbiter_broks = []
# We get new broks from schedulers
# REF: doc/broker-modules.png (2)
def get_new_broks(self, type='scheduler'):
# Get the good links tab for looping..
links = self.get_links_from_type(type)
if links is None:
logger.debug('Type unknown for connection! %s', type)
return
        # We check each satellite link for new broks and put
        # them in our queues
for sched_id in links:
try:
con = links[sched_id]['con']
if con is not None: # None = not initialized
t0 = time.time()
# Before ask a call that can be long, do a simple ping to be sure it is alive
con.get('ping')
payload = con.get(
'get_broks',
{'bname': self.name, 'broks_batch': self.broks_batch},
wait='long')
try:
broks = deserialize(payload)
except (TypeError, SerializeError) as exp:
logger.error('Cannot load broks data from %s : %s',
links[sched_id]['name'], exp)
links[sched_id]['con'] = None
continue
logger.debug("%s Broks get in %s", len(broks), time.time() - t0)
for b in broks:
b.instance_id = links[sched_id]['instance_id']
                    # Ok, we can add these broks to our queues
self.add_broks_to_queue(broks)
else: # no con? make the connection
self.pynag_con_init(sched_id, type=type)
# Ok, con is not known, so we create it
except KeyError as exp:
logger.debug("Key error for get_broks : %s", exp)
self.pynag_con_init(sched_id, type=type)
except HTTPException as exp:
logger.warning(
"Connection problem to the %s %s: %s",
type, links[sched_id]['name'], exp
)
links[sched_id]['con'] = None
            # scheduler must not be initialized
except AttributeError as exp:
logger.warning(
"The %s %s should not be initialized: %s",
type, links[sched_id]['name'], exp
)
# scheduler must not have checks
# What the F**k? We do not know what happened,
# so.. bye bye :)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
sys.exit(1)
# Helper function for module, will give our broks
def get_retention_data(self):
return self.broks
# Get back our broks from a retention module
def restore_retention_data(self, data):
self.broks.extend(data)
def do_stop(self):
act = active_children()
for a in act:
a.terminate()
a.join(1)
super(Broker, self).do_stop()
def setup_new_conf(self):
conf = self.new_conf
self.cur_conf = conf
# Got our name from the globals
g_conf = conf['global']
if 'broker_name' in g_conf:
name = g_conf['broker_name']
else:
name = 'Unnamed broker'
self.name = name
props_to_get = ['broks_batch', 'api_key', 'secret', 'http_proxy',
'statsd_host', 'statsd_port', 'statsd_prefix',
'statsd_enabled', 'statsd_interval', 'statsd_types',
'statsd_pattern']
for prop in props_to_get:
v = g_conf[prop]
setattr(self, prop, v)
self.harakiri_threshold = parse_memory_expr(g_conf['harakiri_threshold'])
if self.harakiri_threshold is not None:
self.raw_conf = self.new_conf
else:
self.raw_conf = None
self.new_conf = None
if self.aggressive_memory_management:
free_memory()
# We got a name so we can update the logger and the stats global objects
logger.load_obj(self, name)
statsmgr.register(self, name, 'broker',
api_key=self.api_key,
secret=self.secret,
http_proxy=self.http_proxy,
statsd_host=self.statsd_host,
statsd_port=self.statsd_port,
statsd_prefix=self.statsd_prefix,
statsd_enabled=self.statsd_enabled,
statsd_interval=self.statsd_interval,
statsd_types=self.statsd_types,
statsd_pattern=self.statsd_pattern)
logger.debug("[%s] Sending us configuration %s", self.name, conf)
# If we've got something in the schedulers, we do not
# want it anymore
# self.schedulers.clear()
for sched_id in conf['schedulers']:
            # Must look if we already have it, so we do not lose our broks
already_got = False
            # We may already have this conf id, but with another address
if sched_id in self.schedulers:
new_addr = conf['schedulers'][sched_id]['address']
old_addr = self.schedulers[sched_id]['address']
new_port = conf['schedulers'][sched_id]['port']
old_port = self.schedulers[sched_id]['port']
                # Both should be the same to be ok :)
if new_addr == old_addr and new_port == old_port:
already_got = True
if already_got:
broks = self.schedulers[sched_id]['broks']
running_id = self.schedulers[sched_id]['running_id']
else:
broks = []
running_id = 0
s = conf['schedulers'][sched_id]
self.schedulers[sched_id] = s
# replacing scheduler address and port by those defined in satellitemap
if s['name'] in g_conf['satellitemap']:
s = dict(s) # make a copy
s.update(g_conf['satellitemap'][s['name']])
proto = 'http'
if s['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, s['address'], s['port'])
self.schedulers[sched_id]['uri'] = uri
self.schedulers[sched_id]['broks'] = broks
self.schedulers[sched_id]['instance_id'] = s['instance_id']
self.schedulers[sched_id]['running_id'] = running_id
self.schedulers[sched_id]['active'] = s['active']
self.schedulers[sched_id]['last_connection'] = 0
self.schedulers[sched_id]['timeout'] = s['timeout']
self.schedulers[sched_id]['data_timeout'] = s['data_timeout']
logger.info("We have our schedulers: %s ", self.schedulers)
# Now get arbiter
for arb_id in conf['arbiters']:
# Must look if we already have it
already_got = arb_id in self.arbiters
if already_got:
broks = self.arbiters[arb_id]['broks']
else:
broks = []
a = conf['arbiters'][arb_id]
self.arbiters[arb_id] = a
# replacing arbiter address and port by those defined in satellitemap
if a['name'] in g_conf['satellitemap']:
a = dict(a) # make a copy
a.update(g_conf['satellitemap'][a['name']])
proto = 'http'
if a['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, a['address'], a['port'])
self.arbiters[arb_id]['uri'] = uri
self.arbiters[arb_id]['broks'] = broks
self.arbiters[arb_id]['instance_id'] = 0 # No use so all to 0
self.arbiters[arb_id]['running_id'] = 0
self.arbiters[arb_id]['last_connection'] = 0
# We do not connect to the arbiter. Connection hangs
logger.info("We have our arbiters: %s ", self.arbiters)
# Now for pollers
for pol_id in conf['pollers']:
# Must look if we already have it
already_got = pol_id in self.pollers
if already_got:
broks = self.pollers[pol_id]['broks']
                running_id = self.pollers[pol_id]['running_id']
else:
broks = []
running_id = 0
p = conf['pollers'][pol_id]
self.pollers[pol_id] = p
# replacing poller address and port by those defined in satellitemap
if p['name'] in g_conf['satellitemap']:
p = dict(p) # make a copy
p.update(g_conf['satellitemap'][p['name']])
proto = 'http'
if p['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, p['address'], p['port'])
self.pollers[pol_id]['uri'] = uri
self.pollers[pol_id]['broks'] = broks
self.pollers[pol_id]['instance_id'] = 0 # No use so all to 0
self.pollers[pol_id]['running_id'] = running_id
self.pollers[pol_id]['last_connection'] = 0
logger.info("We have our pollers: %s", self.pollers)
# Now reactionners
for rea_id in conf['reactionners']:
# Must look if we already have it
already_got = rea_id in self.reactionners
if already_got:
broks = self.reactionners[rea_id]['broks']
                running_id = self.reactionners[rea_id]['running_id']
else:
broks = []
running_id = 0
r = conf['reactionners'][rea_id]
self.reactionners[rea_id] = r
# replacing reactionner address and port by those defined in satellitemap
if r['name'] in g_conf['satellitemap']:
r = dict(r) # make a copy
r.update(g_conf['satellitemap'][r['name']])
proto = 'http'
if r['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, r['address'], r['port'])
self.reactionners[rea_id]['uri'] = uri
self.reactionners[rea_id]['broks'] = broks
self.reactionners[rea_id]['instance_id'] = 0 # No use so all to 0
self.reactionners[rea_id]['running_id'] = running_id
self.reactionners[rea_id]['last_connection'] = 0
logger.info("We have our reactionners: %s", self.reactionners)
# Now receivers
for rec_id in conf['receivers']:
# Must look if we already have it
already_got = rec_id in self.receivers
if already_got:
broks = self.receivers[rec_id]['broks']
                running_id = self.receivers[rec_id]['running_id']
else:
broks = []
running_id = 0
r = conf['receivers'][rec_id]
self.receivers[rec_id] = r
            # replacing receiver address and port by those defined in satellitemap
if r['name'] in g_conf['satellitemap']:
r = dict(r) # make a copy
r.update(g_conf['satellitemap'][r['name']])
proto = 'http'
if r['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, r['address'], r['port'])
self.receivers[rec_id]['uri'] = uri
self.receivers[rec_id]['broks'] = broks
self.receivers[rec_id]['instance_id'] = 0 # No use so all to 0
self.receivers[rec_id]['running_id'] = running_id
self.receivers[rec_id]['last_connection'] = 0
if not self.have_modules:
self.modules = mods = conf['global']['modules']
self.have_modules = True
logger.info("We received modules %s ", mods)
# Ok now start, or restart them!
# Set modules, init them and start external ones
self.modules_manager.set_modules(self.modules)
self.do_load_modules()
self.modules_manager.start_external_instances()
        # Set our given timezone from the arbiter
use_timezone = conf['global']['use_timezone']
if use_timezone != 'NOTSET':
logger.info("Setting our timezone to %s", use_timezone)
os.environ['TZ'] = use_timezone
time.tzset()
# Connection init with Schedulers
for sched_id in self.schedulers:
self.pynag_con_init(sched_id, type='scheduler')
for pol_id in self.pollers:
self.pynag_con_init(pol_id, type='poller')
for rea_id in self.reactionners:
self.pynag_con_init(rea_id, type='reactionner')
    # An arbiter asked us to wait for a new conf, so we must clean
    # up all the mess we made, and close modules too
def clean_previous_run(self):
# Clean all lists
self.schedulers.clear()
self.pollers.clear()
self.reactionners.clear()
self.broks = deque(self.broks)
self.broks_internal_raised = self.broks_internal_raised[:]
with self.arbiter_broks_lock:
self.arbiter_broks = self.arbiter_broks[:]
self.external_commands = self.external_commands[:]
# And now modules
self.have_modules = False
self.modules_manager.clear_instances()
    # Gets internal metrics for both statsd and the stats interface
def get_internal_metrics(self):
# Queues
metrics = [
('core.broker.mem', get_memory(), 'system'),
('core.broker.external-commands.queue',
len(self.external_commands), 'queue'),
('core.broker.broks.queue', len(self.broks), 'queue'),
]
return metrics
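    # Hedged example of the list returned above (values invented):
    # [('core.broker.mem', 123456, 'system'),
    #  ('core.broker.external-commands.queue', 0, 'queue'),
    #  ('core.broker.broks.queue', 42, 'queue')]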
    # The stats thread is asking us for a main stats structure
def get_stats_struct(self):
now = int(time.time())
# call the daemon one
res = super(Broker, self).get_stats_struct()
res.update({'name': self.name, 'type': "broker"})
# metrics specific
metrics = res["metrics"]
for metric in self.get_internal_metrics():
name, value, mtype = metric
            metrics.append((name, value, now, mtype))
return res
def do_loop_turn(self):
loop_time = time.time()
with self.broks_lock:
nb_broks = len(self.broks)
nb_external_broks = len(self.external_module_broks)
logger.debug("[Broks] Begin Loop: managing queue broks [%d]" % nb_broks)
self.broks_done = 0
# FIXME: Does it come from a structure only known from enterprise ?
# for mod in self.modules_manager.get_internal_instances():
# self.local_module_stats[mod.get_name()] = 0
# Dump modules Queues size
external_modules = [
external_module for external_module in self.modules_manager.instances
if external_module.is_external
]
for external_module in external_modules:
try:
logger.debug("[Broks] External Queue len (%s): %s" % (
external_module.get_name(), external_module.to_q.qsize()
))
except Exception as exp:
logger.debug("External Queue len (%s): Exception! %s" % (external_module.get_name(), exp))
# Begin to clean modules
self.check_and_del_zombie_modules()
        # Maybe the arbiter asked us to wait for a new conf.
        # If so, we must restart everything...
if self.cur_conf is None:
# Clean previous run from useless objects and close modules
self.clean_previous_run()
self.wait_for_initial_conf()
# we may have been interrupted or so; then
# just return from this loop turn
if not self.new_conf:
return
self.setup_new_conf()
        # Now we check if the arbiter spoke to us through the http_daemon.
        # If so, we listen to it.
        # When it pushes a conf to us, we reinit the connections
self.watch_for_new_conf(0.0)
if self.new_conf:
if self.graceful_enabled and self.switch_process() is True:
# Child successfully spawned, we're exiting
return
self.setup_new_conf()
        # Maybe last loop we raised some broks internally;
        # we should integrate them into broks
self.interger_internal_broks()
# Also reap broks sent from the arbiters
self.interger_arbiter_broks()
# Main job, go get broks in our distants daemons
types = ['scheduler', 'poller', 'reactionner', 'receiver']
for _type in types:
_t = time.time()
# And from schedulers
self.get_new_broks(type=_type)
statsmgr.timing('core.broker.get-new-broks.%s' % _type, time.time() - _t,
'perf')
        # We will work this turn with a copy of the broks, so we won't be
        # impacted by other threads (modules and so on)
with self.broks_lock:
broks = copy.copy(self.broks)
to_send = list(self.external_module_broks)
self.broks = deque()
self.external_module_broks = deque()
# and for external queues
# REF: doc/broker-modules.png (3)
        # We put into the external queues the broks that were not already sent
t0 = time.time()
# We are sending broks as a big list, more efficient than one by one
queues = self.modules_manager.get_external_to_queues()
for q in queues:
try:
q.put(to_send)
            # We catch the error; the kill detector on the next loop will
            # detect the failed module and manage it
except Exception:
                logger.error(
                    'FAIL TO PUSH DATA TO EXTERNAL MODULE: this module will '
                    'be detected and restarted.'
                )
statsmgr.timing('core.broker.put-to-external-queue', time.time() - t0, 'perf')
logger.debug("[Broks] Time to send [%s] broks to module ([%.3f] secs)" % (len(to_send), time.time() - t0))
start = time.time()
while len(broks) != 0:
now = time.time()
# Do not 'manage' more than 1s, we must get new broks
# every 1s
if now - start > 1:
                # so we must remerge our remaining broks with the main broks
                # so as not to lose them
with self.broks_lock:
logger.debug(
                        'Cannot manage all remaining broks [%d] in a loop '
                        'turn, pushing these broks back into the queue.' % len(broks)
)
self.broks.extendleft(broks)
break
try:
b = broks.pop()
except IndexError: # no more broks, maybe a daemon stop, not a problem, catch it
break
# Ok, we can get the brok, and doing something with it
# REF: doc/broker-modules.png (4-5)
            # We unserialize the brok before consuming it
#b.prepare()
_t = time.time()
self.manage_brok(b)
statsmgr.timing('core.broker.manage-brok', time.time() - _t, 'perf')
        # Maybe external modules raised 'objects'; we should get them
nb_object_get = self.get_objects_from_from_queues()
logger.debug(
'[stats] nb object get control queues of external module [%d]' %
nb_object_get
)
# Say to modules it's a new tick :)
self.hook_point('tick')
logger.debug('[stats] broks done this loop %d/%d' % (self.broks_done, nb_broks))
time.sleep(max(0.01, min(1.0, 1.0 - (time.time() - loop_time))))
        # Checks if memory consumption did not exceed the allowed threshold
self.check_memory_usage()
# Main function, will loop forever
def main(self):
try:
self.load_config_file()
# Setting log level
logger.setLevel(self.log_level)
# Force the debug level if the daemon is said to start with such level
if self.debug:
logger.setLevel('DEBUG')
for line in self.get_header():
logger.info(line)
logger.info("[Broker] Using working directory: %s", os.path.abspath(self.workdir))
# Look if we are enabled or not. If ok, start the daemon mode
self.look_for_early_exit()
self.load_parent_config()
self.do_daemon_init_and_start()
self.load_modules_manager()
self.uri2 = self.http_daemon.register(self.interface)
logger.debug("The Arbiter uri it at %s", self.uri2)
self.uri3 = self.http_daemon.register(self.istats)
# We wait for initial conf
self.wait_for_initial_conf()
if not self.new_conf:
return
self.setup_new_conf()
# Do the modules part, we have our modules in self.modules
# REF: doc/broker-modules.png (1)
self.hook_point('load_retention')
# Now the main loop
self.do_mainloop()
except Exception as exp:
self.print_unrecoverable(traceback.format_exc())
raise
| 34,135 | Python | .py | 741 | 33.727395 | 114 | 0.559498 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,544 | pollerdaemon.py | shinken-solutions_shinken/shinken/daemons/pollerdaemon.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.satellite import Satellite
from shinken.property import PathProp, IntegerProp
# Our main APP class
class Poller(Satellite):
do_checks = True # I do checks
do_actions = False # but no actions
my_type = 'poller'
properties = Satellite.properties.copy()
properties.update({
'pidfile': PathProp(default='pollerd.pid'),
'port': IntegerProp(default=7771),
'local_log': PathProp(default='pollerd.log'),
})
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file, profile):
super(Poller, self).__init__('poller', config_file, is_daemon, do_replace, debug,
debug_file)
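# Illustrative sketch (not part of the original source): the do_checks /
# do_actions class flags above are what differentiate a Poller from a
# Reactionner; a hypothetical dispatcher could branch on them like this:
# def accepts(satellite, is_check):
#     return satellite.do_checks if is_check else satellite.do_actions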
| 1,705 | Python | .py | 39 | 40.051282 | 89 | 0.71308 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,545 | reactionnerdaemon.py | shinken-solutions_shinken/shinken/daemons/reactionnerdaemon.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This class is an application that launches actions for the schedulers
# Actions can be:
# Notifications
# Event handlers
#
# When running, the Reactionner will:
#   Respond to Pyro pings from the Arbiter
#   Listen for new configurations from the Arbiter
#
# The configuration consists of a list of Schedulers for which
# the Reactionner will launch actions.
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.satellite import Satellite
from shinken.property import PathProp, IntegerProp
class Reactionner(Satellite):
do_checks = False # I do not do checks
do_actions = True
my_type = 'reactionner'
properties = Satellite.properties.copy()
properties.update({
'pidfile': PathProp(default='reactionnerd.pid'),
'port': IntegerProp(default=7769),
'local_log': PathProp(default='reactionnerd.log'),
})
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file, profile=''):
super(Reactionner, self).__init__('reactionner', config_file, is_daemon, do_replace, debug,
debug_file)
| 2,075 | Python | .py | 49 | 39.061224 | 99 | 0.728175 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,546 | __init__.py | shinken-solutions_shinken/shinken/daemons/__init__.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
| 918 | Python | .py | 22 | 40.681818 | 77 | 0.758659 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,547 | receiverdaemon.py | shinken-solutions_shinken/shinken/daemons/receiverdaemon.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import time
import traceback
import sys
import base64
import zlib
from multiprocessing import active_children
from shinken.satellite import Satellite
from shinken.property import PathProp, IntegerProp
from shinken.log import logger
from shinken.external_command import ExternalCommand, ExternalCommandManager
from shinken.http_client import HTTPException
from shinken.daemon import Interface
from shinken.stats import statsmgr
from shinken.util import parse_memory_expr, free_memory
from shinken.serializer import serialize
class IStats(Interface):
"""
    Interface for various stats about receiver activity
"""
doc = '''Get raw stats from the daemon:
* command_buffer_size: external command buffer size
'''
def get_raw_stats(self):
app = self.app
res = {'command_buffer_size': len(app.external_commands)}
return res
get_raw_stats.doc = doc
class IBroks(Interface):
""" Interface for Brokers:
They connect here and get all broks (data for brokers). Data must be ORDERED!
(initial status BEFORE update...) """
    # A broker asks us for broks
def get_broks(self, bname, broks_batch=0):
res = self.app.get_broks(broks_batch)
return serialize(res)
get_broks.encode = 'raw'
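    # Illustrative sketch (not part of the original source): a broker-side
    # client would call the interface above over HTTP and unserialize the
    # payload; the connection helper and the client-side deserialize()
    # import are assumptions here.
    # raw = con.get('get_broks', {'bname': 'broker-1', 'broks_batch': 100})
    # broks = deserialize(raw)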
# Our main APP class
class Receiver(Satellite):
my_type = 'receiver'
properties = Satellite.properties.copy()
properties.update({
'pidfile': PathProp(default='receiverd.pid'),
'port': IntegerProp(default=7773),
'local_log': PathProp(default='receiverd.log'),
})
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file):
super(Receiver, self).__init__(
'receiver', config_file, is_daemon, do_replace, debug, debug_file)
# Our arbiters
self.arbiters = {}
# Our pollers and reactionners
self.pollers = {}
self.reactionners = {}
        # Modules are loaded only once
self.have_modules = False
        # We can have a queue of external_commands given by modules;
        # it will be taken by the arbiter for processing
self.external_commands = []
# and the unprocessed one, a buffer
self.unprocessed_external_commands = []
self.host_assoc = {}
self.direct_routing = False
self.accept_passive_unknown_check_results = False
self.istats = IStats(self)
self.ibroks = IBroks(self)
# Now create the external commander. It's just here to dispatch
# the commands to schedulers
e = ExternalCommandManager(None, 'receiver')
e.load_receiver(self)
self.external_command = e
    # Schedulers have some queues. We can simplify the calls by adding
    # elements to the proper queue just by looking at their type
    # Brok -> self.broks
    # TODO: better tag ID?
    # External commands -> self.external_commands
def add(self, elt):
cls_type = elt.__class__.my_type
if cls_type == 'brok':
# For brok, we TAG brok with our instance_id
elt.instance_id = 0
self.broks.append(elt)
return
elif cls_type == 'externalcommand':
logger.debug("Enqueuing an external command: %s", ExternalCommand.__dict__)
self.unprocessed_external_commands.append(elt)
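    # Illustrative sketch (not part of the original source): add() routes
    # elements by their my_type class attribute, so both queues are fed
    # through a single entry point; the constructor arguments below are
    # only indicative.
    # receiver.add(Brok('log', {'log': 'hello'}))    # -> self.broks
    # receiver.add(ExternalCommand('...'))           # -> self.unprocessed_external_commands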
def push_host_names(self, data):
sched_id = data["sched_id"]
hnames = data["hnames"]
for h in hnames:
self.host_assoc[h] = sched_id
def get_sched_from_hname(self, hname):
i = self.host_assoc.get(hname, None)
e = self.schedulers.get(i, None)
return e
    # Get a brok. Our role is to pass it to our modules
    # They MUST NOT change the data of b!!!
    # REF: doc/receiver-modules.png (4-5)
def manage_brok(self, b):
to_del = []
# Call all modules if they catch the call
for mod in self.modules_manager.get_internal_instances():
try:
mod.manage_brok(b)
except Exception as exp:
                logger.warning(
                    "The module %s raised an exception: %s, I kill it",
                    mod.get_name(), exp)
logger.warning("Exception type: %s", type(exp))
logger.warning("Back trace of this kill: %s", traceback.format_exc())
to_del.append(mod)
        # Now remove the modules that raised an exception
self.modules_manager.clear_instances(to_del)
def do_stop(self):
act = active_children()
for a in act:
a.terminate()
a.join(1)
super(Receiver, self).do_stop()
def setup_new_conf(self):
conf = self.new_conf
self.cur_conf = conf
# Got our name from the globals
if 'receiver_name' in conf['global']:
name = conf['global']['receiver_name']
else:
name = 'Unnamed receiver'
self.name = name
self.api_key = conf['global']['api_key']
self.secret = conf['global']['secret']
self.http_proxy = conf['global']['http_proxy']
self.statsd_host = conf['global']['statsd_host']
self.statsd_port = conf['global']['statsd_port']
self.statsd_prefix = conf['global']['statsd_prefix']
self.statsd_enabled = conf['global']['statsd_enabled']
self.statsd_interval = conf['global']['statsd_interval']
self.statsd_types = conf['global']['statsd_types']
self.statsd_pattern = conf['global']['statsd_pattern']
self.harakiri_threshold = parse_memory_expr(
conf['global']['harakiri_threshold'])
if self.harakiri_threshold is not None:
self.raw_conf = self.new_conf
else:
self.raw_conf = None
self.new_conf = None
if self.aggressive_memory_management:
free_memory()
statsmgr.register(self, self.name, 'receiver',
api_key=self.api_key,
secret=self.secret,
http_proxy=self.http_proxy,
statsd_host=self.statsd_host,
statsd_port=self.statsd_port,
statsd_prefix=self.statsd_prefix,
statsd_enabled=self.statsd_enabled,
statsd_interval=self.statsd_interval,
statsd_types=self.statsd_types,
statsd_pattern=self.statsd_pattern)
logger.load_obj(self, name)
self.direct_routing = conf['global']['direct_routing']
self.accept_passive_unknown_check_results = \
conf['global']['accept_passive_unknown_check_results']
g_conf = conf['global']
# If we've got something in the schedulers, we do not want it anymore
for sched_id in conf['schedulers']:
already_got = False
            # We may already have this conf id, but with another address
if sched_id in self.schedulers:
new_addr = conf['schedulers'][sched_id]['address']
old_addr = self.schedulers[sched_id]['address']
new_port = conf['schedulers'][sched_id]['port']
old_port = self.schedulers[sched_id]['port']
                # Everything should be the same for it to be ok :)
if new_addr == old_addr and new_port == old_port:
already_got = True
if already_got:
logger.info("[%s] We already got the conf %d (%s)",
self.name, sched_id, conf['schedulers'][sched_id]['name'])
wait_homerun = self.schedulers[sched_id]['wait_homerun']
actions = self.schedulers[sched_id]['actions']
external_commands = self.schedulers[sched_id]['external_commands']
con = self.schedulers[sched_id]['con']
s = conf['schedulers'][sched_id]
self.schedulers[sched_id] = s
if s['name'] in g_conf['satellitemap']:
s.update(g_conf['satellitemap'][s['name']])
proto = 'http'
if s['use_ssl']:
proto = 'https'
uri = '%s://%s:%s/' % (proto, s['address'], s['port'])
self.schedulers[sched_id]['uri'] = uri
if already_got:
self.schedulers[sched_id]['wait_homerun'] = wait_homerun
self.schedulers[sched_id]['actions'] = actions
self.schedulers[sched_id]['external_commands'] = external_commands
self.schedulers[sched_id]['con'] = con
else:
self.schedulers[sched_id]['wait_homerun'] = {}
self.schedulers[sched_id]['actions'] = {}
self.schedulers[sched_id]['external_commands'] = []
self.schedulers[sched_id]['con'] = None
self.schedulers[sched_id]['running_id'] = 0
self.schedulers[sched_id]['active'] = s['active']
self.schedulers[sched_id]['timeout'] = s['timeout']
self.schedulers[sched_id]['data_timeout'] = s['data_timeout']
# Do not connect if we are a passive satellite
if self.direct_routing and not already_got:
# And then we connect to it :)
self.pynag_con_init(sched_id)
logger.debug("[%s] Sending us configuration %s", self.name, conf)
if not self.have_modules:
self.modules = mods = conf['global']['modules']
self.have_modules = True
logger.info("We received modules %s ", mods)
        # Set the timezone given to us by the arbiter
use_timezone = conf['global']['use_timezone']
if use_timezone != 'NOTSET':
logger.info("Setting our timezone to %s", use_timezone)
os.environ['TZ'] = use_timezone
time.tzset()
# Take all external commands, make packs and send them to
# the schedulers
def push_external_commands_to_schedulers(self):
        # If we are not in direct routing mode, just bail out after
        # faking the resolution of the commands
if not self.direct_routing:
self.external_commands.extend(self.unprocessed_external_commands)
self.unprocessed_external_commands = []
return
commands_to_process = self.unprocessed_external_commands
self.unprocessed_external_commands = []
# Now get all external commands and put them into the
# good schedulers
for ext_cmd in commands_to_process:
self.external_command.resolve_command(ext_cmd)
# Now for all alive schedulers, send the commands
for sched_id in self.schedulers:
sched = self.schedulers[sched_id]
extcmds = sched['external_commands']
cmds = [extcmd.cmd_line for extcmd in extcmds]
con = sched.get('con', None)
sent = False
if not con:
logger.warning("The scheduler is not connected %s", sched)
self.pynag_con_init(sched_id)
con = sched.get('con', None)
# If there are commands and the scheduler is alive
if len(cmds) > 0 and con:
logger.debug("Sending %d commands to scheduler %s", len(cmds), sched)
try:
# con.run_external_commands(cmds)
con.put('run_external_commands', serialize(cmds))
sent = True
# Not connected or sched is gone
except (HTTPException, KeyError) as exp:
logger.debug('manage_returns exception:: %s,%s ', type(exp), exp)
self.pynag_con_init(sched_id)
return
                except AttributeError as exp:  # the scheduler is probably not initialized yet
logger.debug('manage_returns exception:: %s,%s ', type(exp), exp)
except Exception as exp:
logger.error(
"A satellite raised an unknown exception: %s (%s)",
exp, type(exp)
)
raise
            # Whether we sent the commands or not, clean the scheduler list
self.schedulers[sched_id]['external_commands'] = []
# If we didn't send them, add the commands to the arbiter list
if not sent:
for extcmd in extcmds:
self.external_commands.append(extcmd)
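    # Illustrative sketch (not part of the original source): commands that
    # could not be delivered stay in self.external_commands, where the
    # arbiter later collects them through the satellite link, e.g.:
    # new_cmds = sat.get_external_commands()   # see arbiterdaemon.py
    # self.external_commands.extend(new_cmds)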
def do_loop_turn(self):
sys.stdout.write(".")
sys.stdout.flush()
# Begin to clean modules
self.check_and_del_zombie_modules()
        # Now we check if the arbiter speaks to us through the http_daemon.
        # If so, we listen to it
        # When it pushes a conf to us, we reinit connections
self.watch_for_new_conf(0.0)
if self.new_conf:
if self.graceful_enabled and self.switch_process() is True:
# Child successfully spawned, we're exiting
return
self.setup_new_conf()
# Maybe external modules raised 'objects'
# we should get them
self.get_objects_from_from_queues()
self.push_external_commands_to_schedulers()
# print("watch new conf 1: begin", len(self.broks))
self.watch_for_new_conf(1.0)
# print("get enw broks watch new conf 1: end", len(self.broks))
        # Checks if memory consumption did not exceed the allowed threshold
self.check_memory_usage()
# Main function, will loop forever
def main(self):
try:
self.load_config_file()
# Setting log level
logger.setLevel(self.log_level)
# Force the debug level if the daemon is said to start with such level
if self.debug:
logger.setLevel('DEBUG')
# Look if we are enabled or not. If ok, start the daemon mode
self.look_for_early_exit()
self.load_parent_config()
for line in self.get_header():
logger.info(line)
logger.info("[Receiver] Using working directory: %s", os.path.abspath(self.workdir))
self.do_daemon_init_and_start()
self.load_modules_manager()
self.uri2 = self.http_daemon.register(self.interface)
logger.debug("The Arbiter uri it at %s", self.uri2)
self.uri3 = self.http_daemon.register(self.istats)
# Register ibroks
if self.ibroks is not None:
logger.debug("Deconnecting previous Broks Interface")
self.http_daemon.unregister(self.ibroks)
# Create and connect it
self.http_daemon.register(self.ibroks)
# We wait for initial conf
self.wait_for_initial_conf()
if not self.new_conf:
return
self.setup_new_conf()
self.modules_manager.set_modules(self.modules)
self.do_load_modules()
# and start external modules too
self.modules_manager.start_external_instances()
# Do the modules part, we have our modules in self.modules
# REF: doc/receiver-modules.png (1)
# Now the main loop
self.do_mainloop()
except Exception as exp:
self.print_unrecoverable(traceback.format_exc())
raise
    # The stats thread is asking us for a main stats structure
def get_stats_struct(self):
# call the daemon one
res = super(Receiver, self).get_stats_struct()
res.update({'name': self.name, 'type': 'receiver',
'direct_routing': self.direct_routing})
return res
| 16,739 | Python | .py | 368 | 34.57337 | 96 | 0.597765 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,548 | arbiterdaemon.py | shinken-solutions_shinken/shinken/daemons/arbiterdaemon.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import sys
import os
import time
import socket
import traceback
import json
import io
from shinken.objects.config import Config
from shinken.external_command import ExternalCommandManager
from shinken.dispatcher import Dispatcher
from shinken.daemon import Daemon, Interface
from shinken.log import logger
from shinken.stats import statsmgr
from shinken.brok import Brok
from shinken.external_command import ExternalCommand
from shinken.property import BoolProp
from shinken.util import jsonify_r, get_memory, free_memory
from shinken.serializer import serialize, deserialize
# Interface for the other Arbiter
# It connects, and together we decide who's the Master and who's the Slave, etc.
# Here is also a function to get a new conf from the master
class IForArbiter(Interface):
doc = 'Does the daemon got a configuration (internal)'
def have_conf(self, magic_hash):
        # Beware, we get a str as input, not an int
magic_hash = int(magic_hash)
# I've got a conf and a good one
if self.app.cur_conf and self.app.cur_conf.magic_hash == magic_hash:
return True
else: # I've no conf or a bad one
return False
have_conf.doc = doc
doc = 'Put a new configuration to the daemon'
    # The master Arbiter is sending us a new conf in a serialized way. Ok, we take it
def put_conf(self, conf):
super(IForArbiter, self).put_conf(conf)
self.app.must_run = False
put_conf.method = 'PUT'
put_conf.doc = doc
doc = 'Get the managed configuration (internal)'
def get_config(self):
return self.app.conf
get_config.doc = doc
    doc = 'Ask the daemon not to run'
# The master arbiter asks me not to run!
def do_not_run(self):
# If I'm the master, ignore the command
if self.app.is_master:
logger.debug("Received message to not run. "
"I am the Master, ignore and continue to run.")
# Else, I'm just a spare, so I listen to my master
else:
logger.debug("Received message to not run. I am the spare, stopping.")
self.app.last_master_speack = time.time()
self.app.must_run = False
do_not_run.need_lock = False
do_not_run.doc = doc
    doc = 'Get the satellite names sorted by type'
    # Here is a function called by check_shinken to get the daemons list
def get_satellite_list(self, daemon_type=''):
res = {}
for t in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver',
'broker']:
if daemon_type and daemon_type != t:
continue
satellite_list = []
res[t] = satellite_list
daemon_name_attr = t + "_name"
daemons = self.app.get_daemons(t)
for dae in daemons:
if hasattr(dae, daemon_name_attr):
satellite_list.append(getattr(dae, daemon_name_attr))
return res
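    # Illustrative example (not part of the original source) of the
    # structure returned above, assuming one scheduler and one poller:
    # {'arbiter': ['arbiter-master'], 'scheduler': ['scheduler-1'],
    #  'poller': ['poller-1'], 'reactionner': [], 'receiver': [], 'broker': []}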
get_satellite_list.doc = doc
doc = 'Dummy call for the arbiter'
# Dummy call. We are the master, we manage what we want
def what_i_managed(self):
return serialize({})
what_i_managed.need_lock = False
what_i_managed.doc = doc
doc = 'Return all the data of the satellites'
# We will try to export all data from our satellites, but only the json-able fields
def get_all_states(self):
res = {}
for t in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver',
'broker']:
lst = []
res[t] = lst
for d in getattr(self.app.conf, t + 's'):
cls = d.__class__
e = {}
ds = [cls.properties, cls.running_properties]
for _d in ds:
for prop in _d:
if hasattr(d, prop):
v = getattr(d, prop)
if prop == "realm":
if hasattr(v, "realm_name"):
e[prop] = v.realm_name
# give a try to a json able object
try:
json.dumps(v)
e[prop] = v
except Exception as exp:
logger.debug('%s', exp)
lst.append(e)
return res
get_all_states.doc = doc
# Try to give some properties of our objects
doc = 'Dump all objects of the type in [hosts, services, contacts, ' \
'commands, hostgroups, servicegroups]'
def get_objects_properties(self, table):
logger.debug('ASK:: table= %s', table)
objs = getattr(self.app.conf, table, None)
logger.debug("OBJS:: %s", objs)
if objs is None or len(objs) == 0:
return []
res = []
for obj in objs:
ln = jsonify_r(obj)
res.append(ln)
return res
get_objects_properties.doc = doc
# Main Arbiter Class
class Arbiter(Daemon):
def __init__(self, config_files, is_daemon, do_replace, verify_only, debug,
debug_file, profile=None, analyse=None, migrate=None, arb_name='',
dump_config_file=None):
super(Arbiter, self).__init__('arbiter', config_files[0], is_daemon, do_replace,
debug, debug_file)
self.graceful_enabled = False
self.aggressive_memory_management = False
self.config_files = config_files
self.verify_only = verify_only
self.analyse = analyse
self.migrate = migrate
self.arb_name = arb_name
self.dump_config_file = dump_config_file
self.broks = []
self.is_master = False
self.me = None
self.nb_broks_send = 0
# Now tab for external_commands
self.external_commands = []
self.fifo = None
# Used to work out if we must still be alive or not
self.must_run = True
self.interface = IForArbiter(self)
self.conf = Config()
    # Used for adding things like broks
def add(self, b):
if isinstance(b, Brok):
self.broks.append(b)
elif isinstance(b, ExternalCommand):
self.external_commands.append(b)
else:
logger.warning('Cannot manage object type %s (%s)', type(b), b)
# We must push our broks to the broker
# because it's stupid to make a crossing connection
# so we find the broker responsible for our broks,
# and we send it to him
# TODO: better find the broker, here it can be dead?
# or not the good one?
def push_broks_to_broker(self):
brokers = self.conf.brokers
for brk in [b for b in brokers if b.manage_arbiters and b.alive]:
while len(self.broks) > 0:
if brk.broks_batch == 0:
count = len(self.broks)
else:
count = min(brk.broks_batch, len(self.broks))
is_send = brk.push_broks(self.broks[:count])
if is_send:
# They are gone, we keep none!
del self.broks[:count]
statsmgr.incr('core.arbiter.broks.out', count, 'queue')
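    # Illustrative sketch (not part of the original source) of the batching
    # used above, with a stand-alone list instead of self.broks:
    # broks, batch = list(range(5)), 2
    # while broks:
    #     count = min(batch, len(broks)) if batch else len(broks)
    #     chunk, broks = broks[:count], broks[count:]  # [0,1] then [2,3] then [4]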
# We must take external_commands from all satellites
# like brokers, pollers, reactionners or receivers
def get_external_commands_from_satellites(self):
sat_lists = [self.conf.brokers, self.conf.receivers,
self.conf.pollers, self.conf.reactionners]
for lst in sat_lists:
for sat in lst:
# Get only if alive of course
if sat.alive:
new_cmds = sat.get_external_commands()
for new_cmd in new_cmds:
self.external_commands.append(new_cmd)
# Our links to satellites can raise broks. We must send them
def get_broks_from_satellitelinks(self):
tabs = [self.conf.brokers, self.conf.schedulers,
self.conf.pollers, self.conf.reactionners,
self.conf.receivers]
for tab in tabs:
for s in tab:
new_broks = s.get_all_broks()
_type = s.my_type
statsmgr.incr('core.arbiter.broks.in.%s' % _type,
len(new_broks), 'queue')
for b in new_broks:
self.add(b)
    # At startup, our satellite links also provide initial status broks. We must collect them
def get_initial_broks_from_satellitelinks(self):
tabs = [self.conf.brokers, self.conf.schedulers,
self.conf.pollers, self.conf.reactionners,
self.conf.receivers]
for tab in tabs:
for s in tab:
b = s.get_initial_status_brok()
self.add(b)
# Load the external commander
def load_external_command(self, e):
self.external_command = e
self.fifo = e.open()
def get_daemon_links(self, daemon_type):
# the attribute name to get these differs for schedulers and arbiters
return daemon_type + 's'
def load_config_file(self):
logger.info("Loading configuration")
# REF: doc/shinken-conf-dispatching.png (1)
buf = self.conf.read_config(self.config_files)
raw_objects = self.conf.read_config_buf(buf)
logger.debug("Opening local log file")
# First we need to get arbiters and modules
# so we can ask them for objects
self.conf.create_objects_for_type(raw_objects, 'arbiter')
self.conf.create_objects_for_type(raw_objects, 'module')
self.conf.early_arbiter_linking()
# Search which Arbiterlink I am
for arb in self.conf.arbiters:
if arb.is_me(self.arb_name):
arb.need_conf = False
self.me = arb
self.is_master = not self.me.spare
if self.is_master:
logger.info("I am the master Arbiter: %s", arb.get_name())
else:
logger.info("I am a spare Arbiter: %s", arb.get_name())
# export this data to our statsmgr object :)
api_key = getattr(self.conf, 'api_key', '')
secret = getattr(self.conf, 'secret', '')
http_proxy = getattr(self.conf, 'http_proxy', '')
statsd_host = getattr(self.conf, 'statsd_host', 'localhost')
statsd_port = getattr(self.conf, 'statsd_port', 8125)
statsd_interval = getattr(self.conf, 'statsd_interval', 5)
statsd_prefix = getattr(self.conf, 'statsd_prefix', 'shinken')
statsd_enabled = getattr(self.conf, 'statsd_enabled', False)
statsd_types = getattr(self.conf, 'statsd_types', None)
statsd_pattern = getattr(self.conf, 'statsd_pattern', '')
statsmgr.register(self, arb.get_name(), 'arbiter',
api_key=api_key, secret=secret, http_proxy=http_proxy,
statsd_host=statsd_host, statsd_port=statsd_port,
statsd_prefix=statsd_prefix,
statsd_enabled=statsd_enabled,
statsd_interval=statsd_interval,
statsd_types=statsd_types,
statsd_pattern=statsd_pattern)
# Set myself as alive ;)
self.me.alive = True
else: # not me
arb.need_conf = True
if not self.me:
sys.exit("Error: I cannot find my own Arbiter object, I bail out. \
To solve it, please change the host_name parameter in \
the object Arbiter in the file shinken-specific.cfg. \
With the value %s \
Thanks." % socket.gethostname())
logger.info("My own modules: " + ','.join([m.get_name() for m in self.me.modules]))
self.modules_dir = getattr(self.conf, 'modules_dir', '')
# Ok it's time to load the module manager now!
self.load_modules_manager()
# we request the instances without them being *started*
# (for those that are concerned ("external" modules):
# we will *start* these instances after we have been daemonized (if requested)
self.modules_manager.set_modules(self.me.modules)
self.do_load_modules()
# Call modules that manage this read configuration pass
self.hook_point('read_configuration')
# Call modules get_objects() to load new objects from them
# (example modules: glpi, mongodb, dummy_arbiter)
self.load_modules_configuration_objects(raw_objects)
# Resume standard operations ###
self.conf.create_objects(raw_objects)
# Maybe conf is already invalid
if not self.conf.conf_is_correct:
sys.exit("***> One or more problems was encountered "
"while processing the config files...")
# Manage all post-conf modules
self.hook_point('early_configuration')
# Ok here maybe we should stop because we are in a pure migration run
if self.migrate:
logger.info("Migration MODE. Early exiting from configuration relinking phase")
return
# Load all file triggers
self.conf.load_triggers()
# Create Template links
self.conf.linkify_templates()
# All inheritances
self.conf.apply_inheritance()
# Explode between types
self.conf.explode()
# Implicit inheritance for services
self.conf.apply_implicit_inheritance()
# Fill default values
self.conf.fill_default()
# Remove templates from config
self.conf.remove_templates()
        # Override specific service instance properties
self.conf.override_properties()
# Linkify objects to each other
self.conf.linkify()
# applying dependencies
self.conf.apply_dependencies()
# sets objects initial state
self.conf.set_initial_state()
# Hacking some global parameters inherited from Nagios to create
# on the fly some Broker modules like for status.dat parameters
# or nagios.log one if there are none already available
self.conf.hack_old_nagios_parameters()
# Raise warning about currently unmanaged parameters
if self.verify_only:
self.conf.warn_about_unmanaged_parameters()
# Explode global conf parameters into Classes
self.conf.explode_global_conf()
# set our own timezone and propagate it to other satellites
self.conf.propagate_timezone_option()
# Look for business rules, and create the dep tree
self.conf.create_business_rules()
# And link them
self.conf.create_business_rules_dependencies()
# Warn about useless parameters in Shinken
if self.verify_only:
self.conf.notice_about_useless_parameters()
# Manage all post-conf modules
self.hook_point('late_configuration')
# Correct conf?
self.conf.is_correct()
        # Maybe some elements were not right, so we must clean if possible
self.conf.clean()
# If the conf is not correct, we must get out now
# if not self.conf.conf_is_correct:
# sys.exit("Configuration is incorrect, sorry, I bail out")
# REF: doc/shinken-conf-dispatching.png (2)
logger.info("Cutting the hosts and services into parts")
self.confs = self.conf.cut_into_parts()
        # The conf can be incorrect here if the cut into parts sees errors,
        # like a realm with hosts but no schedulers for it
if not self.conf.conf_is_correct:
self.conf.show_errors()
err = "Configuration is incorrect, sorry, I bail out"
logger.error(err)
sys.exit(err)
logger.info('Things look okay - No serious problems were detected '
'during the pre-flight check')
# Clean objects of temporary/unnecessary attributes for live work:
self.conf.clean()
if self.dump_config_file:
self.dump_config()
# Exit if we are just here for config checking
if self.verify_only:
sys.exit(0)
if self.analyse:
self.launch_analyse()
sys.exit(0)
        # Some properties need to be "flattened" (put into strings)
        # before being sent, like realms for hosts for example
        # BEWARE: after the cutting part, because we stringify some properties
self.conf.prepare_for_sending()
# Ok, here we must check if we go on or not.
# TODO: check OK or not
self.log_level = self.conf.log_level
self.use_local_log = self.conf.use_local_log
self.local_log = self.conf.local_log
self.pidfile = os.path.abspath(self.conf.lock_file)
self.idontcareaboutsecurity = self.conf.idontcareaboutsecurity
self.user = self.conf.shinken_user
self.group = self.conf.shinken_group
self.daemon_enabled = self.conf.daemon_enabled
self.daemon_thread_pool_size = self.conf.daemon_thread_pool_size
self.http_backend = getattr(self.conf, 'http_backend', 'auto')
self.accept_passive_unknown_check_results = BoolProp.pythonize(
getattr(self.me, 'accept_passive_unknown_check_results', '0')
)
        # If the user sets a workdir, let's use it. If not, use the
        # pidfile directory
if self.conf.workdir == '':
self.workdir = os.path.abspath(os.path.dirname(self.pidfile))
else:
self.workdir = self.conf.workdir
# We need to set self.host & self.port to be used by do_daemon_init_and_start
self.host = self.me.address
self.port = self.me.port
logger.info("Configuration Loaded")
def load_modules_configuration_objects(self, raw_objects):
        # Now we ask the configuration modules if they
        # have items for us
for inst in self.modules_manager.instances:
# TODO : clean
if hasattr(inst, 'get_objects'):
_t = time.time()
try:
r = inst.get_objects()
except Exception as exp:
logger.error(
"Instance %s raised an exception %s. Log and continue to run",
inst.get_name(), exp
)
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Back trace of this remove: %s", output.getvalue())
output.close()
continue
statsmgr.timing('hook.get-objects', time.time() - _t, 'perf')
types_creations = self.conf.types_creations
for k in types_creations:
(cls, clss, prop, dummy) = types_creations[k]
if prop in r:
for x in r[prop]:
                            # test if raw_objects[k] is already set - if not, add an empty list
if k not in raw_objects:
raw_objects[k] = []
                            # put the imported_from property if the module is not already setting
                            # it, so we know where this object came from
if 'imported_from' not in x:
x['imported_from'] = 'module:%s' % inst.get_name()
# now append the object
raw_objects[k].append(x)
logger.debug("Added %i objects to %s from module %s",
len(r[prop]), k, inst.get_name())
def launch_analyse(self):
logger.info("We are doing an statistic analysis on the dump file %s", self.analyse)
stats = {}
types = ['hosts', 'services', 'contacts', 'timeperiods', 'commands', 'arbiters',
'schedulers', 'pollers', 'reactionners', 'brokers', 'receivers', 'modules',
'realms']
for t in types:
lst = getattr(self.conf, t)
nb = len([i for i in lst])
stats['nb_' + t] = nb
logger.info("Got %s for %s", nb, t)
max_srv_by_host = max([len(h.services) for h in self.conf.hosts])
logger.info("Max srv by host %s", max_srv_by_host)
stats['max_srv_by_host'] = max_srv_by_host
f = open(self.analyse, 'w')
s = json.dumps(stats)
logger.info("Saving stats data to a file %s", s)
f.write(s)
f.close()
def dump_config(self):
logger.info("Dumping configuration to %s", self.dump_config_file)
with open(self.dump_config_file, "w") as f:
self.conf.dump(f)
def go_migrate(self):
print("***********" * 5)
print("WARNING : this feature is NOT supported in this version!")
print("***********" * 5)
migration_module_name = self.migrate.strip()
mig_mod = self.conf.modules.find_by_name(migration_module_name)
if not mig_mod:
print("Cannot find the migration module %s. Please configure it" % migration_module_name)
sys.exit(2)
print(self.modules_manager.instances)
# Ok now all we need is the import module
self.modules_manager.set_modules([mig_mod])
self.do_load_modules()
print(self.modules_manager.instances)
if len(self.modules_manager.instances) == 0:
print("Error during the initialization of the import module. Bailing out")
sys.exit(2)
print("Configuration migrating in progress...")
mod = self.modules_manager.instances[0]
f = getattr(mod, 'import_objects', None)
if not f or not callable(f):
print("Import module is missing the import_objects function. Bailing out")
sys.exit(2)
objs = {}
types = ['hosts', 'services', 'commands', 'timeperiods', 'contacts']
for t in types:
print("New type", t)
objs[t] = []
for i in getattr(self.conf, t):
d = i.get_raw_import_values()
if d:
objs[t].append(d)
f(objs)
# Ok we can exit now
sys.exit(0)
# Main loop function
def main(self):
try:
# Setting log level
logger.setLevel('INFO')
# Force the debug level if the daemon is said to start with such level
if self.debug:
logger.setLevel('DEBUG')
# Log will be broks
for line in self.get_header():
logger.info(line)
self.load_config_file()
logger.setLevel(self.log_level)
# Maybe we are in a migration phase. If so, we will bailout here
if self.migrate:
self.go_migrate()
# Look if we are enabled or not. If ok, start the daemon mode
self.look_for_early_exit()
self.do_daemon_init_and_start()
self.uri_arb = self.http_daemon.register(self.interface)
# ok we are now fully daemonized (if requested)
# now we can start our "external" modules (if any):
self.modules_manager.start_external_instances()
# Ok now we can load the retention data
self.hook_point('load_retention')
# And go for the main loop
self.do_mainloop()
except SystemExit as exp:
# With a 2.4 interpreter the sys.exit() in load_config_file
# ends up here and must be handled.
sys.exit(exp.code)
except Exception as exp:
self.print_unrecoverable(traceback.format_exc())
raise
def setup_new_conf(self):
""" Setup a new conf received from a Master arbiter. """
conf = self.new_conf
if not conf:
return
conf = deserialize(conf)
self.new_conf = None
self.cur_conf = conf
self.conf = conf
if self.aggressive_memory_management:
free_memory()
for arb in self.conf.arbiters:
if (arb.address, arb.port) == (self.host, self.port):
self.me = arb
arb.is_me = lambda x: True # we now definitively know who we are, just keep it.
else:
arb.is_me = lambda x: False # and we know who we are not, just keep it.
def do_loop_turn(self):
# If I am a spare, I wait for the master arbiter to send me
# true conf.
if self.me.spare:
logger.debug("I wait for master")
self.wait_for_master_death()
if self.must_run:
# Main loop
self.run()
# We wait (block) for arbiter to send us something
def wait_for_master_death(self):
logger.info("Waiting for master death")
timeout = 1.0
self.last_master_speack = time.time()
# Look for the master timeout
master_timeout = 300
for arb in self.conf.arbiters:
if not arb.spare:
master_timeout = arb.check_interval * arb.max_check_attempts
logger.info("I'll wait master for %d seconds", master_timeout)
while not self.interrupted:
elapsed, _, tcdiff = self.handleRequests(timeout)
# if there was a system Time Change (tcdiff) then we have to adapt last_master_speak:
if self.new_conf:
self.setup_new_conf()
if tcdiff:
self.last_master_speack += tcdiff
if elapsed:
self.last_master_speack = time.time()
timeout -= elapsed
if timeout > 0:
continue
timeout = 1.0
sys.stdout.write(".")
sys.stdout.flush()
# Now check if master is dead or not
now = time.time()
if now - self.last_master_speack > master_timeout:
logger.info("Arbiter Master is dead. The arbiter %s take the lead",
self.me.get_name())
for arb in self.conf.arbiters:
if not arb.spare:
arb.alive = False
self.must_run = True
break
# Take all external commands, make packs and send them to
# the schedulers
def push_external_commands_to_schedulers(self):
# Now get all external commands and put them into the
# good schedulers
for ext_cmd in self.external_commands:
self.external_command.resolve_command(ext_cmd)
# Now for all alive schedulers, send the commands
for sched in self.conf.schedulers:
cmds = sched.external_commands
if len(cmds) > 0 and sched.alive:
logger.debug("Sending %d commands to scheduler %s", len(cmds), sched.get_name())
sched.run_external_commands(cmds)
# clean them
sched.external_commands = []
    # We will log time period activation changes
    # as NOTICE in the logs.
def check_and_log_tp_activation_change(self):
for tp in self.conf.timeperiods:
tp.check_and_log_activation_change()
# Main function
def run(self):
        # Before running, I must be sure of who I am
        # The arbiters change, so we must re-discover the new self.me
for arb in self.conf.arbiters:
if arb.is_me(self.arb_name):
self.me = arb
if self.conf.human_timestamp_log:
logger.set_human_format()
logger.info("Begin to dispatch configurations to satellites")
self.dispatcher = Dispatcher(self.conf, self.me)
self.dispatcher.check_alive()
self.dispatcher.check_dispatch()
# REF: doc/shinken-conf-dispatching.png (3)
self.dispatcher.dispatch()
# Now we can get all initial broks for our satellites
self.get_initial_broks_from_satellitelinks()
suppl_socks = None
# Now create the external commander. It's just here to dispatch
# the commands to schedulers
e = ExternalCommandManager(self.conf, 'dispatcher')
e.load_arbiter(self)
self.external_command = e
logger.debug("Run baby, run...")
timeout = 1.0
while self.must_run and not self.interrupted:
elapsed, ins, _ = self.handleRequests(timeout, suppl_socks)
# If FIFO, read external command
if ins:
now = time.time()
ext_cmds = self.external_command.get()
if ext_cmds:
for ext_cmd in ext_cmds:
self.external_commands.append(ext_cmd)
else:
self.fifo = self.external_command.open()
if self.fifo is not None:
suppl_socks = [self.fifo]
else:
suppl_socks = None
elapsed += time.time() - now
if elapsed or ins:
timeout -= elapsed
if timeout > 0: # only continue if we are not over timeout
continue
# Timeout
timeout = 1.0 # reset the timeout value
            # Try to see if one of my modules is dead, and
            # try to restart previously dead modules :)
self.check_and_del_zombie_modules()
# Call modules that manage a starting tick pass
self.hook_point('tick')
# Look for logging timeperiods activation change (active/inactive)
self.check_and_log_tp_activation_change()
# Now the dispatcher job
_t = time.time()
self.dispatcher.check_alive()
statsmgr.timing('core.arbiter.check-alive', time.time() - _t, 'perf')
_t = time.time()
self.dispatcher.check_dispatch()
statsmgr.timing('core.arbiter.check-dispatch', time.time() - _t, 'perf')
# REF: doc/shinken-conf-dispatching.png (3)
_t = time.time()
self.dispatcher.dispatch()
statsmgr.timing('core.arbiter.dispatch', time.time() - _t, 'perf')
_t = time.time()
self.dispatcher.check_bad_dispatch()
statsmgr.timing('core.arbiter.check-bad-dispatch', time.time() - _t, 'perf')
# Now get things from our module instances
self.get_objects_from_from_queues()
            # Maybe our satellite links raised new broks. We must reap them
self.get_broks_from_satellitelinks()
# One broker is responsible for our broks,
# we must give him our broks
self.push_broks_to_broker()
self.get_external_commands_from_satellites()
# self.get_external_commands_from_receivers()
# send_conf_to_schedulers()
if self.nb_broks_send != 0:
logger.debug("Nb Broks send: %d", self.nb_broks_send)
self.nb_broks_send = 0
_t = time.time()
self.push_external_commands_to_schedulers()
statsmgr.timing('core.arbiter.push-external-commands', time.time() - _t,
'perf')
# It's sent, do not keep them
# TODO: check if really sent. Queue by scheduler?
self.external_commands = []
# If asked to dump my memory, I will do it
if self.need_dump_memory:
self.dump_memory()
self.need_dump_memory = False
def get_daemons(self, daemon_type):
""" Returns the daemons list defined in our conf for the given type """
# shouldn't the 'daemon_types' (whatever it is above) be always present?
return getattr(self.conf, daemon_type + 's', None)
# Helper functions for retention modules
# So we give our broks and external commands
def get_retention_data(self):
r = {}
r['broks'] = self.broks
r['external_commands'] = self.external_commands
return r
# Get back our data from a retention module
def restore_retention_data(self, data):
broks = data['broks']
external_commands = data['external_commands']
self.broks.extend(broks)
self.external_commands.extend(external_commands)
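    # Illustrative sketch (not part of the original source): a retention
    # module would round-trip these two helpers, e.g.:
    # data = arb.get_retention_data()     # {'broks': [...], 'external_commands': [...]}
    # save_somewhere(data)                # hypothetical persistence step
    # arb.restore_retention_data(data)    # re-queues both lists on restart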
    # Gets internal metrics for both statsd and the stats structure
def get_internal_metrics(self):
# Queues
metrics = [
('core.arbiter.mem', get_memory(), 'system'),
('core.arbiter.external-commands.queue',
len(self.external_commands), 'queue'),
('core.arbiter.broks.queue', len(self.broks), 'queue'),
]
# Objects
for t in ("contacts", "contactgroups", "hosts", "hostgroups",
"services", "servicegroups", "commands"):
count = len(getattr(self.conf, t))
metrics.append(('core.arbiter.%s' % t, count, 'object'))
return metrics
    # The stats thread is asking us for a main stats structure
def get_stats_struct(self):
now = int(time.time())
# call the daemon one
res = super(Arbiter, self).get_stats_struct()
res.update({'name': self.me.get_name(), 'type': 'arbiter'})
# Managed objects
res["objects"] = {}
for t in ("contacts", "contactgroups", "hosts", "hostgroups",
"services", "servicegroups", "commands"):
res["objects"][t] = len(getattr(self.conf, t))
# Metrics specific
metrics = res['metrics']
for metric in self.get_internal_metrics():
name, value, mtype = metric
            metrics.append((name, value, now, mtype))
return res
| 35,252 | Python | .py | 769 | 33.964889 | 101 | 0.580868 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,549 | __init__.py | shinken-solutions_shinken/modules/__init__.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
| 919 | Python | .py | 22 | 40.681818 | 77 | 0.758659 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,550 | module.py | shinken-solutions_shinken/modules/dummy_broker_external/module.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is an example of a broker module
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken.basemodule import BaseModule
from shinken.log import logger
properties = {
'daemons': ['broker'],
'type': 'dummy_broker_external',
'external': True,
}
# called by the plugin manager to get a broker
def get_instance(mod_conf):
logger.info("[Dummy Broker] Get a Dummy broker module for plugin %s", mod_conf.get_name())
instance = Dummy_broker(mod_conf)
return instance
# Just print some stuff
class Dummy_broker(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by Broker to say 'let's prepare yourself guy'
def init(self):
logger.info("[Dummy Broker] Initialization of the dummy broker module")
# When you are in "external" mode, that is the main loop of your process
def main(self):
self.set_proctitle(self.name)
self.set_exit_handler()
i = 0
while not self.interrupted:
i += 1
time.sleep(0.1)
if i % 10 == 0:
logger.info('[Dummy Broker External] Ping')
logger.info('[Dummy Broker External] Exiting')
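# Illustrative sketch (not part of the original module): such an external
# module is typically enabled from the broker's configuration; the names
# below are an assumed example.
# define module {
#     module_name Dummy-Broker-External
#     module_type dummy_broker_external
# }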
| 2,187 | Python | .py | 55 | 35.581818 | 94 | 0.705159 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,551 | module.py | shinken-solutions_shinken/modules/dummy_arbiter/module.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is an example of an Arbiter module
# Here for the configuration phase AND running one
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from shinken.basemodule import BaseModule
from shinken.external_command import ExternalCommand
from shinken.log import logger
properties = {
'daemons': ['arbiter'],
'type': 'dummy_arbiter',
'external': True,
}
# called by the plugin manager to get an arbiter
def get_instance(plugin):
logger.info("[Dummy Arbiter] Get a Dummy arbiter module for plugin %s", plugin.get_name())
instance = Dummy_arbiter(plugin)
return instance
# Just print some stuff
class Dummy_arbiter(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by Arbiter to say 'let's prepare yourself guy'
def init(self):
logger.info("[Dummy Arbiter] Initialization of the dummy arbiter module")
#self.return_queue = self.properties['from_queue']
# Ok, main function that is called in the CONFIGURATION phase
def get_objects(self):
logger.info("[Dummy Arbiter] Ask me for objects to return")
r = {'hosts': []}
h = {'name': 'dummy host from dummy arbiter module',
'register': '0',
}
r['hosts'].append(h)
r['hosts'].append({
'host_name': "dummyhost1",
'use': 'linux-server',
'address': 'localhost'
})
logger.info("[Dummy Arbiter] Returning to Arbiter the hosts: %s", str(r))
return r
def hook_late_configuration(self, conf):
logger.info("[Dummy Arbiter] Dummy in hook late config")
def do_loop_turn(self):
logger.info("[Dummy Arbiter] Raise a external command as example")
e = ExternalCommand('Viva la revolution')
self.from_q.put(e)
time.sleep(1)
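# Illustrative sketch (not part of the original module) of the consuming
# side: the arbiter drains from_q and routes each ExternalCommand through
# its add() method (see arbiterdaemon.py).
# while not from_q.empty():
#     cmd = from_q.get()
#     arbiter.add(cmd)   # ends up in arbiter.external_commands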
| 2,886 | Python | .py | 69 | 36.014493 | 94 | 0.675241 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,552 | module.py | shinken-solutions_shinken/modules/dummy_broker/module.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is an example of a broker module
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.basemodule import BaseModule
from shinken.log import logger
properties = {
'daemons': ['broker'],
'type': 'dummy_broker',
'external': False,
}
# called by the plugin manager to get a broker
def get_instance(mod_conf):
logger.info("[Dummy Broker] Get a Dummy broker module for plugin %s", mod_conf.get_name())
instance = Dummy_broker(mod_conf)
return instance
# Just print some stuff
class Dummy_broker(BaseModule):
    def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by Broker to say 'let's prepare yourself guy'
def init(self):
logger.info("[Dummy Broker] Initialization of the dummy broker module")
    # A host check has just arrived; we UPDATE data info with it
# def manage_brok(self, b):
# #Do things
# pass
| 1,897 | Python | .py | 47 | 37.723404 | 94 | 0.72886 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,553 | module.py | shinken-solutions_shinken/modules/dummy_scheduler/module.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is an example of a Scheduler module
# Here for the configuration phase AND running one
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.basemodule import BaseModule
from shinken.log import logger
properties = {
'daemons': ['scheduler'],
'type': 'dummy_scheduler',
'external': False,
'phases': ['retention'],
}
# called by the plugin manager to get a scheduler module
def get_instance(mod_conf):
logger.info("[Dummy Scheduler] Get a Dummy scheduler module for plugin %s", mod_conf.get_name())
instance = Dummy_scheduler(mod_conf, foo="bar")
return instance
# Just print some stuff
class Dummy_scheduler(BaseModule):
def __init__(self, mod_conf, foo):
BaseModule.__init__(self, mod_conf)
self.myfoo = foo
# Called by Scheduler to say 'let's prepare yourself guy'
def init(self):
logger.info("[Dummy Scheduler] Initialization of the dummy scheduler module")
# self.return_queue = self.properties['from_queue']
# Ok, main function that is called in the retention creation pass
def update_retention_objects(self, sched, log_mgr):
logger.info("[Dummy Scheduler] Asking me to update the retention objects")
    # Should return whether the retention load succeeded or not
    def load_retention_objects(self, sched, log_mgr):
logger.info("[Dummy Scheduler] Asking me to load the retention objects")
return False
    # For now, the external mode is not used in the scheduler job
# #When you are in "external" mode, that is the main loop of your process
# def main(self):
# while True:
# print("Raise a external command as example")
# e = ExternalCommand('Viva la revolution')
# self.return_queue.put(e)
# time.sleep(1)
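    # A sketch of what a real retention module would do here, assuming the
    # scheduler exposes get_retention_data() as the stock pickle retention
    # module expects (treat the exact API as an assumption):
    # def update_retention_objects(self, sched, log_mgr):
    #     all_data = sched.get_retention_data()
    #     # ... then persist all_data (its 'hosts'/'services' dicts)
    #     # to a file, redis, mongodb, ...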
| 2,746
|
Python
|
.py
| 62
| 41.16129
| 100
| 0.717016
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,554
|
module.py
|
shinken-solutions_shinken/modules/dummy_poller/module.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is an example of a Poller module
# Here for the configuration phase AND running one
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import signal
import time
if sys.version.startswith("2.7"):
from Queue import Empty
else:
from queue import Empty
from shinken.basemodule import BaseModule
from shinken.log import logger
properties = {
'daemons': ['poller'],
'type': 'dummy_poller',
'external': False,
# To be a real worker module, you must set this
'worker_capable': True,
}
# called by the plugin manager to get a poller module
def get_instance(mod_conf):
logger.info("[Dummy Poller] Get a Dummy poller module for plugin %s", mod_conf.get_name())
instance = Dummy_poller(mod_conf)
return instance
# Just print some stuff
class Dummy_poller(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by poller to say 'let's prepare yourself guy'
def init(self):
logger.info("[Dummy Poller] Initialization of the dummy poller module")
self.i_am_dying = False
# Get new checks if less than nb_checks_max
    # If no new check was received and none is queued,
# sleep for 1 sec
# REF: doc/shinken-action-queues.png (3)
def get_new_checks(self):
try:
            while True:
logger.debug("[Dummy Poller] I %d wait for a message", self.id)
msg = self.s.get(block=False)
if msg is not None:
self.checks.append(msg.get_data())
logger.debug("[Dummy Poller] I, %d, got a message!", self.id)
except Empty as exp:
if len(self.checks) == 0:
time.sleep(1)
    # Launch checks that are in status 'queue'
# REF: doc/shinken-action-queues.png (4)
def launch_new_checks(self):
for chk in self.checks:
if chk.status == 'queue':
logger.warning("[Dummy Poller] Dummy (bad) check for %s", str(chk.command))
chk.exit_status = 2
chk.get_outputs('All is NOT SO well', 8012)
chk.status = 'done'
chk.execution_time = 0.1
# Check the status of checks
# if done, return message finished :)
# REF: doc/shinken-action-queues.png (5)
def manage_finished_checks(self):
to_del = []
for action in self.checks:
to_del.append(action)
try:
self.returns_queue.put(action)
except IOError as exp:
logger.info("[Dummy Poller] %d exiting: %s", self.id, exp)
sys.exit(2)
for chk in to_del:
self.checks.remove(chk)
    # self.id = id of the worker
    # s = Global Queue Master->Slave
    # returns_queue = queue managed by the manager (Slave->Master)
    # c = Control Queue for the worker
def work(self, s, returns_queue, c):
logger.info("[Dummy Poller] Module Dummy started!")
## restore default signal handler for the workers:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
timeout = 1.0
self.checks = []
self.returns_queue = returns_queue
self.s = s
self.t_each_loop = time.time()
while True:
begin = time.time()
msg = None
cmsg = None
# If we are dying (big problem!) we do not
            # take new jobs, we just finish the current ones
if not self.i_am_dying:
# REF: doc/shinken-action-queues.png (3)
self.get_new_checks()
# REF: doc/shinken-action-queues.png (4)
self.launch_new_checks()
# REF: doc/shinken-action-queues.png (5)
self.manage_finished_checks()
# Now get order from master
try:
cmsg = c.get(block=False)
if cmsg.get_type() == 'Die':
logger.info("[Dummy Poller] %d : Dad say we are dying...", self.id)
break
except Exception:
pass
timeout -= time.time() - begin
if timeout < 0:
timeout = 1.0
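# To actually load this module, a declaration along these lines would go in
# the poller's module configuration (a sketch based on the 'properties' dict
# above; adapt the names to your setup):
#   define module {
#       module_name   Dummy_poller
#       module_type   dummy_poller
#   }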
| 5,153
|
Python
|
.py
| 132
| 30.977273
| 94
| 0.614277
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,555
|
doku2rst.py
|
shinken-solutions_shinken/doc/tools/doku2rst.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import os
from urllib import urlretrieve
input_folder = "pages/"
output_folder = "../source"
chapters = {'about': '01',
'gettingstarted': '02',
'configuringshinken': '03',
'runningshinken': '04',
'thebasics': '05',
'advancedtopics': '06',
'configobjects': '07',
'securityandperformancetuning': '08',
'integrationwithothersoftware': '09',
'shinkenaddons': '10',
'development': '11',
}
external_links = {}
internal_links = {}
output = []
# Functions
def title1(text):
length = len(text)
output = "\n\n%s\n%s\n%s\n\n" % ("=" * length, text, "=" * length)
write(output)
def title2(text):
length = len(text)
output = "\n\n%s\n%s\n\n" % (text, "=" * length)
write(output)
def title3(text):
length = len(text)
output = "\n\n%s\n%s\n\n" % (text, "-" * length)
write(output)
def title4(text):
length = len(text)
output = "\n\n%s\n%s\n\n" % (text, "~" * length)
write(output)
def title5(text):
length = len(text)
output = "\n\n%s\n%s\n\n" % (text, "*" * length)
write(output)
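# For example (illustration only), title3("Install") emits the text padded
# with blank lines and underlined to its own length:
#   Install
#   -------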
def external_link(links):
for link in links.items():
output = "\n.. _%s: %s" % link
write(output)
def normal_text(text):
output = text
write(output)
def get_image(image_url, image_path):
path = "/".join((output_folder, image_path))
try:
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
urlretrieve(image_url, path)
except Exception:
import pdb;pdb.set_trace()
def get_lengths(lengths, row):
if lengths is None:
lengths = [len(cell) for cell in row]
else:
if len(lengths) > len(row):
row = row + [''] * (len(lengths) - len(row))
row_lengths = [len(cell) for cell in row]
lengths = map(lambda x: max([i for i in x]), zip(row_lengths, lengths))
return lengths
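# Worked example (not in the original source):
#   reduce(get_lengths, [['a', 'bb'], ['ccc', 'd']], None)  ->  [3, 2]
# i.e. the widest cell per column, used below to size the RST table borders.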
# Main
for root, dirs, files in os.walk(input_folder):
for filename in files:
f = open(os.path.join(root, filename), 'r')
# Set vars
in_code = False
in_note = False
in_table = False
in_tagcode = False
nb_col = None
external_links = {}
internal_links = {}
tables = []
rows = []
output = []
# open file
rst_filename = filename[:-3] + "rst"
chapter = rst_filename.split("-", 1)
if len(chapter) <= 1:
chapter = "raws"
elif not chapter[0] in chapters:
chapter = "raws"
else:
chapter = chapter[0]
chapter = chapters[chapter] + "_" + chapter
if root.endswith('configobjects'):
chapter = '07_configobjects'
chapter_folder = os.path.join(output_folder, chapter)
if not os.path.exists(chapter_folder):
os.makedirs(chapter_folder)
rst_file = os.path.join(chapter_folder, rst_filename)
fw = open(rst_file, 'w')
def write(text):
fw.write(text)
# Write the first line
ref_target = ".. _%s:\n\n" % filename[:-4]
write(ref_target)
# parse line !
for line in f:
o_line = line
# always strip line ???
# line = line.strip('\n')
            # navigation links
#m = re.match("\|.*Prev.*Up.*Next.*\|", line.strip())
m = re.match("\|.*Next.*\|", line.strip())
if m:
# we don't want it
continue
m = re.match("\|.*Chapter [0-9]*.*\|", line.strip())
if m:
# we don't want it
continue
m = re.match("\|.*Part .*\|", line.strip())
if m:
# we don't want it
continue
# Title 1
m = re.match("===== ?Chapter [0-9]*\.(.*) ?=====", line)
if m:
                # get data
title = m.groups()[0]
                # prepare data
attrs = {'text': title}
data = {
'fnt': title1,
'attrs': attrs,
}
                # store data
output.append(data)
# Disable in_code
in_code = False
# next line
continue
            # Title 1
m = re.match("====== ?(.*) ?======", line)
if m:
title = m.groups()[0]
                # prepare data
attrs = {'text': title}
data = {
'fnt': title1,
'attrs': attrs,
}
                # store data
output.append(data)
# Disable in_code
in_code = False
# next line
continue
# Title 2
m = re.match("===== ?(.*) ?=====", line)
if m:
title = m.groups()[0]
                # prepare data
attrs = {'text': title}
data = {
'fnt': title2,
'attrs': attrs,
}
                # store data
output.append(data)
# Disable in_code
in_code = False
# next line
continue
# Title 3
m = re.match("==== ?(.*) ?====", line)
if m:
title = m.groups()[0]
                # prepare data
attrs = {'text': title}
data = {
'fnt': title3,
'attrs': attrs,
}
                # store data
output.append(data)
# Disable in_code
in_code = False
# next line
continue
# Title 4
m = re.match("=== ?(.*) ?===", line)
if m:
title = m.groups()[0]
                # prepare data
attrs = {'text': title}
data = {
'fnt': title4,
'attrs': attrs,
}
                # store data
output.append(data)
# Disable in_code
in_code = False
# next line
continue
# Title 5
m = re.match("== ?(.*) ?==", line)
if m:
title = m.groups()[0]
                # prepare data
attrs = {'text': title}
data = {
'fnt': title5,
'attrs': attrs,
}
                # store data
output.append(data)
# Disable in_code
in_code = False
# next line
continue
# Normal line
# Search external links
m = re.search("\[\[https?://(.*?)\|(.*?)\]\]", line)
if m:
links = re.findall("\[\[(https?)://(.*?)\|(.*?)\]\]", line)
for link in links:
line = re.sub("\[\[https?://(.*?)\|(.*?)\]\]", "`%s`_" % link[2], line, count=1, flags=0)
external_links[link[2]] = link[0] + "://" + link[1]
m = re.search("\[\[https?://(.*?)\]\]", line)
if m:
links = re.findall("\[\[(https?)://(.*?)\]\]", line)
for link in links:
line = re.sub("\[\[https?://(.*?)\]\]", "`%s`_" % link[1], line, count=1, flags=0)
external_links[link[1]] = link[0] + "://" + link[1]
# Search internal links
m = re.search("\[\[(.*?)\|(.*?)\]\]", line)
if m:
links = re.findall("\[\[(.*?)\|(.*?)\]\]", line)
for link in links:
ref = link[0].split(":")[-1]
ref_text = link[1].strip()
line = re.sub("\[\[(.*?)\|(.*?)\]\]", ":ref:`%s <%s>`" % (ref_text, ref), line, count=1, flags=0)
if ref.startswith("configuringshinken/configobjects/"):
ref = ref.replace("configuringshinken/configobjects/", '')
internal_links[ref_text] = ref
m = re.search("\[\[(.*?)\]\]", line)
if m:
links = re.findall("\[\[(.*?)\]\]", line)
for link in links:
ref = link.split(":")[-1]
ref_text = ref
line = re.sub("\[\[(.*?)\]\]", ":ref:`%s` <%s>" % (ref_text, ref), line, count=1, flags=0)
if ref.startswith("configuringshinken/configobjects/"):
ref = ref.replace("configuringshinken/configobjects/", '')
internal_links[ref_text] = ref
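            # e.g. (illustration only) "[[configuringshinken:configobjects:host|Host Definition]]"
            # becomes ":ref:`Host Definition <host>`" and the target is
            # remembered in internal_links for later.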
# Search image
m = re.search("\{\{(.*?)\|(.*?)\}\}", line)
if m:
images = re.findall("\{\{(.*?)\|(.*?)\}\}", line)
for image, text in images:
# TODO prepare image var
path = image.replace(":shinken:", "")
path = path.replace(":", "/")
img_filename = os.path.basename(path)
path = os.path.dirname(path)
# Download images
image_url = image.replace(":shinken:", "")
image_url = image_url.replace(":", "/")
image_url = "http://www.shinken-monitoring.org/wiki/_media/" + image_url
##
# path = os.path.join("_static/images/", path)
path = "_static/images/" + path
image_path = os.path.join(path, img_filename)
# if not os.path.exists(path):
# os.makedirs(path)
get_image(image_url, image_path)
# TODO add \n after the image ???
image_rst_path = "/" + image_path
line = re.sub("\{\{(.*?)\}\}", "\n\n.. image:: %s\n :scale: 90 %%\n\n" % image_rst_path, line, count=1, flags=0)
m = re.search("\{\{(.*?)\}\}", line)
if m:
images = re.findall("\{\{(.*?)\}\}", line)
for image in images:
# TODO prepare image var
path = image.replace(":shinken:", "")
path = path.replace(":", "/")
img_filename = os.path.basename(path)
path = os.path.dirname(path)
# Download images
image_url = image.replace(":shinken:", "")
image_url = image_url.replace(":", "/")
image_url = "http://www.shinken-monitoring.org/wiki/_media/" + image_url
##
# path = os.path.join("_static/images/", path)
path = "_static/images/" + path
image_path = os.path.join(path, img_filename)
# if not os.path.exists(path):
# os.makedirs(path)
get_image(image_url, image_path)
# TODO add \n after the image ???
image_rst_path = "/" + image_path
line = re.sub("\{\{(.*?)\}\}", "\n\n.. image:: %s\n :scale: 90 %%\n\n" % image_rst_path, line, count=1, flags=0)
# Emphasis
m = re.search("[^/](//[^/]*//)[^/]", line)
if m:
emphasis = re.findall("[^/](//[^/]*//)[^/]", line)
for emph in emphasis:
new = "*%s*" % emph[2:-2]
line = line.replace(emph, new)
# Code with tag
m1 = re.search("<code>", line)
m2 = re.search("</code>", line)
if m2 and in_tagcode == True:
# end code
line = line.replace("</code>", "")
in_tagcode = False
elif m1 and in_tagcode == False:
# start code
line = line.replace("<code>", "\n::\n\n ")
in_tagcode = True
# code with spaces
m = re.search("^ *[-\*]", line)
# end code
if m and in_code == True and line.strip() != "::":
# Code/list merged
in_code = False
line = re.sub("^ *", "", line)
line = "\n" + line
# end code
            if in_code == True and not re.search("^ ", line) and line.strip() != '':
in_code = False
m = re.search("^ *[^- \*]", line)
# start code
if m and in_code == False:
in_code = True
line = re.sub("^ ", "\n::\n\n ", line)
# if in code ....
if in_code == True or in_tagcode == True:
# In code
if not line.startswith(" "):
line = " " + line
# if NOT in code...
if in_code == False and in_tagcode == False:
line = re.sub("\\\\", "\\\\", line)
# Note
m1 = re.search("<note>", line)
m2 = re.search("<note warning>", line)
m3 = re.search("<note tip>", line)
m4 = re.search("<note important>", line)
if m1:
line = line.replace("<note>", ".. note:: ")
in_note = True
elif m2:
line = line.replace("<note warning>", ".. warning:: ")
in_note = True
elif m3:
line = line.replace("<note tip>", ".. tip:: ")
in_note = True
elif m4:
line = line.replace("<note important>", ".. important:: ")
in_note = True
elif in_note == True:
line = " " + line
m = re.search("</note>", line)
if m:
line = line.replace("</note>", "")
in_note = False
line = line.replace(u"”".encode('utf-8'), '"')
line = line.replace(u"�".encode('utf-8'), '"')
line = line.replace(u"′".encode('utf-8'), '"')
line = line.replace(u"’".encode('utf-8'), '"')
line = line.replace(u"‘".encode('utf-8'), '"')
# if line.find("$HOSTACKAUTHORNAME$") != -1:
# import pdb;pdb.set_trace()
# table
m1 = re.match(" *\^.*\^ *", line.strip())
m2 = re.match(" *\|.*\| *", line.strip())
if m1:
# Table header
in_table = True
line = line.strip()[1:-1]
cells = [c.strip() for c in line.split('^')]
if nb_col is None:
nb_col = len(cells)
rows.append(cells)
# don't write this line
continue
elif m2:
in_table = True
line = line.strip()[1:-1]
cells = [c.strip() for c in line.split('|')]
if nb_col is None:
nb_col = len(cells)
rows.append(cells)
# don't write this line
continue
            elif in_table == True:
in_table = False
borders_len = reduce(get_lengths, rows, None)
line = "\n\n" + " ".join(["=" * i for i in borders_len])
for row in rows:
f_row = " ".join(['{:{fill}{align}%d}'] * nb_col)
f_row = f_row % tuple(borders_len)
if nb_col > len(row):
row = row + [''] * (nb_col - len(row))
f_row = f_row.format(*row, fill=" ", align="<")
line += "\n" + f_row
line += "\n" + " ".join(["=" * i for i in borders_len]) + "\n\n"
rows = []
nb_col = None
            # prepare data
attrs = {'text': line}
data = {
'fnt': normal_text,
'attrs': attrs,
}
            # store data
output.append(data)
# write lines
for data in output:
data['fnt'](**data['attrs'])
external_link(external_links)
# close file
fw.close()
# echo
print("mv ../source/raws/about.rst ../source/01_about/")
print("mv ../source/raws/ch07.rst ../source/02_gettingstarted/")
print("mv ../source/raws/part-problemsandimpacts.rst ../source/06_advancedtopics/")
| 16,970
|
Python
|
.py
| 429
| 25.403263
| 134
| 0.411497
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,556
|
doc_shinken_scrapper.py
|
shinken-solutions_shinken/doc/tools/doc_shinken_scrapper.py
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
from StringIO import StringIO
import os
import requests
from lxml import etree
#application_name = "shinken"
application_name = ""
url = "http://www.shinken-monitoring.org/wiki"
sitemap_url = url + "/start?do=index"
sitemap_ajax_url = url + "/lib/exe/ajax.php"
parser = etree.HTMLParser()
raw_res = requests.get(sitemap_url)
res = StringIO(raw_res.content)
tree = etree.parse(res, parser)
index = tree.find("//div[@id='index__tree']")
def parse_level(level, root=""):
items = level.findall(".//li")
# import pdb;pdb.set_trace()
for item in items:
title = item.find("./div//strong")
        if title is not None:
new_root = root + ":" + title.text
print("Browse namespace : %s" % new_root)
data = {'call': 'index',
'idx' : new_root,
}
raw_ajax_res = requests.post(sitemap_ajax_url, data=data)
ajax_res = StringIO(raw_ajax_res.content)
ajax_parser = etree.HTMLParser()
ajax_tree = etree.parse(ajax_res, ajax_parser)
# print(raw_ajax_res.content)
parse_level(ajax_tree, new_root)
# http://www.shinken-monitoring.org/wiki/lib/exe/ajax.php
# print(title.getparent().getparent())
else:
# import pdb;pdb.set_trace()
page_name = item.find("./div//a").text
page_url = url + "/" + root + ":" + page_name + "?do=export_raw"
page_raw_res = requests.get(page_url)
# tmp_root = root.replace(":", "/")
tmp_root = root
if tmp_root.startswith(":"):
tmp_root = tmp_root[1:]
try:
os.makedirs(os.path.join('pages', application_name, tmp_root).replace(":", "/"))
except OSError as e:
#print(e)
pass
file_name = os.path.join('pages', application_name, tmp_root, page_name + ".txt")
file_name = file_name.replace(":", "/")
replace_dict = find_media(page_raw_res.content)
# Change links
modified_page_raw = page_raw_res.content
modified_page_raw = modified_page_raw.replace("official:official_", "official:")
modified_page_raw = modified_page_raw.replace("official_", "official:")
modified_page_raw = modified_page_raw.replace("/official/", ":%s:official:" % application_name)
modified_page_raw = modified_page_raw.replace("[[official:", "[[%s:official:" % application_name)
modified_page_raw = modified_page_raw.replace("[[:", "[[:%s:" % application_name)
# modified_page_raw = modified_page_raw.replace(":green_dot.16x16.png", ":shinken:green_dot.16x16.png")
# modified_page_raw = modified_page_raw.replace(":red_dot.16x16.png", ":shinken:red_dot.16x16.png")
# modified_page_raw = modified_page_raw.replace(":orange_dot.16x16.png", ":shinken:orange_dot.16x16.png")
# Change media links
# modified_page_raw = modified_page_raw.replace("{{:official:images:", "{{%s:official:" % application_name)
for k, v in replace_dict.items():
#print(k, v)
# if v.find("images/") != -1 or k.find("images/"):
# import pdb;pdb.set_trace()
modified_page_raw = modified_page_raw.replace(k, v.replace("/", ":"))
modified_page_raw = modified_page_raw.replace(":images/", ":images/:")
# DISABLE: add :shinken:
# modified_page_raw = modified_page_raw.replace("{{ :", "{{ :shinken:")
# modified_page_raw = modified_page_raw.replace(":shinken:shinken:", ":shinken:")
# if replace_dict:
# import pdb;pdb.set_trace()
# modified_page_raw = modified_page_raw.replace("{{:official:", "{{/%s/official/" % application_name)
# if modified_page_raw.find("{{/") != -1:
# import pdb;pdb.set_trace()
f = open(file_name, "w")
print(" Writing file : %s" % file_name)
f.write(modified_page_raw)
f.close()
def find_media(raw_data):
medias = raw_data.split("{{")
replace_dict = {}
if len(medias) > 1:
for m in medias[1:]:
media = m.split("}}")[0]
if media.startswith("http"):
continue
# if media.find(".png") == -1:
# import pdb;pdb.set_trace()
media = media.split("png")[0] + "png"
media = media.replace(":", "/")
media = media.strip()
if not media.endswith("png"):
continue
media_url = url + "/_media" + media
# DISABLE: add :shinken:
#replace_dict[media] = ":shinken:" + media.replace("/", ":")
replace_dict[media] = media.replace("/", ":")
print(" Get media : %s - %s" % (media, media_url))
media_folder = 'media/' + application_name + "/" + os.path.dirname(media)
try:
os.makedirs(media_folder)
except OSError as e:
#print(e)
pass
media_res = requests.get(media_url)
media_file = os.path.join(media_folder, os.path.basename(media))
print(" Writing media : %s" % media_file)
# print(media_res.content)
f = open(media_file, "w")
f.write(media_res.content)
f.close()
return replace_dict
parse_level(index)
lonely_pages = [
("http://www.shinken-monitoring.org/wiki/official/start?do=export_raw&do=export_raw", "official"),
("http://www.shinken-monitoring.org/wiki/packs/start?do=export_raw&do=export_raw", "packs"),
]
for p, tmp_root in lonely_pages:
page_name = "start"
page_raw_res = requests.get(p)
try:
os.makedirs(os.path.join('pages', application_name, tmp_root).replace(":", "/"))
except OSError as e:
#print(e)
pass
file_name = os.path.join('pages', application_name, tmp_root, page_name + ".txt")
file_name = file_name.replace(":", "/")
#print(file_name)
replace_dict = find_media(page_raw_res.content)
# Change links
modified_page_raw = page_raw_res.content
modified_page_raw = modified_page_raw.replace("official:official_", "official:")
modified_page_raw = modified_page_raw.replace("official_", "official:")
modified_page_raw = modified_page_raw.replace("/official/", ":%s:official:" % application_name)
modified_page_raw = modified_page_raw.replace("[[official:", "[[%s:official:" % application_name)
modified_page_raw = modified_page_raw.replace("[[:", "[[:%s:" % application_name)
# Change media links
for k, v in replace_dict.items():
# print(k, v)
modified_page_raw = modified_page_raw.replace(k, v.replace("/", ":"))
modified_page_raw = modified_page_raw.replace(":images/", ":images/:")
# DISABLE: add :shinken:
modified_page_raw = modified_page_raw.replace("{{ :", "{{ :shinken:")
modified_page_raw = modified_page_raw.replace(":shinken:shinken:", ":shinken:")
f = open(file_name, "w")
print(" Writing file : %s" % file_name)
f.write(modified_page_raw)
f.close()
# for i in `grep -R "^| "|grep Prev|cut -d ":" -f 1|uniq`; do sed -i 's/^| .*Prev.*//' $i; done
# for i in `grep -R "^| "|grep Next|cut -d ":" -f 1|uniq`; do sed -i 's/^| .*Next.*//' $i; done
# for i in `grep -R "^| "|grep About|cut -d ":" -f 1|uniq`; do sed -i 's/^| .*About.*//' $i; done
# for i in `grep -R "^| "|grep Home|cut -d ":" -f 1|uniq` ; do sed -i 's/^| .*Home.*//' $i; done
# for i in `grep -R "===== Cha" . -l`; do sed -i 's/^===== C\(.*\)=====$/====== C\1======/' $i; done
| 7,836
|
Python
|
.py
| 157
| 42.203822
| 118
| 0.566797
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,557
|
__init__.py
|
shinken-solutions_shinken/doc/theme/sphinx_rtd_theme/__init__.py
|
"""Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
VERSION = (0, 1, 5)
__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
| 456
|
Python
|
.py
| 12
| 35.333333
| 82
| 0.701835
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,558
|
conf.py
|
shinken-solutions_shinken/doc/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Shinken documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 13 01:01:23 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
import shinken
from shinken.bin import VERSION
# Fix for missing modules
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
def version_info(self):
return [0,0]
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['MySQLdb',
'_mysql_exceptions',
'cx_Oracle',
'log',
'pymongo',
#'pycurl',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
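# With the Mock in place, "import pymongo" or "from pymongo import Connection"
# both succeed at doc-build time: lowercase attribute lookups return fresh
# Mock objects while capitalized names return throwaway classes (a sketch of
# how the Mock above behaves, nothing extra is configured).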
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.graphviz', 'sphinx.ext.inheritance_diagram']
# Debian 6 do NOT have such extension
try:
import sphinx.ext.mathjax
extensions.append('sphinx.ext.mathjax')
except ImportError:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Shinken Manual'
copyright = u'2013, Shinken Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = ["../theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Shinkendoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Shinken.tex', u'Shinken Documentation',
u'Shinken Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'shinken', u'Shinken Documentation',
[u'Shinken Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Shinken', u'Shinken Documentation',
u'Shinken Team', 'Shinken', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Shinken'
epub_author = u'Shinken Team'
epub_publisher = u'Shinken Team'
epub_copyright = u'2013, Shinken Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 10,447
|
Python
|
.py
| 247
| 39.890688
| 228
| 0.710245
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,559
|
cli.py
|
shinken-solutions_shinken/cli/shinkenio/cli.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import pycurl
import os
import sys
import stat
import json
import tempfile
import tarfile
import urllib
import shutil
from StringIO import StringIO
from shinken.log import logger, cprint
# Will be populated by the shinken CLI command
CONFIG = None
############# ******************** PUBLISH ****************###########
def read_package_json(fd):
buf = fd.read()
fd.close()
buf = buf.decode('utf8', 'ignore')
try:
package_json = json.loads(buf)
except ValueError as exp:
logger.error("Bad package.json file : %s", exp)
sys.exit(2)
if not package_json:
logger.error("Bad package.json file")
sys.exit(2)
return package_json
def create_archive(to_pack):
# First try to look if the directory we are trying to pack is valid
to_pack = os.path.abspath(to_pack)
if not os.path.exists(to_pack):
logger.error("Error : the directory to pack is missing %s", to_pack)
sys.exit(2)
logger.debug("Preparing to pack the directory %s", to_pack)
package_json_p = os.path.join(to_pack, 'package.json')
if not os.path.exists(package_json_p):
logger.error("Error : Missing file %s", package_json_p)
sys.exit(2)
package_json = read_package_json(open(package_json_p))
name = package_json.get('name', None)
if not name:
logger.error('Missing name entry in the package.json file. Cannot pack')
sys.exit(2)
# return True for files we want to exclude
def tar_exclude_filter(f):
        # if the file starts with .git, we bail out
# Also ending with ~ (Thanks emacs...)
if f.startswith('./.git') or f.startswith('.git'):
return True
if f.endswith('~'):
return True
return False
# Now prepare a destination file
tmp_dir = tempfile.gettempdir()
tmp_file = os.path.join(tmp_dir, name+'.tar.gz')
tar = tarfile.open(tmp_file, "w:gz")
os.chdir(to_pack)
tar.add(".",arcname='.', exclude=tar_exclude_filter)
tar.close()
logger.debug("Saved file %s", tmp_file)
return tmp_file
def publish_archive(archive):
# Now really publish it
proxy = CONFIG['shinken.io']['proxy']
proxy_socks5 = CONFIG['shinken.io']['proxy_socks5']
api_key = CONFIG['shinken.io']['api_key']
    # Ok we will push the file with a 30s connect / 5min transfer timeout
c = pycurl.Curl()
c.setopt(c.POST, 1)
c.setopt(c.CONNECTTIMEOUT, 30)
c.setopt(c.TIMEOUT, 300)
if proxy:
c.setopt(c.PROXY, proxy)
if proxy_socks5:
c.setopt(c.PROXY, proxy_socks5)
c.setopt(c.PROXYTYPE, c.PROXYTYPE_SOCKS5)
c.setopt(c.URL, "http://shinken.io/push")
c.setopt(c.HTTPPOST, [("api_key", api_key),
("data",
(c.FORM_FILE, str(archive),
c.FORM_CONTENTTYPE, "application/x-gzip"))
])
response = StringIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
c.setopt(c.VERBOSE, 1)
try:
c.perform()
except pycurl.error as exp:
logger.error("There was a critical error : %s", exp)
sys.exit(2)
return
r = c.getinfo(pycurl.HTTP_CODE)
c.close()
if r != 200:
logger.error("There was a critical error : %s", response.getvalue())
sys.exit(2)
else:
ret = json.loads(response.getvalue().replace('\\/', '/'))
status = ret.get('status')
text = ret.get('text')
if status == 200:
logger.info(text)
else:
logger.error(text)
sys.exit(2)
def do_publish(to_pack='.'):
logger.debug("WILL CALL PUBLISH.py with %s", to_pack)
archive = create_archive(to_pack)
publish_archive(archive)
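# Usage sketch via the shinken CLI (the 'publish' keyword is wired up in the
# exports dict at the bottom of this file):
#   shinken publish ./my-pack
# which tars ./my-pack (minus .git and editor backups) and pushes it to
# shinken.io with the configured api_key.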
################" *********************** SEARCH *************** ##################
def search(look_at):
    # Now really run the search
proxy = CONFIG['shinken.io']['proxy']
proxy_socks5 = CONFIG['shinken.io']['proxy_socks5']
api_key = CONFIG['shinken.io']['api_key']
    # Ok we will run the query with a 30s connect / 5min transfer timeout
c = pycurl.Curl()
c.setopt(c.POST, 0)
c.setopt(c.CONNECTTIMEOUT, 30)
c.setopt(c.TIMEOUT, 300)
if proxy:
c.setopt(c.PROXY, proxy)
if proxy_socks5:
c.setopt(c.PROXY, proxy_socks5)
c.setopt(c.PROXYTYPE, c.PROXYTYPE_SOCKS5)
args = {'keywords':','.join(look_at)}
c.setopt(c.URL, str('shinken.io/searchcli?'+urllib.urlencode(args)))
response = StringIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
#c.setopt(c.VERBOSE, 1)
try:
c.perform()
except pycurl.error as exp:
logger.error("There was a critical error : %s", exp)
return
r = c.getinfo(pycurl.HTTP_CODE)
c.close()
if r != 200:
logger.error("There was a critical error : %s", response.getvalue())
sys.exit(2)
else:
ret = json.loads(response.getvalue().replace('\\/', '/'))
status = ret.get('status')
result = ret.get('result')
if status != 200:
logger.info(result)
return []
return result
def print_search_matches(matches):
if len(matches) == 0:
logger.warning("No match founded in shinken.io")
return
    # We will sort and uniq the results (an 'all' search may return
    # both packs and modules, and some entries are both)
ps = {}
names = [p['name'] for p in matches]
names = list(set(names))
names.sort()
for p in matches:
name = p['name']
ps[name] = p
for name in names:
p = ps[name]
user_id = p['user_id']
keywords = p['keywords']
description = p['description']
cprint('%s ' % name , 'green', end='')
cprint('(%s) [%s] : %s' % (user_id, ','.join(keywords), description))
def do_search(*look_at):
# test for generic search
if look_at == ('all',):
matches = []
look_at = ('pack',)
matches += search(look_at)
look_at = ('module',)
matches += search(look_at)
else:
logger.debug("CALL SEARCH WITH ARGS %s", str(look_at))
matches = search(look_at)
    if matches == []: print('you are unlucky, use "shinken search all" for a complete list')
print_search_matches(matches)
################" *********************** INVENTORY *************** ##################
def inventor(look_at):
    # Now really dump the local inventory
inventory = CONFIG['paths']['inventory']
logger.debug("dumping inventory %s", inventory)
    # get all sub-directories
for d in os.listdir(inventory):
if os.path.exists(os.path.join(inventory, d, 'package.json')):
if not look_at or d in look_at:
print(d)
            # If asked, dump the package content (content.json)
            if look_at and d in look_at:
content_p = os.path.join(inventory, d, 'content.json')
if not os.path.exists(content_p):
logger.error('Missing %s file', content_p)
continue
try:
j = json.loads(open(content_p, 'r').read())
except Exception as exp:
logger.error('Bad %s file "%s"', content_p, exp)
continue
for d in j:
s = ''
                if d['type'] == '5': # tar directory
s += '(d)'
else:
s += '(f)'
s += d['name']
print(s)
def do_inventory(*look_at):
inventor(look_at)
#################### ***************** INSTALL ************ ###################
def _copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
if not os.path.exists(d):
os.mkdir(d)
_copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
# Do a chmod -R +x
def _chmodplusx(d):
for item in os.listdir(d):
p = os.path.join(d, item)
if os.path.isdir(p):
_chmodplusx(p)
else:
st = os.stat(p)
os.chmod(p, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)
def grab_package(pname):
cprint('Grabbing : ' , end='')
cprint('%s' % pname, 'green')
    # Now really grab the package
proxy = CONFIG['shinken.io']['proxy']
proxy_socks5 = CONFIG['shinken.io']['proxy_socks5']
api_key = CONFIG['shinken.io']['api_key']
    # Ok we will fetch the file with a 5m timeout
c = pycurl.Curl()
c.setopt(c.POST, 0)
c.setopt(c.CONNECTTIMEOUT, 30)
c.setopt(c.TIMEOUT, 300)
if proxy:
c.setopt(c.PROXY, proxy)
if proxy_socks5:
c.setopt(c.PROXY, proxy_socks5)
c.setopt(c.PROXYTYPE, c.PROXYTYPE_SOCKS5)
c.setopt(c.URL, str('shinken.io/grab/%s' % pname))
response = StringIO()
c.setopt(pycurl.WRITEFUNCTION, response.write)
#c.setopt(c.VERBOSE, 1)
try:
c.perform()
except pycurl.error as exp:
logger.error("There was a critical error : %s", exp)
sys.exit(2)
return ''
r = c.getinfo(pycurl.HTTP_CODE)
c.close()
if r != 200:
logger.error("There was a critical error : %s", response.getvalue())
sys.exit(2)
else:
ret = response.getvalue()
logger.debug("CURL result len : %d ", len(ret))
return ret
def grab_local(d):
# First try to look if the directory we are trying to pack is valid
to_pack = os.path.abspath(d)
if not os.path.exists(to_pack):
err = "Error : the directory to install is missing %s" % to_pack
logger.error(err)
raise Exception(err)
package_json_p = os.path.join(to_pack, 'package.json')
if not os.path.exists(package_json_p):
logger.error("Error : Missing file %s", package_json_p)
sys.exit(2)
package_json = read_package_json(open(package_json_p))
pname = package_json.get('name', None)
if not pname:
err = 'Missing name entry in the package.json file. Cannot install'
logger.error(err)
raise Exception(err)
# return True for files we want to exclude
def tar_exclude_filter(f):
        # if the file starts with .git, we bail out
# Also ending with ~ (Thanks emacs...)
if f.startswith('./.git'):
return True
if f.endswith('~'):
return True
return False
# Now prepare a destination file
tmp_file = tempfile.mktemp()
tar = tarfile.open(tmp_file, "w:gz")
os.chdir(to_pack)
tar.add(".",arcname='.', exclude=tar_exclude_filter)
tar.close()
fd = open(tmp_file, 'rb')
raw = fd.read()
fd.close()
return (pname, raw)
def install_package(pname, raw, update_only=False):
if update_only:
logger.debug('UPDATE ONLY ENABLED')
logger.debug("Installing the package %s (size:%d)", pname, len(raw))
if len(raw) == 0:
logger.error('The package %s cannot be found', pname)
sys.exit(2)
return
tmpdir = os.path.join(tempfile.gettempdir(), pname)
logger.debug("Unpacking the package into %s", tmpdir)
if os.path.exists(tmpdir):
logger.debug("Removing previous tmp dir %s", tmpdir)
shutil.rmtree(tmpdir)
logger.debug("Creating temporary dir %s", tmpdir)
os.mkdir(tmpdir)
package_content = []
# open a file with the content
f = StringIO(raw)
tar_file = tarfile.open(fileobj=f, mode="r")
logger.debug("Tar file contents:")
for i in tar_file.getmembers():
path = i.name
if path == '.':
continue
if path.startswith('/') or '..' in path:
logger.error("SECURITY: the path %s seems dangerous!", path)
sys.exit(2)
return
# Adding all files into the package_content list
package_content.append( {'name':i.name, 'mode':i.mode, 'type':i.type, 'size':i.size} )
logger.debug("\t%s", path)
# Extract all in the tmpdir
tar_file.extractall(tmpdir)
tar_file.close()
# Now we look at the package.json that will give us our name and co
package_json_p = os.path.join(tmpdir, 'package.json')
if not os.path.exists(package_json_p):
logger.error("Error : bad archive : Missing file %s", package_json_p)
sys.exit(2)
return None
package_json = read_package_json(open(package_json_p))
logger.debug("Package.json content %s ", package_json)
modules_dir = CONFIG['paths']['modules']
share_dir = CONFIG['paths']['share']
packs_dir = CONFIG['paths']['packs']
etc_dir = CONFIG['paths']['etc']
doc_dir = CONFIG['paths']['doc']
inventory_dir = CONFIG['paths']['inventory']
libexec_dir = CONFIG['paths'].get('libexec', os.path.join(CONFIG['paths']['lib'], 'libexec'))
test_dir = CONFIG['paths'].get('test', '/__DONOTEXISTS__')
for d in (modules_dir, share_dir, packs_dir, doc_dir, inventory_dir):
if not os.path.exists(d):
logger.error("The installation directory %s is missing!", d)
sys.exit(2)
return
# Now install the package from $TMP$/share/* to $SHARE$/*
p_share = os.path.join(tmpdir, 'share')
logger.debug("TMPDIR:%s aahre_dir:%s pname:%s", tmpdir, share_dir, pname)
if os.path.exists(p_share):
logger.info("Installing the share package data")
# shutil will do the create dir
_copytree(p_share, share_dir)
logger.info("Copy done in the share directory %s", share_dir)
logger.debug("TMPDIR:%s modules_dir:%s pname:%s", tmpdir, modules_dir, pname)
# Now install the package from $TMP$/module/* to $MODULES$/pname/*
p_module = os.path.join(tmpdir, 'module')
if os.path.exists(p_module):
logger.info("Installing the module package data")
mod_dest = os.path.join(modules_dir, pname)
if os.path.exists(mod_dest):
logger.info("Removing previous module install at %s", mod_dest)
shutil.rmtree(mod_dest)
# shutil will do the create dir
shutil.copytree(p_module, mod_dest)
logger.info("Copy done in the module directory %s", mod_dest)
p_doc = os.path.join(tmpdir, 'doc')
logger.debug("TMPDIR:%s doc_dir:%s pname:%s", tmpdir, doc_dir, pname)
# Now install the package from $TMP$/doc/* to $MODULES$/doc/source/89_packages/pname/*
if os.path.exists(p_doc):
logger.info("Installing the doc package data")
doc_dest = os.path.join(doc_dir, 'source', '89_packages', pname)
if os.path.exists(doc_dest):
logger.info("Removing previous doc install at %s", doc_dest)
shutil.rmtree(doc_dest)
# shutil will do the create dir
shutil.copytree(p_doc, doc_dest)
logger.info("Copy done in the doc directory %s", doc_dest)
if not update_only:
# Now install the pack from $TMP$/pack/* to $PACKS$/pname/*
p_pack = os.path.join(tmpdir, 'pack')
if os.path.exists(p_pack):
logger.info("Installing the pack package data")
pack_dest = os.path.join(packs_dir, pname)
if os.path.exists(pack_dest):
logger.info("Removing previous pack install at %s", pack_dest)
shutil.rmtree(pack_dest)
# shutil will do the create dir
shutil.copytree(p_pack, pack_dest)
logger.info("Copy done in the pack directory %s", pack_dest)
# Now install the etc from $TMP$/etc/* to $ETC$/etc/*
p_etc = os.path.join(tmpdir, 'etc')
if os.path.exists(p_etc):
logger.info("Merging the etc package data into your etc directory")
            # We don't use shutil.copytree because it NEEDS etc_dir to be non-existent...
            # Come on guys..... cp is not as terrible as this...
_copytree(p_etc, etc_dir)
logger.info("Copy done in the etc directory %s", etc_dir)
# Now install the tests from $TMP$/tests/* to $TESTS$/tests/*
        # if the latter is specified in the configuration file (optional)
p_tests = os.path.join(tmpdir, 'test')
if os.path.exists(p_tests) and os.path.exists(test_dir):
logger.info("Merging the test package data into your test directory")
            # We don't use shutil.copytree because it NEEDS etc_dir to be non-existent...
            # Come on guys..... cp is not as terrible as this...
logger.debug("COPYING %s into %s", p_tests, test_dir)
_copytree(p_tests, test_dir)
logger.info("Copy done in the test directory %s", test_dir)
# Now install the libexec things from $TMP$/libexec/* to $LIBEXEC$/*
# but also chmod a+x the plugins copied
p_libexec = os.path.join(tmpdir, 'libexec')
if os.path.exists(p_libexec) and os.path.exists(libexec_dir):
logger.info("Merging the libexec package data into your libexec directory")
logger.debug("COPYING %s into %s", p_libexec, libexec_dir)
# Before be sure all files in there are +x
_chmodplusx(p_libexec)
_copytree(p_libexec, libexec_dir)
logger.info("Copy done in the libexec directory %s", libexec_dir)
    # then save the package.json into the inventory dir
p_inv = os.path.join(inventory_dir, pname)
if not os.path.exists(p_inv):
os.mkdir(p_inv)
shutil.copy2(package_json_p, os.path.join(p_inv, 'package.json'))
# and the package content
cont = open(os.path.join(p_inv, 'content.json'), 'w')
cont.write(json.dumps(package_content))
cont.close()
# We now clean (rm) the tmpdir we don't need any more
try:
shutil.rmtree(tmpdir, ignore_errors=True)
# cannot remove? not a crime
except OSError:
pass
# THE END, output all is OK :D
cprint('OK ', 'green', end='')
cprint('%s' % pname)
def do_install(pname='', local=False, download_only=False):
raw = ''
if local:
pname, raw = grab_local(pname)
if not local:
if pname == '':
            logger.error('Please select a package to install')
return
raw = grab_package(pname)
if download_only:
tmpf = os.path.join(tempfile.gettempdir(), pname+'.tar.gz')
try:
f = open(tmpf, 'wb')
f.write(raw)
f.close()
cprint('Download OK: %s' % tmpf, 'green')
except Exception as exp:
logger.error("Package save fail: %s", exp)
sys.exit(2)
return
install_package(pname, raw)
def do_update(pname, local):
raw = ''
if local:
pname, raw = grab_local(pname)
if not local:
raw = grab_package(pname)
install_package(pname, raw, update_only=True)
exports = {
do_publish : {
'keywords': ['publish'],
'args': [
{'name' : 'to_pack', 'default':'.', 'description':'Package directory. Default to .'},
],
'description': 'Publish a package on shinken.io. Valid api key required'
},
do_search : {'keywords': ['search'], 'args': [],
'description': 'Search a package on shinken.io by looking at its keywords'
},
do_install : {
'keywords': ['install'],
'args': [
{'name' : 'pname', 'description':'Package to install'},
{'name' : '--local', 'description':'Use a local directory instead of the shinken.io version', 'type': 'bool'},
{'name' : '--download-only', 'description':'Only download the package', 'type': 'bool'},
],
'description' : 'Grab and install a package from shinken.io'
},
do_update : {
'keywords': ['update'],
'args': [
            {'name' : 'pname', 'description':'Package to update'},
{'name' : '--local', 'description':'Use a local directory instead of the shinken.io version', 'type': 'bool'},
],
        'description' : 'Grab and update a package from shinken.io. Only the code and doc, NOT the configuration part! Does not update a package that is not installed.'
},
do_inventory : {'keywords': ['inventory'], 'args': [],
'description': 'List locally installed packages'
},
}
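# Usage sketches for the commands exported above (CLI spellings assumed from
# the 'keywords' entries):
#   shinken search dns
#   shinken install linux-ssh
#   shinken install --local ./my-pack
#   shinken inventory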
| 21,356
|
Python
|
.py
| 531
| 32.493409
| 159
| 0.596833
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,560
|
cli.py
|
shinken-solutions_shinken/cli/doc/cli.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from shinken.log import logger
# Will be populated by the shinken CLI command
CONFIG = None
############# ******************** SERVE ****************###########
def serve(port):
port = int(port)
logger.info("Serving documentation at port %s", port)
import SimpleHTTPServer
import SocketServer
doc_dir = CONFIG['paths']['doc']
html_dir = os.path.join(doc_dir, 'build', 'html')
os.chdir(html_dir)
try:
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", port), Handler)
httpd.serve_forever()
except KeyboardInterrupt:
pass
except Exception as exp:
logger.error(exp)
def do_serve(port='8080'):
if port is None:
port = '8080'
logger.debug("WILL CALL serve with %s", port)
serve(port)
################" *********************** COMPILE *************** ##################
def _compile():
try:
from sphinx import main
except ImportError:
logger.error('Cannot import the sphinx lib, please install it')
return
doc_dir = CONFIG['paths']['doc']
html_dir = os.path.join(doc_dir, 'build', 'html')
doctrees_dir = os.path.join(doc_dir, 'build', 'doctrees')
source_dir = os.path.join(doc_dir, 'source')
try:
s = 'sphinx-build -b html -d %s %s %s' % (doctrees_dir, source_dir, html_dir)
args = s.split(' ')
main(args)
except Exception as exp:
logger.error(exp)
return
def do_compile():
logger.debug("CALL compile")
_compile()
exports = {
do_serve : {
'keywords': ['doc-serve'],
'args': [
{'name' : '--port', 'default':'8080', 'description':'Port to expose the http doc. Default to 8080'},
],
'description': 'Publish the online doc on this server'
},
do_compile : {'keywords': ['doc-compile'], 'args': [],
'description': 'Compile the doc before enabling it online'
},
}
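# Usage sketch (command names taken from the exports above):
#   shinken doc-compile            # build the Sphinx HTML tree
#   shinken doc-serve --port 8080  # then browse http://localhost:8080/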
| 3,024
|
Python
|
.py
| 81
| 32.37037
| 112
| 0.62987
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,561
|
cli.py
|
shinken-solutions_shinken/cli/desc/cli.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from shinken.objects import Host
from shinken.log import logger
# Will be populated by the shinken CLI command
CONFIG = None
############# ******************** SERVE ****************###########
def serve(port):
port = int(port)
logger.info("Serving documentation at port %s", port)
import SimpleHTTPServer
import SocketServer
doc_dir = CONFIG['paths']['doc']
html_dir = os.path.join(doc_dir, 'build', 'html')
os.chdir(html_dir)
try:
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", port), Handler)
httpd.serve_forever()
except KeyboardInterrupt:
pass
except Exception as exp:
logger.error(exp)
def do_desc(cls='host'):
properties = Host.properties
    prop_names = sorted(properties.keys())
for k in prop_names:
v = properties[k]
if v.has_default:
print(k, '(%s)' % v.default)
else:
print(k)
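# Note: the `cls` argument is accepted but currently ignored; do_desc always
# describes the Host class properties. Usage sketch, assuming the `shinken`
# CLI dispatches on the 'desc' keyword declared below:
#   shinken desc --cls host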
exports = {
do_desc : {
'keywords': ['desc'],
'args': [
{'name' : '--cls', 'default':'host', 'description':'Object type to describe'},
],
'description': 'List this object type properties'
},
}
| 2,259 | Python | .py | 63 | 31.380952 | 90 | 0.661327 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,562 | generate_manpages.py | shinken-solutions_shinken/manpages/generate_manpages.py |
#!/usr/bin/env python
# Author: Thibault Cohen <thibault.cohen@savoirfairelinux.com>
# Inspired from http://docutils.sourceforge.net/tools/rst2man.py
from __future__ import absolute_import, division, print_function, unicode_literals
import locale
import os
try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # fall back to the default locale if the user's locale is broken
    pass
from docutils.core import publish_file
from docutils.writers import manpage
output_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), "manpages")
source_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), "sources")
for current_folder, subfolders, files in os.walk(source_folder):
for rst_file in files:
if rst_file.endswith(".rst"):
input_file = os.path.join(current_folder, rst_file)
output_file = os.path.join(output_folder, os.path.splitext(rst_file)[0] + ".8")
publish_file(source_path=input_file,
destination_path=output_file,
writer=manpage.Writer()
)
| 1,037 | Python | .py | 23 | 38.086957 | 91 | 0.678252 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,563 | PythonClient.py | shinken-solutions_shinken/contrib/clients/TSCA/python/PythonClient.py |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import time
import sys
sys.path.append('gen-py')
try:
from org.shinken_monitoring.tsca import StateService
from org.shinken_monitoring.tsca.ttypes import *
except:
print("Can't import tsca stub.")
print("Have you run thrift --gen py ../../../../shinken/modules/tsca/tsca.thrift ?")
sys.exit(1)
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
# Make socket
transport = TSocket.TSocket('localhost', 9090)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TBufferedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = StateService.Client(protocol)
# Connect!
transport.open()
    # The Thrift server expects a list of lists with the following fields:
    # Value n1: Timestamp
    # Value n2: Hostname
    # Value n3: Service
    # Value n4: Return Code
    # Value n5: Output
states_list = []
data = dataArgs()
cr = csv.reader(open(sys.argv[1], "rb"))
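    # Each CSV row is expected as: hostname,service,output
    # e.g. the line "srv1,load,OK - load is fine" becomes one State entry.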
for elt in cr:
trace = State()
trace.timestamp = long(round(time.time()))
trace.hostname = elt[0]
trace.serv = elt[1]
trace.output = elt[2]
trace.rc = ReturnCode.OK
states_list.append(trace)
data.states = states_list
client.submit_list(data)
# Close!
transport.close()
except Thrift.TException as tx:
print('%s' % tx.message)
| 2,550 | Python | .py | 72 | 31.638889 | 88 | 0.709651 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,564 | zmq_broker_client.py | shinken-solutions_shinken/contrib/clients/zmq_client/zmq_broker_client.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
# Thomas Cellerier, thomascellerier@gmail.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This is an example client for the zmq_broker module.
# This will listen for notifications using the given
# serialization method on the given ZeroMQ endpoint
# using the given ZeroMQ topic filter.
#
# Examples:
# python zmq_broker_client.py "json" "tcp://127.0.0.1:12345" "host"
# python zmq_broker_client.py "msgpack" "ipc:///tmp/shinken_pub" ""
# python zmq_broker_client.py "json" "tcp://172.23.2.189:9067" "log"
from __future__ import absolute_import, division, print_function, unicode_literals
import zmq
import sys
# Usage
if len(sys.argv) > 1:
if sys.argv[1] == "--help" or sys.argv[1] == "-h":
print("Usage: python zmq_broker_client.py [json|msgpack] [<zmq endpoint>] [<zmq topic>]")
sys.exit(-1)
# Serialization method
method = ""
if len(sys.argv) < 2 or sys.argv[1] == "json":
import json
method = "json"
elif sys.argv[1] == "msgpack":
import msgpack
method = "msgpack"
else:
print("Invalid serialization method.")
sys.exit(-1)
# ZeroMQ endpoint
sub_endpoint = "tcp://127.0.0.1:12345"
if len(sys.argv) > 2:
sub_endpoint = sys.argv[2]
# ZeroMQ Subscription Topic
topic = ""
if len(sys.argv) > 3:
topic = sys.argv[3]
# Subscribe
context = zmq.Context()
s_sub = context.socket(zmq.SUB)
s_sub.setsockopt(zmq.SUBSCRIBE, topic)
s_sub.connect(sub_endpoint)
print("Listening for shinken notifications.")
# Process incoming messages
while True:
topic = s_sub.recv()
print("Got msg on topic: " + topic)
data = s_sub.recv()
if method == "json":
json_data = json.loads(data)
pretty_msg = json.dumps(json_data, sort_keys=True, indent=4)
print(pretty_msg)
elif method == "msgpack":
msg = msgpack.unpackb(data, use_list=False)
print(msg)
s_sub.close()
context.term()
| 2,684 | Python | .py | 79 | 32.278481 | 97 | 0.729271 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,565 | nsca_client.py | shinken-solutions_shinken/contrib/nsca/nsca_client.py |
# This is a very quick and dirty code for David so he can work on his
# sikuli agent and report the results via NSCA.
#
# This needs a lot of cleanup: it is still half a server and should be
# a proper client class :) I can do it after my "new baby holidays" are
# finished ;)
#
# J. Gabes
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import select
import socket
import struct
import random
import optparse

# VERSION was referenced in __main__ but never defined in the original
# file; the value below is an assumption added so the script can run.
VERSION = '0.1'
def decrypt_xor(data, key):
keylen = len(key)
crypted = [chr(ord(data[i]) ^ ord(key[i % keylen]))
for i in xrange(len(data))]
return ''.join(crypted)
class NSCA_client():
def __init__(self, host, port, encryption_method, password):
self.host = host
self.port = port
self.encryption_method = encryption_method
self.password = password
self.rng = random.Random(password)
def get_objects(self):
"""
This is the main function that is called in the CONFIGURATION
phase.
"""
print("[Dummy] ask me for objects to return")
r = {'hosts': []}
h = {'name': 'dummy host from dummy arbiter module',
'register': '0',
}
r['hosts'].append(h)
print("[Dummy] Returning to Arbiter the hosts:", r)
return r
def send_init_packet(self, socket):
'''
Build an init packet
00-127: IV
128-131: unix timestamp
'''
iv = ''.join([chr(self.rng.randrange(256)) for i in xrange(128)])
init_packet = struct.pack("!128sI", iv, int(time.mktime(time.gmtime())))
socket.send(init_packet)
return iv
def read_check_result(self, data, iv):
'''
Read the check result
00-01: Version
02-05: CRC32
06-09: Timestamp
10-11: Return code
12-75: hostname
76-203: service
204-715: output of the plugin
716-720: padding
'''
if len(data) != 720:
return None
if self.encryption_method == 1:
data = decrypt_xor(data, self.password)
data = decrypt_xor(data, iv)
(version, pad1, crc32, timestamp, rc, hostname_dirty, service_dirty,
output_dirty, pad2) = struct.unpack("!hhIIh64s128s512sh", data)
        hostname = hostname_dirty.split("\0", 1)[0]
        service = service_dirty.split("\0", 1)[0]
        output = output_dirty.split("\0", 1)[0]
return (timestamp, rc, hostname, service, output)
def post_command(self, timestamp, rc, hostname, service, output):
'''
Send a check result command to the arbiter
'''
if len(service) == 0:
extcmd = ("[%lu] PROCESS_HOST_CHECK_RESULT;%s;%d;%s\n"
% (timestamp, hostname, rc, output))
else:
extcmd = ("[%lu] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%d;%s\n"
% (timestamp, hostname, service, rc, output))
print("want to send", extcmd)
#e = ExternalCommand(extcmd)
#self.from_q.put(e)
def main(self):
"""
This is the main loop of the process when in 'external' mode.
"""
#self.set_exit_handler()
self.interrupted = False
backlog = 5
size = 8192
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#server.setblocking(0)
server.connect((self.host, self.port))
#server.listen(backlog)
input = [server]
databuffer = {}
IVs = {}
init = server.recv(size)
print("got init", init)
#init_packet = struct.pack("!128sI",iv,int(time.mktime(time.gmtime())))
(iv, t) = struct.unpack("!128sI", init)
print("IV", iv)
print("T", t)
version = 0
pad1 = 0
crc32 = 0
timestamp = int(time.time())
rc = 2
hostname_dirty = "moncul"
service_dirty = "fonctionnne"
output_dirty = "blablalba"
pad2 = 0
'''
Read the check result
00-01: Version
02-05: CRC32
06-09: Timestamp
10-11: Return code
12-75: hostname
76-203: service
204-715: output of the plugin
716-720: padding
'''
init_packet = struct.pack(
"!hhIIh64s128s512sh",
version, pad1, crc32, timestamp, rc, hostname_dirty,
service_dirty, output_dirty, pad2)
print("Create packent len", len(init_packet))
#(version, pad1, crc32, timestamp, rc, hostname_dirty, service_dirty,
# output_dirty, pad2) = struct.unpack("!hhIIh64s128s512sh",data)
data = decrypt_xor(init_packet, iv)
data = decrypt_xor(data, self.password)
server.send(data)
raise SystemExit(0)
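        # NOTE: everything below is unreachable because of the SystemExit
        # above. It is leftover server-side code (select() loop, accept()
        # handling) from the NSCA daemon this quick client was derived
        # from; `client` is not even defined here, so the loop would
        # crash if it were ever reached.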
while not self.interrupted:
print("Loop")
inputready, outputready, exceptready = select.select(input, [], [], 1)
for s in inputready:
if s == server:
# handle the server socket
#client, address = server.accept()
iv = self.send_init_packet(client)
IVs[client] = iv
input.append(client)
else:
# handle all other sockets
data = s.recv(size)
if s in databuffer:
databuffer[s] += data
else:
databuffer[s] = data
if len(databuffer[s]) == 720:
# end-of-transmission or an empty line was received
(timestamp, rc, hostname, service, output) = self.read_check_result(databuffer[s], IVs[s])
del databuffer[s]
del IVs[s]
self.post_command(timestamp, rc, hostname, service,
output)
try:
s.shutdown(2)
except Exception as exp:
print(exp)
s.close()
input.remove(s)
if __name__ == "__main__":
parser = optparse.OptionParser(
version="Python NSCA client version %s" % VERSION)
parser.add_option("-H", "--hostname", default='localhost',
help="NSCA server IP (default: %default)")
parser.add_option("-P", "--port", type="int", default='5667',
help="NSCA server port (default: %default)")
parser.add_option("-e", "--encryption", default='1',
help=("Encryption mode used by NSCA server "
"(default: %default)"))
parser.add_option("-p", "--password", default='helloworld',
help=("Password for encryption, should be the same as "
"NSCA server (default: %default)"))
parser.add_option("-d", "--delimiter", default='\t',
help="Argument delimiter (defaults to the tab-character)")
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
nsca = NSCA_client(opts.hostname, opts.port, opts.encryption, opts.password)
nsca.main()
| 7,351 | Python | .py | 187 | 27.989305 | 114 | 0.539938 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,566 | sql2mdb.py | shinken-solutions_shinken/contrib/livestatus/sql2mdb.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Olivier Hanesse, olivier.hanesse@gmail.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import time
import os
try:
import shinken
except ImportError:
# If importing shinken fails, try to load from current directory
# or parent directory to support running without installation.
# Submodules will then be loaded from there, too.
import imp
imp.load_module('shinken', *imp.find_module('shinken', [os.path.realpath("."), os.path.realpath(".."), os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), "..")]))
from shinken.objects.config import Config
from shinken.log import logger
from shinken.modules.logstore_sqlite import get_instance as get_instance_sqlite
from shinken.modules.logstore_mongodb import get_instance as get_instance_mongodb
from shinken.modules.logstore_sqlite import LiveStatusLogStoreError
from shinken.modules.livestatus_broker.log_line import Logline
class Dummy:
def add(self, o):
pass
def row_factory(cursor, row):
"""Handler for the sqlite fetch method."""
return Logline(sqlite_cursor=cursor.description, sqlite_row=row)
class Converter(object):
def __init__(self, file):
logger.load_obj(Dummy())
self.conf = Config()
buf = self.conf.read_config([file])
raw_objects = self.conf.read_config_buf(buf)
self.conf.create_objects_for_type(raw_objects, 'arbiter')
self.conf.create_objects_for_type(raw_objects, 'module')
self.conf.early_arbiter_linking()
self.conf.create_objects(raw_objects)
for mod in self.conf.modules:
if mod.module_type == 'logstore_sqlite':
self.mod_sqlite = get_instance_sqlite(mod)
self.mod_sqlite.init()
if mod.module_type == 'logstore_mongodb':
self.mod_mongodb = get_instance_mongodb(mod)
if __name__ == '__main__':
if (len(sys.argv) < 2):
print("usage: sql2mdb shinken-specifig.cfg")
sys.exit(1)
conv = Converter(sys.argv[1])
print(conv.mod_mongodb)
print(conv.mod_sqlite)
print(conv.mod_sqlite.archive_path)
    conv.mod_sqlite.use_aggressive_sql = False
try:
conv.mod_sqlite.open()
except Exception as e:
print("problem opening the sqlite db", e)
sys.exit(1)
try:
conv.mod_mongodb.open()
except Exception as e:
conv.mod_sqlite.close()
print("problem opening the mongodb", e)
sys.exit(1)
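    # Walk every relevant sqlite datafile (the main db plus one archive per
    # day), ATTACH each archive in turn, read all its log lines, and insert
    # them into the MongoDB collection.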
for dateobj, handle, archive, fromtime, totime in conv.mod_sqlite.log_db_relevant_files(0, time.time()):
try:
if handle == "main":
print("attach %s" % archive)
dbresult = conv.mod_sqlite.execute('SELECT * FROM logs', [], row_factory)
else:
conv.mod_sqlite.commit()
print("attach %s" % archive)
conv.mod_sqlite.execute_attach("ATTACH DATABASE '%s' AS %s" % (archive, handle))
dbresult = conv.mod_sqlite.execute('SELECT * FROM %s.logs' % (handle,), [], row_factory)
conv.mod_sqlite.execute("DETACH DATABASE %s" % handle)
# now we have the data of one day
for res in dbresult:
values = res.as_dict()
try:
conv.mod_mongodb.db[conv.mod_mongodb.collection].insert(values)
except Exception as e:
print("problem opening the mongodb", e)
time.sleep(5)
conv.mod_mongodb.db[conv.mod_mongodb.collection].insert(values)
print("wrote %d records" % len(dbresult))
except LiveStatusLogStoreError as e:
print("An error occurred:", e.args[0])
conv.mod_sqlite.close()
conv.mod_mongodb.close()
| 4,652 | Python | .py | 106 | 36.669811 | 175 | 0.660561 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,567 | splitlivelogs.py | shinken-solutions_shinken/contrib/livestatus/splitlivelogs.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This script will take the sqlite database of the livestatus module and
split up the contents in single datafiles (1 for each day of data found).
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import optparse
import os
sys.path.append("..")
sys.path.append("../shinken")
sys.path.append("../../shinken")
sys.path.append("../../../shinken")
#sys.path.append("../bin")
#sys.path.append(os.path.abspath("bin"))
#import shinken
from shinken.modules.livestatus_broker.livestatus_db import LiveStatusDb
parser = optparse.OptionParser(
"%prog [options] -d database [-a archive]")
parser.add_option('-d', '--database', action='store',
dest="database",
help="The sqlite datafile of your livestatus module")
parser.add_option('-a', '--archive', action='store',
dest="archive_path",
help="(optional) path to the archive directory")
opts, args = parser.parse_args()
if not opts.database:
parser.error("Requires at least the database file (option -d/--database")
if not opts.archive_path:
    opts.archive_path = os.path.join(os.path.dirname(opts.database), 'archives')
# Protect for windows multiprocessing that will RELAUNCH all
if __name__ == '__main__':
if os.path.exists(opts.database):
try:
os.stat(opts.archive_path)
except Exception:
os.mkdir(opts.archive_path)
dbh = LiveStatusDb(opts.database, opts.archive_path, 3600)
dbh.log_db_do_archive()
dbh.close()
else:
print("database %s does not exist" % opts.database)
# For perf tuning:
| 2,473 | Python | .py | 62 | 35.983871 | 82 | 0.708212 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,568 | check_shinken_mem.py | shinken-solutions_shinken/contrib/plugins/check_shinken_mem.py |
#!/usr/bin/env python
#
# Authors: David Hannequin <david.hannequin@gmail.com>,
# Hartmut Goebel <h.goebel@crazy-compilers.com>
# Date: 2012-07-12
#
# Requires: Python >= 2.7 or Python plus argparse
# Platform: Linux
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
def MemValues():
"""
Read total mem, free mem and cached from /proc/meminfo
This is linux-only.
"""
for line in open('/proc/meminfo').readlines():
if line.startswith('MemTotal:'):
memTotal = line.split()[1]
if line.startswith('MemFree:'):
memFree = line.split()[1]
if line.startswith('Cached:'):
memCached = line.split()[1]
# :fixme: fails if one of these lines is missing in /proc/meminfo
return memTotal, memCached, memFree
def percentFreeMem():
memTotal, memCached, memFree = MemValues()
return (((int(memFree) + int(memCached)) * 100) / int(memTotal))
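# Worked example (illustrative numbers, in kB as read from /proc/meminfo):
# MemTotal=8000000, MemFree=1000000, Cached=3000000
# -> percentFreeMem() = (1000000 + 3000000) * 100 / 8000000 = 50.0
# -> memory usage reported by main() = 100 - 50.0 = 50.0%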
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning', default='80', type=int)
parser.add_argument('-c', '--critical', default='90', type=int)
args = parser.parse_args()
critical = args.critical
warning = args.warning
pmemUsage = 100 - percentFreeMem()
if pmemUsage >= critical:
print ('CRITICAL - Memory usage: %2.1f%% |mem=%s' % (pmemUsage, pmemUsage))
raise SystemExit(2)
elif pmemUsage >= warning:
print ('WARNING - Memory usage: %2.1f%% |mem=%s' % (pmemUsage, pmemUsage))
raise SystemExit(1)
else:
print ('OK - Memory usage: %2.1f%% |mem=%s' % (pmemUsage, pmemUsage))
raise SystemExit(0)
if __name__ == "__main__":
main()
| 1,746 | Python | .py | 47 | 31.87234 | 83 | 0.639383 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,569 | check_shinken_load.py | shinken-solutions_shinken/contrib/plugins/check_shinken_load.py |
#!/usr/bin/env python
#
# Authors: David Hannequin <david.hannequin@gmail.com>,
# Hartmut Goebel <h.goebel@crazy-compilers.com>
# Date: 2012-07-12
#
# Requires: Python >= 2.7 or Python plus argparse
#
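# Usage sketch (illustrative thresholds; the exit code is 0/1/2 for
# OK/WARNING/CRITICAL, following the usual plugin convention):
#   ./check_shinken_load.py -w 2,1.5,1 -c 4,3,2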
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning', default='3,2,1')
parser.add_argument('-c', '--critical', default='4,3,2')
args = parser.parse_args()
critical = map(float, args.critical.split(','))
warning = map(float, args.warning.split(','))
(cload1, cload5, cload15) = critical
(wload1, wload5, wload15) = warning
(load1, load5, load15) = os.getloadavg()
if load1 >= cload1 or load5 >= cload5 or load15 >= cload15:
print ('CRITICAL - Load average : %s,%s,%s|load1=%s;load5=%s;load15=%s'
% (load1, load5, load15, load1, load5, load15))
raise SystemExit(2)
elif load1 >= wload1 or load5 >= wload5 or load15 >= wload15:
print ('WARNING - Load average : %s,%s,%s|load1=%s;load5=%s;load15=%s'
% (load1, load5, load15, load1, load5, load15))
raise SystemExit(1)
else:
print ('OK - Load average : %s,%s,%s|load1=%s;load5=%s;load15=%s'
% (load1, load5, load15, load1, load5, load15))
raise SystemExit(0)
if __name__ == "__main__":
main()
| 1,447 | Python | .py | 35 | 35.942857 | 82 | 0.625089 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,570 | checkmodule.py | shinken-solutions_shinken/contrib/install.d/tools/checkmodule.py |
#!/usr/bin/env python
#
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# David GUENAULT, dguenault@monitoring-fr.org
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import getopt
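# Usage sketch (hypothetical module name):
#   python checkmodule.py -m pymongo
# prints "OK" and exits 0 if the module imports, "KO" and exits 2 otherwise
# (exit code 1 on an option-parsing error).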
def main(argv):
try:
opts, args = getopt.getopt(argv, "m:")
ret = 0
for o, a in opts:
if o == "-m":
try:
exec("import " + a)
print("OK")
except Exception:
print("KO")
ret = 2
except Exception:
ret = 1
sys.exit(ret)
if __name__ == "__main__":
main(sys.argv[1:])
| 1,406 | Python | .py | 41 | 28.97561 | 82 | 0.653676 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,571 | sendmailservice.py | shinken-solutions_shinken/contrib/notifications/sendmailservice.py |
#!/usr/bin/env python
# Author: David Hannequin <david.hannequin@gmail.com>
# Date: 24 Oct 2011
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
TEXT_template = """***** Shinken Notification *****
Notification: %(notify)s
Service: %(service)s
Host: %(hostname)s
Address: %(hostaddress)s
State: %(state)s
Date/Time: %(datetime)s
Additional Info: %(output)s
"""
HTML_template = '''<html>
<head></head><body>
<style type="text/css">
.recovery { color:ForestGreen }
.acknowledgement { color:ForestGreen }
.problem { color: red }
.ok { color:ForestGreen }
.critical { color:red }
.warning { color:orange }
.unknown { color:gray }
.bold { font-weight:bold }
</style>
<strong> ***** Shinken Notification ***** </strong><br><br>
Notification: <span class="%(notify)s bold">%(notify)s</span><br><br>
State: <span class="%(state)s bold">%(state)s</span><br><br>
Service: %(service)s <br>
Host: %(hostname)s <br>
Address: %(hostaddress)s <br>
Date/Time: %(datetime)s<br>
Additional Info : %(output)s
</body></html>
'''
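# Example invocation (all values illustrative):
#   ./sendmailservice.py -n PROBLEM -s Load -H web1 -a 10.0.0.1 -r CRITICAL \
#       -i "Tue Oct 24 10:00" -o "load too high" \
#       -t admin@example.com -S shinken@example.com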
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--notification', default='unknown', dest='notify')
parser.add_argument('-s', '--servicedesc', default='unknown', dest='service')
parser.add_argument('-H', '--hostname', default='unknown')
parser.add_argument('-a', '--hostaddress', default='unknown')
parser.add_argument('-r', '--servicestate', default='unknown', dest='state')
parser.add_argument('-i', '--shortdatetime', default='unknown', dest='datetime')
parser.add_argument('-o', '--output', default='')
group = parser.add_argument_group('Mail options')
group.add_argument('-t', '--to')
group.add_argument('-S', '--sender')
group.add_argument('--server', default='localhost')
group.add_argument('--port', default=smtplib.SMTP_PORT, type=int)
args = parser.parse_args()
subject = ("** %(notify)s alert - %(hostname)s/%(service)s is %(state)s **"
% vars(args))
## Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = args.sender
msg['To'] = args.to
# Create the body of the message (a plain-text and an HTML version).
#
# According to RFC 2046, the last part of a multipart message, in this
# case the HTML message, is best and preferred.
#
# :fixme: need to encode the body if not ascii, see
# http://mg.pov.lt/blog/unicode-emails-in-python.html for a nice
# solution.
#
msg.attach(MIMEText(TEXT_template % vars(args), 'plain'))
# :fixme: need to html-escape all values and encode the body
msg.attach(MIMEText(HTML_template % vars(args), 'html'))
# Send the message via local SMTP server.
s = smtplib.SMTP(args.server, args.port)
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(args.sender, args.to, msg.as_string())
s.quit()
| 3,014 | Python | .py | 78 | 37.358974 | 82 | 0.719658 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,572 | notify_by_xmpp.py | shinken-solutions_shinken/libexec/notify_by_xmpp.py |
#!/usr/bin/env python
# skvidal@fedoraproject.org, modified by David Laval
# gplv2+
## XMPP notification
#define command{
# command_name notify-host-by-xmpp
# command_line $PLUGINSDIR$/notify_by_xmpp.py -a $PLUGINSDIR$/notify_by_xmpp.ini "Host '$HOSTALIAS$' is $HOSTSTATE$ - Info : $HOSTOUTPUT$" $CONTACTEMAIL$
#}
#
#define command{
# command_name notify-service-by-xmpp
# command_line $PLUGINSDIR$/notify_by_xmpp.py -a $PLUGINSDIR$/notify_by_xmpp.ini "$NOTIFICATIONTYPE$ $HOSTNAME$ $SERVICEDESC$ $SERVICESTATE$ $SERVICEOUTPUT$ $LONGDATETIME$" $CONTACTEMAIL$
#}
# needs a config file to get username/pass/other info format is:
#[xmpp_account]
#server=jabber.org
#port=5222
#username=yourusername
#password=yourpassword
#resource=monitoring
# NOTE: 'from __future__' imports must come before any other statement,
# so the defaults dict is defined after it.
from __future__ import absolute_import, division, print_function, unicode_literals
defaults = {'server': 'jabber.org',
            'port': '5222',
            'resource': 'monitoring'}
# until xmppony is in place
import warnings
warnings.simplefilter("ignore")
import xmpp
from xmpp.protocol import Message
from optparse import OptionParser
import ConfigParser
import sys
import os
parser = OptionParser()
parser.add_option("-a", dest="authfile", default=None, help="file to retrieve username/password/server/port/resource information from")
opts, args = parser.parse_args()
conf = ConfigParser.ConfigParser(defaults=defaults)
if not opts.authfile or not os.path.exists(opts.authfile):
print("no config/auth file specified, can't continue")
sys.exit(1)
conf.read(opts.authfile)
if not conf.has_section('xmpp_account') or not conf.has_option('xmpp_account', 'username') or not conf.has_option('xmpp_account', 'password'):
print("cannot find at least one of: config section 'xmpp_account' or username or password")
sys.exit(1)
server = conf.get('xmpp_account', 'server')
username = conf.get('xmpp_account', 'username')
password = conf.get('xmpp_account', 'password')
resource = conf.get('xmpp_account', 'resource')
port = conf.get('xmpp_account', 'port')
if len(args) < 1:
print("xmppsend message [to whom, multiple args]")
sys.exit(1)
msg = args[0]
msg = msg.replace('\\n', '\n')
c = xmpp.Client(server=server, port=port, debug=[])
con = c.connect()
if not con:
print("Error: could not connect to server: %s:%s" % (c.Server, c.Port))
sys.exit(1)
auth = c.auth(user=username, password=password, resource=resource)
if not auth:
print("Error: Could not authenticate to server: %s:%s" % (c.Server, c.Port))
sys.exit(1)
if len(args) < 2:
r = c.getRoster()
for user in r.keys():
if user == username:
continue
c.send(Message(user, '%s' % msg))
else:
for user in args[1:]:
c.send(Message(user, '%s' % msg))
| 2,771 | Python | .py | 72 | 35.625 | 194 | 0.715888 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,573 | external_mapping.py | shinken-solutions_shinken/libexec/external_mapping.py |
#!/usr/bin/env python3
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This program transforms a flat dependency file into a JSON one so it
can be loaded by the hot_dependencies_arbiter module.
The input file format is:
host1 ":" vm1
host2 ":" vm2
...
Spaces around host- and vm-names will be stripped. Lines starting with
a `#` will be ignored.
You can now get a live update of your dependency tree in shinken for
your xen/virtualbox/qemu. All you have to do is find a way to
modify this flat file when you do a live migration.
For example, you can use a script like this in your crontab::
    dsh -Mc -g mydom0group 'xm list' | \
        awk '/vm-/ { print $1 }' > /tmp/shinken_flat_mapping
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import optparse
# Try to load json (2.5 and higher) or simplejson if failed (python2.4)
try:
import json
except ImportError:
# For old Python version, load simple json
try:
import simplejson as json
except ImportError:
raise SystemExit("Error: you need the json or simplejson module "
"for this script")
VERSION = '0.2'
def main(input_file, output_file, type):
# Check if input_file is newer than output_file
if os.path.exists(output_file):
if os.path.getmtime(output_file) >= os.path.getmtime(input_file):
print("Nothing to do")
return True
r = []
flatmappingfile = open(input_file)
try:
for line in flatmappingfile:
if line.startswith('#'):
# this is a comment line, skip it
continue
parts = line.split(':')
            if type == 'service':
v = (('service', parts[0].strip()), ('service', parts[1].strip()))
else:
v = (('host', parts[0].strip()), ('host', parts[1].strip()))
r.append(v)
finally:
flatmappingfile.close()
jsonmappingfile = open(output_file, 'w')
try:
json.dump(r, jsonmappingfile)
finally:
jsonmappingfile.close()
if __name__ == "__main__":
parser = optparse.OptionParser(
version="Shinken external flat mapping file to json mapping %s" % VERSION)
parser.add_option("-o", "--output", dest='output_file',
default='/tmp/external_mapping_file.json',
help="Path of the generated json mapping file.")
parser.add_option("-i", "--input", dest='input_file',
default='/tmp/shinken_flat_mapping',
help="Path of the flat mapping input file.")
parser.add_option("-t", "--type", dest='type',
default='host', help='it is a service or host dependency. ( host | service. Default : host)')
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
main(**vars(opts))
| 3,584 | Python | .py | 88 | 34.409091 | 115 | 0.653149 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,574 | link_xen_host_vm.py | shinken-solutions_shinken/libexec/link_xen_host_vm.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# /usr/local/shinken/libexec/link_xen_host_vm.py
# This file is proposed for Shinken to link vm and xenserver.
# Devers Renaud rdevers@chavers.org
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import XenAPI
import shutil
import optparse
# Try to load json (2.5 and higher) or simplejson if failed (python2.4)
try:
import json
except ImportError:
# For old Python version, load
# simple json (it can be hard json?! It's 2 functions guy!)
try:
import simplejson as json
except ImportError:
sys.exit("Error: you need the json or simplejson module for this script")
VERSION = '0.1'
# Split and clean the rules from a string to a list
def _split_rules(rules):
return [r.strip() for r in rules.split('|')]
# Apply all rules on the objects names
def _apply_rules(name, rules):
if 'nofqdn' in rules:
name = name.split(' ', 1)[0]
name = name.split('.', 1)[0]
if 'lower' in rules:
name = name.lower()
return name
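# e.g. _apply_rules('Server.MyDomain.com', ['lower', 'nofqdn']) -> 'server'
# ('nofqdn' strips the domain part first, then 'lower' lowercases the name)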
def create_all_links(res,rules):
r = []
for host in res:
for vm in res[host]:
# First we apply rules on the names
host_name = _apply_rules(host,rules)
vm_name = _apply_rules(vm,rules)
v = (('host', host_name), ('host', vm_name))
r.append(v)
return r
def write_output(path,r):
try:
f = open(path + '.tmp', 'wb')
buf = json.dumps(r)
f.write(buf)
f.close()
shutil.move(path + '.tmp', path)
print("File %s wrote" % path)
except IOError as exp:
sys.exit("Error writing the file %s: %s" % (path, exp))
def con_poolmaster(xs, user, password):
try:
s = XenAPI.Session("http://%s" % xs)
s.xenapi.login_with_password(user,password)
return s
except XenAPI.Failure as msg:
if msg.details[0] == "HOST_IS_SLAVE":
host = msg.details[1]
s = XenAPI.Session("http://%s" % host)
s.xenapi.login_with_password(user, password)
return s
else:
print("Error: pool con:", xs, sys.exc_info()[0])
pass
except Exception:
print("Error: pool con:", xs, sys.exc_info()[0])
pass
return None
def main(output, user, password, rules, xenserver):
res = {}
for xs in xenserver:
try:
s = con_poolmaster(xs, user, password)
vms = s.xenapi.VM.get_all()
for vm in vms:
record = s.xenapi.VM.get_record(vm)
if not(record["is_a_template"]) and not(record["is_control_domain"]):
vhost = s.xenapi.VM.get_resident_on(vm)
if vhost != "OpaqueRef:NULL":
host = s.xenapi.host.get_hostname(vhost)
vm_name = s.xenapi.VM.get_name_label(vm)
if host in res.keys():
res[host].append(vm_name)
else:
res[host] = [vm_name]
s.xenapi.session.logout()
except Exception:
pass
r = create_all_links(res,rules)
print("Created %d links" % len(r))
write_output(output, r)
print("Finished!")
if __name__ == "__main__":
# Manage the options
parser = optparse.OptionParser(
version="Shinken XenServer/XCP links dumping script version %s" % VERSION)
parser.add_option("-o", "--output",
default='/tmp/xen_mapping_file.json',
help="Path of the generated mapping file.")
parser.add_option("-u", "--user",
help="User name to connect to this Vcenter")
parser.add_option("-p", "--password",
help="The password of this user")
parser.add_option('-r', '--rules', default='',
help="Rules of name transformation. Valid names are: "
"`lower`: to lower names, "
"`nofqdn`: keep only the first name (server.mydomain.com -> server)."
"You can use several rules like `lower|nofqdn`")
    parser.add_option('-x', '--xenserver', action="append",
                      help="multiple ip/fqdn of your XenServer/XCP pool master (or member). "
                           "ex: -x poolmaster1 -x poolmaster2 -x poolmaster3 "
                           "If a pool member is given, the pool master will be found automatically")
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
if opts.user is None:
parser.error("missing -u or --user option for the pool master username")
    if opts.password is None:
        parser.error("missing -p or --password option for the pool master password")
if opts.output is None:
parser.error("missing -o or --output option for the output mapping file")
if opts.xenserver is None:
parser.error("missing -x or --xenserver option for pool master list")
main(**opts.__dict__)
| 5,556 | Python | .py | 141 | 32.446809 | 93 | 0.626274 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,575 | notify_by_email.py | shinken-solutions_shinken/libexec/notify_by_email.py |
#!/usr/bin/env python
#-*-coding:utf-8-*-
# Copyright (C) 2012:
# Romain Forlot, rforlot@yahoo.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import socket
import logging
import getpass
import smtplib
import urllib
from optparse import OptionParser, OptionGroup
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
# Global var
shinken_image_dir = '/var/lib/shinken/share/images'
shinken_customer_logo = 'customer_logo.jpg'
webui_config_file = '/etc/shinken/modules/webui.cfg'
webui2_config_file = '/etc/shinken/modules/webui2.cfg'
webui2_image_dir = '/var/lib/shinken/share/photos'
# Set up root logging
def setup_logging():
log_level = logging.INFO
if opts.debug:
log_level = logging.DEBUG
if opts.logfile:
logging.basicConfig(filename=opts.logfile, level=log_level, format='%(asctime)s:%(levelname)s: %(message)s')
else:
logging.basicConfig(level=log_level, format='%(asctime)s:%(levelname)s: %(message)s')
def overload_test_variable():
shinken_notification_object_var = {
'service': {
'Service description': 'Test_Service',
'Service state': 'TEST',
'Service output': 'Houston, we got a problem here! Oh, wait. No. It\'s just a test.',
'Service state duration': '00h 00min 10s'
},
'host': {
'Hostname': 'Test_Host',
'Host state': 'TEST',
'Host state duration': '00h 00h 20s'
}
}
shinken_var = {
'Hostname': 'shinken',
'Host address': '127.0.0.1',
'Notification type': 'TEST',
'Date': 'Now, test'
}
return (shinken_notification_object_var, shinken_var)
def get_webui_logo():
company_logo=''
try:
webui_config_fh = open(webui2_config_file)
    except IOError:
        # WebUI2 not installed ...
        full_logo_path = os.path.join(shinken_image_dir, shinken_customer_logo)
        if os.path.isfile(full_logo_path):
            return full_logo_path
        # No WebUI2 config and no fallback logo: return an empty string
        # instead of falling through to the undefined file handle below.
        return ''
if opts.webui:
# WebUI2 installed
logging.debug('Webui2 is installed')
webui_config = webui_config_fh.readlines()
for line in webui_config:
if 'company_logo' in line:
company_logo = line.rsplit('company_logo')[1].strip()
company_logo += '.png'
logging.debug('Found company logo property: %s', company_logo)
if company_logo:
full_logo_path = os.path.join(webui2_image_dir, company_logo)
if os.path.isfile(full_logo_path):
logging.debug('Found company logo file: %s', full_logo_path)
return full_logo_path
else:
logging.debug('File %s does not exist!', full_logo_path)
return ''
return company_logo
def get_webui_port():
port=''
try:
webui_config_fh = open(webui2_config_file)
except IOError:
# WebUI2 not installed, try WebUI1
try:
webui_config_fh = open(webui_config_file)
except IOError:
# No WebUI
return ''
else:
# WebUI1 installed
logging.debug('Webui1 is installed')
else:
# WebUI2 installed
logging.debug('Webui2 is installed')
logging.debug('Webui file handler: %s' % (webui_config_fh))
webui_config = webui_config_fh.readlines()
logging.debug('Webui config: %s' % (webui_config))
for line in webui_config:
if 'port' in line:
port = line.rsplit('port')[1].strip()
return port
def get_shinken_url():
if opts.webui:
hostname = socket.getfqdn()
webui_port = get_webui_port()
if not webui_port:
return
if opts.webui_url:
url = '%s/%s/%s' % (opts.webui_url, opts.notification_object, urllib.quote(shinken_var['Hostname']))
else:
url = 'http://%s:%s/%s/%s' % (hostname, webui_port, opts.notification_object, urllib.quote(shinken_var['Hostname']))
# Append service if we notify a service object
if opts.notification_object == 'service':
url += '/%s' % (urllib.quote(shinken_notification_object_var['service']['Service description']))
return url
# Get current process user that will be the mail sender
def get_user():
if opts.sender:
return opts.sender
else:
return '@'.join((getpass.getuser(), socket.getfqdn()))
#############################################################################
# Common mail functions and var
#############################################################################
mail_welcome = 'Shinken Monitoring System Notification'
mail_format = { 'html': MIMEMultipart(), 'txt': MIMEMultipart('alternative') }
# Construct mail subject field based on which object we notify
def get_mail_subject(object):
mail_subject = {
'host': 'Host %s alert for %s since %s' % (
shinken_notification_object_var['host']['Host state'],
shinken_var['Hostname'],
shinken_notification_object_var['host']['Host state duration']
),
'service': '%s on Host: %s about service %s since %s' % (
shinken_notification_object_var['service']['Service state'],
shinken_var['Hostname'],
shinken_notification_object_var['service']['Service description'],
shinken_notification_object_var['service']['Service state duration']
)
}
return mail_subject[object]
def get_content_to_send():
shinken_var.update(shinken_notification_object_var[opts.notification_object])
# Translate a comma separated list of mail recipient into a python list
def make_receivers_list(receivers):
if ',' in receivers:
ret = receivers.split(',')
else:
ret = [receivers]
return ret
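# e.g. make_receivers_list('admin@example.com,ops@example.com')
#      returns ['admin@example.com', 'ops@example.com']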
# This just creates the mail skeleton and doesn't add any content,
# so it can be used to attach multiple, different content parts.
def create_mail(format):
# Fill SMTP header and body.
# It has to be multipart since we can include an image in it.
logging.debug('Mail format: %s' % (format))
msg = mail_format[format]
logging.debug('From: %s' % (get_user()))
msg['From'] = get_user()
logging.debug('To: %s' % (opts.receivers))
msg['To'] = opts.receivers
logging.debug('Subject: %s' % (opts.prefix + get_mail_subject(opts.notification_object)))
msg['Subject'] = opts.prefix + get_mail_subject(opts.notification_object)
msg['Precedence'] = 'bulk'
msg['Auto-Submitted'] = 'auto-generated'
return msg
#############################################################################
# Txt creation lair
#############################################################################
def create_txt_message(msg):
txt_content = [mail_welcome]
get_content_to_send()
for k,v in sorted(shinken_var.iteritems()):
txt_content.append(k + ': ' + v)
# Add url at the end
url = get_shinken_url()
if url != None:
txt_content.append('More details on : %s' % url)
txt_content = '\r\n'.join(txt_content)
msgText = MIMEText(txt_content, 'plain')
msg.attach(msgText)
return msg
#############################################################################
# Html creation lair
#############################################################################
# Process customer logo into mail message so it can be referenced in it later
def add_image2mail(img, mail):
fp = open(img, 'rb')
try:
msgLogo = MIMEImage(fp.read())
msgLogo.add_header('Content-ID', '<customer_logo>')
mail.attach(msgLogo)
except:
pass
fp.close()
return mail
def create_html_message(msg):
# Get url and add it in footer
url = get_shinken_url()
logging.debug('Grabbed Shinken URL : %s' % url)
# Header part
html_content = ['''
<html>\r
<head>\r
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\r
<style type="text/css">\r
body {text-align: center; font-family: Verdana, sans-serif; font-size: 10pt;}\r
img.logo {float: left; margin: 10px 10px 10px; vertical-align: middle}\r
span {font-family: Verdana, sans-serif; font-size: 12pt;}\r
table {text-align:center; margin-left: auto; margin-right: auto;}\r
th {white-space: nowrap;}\r
th.even {background-color: #D9D9D9;color: #000000;}\r
td.even {background-color: #F2F2F2;color: #000000;}\r
th.odd {background-color: #F2F2F2;color: #000000;}\r
td.odd {background-color: #FFFFFF;color: #000000;}\r
th,td {font-family: Verdana, sans-serif; font-size: 10pt; text-align:left;}\r
th.customer {width: 600px; background-color: #004488; color: #ffffff;}\r
</style>\r
</head>\r
<body>\r'''
]
full_logo_path = get_webui_logo()
if full_logo_path:
msg = add_image2mail(full_logo_path, msg)
html_content.append('<img src="cid:customer_logo">')
html_content.append('<table width="600px"><tr><th colspan="2"><span>%s</span></th></tr>' % mail_welcome)
else:
html_content.append('<table width="600px"><tr><th colspan="2"><span>%s</span></th></tr>' % mail_welcome)
# Update shinken_var dict with appropriate dict depending which is object notified
# then we can fill mail content.
odd=True
get_content_to_send()
logging.debug('Content to send: %s' % shinken_var)
for k,v in sorted(shinken_var.iteritems()):
logging.debug('type %s : %s' % (k, type(v)))
if odd:
html_content.append('<tr><th class="odd">' + str(k) + '</th><td class="odd">' + str(v) + '</td></tr>')
odd=False
else:
html_content.append('<tr><th class="even">' + str(k) + '</th><td class="even">' + str(v) + '</td></tr>')
odd=True
html_content.append('</table>')
if url != None:
html_content.append('More details on Shinken WebUI at : <a href="%s">%s</a></body></html>' % (url, url))
else:
html_content.append('</body></html>')
# Make final string var to send and encode it to stdout encoding
# avoiding decoding error.
html_content = '\r\n'.join(html_content)
try:
if sys.stdout.encoding is not None:
encoding = sys.stdout.encoding
else:
encoding = 'utf-8'
html_msg = html_content.encode(encoding)
except UnicodeDecodeError:
logging.debug('Content is Unicode encoded.')
html_msg = html_content.decode('utf-8').encode(encoding)
logging.debug('HTML string: %s' % html_msg)
msgText = MIMEText(html_msg, 'html', encoding)
logging.debug('MIMEText: %s' % msgText)
msg.attach(msgText)
logging.debug('Mail object: %s' % msg)
return msg
if __name__ == "__main__":
    parser = OptionParser(description='Notify by email receivers of Shinken alerts. Message will be formatted in html and can embed a customer logo. To include the customer logo, just put a jpg image named customer_logo.jpg in ' + shinken_image_dir)
group_debug = OptionGroup(parser, 'Debugging and test options', 'Useful to debug script under shinken processes. Useful to just make a standalone test of script to see what it looks like.')
group_general = OptionGroup(parser, 'General options', 'Default options to setup')
    group_shinken = OptionGroup(parser, 'Shinken macros to specify.', 'Used to specify the usual shinken macros in notifications. If they are not specified, the script will try to get them from environment variables; you need enable_environment_macros in shinken.cfg if you want to use them, which is not recommended for large environments. You\'d better use options -c and -o depending on which object you\'ll notify for.')
group_shinken_details = OptionGroup(parser, 'Details and additional information', 'You can include some useful additional information to notifications using these options. Good practice is to add HOST or SERVICE macros with these details and provide them to the script')
group_shinken_webui = OptionGroup(parser, 'Shinken WebUI.', 'Used to include some Shinken WebUI information in the notifications.')
# Debug and test options
    group_debug.add_option('-D', '--debug', dest='debug', default=False,
                           action='store_true', help='Enable debug logging')
group_debug.add_option('-t', '--test', dest='test', default=False,
action='store_true', help='Generate a test mail message')
group_debug.add_option('-l', '--logfile', dest='logfile',
help='Specify a log file. Default: log to stdout.')
# General options
group_general.add_option('-f', '--format', dest='format', type='choice', choices=['txt', 'html'],
default='html', help='Mail format "html" or "txt". Default: html')
group_general.add_option('-r', '--receivers', dest='receivers',
help='Mail recipients comma-separated list')
group_general.add_option('-F', '--sender', dest='sender',
help='Sender email address, default is system user')
    group_general.add_option('-S', '--SMTP', dest='smtp', default='localhost',
                             help='Target SMTP hostname. Use the string "None" to launch sendmail instead. Default: localhost')
group_general.add_option('-U', '--smtp-user', dest='smtp_user', default=None,
help='SMTP username. Default: None')
group_general.add_option('-P', '--smtp-password', dest='smtp_password', default=None,
help='SMTP password. Default: None')
group_general.add_option('-T', '--smtp-starttls', dest='smtp_starttls', default=False,
action='store_true', help='Connect to smtp using starttls')
group_general.add_option('-p', '--prefix', dest='prefix', default='',
help='Mail subject prefix. Default is no prefix')
# Shinken options
group_shinken.add_option('-n', '--notification-object', dest='notification_object', type='choice', default='host',
choices=['host', 'service'], help='Choose between host or service notification.')
    group_shinken.add_option('-c', '--commonmacros', dest='commonmacros',
                             help='Double comma separated shinken macros in this order: "$NOTIFICATIONTYPE$,,$HOSTNAME$,,$HOSTADDRESS$,,$LONGDATETIME$".')
    group_shinken.add_option('-o', '--objectmacros', dest='objectmacros',
                             help='Double comma separated object shinken macros in this order: "$SERVICEDESC$,,$SERVICESTATE$,,$SERVICEOUTPUT$,,$SERVICEDURATION$" for a service object and "$HOSTSTATE$,,$HOSTDURATION$" for a host object')
    group_shinken_details.add_option('-d', '--detailleddesc', dest='detailleddesc',
                                     help='Specify the $_SERVICEDETAILLEDDESC$ custom macro')
    group_shinken_details.add_option('-i', '--impact', dest='impact',
                                     help='Specify the $_SERVICEIMPACT$ custom macro')
    group_shinken_details.add_option('-a', '--action', dest='fixaction',
                                     help='Specify the $_SERVICEFIXACTIONS$ custom macro')
# Shinken WebUI options
group_shinken_webui.add_option('-w', '--webui', dest='webui', default=False,
action='store_true', help='Include link to the problem in Shinken WebUI.')
group_shinken_webui.add_option('-u', '--url', dest='webui_url',
help='WebUI URL as http://my_webui:port/url')
parser.add_option_group(group_debug)
parser.add_option_group(group_general)
parser.add_option_group(group_shinken)
parser.add_option_group(group_shinken_details)
parser.add_option_group(group_shinken_webui)
(opts, args) = parser.parse_args()
setup_logging()
# Check and process arguments
#
# Retrieve and setup shinken macros that make the mail content
if opts.commonmacros == None:
shinken_var = {
'Notification type': os.getenv('NAGIOS_NOTIFICATIONTYPE'),
'Hostname': os.getenv('NAGIOS_HOSTNAME'),
'Host address': os.getenv('NAGIOS_HOSTADDRESS'),
'Date' : os.getenv('NAGIOS_LONGDATETIME')
}
else:
macros = opts.commonmacros.split(',,')
shinken_var = {
'Notification type': macros[0],
'Hostname': macros[1],
'Host address': macros[2],
'Date' : macros[3]
}
if opts.objectmacros == None:
shinken_notification_object_var = {
'service': {
'Service description': os.getenv('NAGIOS_SERVICEDESC'),
'Service state': os.getenv('NAGIOS_SERVICESTATE'),
'Service output': os.getenv('NAGIOS_SERVICEOUTPUT'),
'Service state duration': os.getenv('NAGIOS_SERVICEDURATION')
},
'host': {
'Host state': os.getenv('NAGIOS_HOSTSTATE'),
'Host state duration': os.getenv('NAGIOS_HOSTDURATION')
}
}
else:
macros = opts.objectmacros.split(',,')
if opts.notification_object == 'service':
shinken_notification_object_var = {
'service': {
'Service description': macros[0],
'Service state': macros[1],
'Service output': macros[2],
'Service state duration': macros[3]
},
'host': {
'Host state': '',
'Host state duration': ''
}
}
else:
shinken_notification_object_var = {
'service': {
'Service description': '',
'Service state': '',
'Service output': '',
'Service state duration': ''
},'host': {
'Host state': macros[0],
'Host state duration': macros[1]
}
}
# Load test values
if opts.test:
shinken_notification_object_var, shinken_var = overload_test_variable()
# check required arguments
if opts.receivers == None:
logging.error('You must define at least one mail recipient using -r')
sys.exit(5)
else:
contactemail = opts.receivers
if opts.detailleddesc:
shinken_var['Detailled description'] = opts.detailleddesc.decode(sys.stdin.encoding)
if opts.impact:
shinken_var['Impact'] = opts.impact.decode(sys.stdin.encoding)
if opts.fixaction:
shinken_var['Fix actions'] = opts.fixaction.decode(sys.stdin.encoding)
receivers = make_receivers_list(opts.receivers)
logging.debug('Create mail skeleton')
mail = create_mail(opts.format)
logging.debug('Create %s mail content' % (opts.format))
if opts.format == 'html':
mail = create_html_message(mail)
elif opts.format == 'txt':
mail = create_txt_message(mail)
# Use SMTP or sendmail to send the mail ...
if opts.smtp != 'None':
logging.debug('Connect to %s smtp server' % (opts.smtp))
smtp = smtplib.SMTP(opts.smtp)
logging.debug('Send the mail')
if opts.smtp_starttls:
smtp.starttls()
if opts.smtp_user and opts.smtp_password:
smtp.login(opts.smtp_user, opts.smtp_password)
smtp.sendmail(get_user(), receivers, mail.as_string())
logging.info("Mail sent successfully")
else:
sendmail = '/usr/sbin/sendmail'
logging.debug('Final mail: ' + mail.as_string())
logging.debug('Send the mail')
p = os.popen('%s -t' % sendmail, 'w')
p.write(mail.as_string())
status = p.close()
if status is not None:
logging.error("Sendmail returned %s" % status)
else:
logging.info("Mail sent successfully")
| 20,639
|
Python
|
.py
| 442
| 38.708145
| 440
| 0.617548
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,576
|
service_dependency_mapping.py
|
shinken-solutions_shinken/libexec/service_dependency_mapping.py
|
#!/usr/bin/env python
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This program gets host information from a running arbiter daemon and
service dependency definitions from config pack flat files, then dumps
the service dependencies to a json file that can be loaded by the
hot_dependencies_arbiter module.
The servicedependency files in packs use template host_names that are
matched against the hosts' 'use' directive to apply those
servicedependency definitions to hosts.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken.objects.arbiterlink import ArbiterLink
import os, sys, optparse, shutil
import shinken.daemons.arbiterdaemon
from shinken.http_client import HTTPExceptions
from shinken.log import logger
from shinken.objects.config import Config
# Try to load json (2.5 and higher) or simplejson if failed (python2.4)
try:
import json
except ImportError:
# For old Python version, load simple json
try:
import simplejson as json
except ImportError:
raise SystemExit("Error: you need the json or simplejson module "
"for this script")
sat_types = ['arbiter', 'scheduler', 'poller', 'reactionner',
'receiver', 'broker']
VERSION = '0.2'
class ShinkenAdmin():
def __init__(self):
self.arb = None
self.conf = None
self.addr = 'localhost'
self.port = '7770'
self.arb_name = 'arbiter-master'
def do_connect(self, verbose=False):
'''
Connect to an arbiter daemon
Syntax: connect [host]:[port]
Ex: for Connecting to server, port 7770
> connect server:7770
Ex: connect to localhost, port 7770
> connect
'''
if verbose:
print("Connection to %s:%s" % (self.addr, self.port))
ArbiterLink.use_ssl = False
self.arb = ArbiterLink({'arbiter_name': self.arb_name, 'address': self.addr, 'port': self.port})
self.arb.fill_default()
self.arb.pythonize()
self.arb.update_infos()
if not self.arb.reachable:
sys.exit("Connection to the arbiter got a problem")
print("Connection OK")
def getconf(self, config):
'''
Get the data in the arbiter for a table and some properties
like hosts host_name realm
'''
files = [config]
conf = Config()
conf.read_config_silent = 1
# Get hosts objects
properties = [ 'host_name','use','act_depend_of']
hosts = self.arb.get_objects_properties('hosts', properties)
# Get services dependencies
svcdep_buf = conf.read_config(files)
svc_dep = conf.read_config_buf(svcdep_buf)['servicedependency']
return (hosts, svc_dep)
def load_svc_mapping(self, hosts, svc_dep, verbose=False):
'''
Make tuples mapping service dependencies. Returns a list of tuples;
needs the hosts and service dependencies parameters.
'''
r = []
# Search for host matching "use" template
for dep in svc_dep:
# Get host_name and dependent_host_name field from servicedependency
# config file in packs. Usually values are host's pack template.
parent_host_name = self.split_and_merge(dep['host_name'])
try:
dependent_host_name = self.split_and_merge(dep['dependent_host_name'])
except KeyError:
dependent_host_name = parent_host_name
if verbose:
print("")
print('Service dependency host_name', parent_host_name)
print('Service dependency dependent_host_name', dependent_host_name)
# Make list before process them by splitting comma separated values.
dep['service_description'] = self.split_and_merge(dep['service_description'])
dep['dependent_service_description'] = self.split_and_merge(dep['dependent_service_description'])
# Construct dependencies tuples
# Search in host all hosts that use template host_name
parent_svc_tuples = []
dependent_svc_tuples = []
for parent_svc in dep['service_description']:
parent_svc_tuples += [[ ('service', host[0] + "," + parent_svc) for host in hosts if host_name in host[1] ] for host_name in parent_host_name ]
for dependent_svc in dep['dependent_service_description']:
dependent_svc_tuples += [[ ('service', host[0] + "," + dependent_svc) for host in hosts if host_name in host[1] ] for host_name in dependent_host_name ]
# No need to separate tuples by services here so we merge them
dependent_tuples = self.split_and_merge(dependent_svc_tuples, split=False)
if verbose:
print('Parent service dependencies tuples list', parent_svc_tuples)
print('Dependent service dependencies tuples list', dependent_svc_tuples)
# Process !
for parent_tuples in parent_svc_tuples:
r.append(self.make_all_dep_tuples(hosts, parent_tuples, dependent_tuples))
if verbose:
print("")
print("Result:", r)
return r
def make_all_dep_tuples(self, hosts, parent_tuples=[()], dependent_tuples=[[()]] ):
'''
List nesting: List_by_services : [ List_by_hosts : [ Service_dependency_tuples : ( ) ] ]
'''
res = []
for ptuple in parent_tuples:
parent = { 'host_name' : self.get_dependency_tuple_host_name(ptuple), 'svc_desc' : self.get_dependency_tuple_service_description(ptuple) }
# Dive into dependent services
for dtuple in dependent_tuples:
dependent = { 'host_name' : self.get_dependency_tuple_host_name(dtuple), 'svc_desc' : self.get_dependency_tuple_service_description(dtuple) }
dependent['host_object'] = next( host for host in hosts if host[0] == dependent['host_name'] )
res = self.make_dep_tuple(parent, dependent, ptuple, dtuple, res)
return res
def make_dep_tuple(self, parent, dependent, ptuple, dtuple, res):
'''
Search host dependency and make tuple according to it.
'''
try:
dependent_host_parent = self.get_host_dependency(dependent['host_object'])
if parent['host_name'] == dependent_host_parent:
res = (ptuple, dtuple)
except IndexError:
if parent['host_name'] == dependent['host_name']:
res = (ptuple, dtuple)
return res
def get_host_dependency(self, dependent_host):
'''
Get parent host_name attribute of host.
'''
return dependent_host[2][0][0].host_name
def get_dependency_tuple_host_name(self, tuple):
'''
Just get the host name part of a dependency tuple.
A dependency tuple is: ( 'service', 'host_name,service_description' )
'''
return tuple[1].split(',')[0]
def get_dependency_tuple_service_description(self, tuple):
'''
Just get the service description part of a dependency tuple.
A dependency tuple is: ( 'service', 'host_name,service_description' )
'''
return tuple[1].split(',')[1]
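# Usage sketch with a hypothetical tuple: for t = ('service', 'srv1,Http'),
# get_dependency_tuple_host_name(t) returns 'srv1' and
# get_dependency_tuple_service_description(t) returns 'Http'.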
def split_and_merge(self, lst, split=True):
'''
Split each element of a list on the comma separator and merge the
results into a single flat list, then return it.
'''
res = []
for elt in lst:
if split:
res += elt.split(',')
else:
res += elt
return res
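# For example (hypothetical values): split_and_merge(['a,b', 'c']) returns
# ['a', 'b', 'c'], and split_and_merge([['x'], ['y']], split=False) flattens
# the nested lists to ['x', 'y'].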
def clean_empty_value(self, r):
'''
Empty values come from unused config packs: the service dependency
is then created, but it is empty.
'''
r_cleaned = []
for elt in r:
if elt != []:
r_cleaned.append(elt)
return r_cleaned
def main(self, output_file, config, verbose):
self.do_connect(verbose)
# Get needed conf
hosts, svc_dep = self.getconf(config)
if verbose:
print("Hosts:", hosts)
print("Service Dep:", svc_dep)
# Make the map
r = self.load_svc_mapping(hosts, svc_dep, verbose)
# Clean mapping from empty value
r = self.clean_empty_value(r)
# Write output file
try:
f = open(output_file + '.tmp', 'wb')
buf = json.dumps(r)
f.write(buf)
f.close()
shutil.move(output_file + '.tmp', output_file)
print("File %s written" % output_file)
except IOError as exp:
sys.exit("Error writing the file %s: %s" % (output_file, exp))
if __name__ == "__main__":
parser = optparse.OptionParser(
version="Shinken service hot dependency according to packs (or custom) definition to json mapping %s" % VERSION)
parser.add_option("-o", "--output", dest='output_file',
default='/tmp/shinken_service_dependency:mapping.json',
help="Path of the generated json mapping file.")
parser.add_option('-c', '--config', dest='config', help='Shinken main config file.')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='More verbosity. Used to debug')
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
ShinkenAdmin().main(**vars(opts))
| 10,422
|
Python
|
.py
| 231
| 36.112554
| 168
| 0.625641
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,577
|
link_vmware_host_vm.py
|
shinken-solutions_shinken/libexec/link_vmware_host_vm.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel <h.goebel@goebel-consult.de>
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import shlex
import shutil
import optparse
from subprocess import Popen, PIPE
# Try to load json (2.5 and higher) or simplejson if failed (python2.4)
try:
import json
except ImportError:
# For old Python version, load
# simple json (it can be hard json?! It's 2 functions guy!)
try:
import simplejson as json
except ImportError:
sys.exit("Error: you need the json or simplejson module for this script")
VERSION = '0.1'
# Split and clean the rules from a string to a list
def _split_rules(rules):
return [r.strip() for r in rules.split('|')]
# Apply all rules on the objects names
def _apply_rules(name, rules):
if 'nofqdn' in rules:
name = name.split('.', 1)[0]
if 'lower' in rules:
name = name.lower()
return name
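# For example (hypothetical name): _apply_rules('Server4.MyDomain.com',
# ['nofqdn', 'lower']) returns 'server4'.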
# Get all vmware hosts from a VCenter and return the list
def get_vmware_hosts(check_esx_path, vcenter, user, password):
list_host_cmd = [check_esx_path, '-D', vcenter, '-u', user, '-p', password,
'-l', 'runtime', '-s', 'listhost']
output = Popen(list_host_cmd, stdout=PIPE).communicate()
parts = output[0].split(':')
hsts_raw = parts[1].split('|')[0]
hsts_raw_lst = hsts_raw.split(',')
hosts = []
for hst_raw in hsts_raw_lst:
hst_raw = hst_raw.strip()
# look as server4.mydomain(UP)
elts = hst_raw.split('(')
hst = elts[0]
hosts.append(hst)
return hosts
# For a specific host, ask all VM on it to the VCenter
def get_vm_of_host(check_esx_path, vcenter, host, user, password):
print("Listing host", host)
list_vm_cmd = [check_esx_path, '-D', vcenter, '-H', host,
'-u', user, '-p', password,
'-l', 'runtime', '-s', 'list']
output = Popen(list_vm_cmd, stdout=PIPE).communicate()
parts = output[0].split(':')
# Maybe we got a 'CRITICAL - There are no VMs.' message,
# if so, we bypass this host
if len(parts) < 2:
return None
vms_raw = parts[1].split('|')[0]
vms_raw_lst = vms_raw.split(',')
lst = []
for vm_raw in vms_raw_lst:
vm_raw = vm_raw.strip()
# look as MYVM(UP)
elts = vm_raw.split('(')
vm = elts[0]
lst.append(vm)
return lst
# Create all tuples of the links for the hosts
def create_all_links(res, rules):
r = []
for host in res:
for vm in res[host]:
# First we apply rules on the names
host_name = _apply_rules(host, rules)
vm_name = _apply_rules(vm, rules)
v = (('host', host_name), ('host', vm_name))
r.append(v)
return r
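# The resulting mapping is a list of (parent, child) pairs such as
# (('host', 'esx1'), ('host', 'vm1')) -- hypothetical names for illustration.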
def write_output(r, path):
try:
f = open(path + '.tmp', 'wb')
buf = json.dumps(r)
f.write(buf)
f.close()
shutil.move(path + '.tmp', path)
print("File %s written" % path)
except IOError as exp:
sys.exit("Error writing the file %s: %s" % (path, exp))
def main(check_esx_path, vcenter, user, password, output, rules):
rules = _split_rules(rules)
res = {}
hosts = get_vmware_hosts(check_esx_path, vcenter, user, password)
for host in hosts:
lst = get_vm_of_host(check_esx_path, vcenter, host, user, password)
if lst:
res[host] = lst
r = create_all_links(res, rules)
print("Created %d links" % len(r))
write_output(r, output)
print("Finished!")
# Here we go!
if __name__ == "__main__":
# Manage the options
parser = optparse.OptionParser(
version="Shinken VMware links dumping script version %s" % VERSION)
parser.add_option("-o", "--output",
help="Path of the generated mapping file.")
parser.add_option("-x", "--esx3-path", dest='check_esx_path',
default='/usr/local/nagios/libexec/check_esx3.pl',
help="Full path of the check_esx3.pl script (default: %default)")
parser.add_option("-V", "--vcenter", '--Vcenter',
help="The IP/DNS address of your Vcenter host.")
parser.add_option("-u", "--user",
help="User name to connect to this Vcenter")
parser.add_option("-p", "--password",
help="The password of this user")
parser.add_option('-r', '--rules', default='',
help="Rules of name transformation. Valid names are: "
"`lower`: to lower names, "
"`nofqdn`: keep only the first name (server.mydomain.com -> server). "
"You can use several rules like `lower|nofqdn`")
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
if opts.vcenter is None:
parser.error("missing -V or --Vcenter option for the vcenter IP/DNS address")
if opts.user is None:
parser.error("missing -u or --user option for the vcenter username")
if opts.password is None:
parser.error("missing -p or --password option for the vcenter password")
if not os.path.exists(opts.check_esx_path):
parser.error("the path %s for the check_esx3.pl script is wrong, missing file" % opts.check_esx_path)
if opts.output is None:
parser.error("missing -o or --output option for the output mapping file")
main(**opts.__dict__)
| 6,355
|
Python
|
.py
| 156
| 34.102564
| 109
| 0.625263
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,578
|
link_libvirt_host_vm.py
|
shinken-solutions_shinken/libexec/link_libvirt_host_vm.py
|
#!/usr/bin/env python
# Copyright (C) 2012:
# Thibault Cohen, thibault.cohen@savoirfairelinux.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This program uses libvirt to dump host parent-child relations into a json
file that can be loaded by the hot_dependencies_arbiter module
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import optparse
import signal
import libvirt
class TimeoutException(Exception):
pass
# Try to load json (2.5 and higher) or simplejson if failed (python2.4)
try:
import json
except ImportError:
# For old Python version, load simple json
try:
import simplejson as json
except ImportError:
raise SystemExit("Error: you need the json or simplejson module "
"for this script")
VERSION = '0.1'
def main(uris, output_file, ignore):
def timeout_handler(signum, frame):
raise TimeoutException()
ignored_doms = []
r = []
if ignore:
ignored_doms = ignore.split(",")
for uri in uris.split(","):
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(10)  # trigger the alarm in 10 seconds
try:
conn = libvirt.openReadOnly(uri)
except libvirt.libvirtError as e:
print("Libvirt connection error: `%s'" % e.message.replace("\r", ""))
print("Let's try next URI")
continue
except TimeoutException:
print("Libvirt Request timeout")
print("Let's try next URI")
continue
except Exception as e:
print("Unknown Error: %s" % str(e))
print("Let's try next URI...")
continue
hypervisor = conn.getHostname()
# List all VM (stopped and started)
for dom in [conn.lookupByName(name) for name in conn.listDefinedDomains()]\
+ [conn.lookupByID(vmid) for vmid in conn.listDomainsID()]:
domain_name = dom.name()
if domain_name in ignored_doms:
continue
v = (('host', hypervisor.strip()), ('host', domain_name.strip()))
r.append(v)
r = set(r)
r = list(r)
jsonmappingfile = open(output_file, 'w')
try:
json.dump(r, jsonmappingfile)
finally:
jsonmappingfile.close()
if __name__ == "__main__":
parser = optparse.OptionParser(
version="Shinken libvirt mapping to json mapping %s" % VERSION)
parser.add_option("-o", "--output", dest='output_file',
default='/tmp/libvirt_mapping_file.json',
help="Path of the generated json mapping file.\n"
"Default: /tmp/libvirt_mapping_file.json")
parser.add_option("-u", "--uris", dest='uris',
help="Libvirt URIS separated by comma")
parser.add_option("-i", "--ignore", dest='ignore',
default=None,
help="Ignore hosts (separated by comma)\n"
"Default: None")
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
if opts.uris is None:
print("At least one URI is mandatory")
sys.exit(2)
main(**vars(opts))
| 3,932
|
Python
|
.py
| 102
| 31.117647
| 83
| 0.635958
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,579
|
dump_vmware_hosts.py
|
shinken-solutions_shinken/libexec/dump_vmware_hosts.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel <h.goebel@goebel-consult.de>
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import shlex
import shutil
import optparse
from subprocess import Popen, PIPE
# Try to load json (2.5 and higher) or simplejson if failed (python2.4)
try:
import json
except ImportError:
# For old Python version, load
# simple json (it can be hard json?! It's 2 functions guy!)
try:
import simplejson as json
except ImportError:
sys.exit("Error: you need the json or simplejson module for this script")
VERSION = '0.1'
# Split and clean the rules from a string to a list
def _split_rules(rules):
return [r.strip() for r in rules.split('|')]
# Apply all rules on the objects names
def _apply_rules(name, rules):
if 'nofqdn' in rules:
name = name.split('.', 1)[0]
if 'lower' in rules:
name = name.lower()
return name
# Get all vmware hosts from a VCenter and return the list
def get_vmware_hosts(check_esx_path, vcenter, user, password):
list_host_cmd = [check_esx_path, '-D', vcenter, '-u', user, '-p', password,
'-l', 'runtime', '-s', 'listhost']
output = Popen(list_host_cmd, stdout=PIPE).communicate()
parts = output[0].split(':')
if len(parts) == 1 or '|' not in parts[1]:
print("ERROR: there was an error with the check_esx3.pl command. Please fix it: '%s'" % " ".join(parts))
sys.exit(2)
hsts_raw = parts[1].split('|')[0]
hsts_raw_lst = hsts_raw.split(',')
hosts = []
for hst_raw in hsts_raw_lst:
hst_raw = hst_raw.strip()
# look as server4.mydomain(UP)
elts = hst_raw.split('(')
hst = elts[0]
hosts.append(hst)
return hosts
# For a specific host, ask all VM on it to the VCenter
def get_vm_of_host(check_esx_path, vcenter, host, user, password):
print("Listing host", host)
list_vm_cmd = [check_esx_path, '-D', vcenter, '-H', host,
'-u', user, '-p', password,
'-l', 'runtime', '-s', 'list']
output = Popen(list_vm_cmd, stdout=PIPE).communicate()
parts = output[0].split(':')
# Maybe we got a 'CRITICAL - There are no VMs.' message,
# if so, we bypass this host
if len(parts) < 2:
return None
vms_raw = parts[1].split('|')[0]
vms_raw_lst = vms_raw.split(',')
lst = []
for vm_raw in vms_raw_lst:
vm_raw = vm_raw.strip()
# look as MYVM(UP)
elts = vm_raw.split('(')
vm = elts[0]
lst.append(vm)
return lst
# Create all tuples of the links for the hosts
def create_all_links(res, rules):
r = []
for host in res:
for vm in res[host]:
# First we apply rules on the names
host_name = _apply_rules(host, rules)
vm_name = _apply_rules(vm, rules)
v = (('host', host_name), ('host', vm_name))
r.append(v)
return r
def write_output(elements, path, rules):
try:
f = open(path + '.tmp', 'wb')
for e in elements:
e = e.strip()
e = _apply_rules(e, rules)
f.write('%s\n' % e)
f.close()
shutil.move(path + '.tmp', path)
print("File %s written" % path)
except IOError as exp:
sys.exit("Error writing the file %s: %s" % (path, exp))
def main(check_esx_path, vcenter, user, password, output, rules, vm_only, esx_only):
rules = _split_rules(rules)
res = {}
hosts = get_vmware_hosts(check_esx_path, vcenter, user, password)
if esx_only:
write_output(hosts, output, rules)
print("Created %d hosts" % len(hosts))
sys.exit(0)
vms = []
for host in hosts:
lst = get_vm_of_host(check_esx_path, vcenter, host, user, password)
if lst:
vms.extend(lst)
write_output(vms, output, rules)
print("Created %d VMs" % len(vms))
print("Finished!")
# Here we go!
if __name__ == "__main__":
# Manage the options
parser = optparse.OptionParser(
version="Shinken VMware links dumping script version %s" % VERSION)
parser.add_option("-o", "--output",
help="Path of the generated mapping file.")
parser.add_option("-x", "--esx3-path", dest='check_esx_path',
default='/usr/local/nagios/libexec/check_esx3.pl',
help="Full path of the check_esx3.pl script (default: %default)")
parser.add_option("-V", "--vcenter", '--Vcenter',
help="The IP/DNS address of your Vcenter host.")
parser.add_option("-u", "--user",
help="User name to connect to this Vcenter")
parser.add_option("-p", "--password",
help="The password of this user")
parser.add_option('-r', '--rules', default='',
help="Rules of name transformation. Valid names are: "
"`lower`: to lower names, "
"`nofqdn`: keep only the first name (server.mydomain.com -> server). "
"You can use several rules like `lower|nofqdn`")
parser.add_option('--esx', default='', dest='esx_only', action='store_true',
help="Dump only the ESX hosts")
parser.add_option('--vm', default='', dest='vm_only', action='store_true',
help="Dump only the VM hosts")
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
if not opts.vm_only and not opts.esx_only:
parser.error("missing --esx or --vm option. Please choose one")
if opts.vcenter is None:
parser.error("missing -V or --Vcenter option for the vcenter IP/DNS address")
if opts.user is None:
parser.error("missing -u or --user option for the vcenter username")
if opts.password is None:
parser.error("missing -p or --password option for the vcenter password")
if not os.path.exists(opts.check_esx_path):
parser.error("the path %s for the check_esx3.pl script is wrong, missing file" % opts.check_esx_path)
if opts.output is None:
parser.error("missing -o or --output option for the output mapping file")
main(**opts.__dict__)
| 7,139
|
Python
|
.py
| 171
| 34.783626
| 109
| 0.618169
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,580
|
send_nsca.py
|
shinken-solutions_shinken/libexec/send_nsca.py
|
#!/usr/bin/env python
#
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Hanesse Olivier, olivier.hanesse@gmail.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import optparse
try:
import pynsca
from pynsca import NSCANotifier
except ImportError:
raise SystemExit("Error: you need the pynsca module for this script")
VERSION = '0.1'
def main(hostname, port, encryption, password, delimiter):
notifier = NSCANotifier(hostname, port, encryption, password)
for line in sys.stdin.readlines():
line = line.rstrip()
if not line:
continue
# Do not rebind the notifier variable here: the original code reused
# 'notif' for the split fields, destroying the NSCANotifier object
fields = line.split(delimiter)
if len(fields) == 3:
# only host, rc, output
fields.insert(1, '') # insert an empty service description
# line consists of host, service, rc, output
assert len(fields) == 4
notifier.svc_result(*fields)
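# Example stdin line with the default tab delimiter (hypothetical values):
# "host1<TAB>Load<TAB>0<TAB>OK - load is fine", which is sent as
# svc_result('host1', 'Load', '0', 'OK - load is fine').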
if __name__ == "__main__":
parser = optparse.OptionParser(
version="Python NSCA client version %s" % VERSION)
parser.add_option("-H", "--hostname", default='localhost',
help="NSCA server IP (default: %default)")
parser.add_option("-P", "--port", type="int", default='5667',
help="NSCA server port (default: %default)")
parser.add_option("-e", "--encryption", default='1',
help=("Encryption mode used by NSCA server "
"(default: %default)"))
parser.add_option("-p", "--password", default='helloworld',
help=("Password for encryption, should be the same as "
"NSCA server (default: %default)"))
parser.add_option("-d", "--delimiter", default='\t',
help="Argument delimiter (defaults to the tab-character)")
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
main(opts.hostname, opts.port, opts.encryption, opts.password, opts.delimiter)
| 2,694
|
Python
|
.py
| 61
| 37.327869
| 82
| 0.653949
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,581
|
vmware_discovery_runner.py
|
shinken-solutions_shinken/libexec/discovery/vmware_discovery_runner.py
|
#!/usr/bin/env python
#
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel <h.goebel@goebel-consult.de>
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import optparse
from subprocess import Popen, PIPE
# Try to load json (2.5 and higher) or simplejson if failed (python2.4)
try:
import json
except ImportError:
# For old Python version, load simple json
try:
import simplejson as json
except ImportError:
raise SystemExit("Error: you need the json or simplejson module "
"for this script")
VERSION = '0.1'
def search_for_check_esx3():
"""Search for the check_esx3.pl file."""
me = os.path.abspath(__file__)
my_dir = os.path.dirname(me)
possible_paths = [os.path.join(my_dir, 'check_esx3.pl'),
'/var/lib/nagios/check_esx3.pl',
'/var/lib/plugins/nagios/check_esx3.pl',
'/var/lib/shinken/check_esx3.pl',
'/usr/local/nagios/libexec/check_esx3.pl',
'/usr/local/shinken/libexec/check_esx3.pl',
'c:\\shinken\\libexec\\check_esx3.pl']
for p in possible_paths:
print("Look for", p)
if os.path.exists(p):
print("Found a check_esx3.pl at", p)
return p
return None
def _split_rules(rules):
"""Split and clean the rules from a string to a list"""
return [r.strip() for r in rules.split('|')]
def _apply_rules(name, rules):
"""Apply rules on the objects names"""
if 'nofqdn' in rules:
name = name.split('.', 1)[0]
if 'lower' in rules:
name = name.lower()
return name
def get_vmware_hosts(check_esx_path, vcenter, user, password):
"""
Get a list of all hosts from a VCenter.
"""
list_host_cmd = [check_esx_path, '-D', vcenter, '-u', user, '-p', password,
'-l', 'runtime', '-s', 'listhost']
print("Getting host list")
print(' '.join(list_host_cmd))
p = Popen(list_host_cmd, stdout=PIPE, stderr=PIPE)
output = p.communicate()
print("Exit status", p.returncode)
if p.returncode == 2:
print("Error: check_esx3.pl returned an error:", output)
raise SystemExit(2)
parts = output[0].split(':')
hsts_raw = parts[1].split('|')[0]
hsts_raw_lst = hsts_raw.split(',')
hosts = []
for hst_raw in hsts_raw_lst:
hst_raw = hst_raw.strip()
# look as server4.mydomain(UP)
elts = hst_raw.split('(')
hst = elts[0]
hosts.append(hst)
return hosts
def get_vm_of_host(check_esx_path, vcenter, host, user, password):
"""Get a list of all virtual machines on a specific host."""
print("Listing host", host)
list_vm_cmd = [check_esx_path, '-D', vcenter, '-H', host,
'-u', user, '-p', password,
'-l', 'runtime', '-s', 'list']
print(' '.join(list_vm_cmd))
p = Popen(list_vm_cmd, stdout=PIPE)
output = p.communicate()
print("Exit status", p.returncode)
if p.returncode == 2:
print("Error: check_esx3.pl returned an error:", output)
raise SystemExit(2)
parts = output[0].split(':')
# Maybe we got a 'CRITICAL - There are no VMs.' message,
# if so, we bypass this host
if len(parts) < 2:
return None
vms_raw = parts[1].split('|')[0]
vms_raw_lst = vms_raw.split(',')
lst = []
for vm_raw in vms_raw_lst:
vm_raw = vm_raw.strip()
# look as MYVM(UP)
elts = vm_raw.split('(')
vm = elts[0]
lst.append(vm)
return lst
def print_all_links(res, rules):
"""Create all tuples of the links for the hosts"""
r = []
for host in res:
host_name = _apply_rules(host, rules)
print("%s::esxhostname=%s" % (host_name, host_name))
print("%s::isesxhost=1" % host_name)
for vm in res[host]:
# First we apply rules on the names
vm_name = _apply_rules(vm, rules)
#v = (('host', host_name),('host', vm_name))
print("%s::vmname=%s" % (vm_name, vm_name))
print("%s::isesxvm=1" % vm_name)
print("%s::esxhost=%s" % (vm_name, host_name))
#r.append(v)
return r
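# Sample emitted discovery lines for one ESX host and one VM
# (hypothetical names):
# esx1::esxhostname=esx1
# esx1::isesxhost=1
# vm1::vmname=vm1
# vm1::isesxvm=1
# vm1::esxhost=esx1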
def write_output(r, path):
try:
f = open(path + '.tmp', 'w')
buf = json.dumps(r)
f.write(buf)
f.close()
shutil.move(path + '.tmp', path)
print("File %s written" % path)
except IOError as exp:
raise SystemExit("Error writing the file %s: %s" % (path, exp))
def main(check_esx_path, vcenter, user, password, rules):
rules = _split_rules(rules)
res = {}
hosts = get_vmware_hosts(check_esx_path, vcenter, user, password)
for host in hosts:
lst = get_vm_of_host(check_esx_path, vcenter, host, user, password)
if lst:
res[host] = lst
print_all_links(res, rules)
#write_output(r, output)
print("Finished!")
if __name__ == "__main__":
parser = optparse.OptionParser(
version="Shinken VMware links dumping script version %s" % VERSION)
parser.add_option("-x", "--esx3-path", dest='check_esx_path',
help="Full path of the check_esx3.pl script (default: search in common locations)")
parser.add_option("-V", "--vcenter", '--Vcenter',
help="The IP/DNS address of your Vcenter host.")
parser.add_option("-u", "--user",
help="User name to connect to this Vcenter")
parser.add_option("-p", "--password",
help="The password of this user")
parser.add_option('-r', '--rules', default='',
help="Rules of name transformation. Valid names are: "
"`lower`: to lower names, "
"`nofqdn`: keep only the first name (server.mydomain.com -> server). "
"You can use several rules like `lower|nofqdn`")
opts, args = parser.parse_args()
if args:
parser.error("does not take any positional arguments")
if opts.vcenter is None:
parser.error("missing -V or --Vcenter option for the vcenter IP/DNS address")
if opts.user is None:
parser.error("missing -u or --user option for the vcenter username")
if opts.password is None:
parser.error("missing -p or --password option for the vcenter password")
if opts.check_esx_path is None:
p = search_for_check_esx3()
# Not given, try to find one
if p is None:
parser.error("Sorry, I cannot find check_esx3.pl, please specify "
"it with -x")
#else set it :)
opts.check_esx_path = p
else:
if not os.path.exists(opts.check_esx_path):
parser.error("the path %s for the check_esx3.pl script is wrong, missing file" % opts.check_esx_path)
main(**opts.__dict__)
| 7,756
|
Python
|
.py
| 193
| 32.601036
| 113
| 0.600797
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,582
|
SAN_discover_runner.py
|
shinken-solutions_shinken/libexec/discovery/SAN_discover_runner.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2012:
# Romain, FORLOT, romain.forlot@sydel.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################
#
# This script aims to discover SAN devices in your network.
# Only IBM DS devices are supported for now.
# It uses SMcli to manage the array.
#
###############################################################
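# Example invocation (hypothetical target address):
# ./SAN_discover_runner.py -t 192.168.1.50 -v ibm_ds -n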
from __future__ import absolute_import, division, print_function, unicode_literals
### modules import
import optparse
import re
import subprocess
import socket
import fcntl
import struct
SIOCGIFNETMASK = 0x891b
eth_dev_name='eth0'
##########
# menu #
##########
parser = optparse.OptionParser('%prog [options] -t target')
cmd = { 'ibm_ds' : '/opt/IBM_DS/client/SMcli',
'example' : '/path/to/cmd',
}
# user name and password are defined in /var/lib/net-snmp/snmpd.conf
# default parameters are defined in /usr/local/shinken/etc/resource.cfg
parser.add_option('-t', '--target', dest='target', help='IP to manage. One at a time only')
parser.add_option('-v', '--vendor', dest='vendor', help='specify SAN vendor [ibm_ds|...]')
parser.add_option('-n', '--network', action='store_true', dest='network', help='Take controller IP which are on same network as you are')
parser.add_option('-d', '--debug', action='store_true', dest='debug', help='be more verbose')
opts, args = parser.parse_args()
target = opts.target
vendor = opts.vendor
if opts.debug:
debug = True
else:
debug = False
def debuging(txt):
if debug:
print(txt)
# Always define network: the original code left it unset when -n was
# not given, causing a NameError in the get_controllers_ip call below
network = bool(opts.network)
if not opts.target:
parser.error('Require at least one ip (option -t)')
if not opts.vendor:
parser.error('Require SAN vendor name. [ibm_ds|...]')
SANvendor = { 'ibm_ds' : { 'add_cmd' : [ cmd['ibm_ds'], '-A', target ],
'getprofile_cmd' : [ cmd['ibm_ds'], target, '-c', 'show storagesubsystem profile;' ],
'sanname_regex' : re.compile('PROFILE FOR STORAGE SUBSYSTEM:\s(?P<sanname>\w+)\s+.*$', re.S|re.M),
'controllers_ip_regex' : re.compile('IP address:\s+((?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))', re.S|re.M),
},
'example' : { 'add_cmd' : [ cmd['example'], 'arg1', 'arg2' ],
'getprofile_cmd' : [ cmd['example'], 'arg1', 'arg2' ],
'sanname_regex' : re.compile('(?P<sanname>\w+)', re.S|re.M),
'controllers_ip_regex' : re.compile('IP address:\s+((?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))', re.S|re.M),
},
}
##############
# functions #
##############
### Code snippet to retrieve some system network informations
def get_network_mask(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
netmask = fcntl.ioctl(s, SIOCGIFNETMASK, struct.pack('256s', ifname))[20:24]
return socket.inet_ntoa(netmask)
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def address_in_network(ip, net):
ipaddr = struct.unpack('L', socket.inet_aton(ip))[0]
netaddr, maskstr = net.split('/')
mask = struct.unpack('L', socket.inet_aton(maskstr))[0]
# Compare the masked address with the masked network address; the original
# compared 'ipaddr & network == network', which fails whenever the network
# address has zero bits under the mask
return ipaddr & mask == struct.unpack('L', socket.inet_aton(netaddr))[0] & mask
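# For example (hypothetical addresses):
# address_in_network('192.168.1.42', '192.168.1.0/255.255.255.0') is True,
# while address_in_network('10.0.0.1', '192.168.1.0/255.255.255.0') is False.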
### Register the target array with the SAN management client
def set_ip():
addip = v['add_cmd']
adding = subprocess.Popen(' '.join(addip), stdout=subprocess.PIPE, shell=True)
debuging(adding.communicate()[0])
adding.wait()
def get_SAN_profile():
sanprofile = v['getprofile_cmd']
get_managed_dev = subprocess.Popen(sanprofile,stdout=subprocess.PIPE)
stdoutdata = get_managed_dev.communicate()[0]
debuging(stdoutdata)
return stdoutdata
def get_name(san_profile):
getsanname = v['sanname_regex'].search(san_profile)
try:
sanname = getsanname.group('sanname')
except AttributeError:
print('Cannot retrieve SAN name')
sanname = None
return sanname
def get_controllers_ip(san_profile, keep_on_same_network=False):
ctrl = v['controllers_ip_regex'].findall(san_profile)
debuging('Find ip : %s' % ctrl)
if keep_on_same_network:
my_ip = get_ip_address(eth_dev_name)
my_netmask = get_network_mask(eth_dev_name)
my_subnet_unpacked = struct.unpack('L', socket.inet_aton(my_ip))[0] & struct.unpack('L', socket.inet_aton(my_netmask))[0]
my_subnet = socket.inet_ntoa(struct.pack('L', my_subnet_unpacked))
n = [ my_subnet, my_netmask ]
# Filter with a list comprehension: popping from the list while
# iterating over it skips elements
ctrl = [ip for ip in ctrl if address_in_network(ip, '/'.join(n))]
return ctrl
### Display the discovered SAN name and its controller IPs on the standard output
def get_discovery_output(sanname, ctrlIP):
i = 1
for ip in ctrlIP:
print('%s::_ctrl%d=%s'%(sanname, i, ip))
i += 1
###############
# main #
###############
v = SANvendor[vendor]
# Add ip in client software managing SAN device
set_ip()
# Get SAN profile from client
profile = get_SAN_profile()
# Get SAN device name
sanname = get_name(profile)
# Get List of controllers IP
ctrl_ip = get_controllers_ip(profile, network)
get_discovery_output(sanname,ctrl_ip)
| 6,309
|
Python
|
.py
| 149
| 37.52349
| 258
| 0.624144
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,583
|
nmap_discovery_runner.py
|
shinken-solutions_shinken/libexec/discovery/nmap_discovery_runner.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# sudo nmap 192.168.0.1 --min-rate 1000 --max-retries 0 -sU -sT -T4 -O --traceroute -oX toto.xml
from __future__ import absolute_import, division, print_function, unicode_literals
import optparse
import sys
import os
import tempfile
import subprocess
try:
# xml.etree.ElementTree is new in Python 2.5
from xml.etree.ElementTree import ElementTree
except ImportError:
sys.exit("This script needs the Python ElementTree module. Please install it")
VERSION = '0.1.1'
# Fred : command launched depending on os detection
if os.name != 'nt':
DEFAULT_CMD = "sudo nmap %s -sU -sS --min-rate %d --max-retries %d -T4 -O -oX %s"
else:
DEFAULT_CMD = "nmap %s -sU -sS --min-rate %d --max-retries %d -T4 -O -oX %s"
parser = optparse.OptionParser(
"%prog [options] -t nmap scanning targets",
version="%prog " + VERSION)
parser.add_option('-t', '--targets', dest="targets",
help="NMap scanning targets.")
parser.add_option('-v', '--verbose', dest="verbose", action='store_true',
help="Verbose output.")
parser.add_option('--min-rate', dest="min_rate",
help="Min rate option for nmap (number of parallel packets to launch; default: 1000)")
parser.add_option('--max-retries', dest="max_retries",
help="Max retries option for nmap (number of packet send retries; default: 0, no retry)")
parser.add_option('-s', '--simulate', dest="simulate",
help="Simulate a launch by reading an nmap XML output instead of launching a new one.")
targets = []
opts, args = parser.parse_args()
if not opts.simulate:
simulate = None
else:
simulate = opts.simulate
if not opts.simulate and not opts.targets:
parser.error("Requires at least one nmap target for scanning (option -t/--targets)")
elif opts.targets:
targets.append(opts.targets)
min_rate = 1000
if opts.min_rate:
min_rate = int(opts.min_rate)
max_retries = 0
if opts.max_retries:
max_retries = int(opts.max_retries)
if not opts.verbose:
verbose = False
else:
verbose = True
if args:
targets.extend(args)
print("Got our target", targets)
def debug(txt):
if verbose:
print(txt)
# Says if a host is up or not
def is_up(h):
status = h.find('status')
state = status.attrib['state']
return state == 'up'
class DetectedHost:
def __init__(self):
self.ip = ''
self.mac_vendor = ''
self.host_name = ''
self.os_possibilities = []
self.os = ('', '')
self.open_ports = []
self.parent = ''
# Keep the first name we've got
def set_host_name(self, name):
if self.host_name == '':
self.host_name = name
# Get an identifier for this host
def get_name(self):
if self.host_name != '':
return self.host_name
if self.ip != '':
return self.ip
return None
# Check whether this host is a VMWare ESX
def is_vmware_esx(self):
# If it's not a virtual machine bail out
if self.mac_vendor != 'VMware':
return False
# If all these ports are open, we are quite sure it's
# a VMWare host
needed_ports = [22, 80, 443, 902, 903, 5989]
for p in needed_ports:
if p not in self.open_ports:
# find one missing port, not a VMWare host
return False
# Ok all ports are found, we are an ESX :)
return True
# Says if we are a virtual machine or not
def is_vmware_vm(self):
# special case: the esx host itself
if self.is_vmware_esx():
return False
# Else, look at the mac vendor
return self.mac_vendor == 'VMware'
# Fill the different os possibilities
def add_os_possibility(self, os, osgen, accuracy, os_type, vendor):
self.os_possibilities.append((os, osgen, accuracy, os_type, vendor))
# We search if our potential parent is present in the
# other detected hosts. If so, set it as my parent
def look_for_parent(self, all_hosts):
self.parents = []
parent = self.parent
debug("Look for my parent %s -> %s" % (self.get_name(), parent))
# Ok, we didn't find any parent
# we bail out
if parent == '':
return
for h in all_hosts:
debug("Is it you? %s" % h.get_name())
if h.get_name() == parent:
debug("Hooray, we found our parent %s -> %s" % (self.get_name(), h.get_name()))
self.parents.append(h.get_name())
# Look at our OSes and see which one is the best
def compute_os(self):
self.os_name = 'Unknown OS'
self.os_version = 'Unknown Version'
self.os_type = 'Unknown Type'
self.os_vendor = 'Unknown Vendor'
# Bailout if we got no os :(
if len(self.os_possibilities) == 0:
return
max_accuracy = 0
for (os, osgen, accuracy, os_type, vendor) in self.os_possibilities:
if accuracy > max_accuracy:
max_accuracy = accuracy
# now get the entry with the max value, the first one
for (os, osgen, accuracy, os_type, vendor) in self.os_possibilities:
print("Can be", (os, osgen, accuracy, os_type, vendor))
if accuracy == max_accuracy:
self.os = (os, osgen, os_type, vendor)
break
print("Will dump", self.os)
# Ok, unknown os... not good
if self.os == ('', '', '', ''):
return
self.os_name = self.os[0].lower()
self.os_version = self.os[1].lower()
self.os_type = self.os[2].lower()
self.os_vendor = self.os[3].lower()
# Return the string of the 'discovery' items
def get_discovery_output(self):
r = []
r.append('%s::isup=1' % self.get_name())
r.append(self.get_discovery_system())
r.append(self.get_discovery_macvendor())
op = self.get_discovery_ports()
if op != '':
r.append(op)
par = self.get_discovery_parents()
if par != '':
r.append(par)
fqdn = self.get_discovery_fqdn()
if fqdn != '':
r.append(fqdn)
ip = self.get_discovery_ip()
if ip != '':
r.append(ip)
return r
# for system output
def get_discovery_system(self):
r = '%s::os=%s' % (self.get_name(), self.os_name) + '\n'
r += '%s::osversion=%s' % (self.get_name(), self.os_version) + '\n'
r += '%s::ostype=%s' % (self.get_name(), self.os_type) + '\n'
r += '%s::osvendor=%s' % (self.get_name(), self.os_vendor)
return r
def get_discovery_macvendor(self):
return '%s::macvendor=%s' % (self.get_name(), self.mac_vendor)
def get_discovery_ports(self):
if self.open_ports == []:
return ''
return '%s::openports=%s' % (self.get_name(), ','.join([str(p) for p in self.open_ports]))
def get_discovery_parents(self):
if self.parents == []:
return ''
return '%s::parents=%s' % (self.get_name(), ','.join(self.parents))
def get_discovery_fqdn(self):
if self.host_name == '':
return ''
return '%s::fqdn=%s' % (self.get_name(), self.host_name)
def get_discovery_ip(self):
if self.ip == '':
return ''
return '%s::ip=%s' % (self.get_name(), self.ip)
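# Sample discovery output for a single host, as produced by
# get_discovery_output (hypothetical values):
# server1::isup=1
# server1::os=linux
# server1::osversion=2.6.x
# server1::ostype=general purpose
# server1::osvendor=linux
# server1::macvendor=vmware
# server1::openports=22,80
# server1::parents=gw1
# server1::fqdn=server1
# server1::ip=192.168.0.10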
if not simulate:
(_, tmppath) = tempfile.mkstemp()
print("propose a tmppath", tmppath)
# Fred : command launched depending on os detection
# cmd = "nmap %s -sU -sT --min-rate %d --max-retries %d -T4 -O -oX %s" % (' '.join(targets), min_rate, max_retries, tmppath)
cmd = DEFAULT_CMD % (' '.join(targets), min_rate, max_retries, tmppath)
print("Launching command,", cmd)
try:
nmap_process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=False, shell=True)
except OSError as exp:
print("Debug: Error in launching command:", cmd, exp)
sys.exit(2)
print("Try to communicate")
(stdoutdata, stderrdata) = nmap_process.communicate()
if nmap_process.returncode != 0:
print("Error: the nmap return an error: '%s'" % stderrdata)
sys.exit(2)
# Fred : no need to print the nmap result caught here
# print("Got it", (stdoutdata, stderrdata))
print("Got it !")
xml_input = tmppath
else: # simulate mode
xml_input = simulate
tree = ElementTree()
try:
tree.parse(xml_input)
except IOError as exp:
print("Error opening file '%s': %s" % (xml_input, exp))
sys.exit(2)
hosts = tree.findall('host')
debug("Number of hosts: %d" % len(hosts))
all_hosts = []
for h in hosts:
# Bypass non up hosts
if not is_up(h):
continue
dh = DetectedHost()
# Now we get the ipaddr and the mac vendor
# for future VMWare matching
#print(h.__dict__)
addrs = h.findall('address')
for addr in addrs:
#print("Address", addr.__dict__)
addrtype = addr.attrib['addrtype']
if addrtype == 'ipv4':
dh.ip = addr.attrib['addr']
if addrtype == "mac":
if 'vendor' in addr.attrib:
dh.mac_vendor = addr.attrib['vendor'].lower()
# Now we've got the hostnames
host_names = h.findall('hostnames')
for h_name in host_names:
h_names = h_name.findall('hostname')
for h_n in h_names:
#print('hname', h_n.__dict__)
#print('Host name', h_n.attrib['name'])
dh.set_host_name(h_n.attrib['name'])
# Now handle the traceroute
traces = h.findall('trace')
for trace in traces:
#print(trace.__dict__)
hops = trace.findall('hop')
#print("Number of hops", len(hops))
distance = len(hops)
if distance >= 2:
for hop in hops:
ttl = int(hop.attrib['ttl'])
# We search for the direct parent
if ttl == distance-1:
#print(ttl)
#print("Super hop", hop.__dict__)
# Get the host name if possible, if not
# take the IP
if 'host' in hop.attrib:
dh.parent = hop.attrib['host']
else:
dh.parent = hop.attrib['ipaddr']
# Now the OS detection
ios = h.find('os')
# Fred : if no OS detected by nmap (localhost on Windows does not detect OS !)
if ios is not None:
#print(os.__dict__)
cls = ios.findall('osclass')
# if no osclass found, try below the osmatch element (nmap recent versions)
if len(cls) == 0:
_os = ios.find('osmatch')
cls = _os.findall('osclass') if _os is not None else []
for c in cls:
#print("Class", c.__dict__)
family = c.attrib['osfamily']
# cast to int: compute_os compares accuracies numerically, and
# comparing the raw strings would rank "9" above "10"
accuracy = int(c.attrib['accuracy'])
osgen = c.attrib.get('osgen', '')
os_type = c.attrib.get('type', '')
vendor = c.attrib.get('vendor', '')
#print("Type:", family, osgen, accuracy)
dh.add_os_possibility(family, osgen, accuracy, os_type, vendor)
# Ok we can compute our OS now :)
dh.compute_os()
else:
debug(" No OS detected !")
family = 'Unknown'
accuracy = 0  # keep accuracy numeric so compute_os comparisons stay sane
osgen = 'Unknown'
os_type = 'Unknown'
vendor = 'Unknown'
#print("Type:", family, osgen, accuracy)
dh.add_os_possibility(family, osgen, accuracy, os_type, vendor)
dh.compute_os()
# Now the ports :)
allports = h.findall('ports')
for ap in allports:
ports = ap.findall('port')
for p in ports:
#print("Port", p.__dict__)
p_id = p.attrib['portid']
s = p.find('state')
#print(s.__dict__)
state = s.attrib['state']
if state == 'open':
dh.open_ports.append(int(p_id))
#print(dh.__dict__)
all_hosts.append(dh)
#print("\n\n")
for h in all_hosts:
name = h.get_name()
if not name:
continue
debug("Doing name %s" % name)
#path = os.path.join(output_dir, name+'.discover')
#print("Want path", path)
#f = open(path, 'wb')
#cPickle.dump(h, f)
#f.close()
debug(str(h.__dict__))
# And generate the configuration too
h.look_for_parent(all_hosts)
#c.fill_system_conf()
#c.fill_ports_services()
#c.fill_system_services()
#c.write_host_configuration()
#print("Host config", c.get_cfg_for_host())
#c.write_services_configuration()
#print("Service config")
#print(c.get_cfg_for_services())
#print(c.__dict__)
print('\n'.join(h.get_discovery_output()))
#print("\n\n\n")
# Try to remove the temppath
try:
os.unlink(tmppath)
except Exception:
pass
| 13,741
|
Python
|
.py
| 367
| 29.904632
| 128
| 0.588213
|
shinken-solutions/shinken
| 1,133
| 337
| 226
|
AGPL-3.0
|
9/5/2024, 5:09:53 PM (Europe/Amsterdam)
|
6,584
|
fs_discovery_runner.py
|
shinken-solutions_shinken/libexec/discovery/fs_discovery_runner.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2012:
# Camille, VACQUIE
# Romain, FORLOT, romain.forlot@sydel.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################
#
# First of all, the fs_discovery_runner.py script gets the list
# of file systems back from the nmap device list using the SNMP
# protocol. The OID used by SNMP to retrieve the data is specific
# to each OS type.
# It then rewrites the listed file system names and displays
# them on the standard output.
# For example: / will be translated into _root and /var will be
# translated into _var
#
# For SNMPv3 we created a default user using the command :
# net-snmp-config --create-snmpv3-user -a "mypassword" myuser
# Here the user name is myuser and his password is mypassword
#
###############################################################
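# Example output in macros mode, for a host mounting / and /var
# (hypothetical host name):
# myhost::_fs=_root,_var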
from __future__ import absolute_import, division, print_function, unicode_literals
### modules import
import netsnmp
import optparse
import re
##########
# menu #
##########
parser = optparse.OptionParser('%prog [options] -H HOSTADDRESS -C SNMPCOMMUNITYREAD -O ARG1 -V SNMPVERSION -l SNMPSECNAME -L SNMPSECLEVEL -p SNMPAUTHPROTO -x SNMPAUTHPASS')
# user name and password are defined in /var/lib/net-snmp/snmpd.conf
# default parameters are defined in /usr/local/shinken/etc/resource.cfg
parser.add_option("-H", "--hostname", dest="hostname", help="Hostname to scan")
parser.add_option("-m", "--mode", dest="mode", help="Discovery mode : [ macros | tags ]. Macros will creates host macros and tags will add tags for each fs detected.")
parser.add_option("-C", "--community", dest="community", help="Community to scan (default:public)")
parser.add_option("-O", "--os", dest="os", help="OS from scanned host")
parser.add_option("-V", "--version", dest="version", type=int, help="Version number for SNMP (1, 2 or 3; default:1)")
parser.add_option("-l", "--login", dest="snmpv3_user", help="User name for snmpv3(default:admin)")
parser.add_option("-L", "--level", dest="snmpv3_level", help="Security level for snmpv3(default:authNoPriv)")
parser.add_option("-p", "--authproto", dest="snmpv3_auth", help="Authentication protocol for snmpv3(default:MD5)")
parser.add_option("-x", "--authpass", dest="snmpv3_auth_pass", help="Authentication password for snmpv3(default:mypassword)")
opts, args = parser.parse_args()
hostname = opts.hostname
os = opts.os
mode = { 'macros' : '_fs',
'tags' : 'fs',
}
if not opts.hostname:
parser.error("Requires one host and its os to scan (option -H)")
if not opts.mode:
parser.error("Requires mode. Please choose between macros or tags")
if not opts.os:
parser.error("Requires the host OS (option -O)")
if opts.community:
community = opts.community
else:
community = 'public'
if opts.version:
version = opts.version
else:
version = 1
if opts.snmpv3_user:
snmpv3_user = opts.snmpv3_user
else:
snmpv3_user = 'myuser'
if opts.snmpv3_level:
snmpv3_level = opts.snmpv3_level
else:
snmpv3_level = 'authNoPriv'
if opts.snmpv3_auth:
snmpv3_auth = opts.snmpv3_auth
else:
snmpv3_auth = 'MD5'
if opts.snmpv3_auth_pass:
snmpv3_auth_pass = opts.snmpv3_auth_pass
else:
snmpv3_auth_pass = 'mypassword'
oid_aix_linux = ".1.3.6.1.2.1.25.3.8.1.2"  # hrFSMountPoint
oid_hpux = ".1.3.6.1.4.1.11.2.3.1.2.2.1.10"  # fileSystemName
##############
# functions #
##############
### Search for files systems presents on the target
def get_fs_discovery(oid):
hrFSMountPoint = netsnmp.Varbind(oid)
result = netsnmp.snmpwalk(hrFSMountPoint, Version=version, DestHost=hostname, Community=community, SecName=snmpv3_user, SecLevel=snmpv3_level, AuthProto=snmpv3_auth, AuthPass=snmpv3_auth_pass)
#PrivProto=snmpv3_priv, PrivPass=snmpv3_priv_pass
fsList = list(result)
return fsList
### Convert the listed file system names and display them on the standard output
def get_fs_discovery_output(liste):
    fsTbl = []
    for element in liste:
        elt = re.sub(r'\W', '_', element)  # convert non-word characters (e.g. /) to _
        if elt == '_':  # if _ is the only detected character
            elt = re.sub(r'^_$', '_root', elt)  # then replace _ with _root
        fsTbl.append(elt)
    print("%s::%s=%s" % (hostname, mode[opts.mode], ','.join(fsTbl)))  # display like in the nmap model
###############
# execution #
###############
scan = []
if os in ('aix', 'linux'):
    scan = get_fs_discovery(oid_aix_linux)
elif os == 'hp-ux':
    scan = get_fs_discovery(oid_hpux)
get_fs_discovery_output(scan)
| 5,251 | Python | .py | 124 | 39.919355 | 196 | 0.69445 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,585 | cluster_discovery_runner.py | shinken-solutions_shinken/libexec/discovery/cluster_discovery_runner.py |
#!/usr/bin/env python
# Copyright (C) 2009-2012:
# Camille, VACQUIE
# Romain, FORLOT, romain.forlot@sydel.fr
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################
#
# The cluster_discovery_runner.py script simply tries to get information
# from the HACMP MIB and falls back on the Safekit MIB. SNMP must be
# enabled for both products. For Safekit, add a proxy into the snmpd
# configuration to include its MIB into the net-snmp master agent.
#
# For SNMPv3 we created a default user using the command:
# net-snmp-config --create-snmpv3-user -a "mypassword" myuser
# Here the user name is myuser and his password is mypassword
#
###############################################################
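# Illustrative output (hostname and module names assumed):
#   myhost::hacmp=1 and myhost::_hacmp_modules=cluster1 when a cluster is found,
#   myhost::safekit=0 when no cluster is detected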
from __future__ import absolute_import, division, print_function, unicode_literals
### modules import
import netsnmp
import optparse
import re
##########
# menu #
##########
parser = optparse.OptionParser('%prog [options] -H HOSTADDRESS -C SNMPCOMMUNITYREAD -O ARG1 -V SNMPVERSION -l SNMPSECNAME -L SNMPSECLEVEL -p SNMPAUTHPROTO -x SNMPAUTHPASS')
# user name and password are defined in /var/lib/net-snmp/snmpd.conf
parser.add_option("-H", "--hostname", dest="hostname", help="Hostname to scan")
parser.add_option("-C", "--community", dest="community", help="Community to scan (default: public)")
parser.add_option("-O", "--os", dest="os", help="OS of the scanned host")
parser.add_option("-V", "--version", dest="version", type=int, help="Version number for SNMP (1, 2 or 3; default: 1)")
parser.add_option("-l", "--login", dest="snmpv3_user", help="User name for snmpv3 (default: myuser)")
parser.add_option("-L", "--level", dest="snmpv3_level", help="Security level for snmpv3 (default: authNoPriv)")
parser.add_option("-p", "--authproto", dest="snmpv3_auth", help="Authentication protocol for snmpv3 (default: MD5)")
parser.add_option("-x", "--authpass", dest="snmpv3_auth_pass", help="Authentication password for snmpv3 (default: mypassword)")
opts, args = parser.parse_args()
hostname = opts.hostname
os = opts.os
clSolution_by_os = {'aix': 'hacmp',
                    'linux': 'safekit',
                    }
if not opts.hostname:
    parser.error("Requires one host to scan (option -H)")
if not opts.os:
    parser.error("Requires the host OS (option -O)")
community = opts.community or 'public'
version = opts.version or 1
snmpv3_user = opts.snmpv3_user or 'myuser'
snmpv3_level = opts.snmpv3_level or 'authNoPriv'
snmpv3_auth = opts.snmpv3_auth or 'MD5'
snmpv3_auth_pass = opts.snmpv3_auth_pass or 'mypassword'
oid_safekit_moduleName = ".1.3.6.1.4.1.107.175.10.1.1.2"
oid_hacmp_clusterName = ".1.3.6.1.4.1.2.3.1.2.1.5.1.2"
##############
# functions #
##############
### Search for a cluster solution, either safekit or hacmp, present on the target
def get_cluster_discovery(oid):
    name = netsnmp.Varbind(oid)
    result = netsnmp.snmpwalk(name, Version=version, DestHost=hostname, Community=community, SecName=snmpv3_user, SecLevel=snmpv3_level, AuthProto=snmpv3_auth, AuthPass=snmpv3_auth_pass)
    nameList = list(result)
    return nameList
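# e.g. the walk returns the HACMP cluster names or the Safekit module names
# as a list of strings (illustrative; the content depends on the target MIB)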
### Format the modules list and display it on the standard output
def get_cluster_discovery_output(names_list):
    names = []
    if names_list:
        for elt in names_list:
            names.append(elt)
        print("%s::%s=1" % (hostname, clSolution))  # To add a tag
        print("%s::_%s_modules=%s" % (hostname, clSolution, ','.join(names)))  # Host macros from the cluster modules
    else:
        print("%s::%s=0" % (hostname, clSolution))  # No cluster detected
###############
# execution #
###############
scan = []
clSolution = clSolution_by_os[os]
scan = get_cluster_discovery(oid_hacmp_clusterName)
if not scan:
scan = get_cluster_discovery(oid_safekit_moduleName)
clSolution = 'safekit'
get_cluster_discovery_output(scan)
| 4,717 | Python | .py | 115 | 38.495652 | 186 | 0.697268 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,586 | windows_shares_discovery_runner.py | shinken-solutions_shinken/libexec/discovery/windows_shares_discovery_runner.py |
#!/usr/bin/env python
#
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import optparse
import subprocess
VERSION = '1.0'
def p_debug(s):
if debug:
print("DEBUG:", s)
def get_elements(line):
    elts = line.split('|', 2)
    if len(elts) != 3:  # we need exactly type|sharename|description
        p_debug("Not a good line: %r" % line)
        return None
    return elts
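# With smbclient --grepable, lines look like "Disk|public|Public stuff" or
# "Printer|hp4|Main printer" (illustrative names); get_elements splits them
# into [type, sharename, description]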
parser = optparse.OptionParser(
    "%prog [options] -H HOSTADDRESS -u DOMAIN\\USER -p PASSWORD",
    version="%prog " + VERSION)
parser.add_option('-H', "--hostname",
help="Hostname to scan")
parser.add_option('-u', '--user', default='guest',
help="Username to scan with. Default to '%default'")
parser.add_option('-p', '--password', default='',
help="Password of your user. Default to ''")
parser.add_option('-d', "--debug", action='store_true',
help="Debug mode")
opts, args = parser.parse_args()
if not opts.hostname:
parser.error("Requires one host to scan (option -H)")
hostname = opts.hostname
user = opts.user
debug = opts.debug
password = opts.password
cred = '%s%%%s' % (user, password)
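# e.g. cred becomes 'MYDOMAIN\\guest%secret' (illustrative values); smbclient
# accepts the user%password form for its --user option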
cmd = ["smbclient", '--user', cred, '--grepable', '-L', hostname]
p_debug("Launching command %s" % cmd)
try:
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
except OSError as exp:
print("Error in launching command:", cmd, exp)
raise SystemExit(2)
p_debug("Try to communicate with the subprocess")
(stdoutdata, stderrdata) = process.communicate()
if process.returncode != 0:
print("Error: the share scanner return an error: '%s'" % (stderrdata + stdoutdata))
raise SystemExit(2)
disks = []
printers = []
p_debug("Good return: " + stdoutdata)
for line in stdoutdata.splitlines():
elts = get_elements(line.strip())
# Skip strange lines
if not elts:
continue
typ, sharename, desc = elts
if typ == 'Printer':
printers.append(sharename)
if typ == 'Disk' and not sharename.endswith('$'):
disks.append(sharename)
if len(disks) > 0:
print("%s::shares_detected=1" % hostname)
print("%s::_shares=%s" % (hostname, ','.join(disks)))
if len(printers) > 0:
print("%s::printers_detected=1" % hostname)
print("%s::_printers=%s" % (hostname, ','.join(printers)))
| 3,193 | Python | .py | 87 | 32.781609 | 87 | 0.67834 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,587 | specific-cgi-parameters.rst | shinken-solutions_shinken/doc/source/11_integration/specific-cgi-parameters.rst |
.. _integration/specific-cgi-parameters:
=============
Nagios CGI UI
=============
The parameters below are deprecated and are **only** useful if you use the old Nagios CGI UI.
.. _integration/specific-cgi-parameters#object_cache_file:
Object Cache File
==================
Format:
::
object_cache_file=<file_name>
Example:
::
object_cache_file=/var/lib/shinken/objects.cache
This directive is used to specify a file in which a cached copy of :ref:`Object Configuration Overview <configuration/configobject>` should be stored. The cache file is (re)created every time Shinken is (re)started.
.. _integration/specific-cgi-parameters#temp_file:
Temp File
==========
======== ==========================================
Format: temp_file=<file_name>
Example: temp_file=/var/lib/shinken/nagios.tmp
======== ==========================================
This is a temporary file that Nagios periodically creates to use when updating comment data, status data, etc. The file is deleted when it is no longer needed.
.. _integration/specific-cgi-parameters#temp_path:
Temp Path
==========
======== ====================
Format: temp_path=<dir_name>
Example: temp_path=/tmp
======== ====================
This is a directory that Nagios can use as scratch space for creating temporary files used during the monitoring process. You should run **tmpwatch**, or a similar utility, on this directory occasionally to delete files older than 24 hours.
.. _integration/specific-cgi-parameters#status_file:
Status File
============
======== ============================================
Format: status_file=<file_name>
Example: status_file=/var/lib/shinken/status.dat
======== ============================================
This is the file that Nagios uses to store the current status, comment, and downtime information. This file is used by the CGIs so that current monitoring status can be reported via a web interface. The CGIs must have read access to this file in order to function properly. This file is deleted every time Nagios stops and recreated when it starts.
Status File Update Interval
============================
======== ================================
Format: status_update_interval=<seconds>
Example: status_update_interval=15
======== ================================
This setting determines how often (in seconds) that Nagios will update status data in the :ref:`Status File <integration/specific-cgi-parameters#status_file>`. The minimum update interval is 1 second.
| 2,572 | Python | .cgi | 46 | 52.652174 | 348 | 0.636912 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,588 | old-cgi-and-vshell.rst | shinken-solutions_shinken/doc/source/11_integration/old-cgi-and-vshell.rst |
.. _integration/old-cgi-and-vshell:
===================================
Use Shinken with Old CGI and VShell
===================================
For the Old CGI & VShell
=========================
The old CGI and VShell use the old flat-file export. Shinken can export to this file, but beware: this method is very, very slow!
.. warning:: You should migrate to a Livestatus enabled web interface.
Declare the status_dat module
==============================
This module exports all status into a flat file in the old Nagios format. It is only suitable for small/medium environments because the file is very slow to parse. It can be used by the Nagios CGI. It also exports the objects.cache file for this interface.
Edit your /etc/shinken/modules/status-dat.cfg file:
::
define module{
module_name Status-Dat
module_type status_dat
status_file /var/lib/shinken/status.data
object_cache_file /var/lib/shinken/objects.cache
status_update_interval 15 ; update status.dat every 15s
}
Enable it
==========
Edit your /etc/shinken/brokers/broker-master.cfg file and find the object Broker:
::
define broker{
broker_name broker-1
[...]
modules Simple-log,Status-Dat
}
| 1,288 | Python | .cgi | 29 | 40 | 220 | 0.627755 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,589 | test_python_crash_with_recursive_bp_rules.py | shinken-solutions_shinken/test/test_python_crash_with_recursive_bp_rules.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_python_crash_with_recursive_bp_rules.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print("Get the hosts and services")
now = time.time()
host1 = self.sched.hosts.find_by_name("ht34-peret-2-dif0")
host2 = self.sched.hosts.find_by_name("ht34-peret-2-dif1")
self.scheduler_loop(5, [[host1, 2, 'DOWN | value1=1 value2=2'], [host2, 2, 'DOWN | rtt=10']])
if __name__ == '__main__':
unittest.main()
| 1,634 | Python | .pyt | 39 | 38.307692 | 101 | 0.713115 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,590 | shinken_python_crash_with_recursive_bp_rules.cfg | shinken-solutions_shinken/test/etc/shinken_python_crash_with_recursive_bp_rules.cfg |
accept_passive_host_checks=1
accept_passive_service_checks=1
additional_freshness_latency=15
admin_email=shinken@localhost
admin_pager=shinken@localhost
auto_reschedule_checks=0
auto_rescheduling_interval=30
auto_rescheduling_window=180
cached_host_check_horizon=15
cached_service_check_horizon=15
cfg_file=standard/hosts.cfg
cfg_file=standard/services.cfg
cfg_file=standard/contacts.cfg
cfg_file=python_crash_with_recursive_bp_rules/commands.cfg
cfg_file=python_crash_with_recursive_bp_rules/test_specific.cfg
cfg_file=standard/timeperiods.cfg
cfg_file=standard/hostgroups.cfg
cfg_file=standard/servicegroups.cfg
cfg_file=standard/shinken-specific.cfg
check_external_commands=1
check_for_orphaned_hosts=1
check_for_orphaned_services=1
check_host_freshness=0
check_result_path=var/spool/checkresults
check_result_reaper_frequency=10
check_service_freshness=1
command_check_interval=-1
command_file=var/shinken.cmd
daemon_dumps_core=0
date_format=iso8601
debug_file=var/shinken.debug
debug_level=112
debug_verbosity=1
enable_embedded_perl=0
enable_environment_macros=1
enable_event_handlers=1
enable_flap_detection=0
enable_notifications=1
enable_predictive_host_dependency_checks=1
enable_predictive_service_dependency_checks=1
event_broker_options=-1
event_handler_timeout=30
execute_host_checks=1
execute_service_checks=1
external_command_buffer_slots=4096
high_host_flap_threshold=20
high_service_flap_threshold=20
host_check_timeout=30
host_freshness_check_interval=60
host_inter_check_delay_method=s
illegal_macro_output_chars=`~\$&|'"<>
illegal_object_name_chars=`~!\$%^&*|'"<>?,()=
interval_length=60
lock_file=var/shinken.pid
log_archive_path=var/archives
log_event_handlers=1
log_external_commands=1
log_file=var/shinken.log
log_host_retries=1
log_initial_states=0
log_notifications=1
log_passive_checks=1
log_rotation_method=d
log_service_retries=1
low_host_flap_threshold=5
low_service_flap_threshold=5
max_check_result_file_age=3600
max_check_result_reaper_time=30
max_concurrent_checks=0
max_debug_file_size=1000000
max_host_check_spread=30
max_service_check_spread=30
shinken_group=shinken
shinken_user=shinken
notification_timeout=30
object_cache_file=var/objects.cache
obsess_over_hosts=0
obsess_over_services=0
ocsp_timeout=5
#p1_file=/tmp/test_shinken/plugins/p1.pl
p1_file=/usr/local/shinken/bin/p1.pl
passive_host_checks_are_soft=0
perfdata_timeout=5
precached_object_file=var/objects.precache
process_performance_data=1
resource_file=resource.cfg
retain_state_information=1
retained_contact_host_attribute_mask=0
retained_contact_service_attribute_mask=0
retained_host_attribute_mask=0
retained_process_host_attribute_mask=0
retained_process_service_attribute_mask=0
retained_service_attribute_mask=0
retention_update_interval=60
service_check_timeout=60
service_freshness_check_interval=60
service_inter_check_delay_method=s
service_interleave_factor=s
##shinken_group=shinken
##shinken_user=shinken
#shinken_group=shinken
#shinken_user=shinken
sleep_time=0.25
soft_state_dependencies=0
state_retention_file=var/retention.dat
status_file=var/status.dat
status_update_interval=5
temp_file=tmp/shinken.tmp
temp_path=var/tmp
translate_passive_host_checks=0
use_aggressive_host_checking=0
use_embedded_perl_implicitly=0
use_large_installation_tweaks=0
use_regexp_matching=0
use_retained_program_state=1
use_retained_scheduling_info=1
use_syslog=0
use_true_regexp_matching=0
enable_problem_impacts_states_change=1
no_event_handlers_during_downtimes=0
| 3,467 | Python | .pyt | 120 | 27.9 | 63 | 0.856033 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,591 | docker-file-UNIT-TEST-python2.txt | shinken-solutions_shinken/test/docker-files/docker-file-UNIT-TEST-python2.txt |
FROM debian:9
MAINTAINER Jean Gabes <naparuba@gmail.com>
RUN apt-get update && apt-get install -y python
# Set up the test env; the "standard" installation is tested with other tests ^^
RUN apt-get install -y python-pip
RUN pip install jinja2
RUN pip install leveldb
RUN pip install pyOpenSSL
RUN pip install pycrypto
RUN pip install requests
RUN pip install Crypto
RUN pip install pygments
RUN pip install coveralls
RUN pip install nose-cov
RUN apt-get install -y python-cherrypy3
RUN pip install rsa
# The internal yaml seems to not be used, thanks nose
RUN pip install ruamel.yaml==0.11.15
RUN apt-get install -y sysstat
RUN apt-get install -y curl
RUN apt-get install -y vim
RUN apt-get install -y procps
RUN apt-get install -y wget
RUN apt-get install -y net-tools
RUN apt-get install -y dnsutils
RUN apt-get install -y python-apt
RUN apt-get install -y strace
RUN apt-get install -y less
RUN apt-get install -y python-blessed
RUN apt-get install -y locales
RUN apt-get install -y python-setuptools
RUN apt-get install -y python-pycurl
RUN apt-get install -y dos2unix
RUN apt-get install -y pep8
ADD . /root/shinken-framework
WORKDIR /root/shinken-framework
#RUN python setup.py install
ENTRYPOINT cd test;./quick_tests.sh
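# Illustrative build/run usage (image tag assumed), from the repository root:
#   docker build -f test/docker-files/docker-file-UNIT-TEST-python2.txt -t shinken-unit-test .
#   docker run --rm shinken-unit-test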
# Specific test, manual launch
#ENTRYPOINT cd test; python test_raft_multiprocess.py TestRaftMultiProcess.test_raft_large_leader_election
#ENTRYPOINT cd test; python test_raft.py
#ENTRYPOINT cd test;python test_yaml.py
#ENTRYPOINT opsbro agent start
| 1,724 | Python | .pyt | 43 | 38.930233 | 111 | 0.691756 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,592 | docker-file-DEV-debian9-test-python.txt | shinken-solutions_shinken/test/docker-files/docker-file-DEV-debian9-test-python.txt |
FROM debian:9
MAINTAINER Jean Gabes <naparuba@gmail.com>
RUN apt-get update && apt-get install -y python
# Set up the test env; the "standard" installation is tested with other tests ^^
RUN apt-get install -y python-pip
RUN pip install jinja2
RUN pip install leveldb
RUN pip install pyOpenSSL
RUN pip install pycrypto
RUN pip install requests
RUN pip install Crypto
RUN pip install pygments
RUN pip install coveralls
RUN pip install nose-cov
RUN apt-get install -y python-cherrypy3
RUN pip install rsa
# The internal yaml seems to not be used, thanks nose
RUN pip install ruamel.yaml==0.11.15
ADD . /root/shinken-framework
WORKDIR /root/shinken-framework
RUN python setup.py install
ENTRYPOINT nosetests -xv --processes=1 --process-timeout=300 --process-restartworker --with-cov --cov=shinken --exe
# Specific test, manual launch
#ENTRYPOINT cd test; python test_raft_multiprocess.py TestRaftMultiProcess.test_raft_large_leader_election
#ENTRYPOINT cd test; python test_raft.py
#ENTRYPOINT cd test;python test_yaml.py
#ENTRYPOINT opsbro agent start
| 1,185 | Python | .pyt | 27 | 42.555556 | 118 | 0.719756 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,593 | test_nocontacts.py | shinken-solutions_shinken/test/test_nocontacts.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestNoContact(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_nocontacts.cfg')
    # It seems that Nagios allows elements without contacts, raising just
    # a warning and not an error. We should do the same.
def test_nocontact(self):
host = self.sched.hosts.find_by_name("test_host_0")
self.assertEqual([], host.contacts)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
self.assertEqual([], svc.contacts)
self.assertTrue(self.sched.conf.is_correct)
if __name__ == '__main__':
unittest.main()
| 1,588 | Python | .tac | 37 | 39.837838 | 91 | 0.735409 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,594 | test_bad_contact_call.py | shinken-solutions_shinken/test/test_bad_contact_call.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_contact_call.cfg')
def test_bad_contact_call(self):
        # The service got an unknown contact. It should raise an error
svc = self.conf.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
print("Contacts:", svc.contacts)
self.assertEqual(False, svc.is_correct())
if __name__ == '__main__':
unittest.main()
| 1,451 | Python | .tac | 34 | 39.882353 | 90 | 0.738636 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,595 | test_contactgroup_nomembers.py | shinken-solutions_shinken/test/test_contactgroup_nomembers.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestContactgroupWitoutMembers(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_contactgroup_nomembers.cfg')
    # It seems that a contact group with no members causes a crash in the
    # arbiter. We should fix it :)
def test_contactgroup_nomember(self):
# Look for the members of the test_contact_nomember
cg = self.sched.conf.contactgroups.find_by_name('test_contact_nomember')
self.assertIsNot(cg, None)
print(cg.members)
self.assertEqual([], cg.members)
if __name__ == '__main__':
unittest.main()
| 1,581 | Python | .tac | 37 | 39.648649 | 84 | 0.743322 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,596 | test_contactgroups_plus_inheritance.py | shinken-solutions_shinken/test/test_contactgroups_plus_inheritance.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test attribute inheritance and the right order
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestPlusInInheritance(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_contactgroups_plus_inheritance.cfg')
    def _dump(self, h):
        print("Dumping host", h.get_name())
        print(h.contact_groups)
        for c in h.contacts:
            print("->", c.get_name())
    def _dump_svc(self, s):
        print("Dumping Service", s.get_name())
        print(" contact_groups : %s " % s.contact_groups)
        for c in s.contacts:
            print("->", c.get_name())
    def test_contactgroups_plus_inheritance(self):
        host0 = self.sched.hosts.find_by_name("test_host_0")
        # host 0 should have 2 groups of contacts
        # WARNING, it's a string, not the real objects!
        self._dump(host0)
        self.assertIn("test_contact_1", [c.get_name() for c in host0.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host0.contacts])
        host2 = self.sched.hosts.find_by_name("test_host_2")
        self._dump(host2)
        self.assertIn("test_contact_1", [c.get_name() for c in host2.contacts])
        host3 = self.sched.hosts.find_by_name("test_host_3")
        self._dump(host3)
        self.assertIn("test_contact_1", [c.get_name() for c in host3.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host3.contacts])
        host4 = self.sched.hosts.find_by_name("test_host_4")
        self._dump(host4)
        self.assertIn("test_contact_1", [c.get_name() for c in host4.contacts])
        host5 = self.sched.hosts.find_by_name("test_host_5")
        self._dump(host5)
        self.assertIn("test_contact_1", [c.get_name() for c in host5.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host5.contacts])
        host6 = self.sched.hosts.find_by_name("test_host_6")
        self._dump(host6)
        self.assertIn("test_contact_1", [c.get_name() for c in host6.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in host6.contacts])
        # Now let's check service inheritance
        svc1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "svc_tmplA")
        self._dump_svc(svc1)
        self.assertIn("test_contact_1", [c.get_name() for c in svc1.contacts])
        svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "svc_tmplB")
        self._dump_svc(svc2)
        self.assertIn("test_contact_2", [c.get_name() for c in svc2.contacts])
        svc3 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "svc_tmplA_tmplB")
        self.assertIn("test_contact_1", [c.get_name() for c in svc3.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in svc3.contacts])
        self._dump_svc(svc3)
        # Now let's check multi-level service inheritance
        svc4 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST-DESC4")
        self.assertIn("test_contact_1", [c.get_name() for c in svc4.contacts])
        self.assertIn("test_contact_2", [c.get_name() for c in svc4.contacts])
        self._dump_svc(svc4)
        svc5 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST-DESC4b")
        self.assertIn("test_contact_2", [c.get_name() for c in svc5.contacts])
        self._dump_svc(svc5)
if __name__ == '__main__':
unittest.main()
| 4,371 | Python | .tac | 84 | 45.452381 | 98 | 0.659315 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,597 | test_contactdowntimes.py | shinken-solutions_shinken/test/test_contactdowntimes.py |
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test host- and service-downtimes.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from shinken_test import *
class TestContactDowntime(ShinkenTest):
def test_contact_downtime(self):
self.print_header()
        # schedule a 10-minute downtime
        # downtime must be active
        # consume a good result, sleep for a minute
        # downtime must be active
        # consume a bad result
        # downtime must be active
        # no notification must be found in the broks
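        # External command format used below (per the Nagios/Shinken external
        # command syntax):
        #   [timestamp] SCHEDULE_CONTACT_DOWNTIME;<contact>;<start>;<end>;<author>;<comment>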
duration = 600
now = time.time()
        # downtime valid for the next 10 minutes
test_contact = self.sched.contacts.find_by_name('test_contact')
cmd = "[%lu] SCHEDULE_CONTACT_DOWNTIME;test_contact;%d;%d;lausser;blablub" % (now, now, now + duration)
self.sched.run_external_command(cmd)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# Change the notif interval, so we can notify as soon as we want
svc.notification_interval = 0.001
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
#time.sleep(20)
        # We loop; the downtime will be checked and activated
self.scheduler_loop(1, [[svc, 0, 'OK'], [host, 0, 'UP']])
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;STARTED')
self.show_and_clear_logs()
print("downtime was scheduled. check its activity and the comment\n"*5)
self.assertEqual(1, len(self.sched.contact_downtimes))
self.assertEqual(1, len(test_contact.downtimes))
self.assertIn(test_contact.downtimes[0], self.sched.contact_downtimes.values())
self.assertTrue(test_contact.downtimes[0].is_in_effect)
self.assertFalse(test_contact.downtimes[0].can_be_deleted)
        # OK, we defined the downtime like we should; now check it does its job:
        # no notification must be raised during a downtime for this contact
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
# We should NOT see any service notification
self.assert_no_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
        # Now we shorten the downtime a lot so it will stop at now + 1 sec.
test_contact.downtimes[0].end_time = time.time() + 1
time.sleep(2)
# We invalidate it with a scheduler loop
self.scheduler_loop(1, [])
# So we should be out now, with a log
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;STOPPED')
self.show_and_clear_logs()
print("\n\nDowntime was ended. Check it is really stopped")
self.assertEqual(0, len(self.sched.contact_downtimes))
self.assertEqual(0, len(test_contact.downtimes))
for n in svc.notifications_in_progress.values():
print("NOTIF", n, n.t_to_go, time.time())
        # Now we want this contact to be really notified!
        # The downtime is over, so notifications for this contact must be
        # raised again
time.sleep(1)
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
self.assert_any_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
for n in svc.notifications_in_progress.values():
print("NOTIF", n, n.t_to_go, time.time(), time.time() - n.t_to_go)
def test_contact_downtime_and_cancel(self):
self.print_header()
        # schedule a 10-minute downtime
        # downtime must be active
        # consume a good result, sleep for a minute
        # downtime must be active
        # consume a bad result
        # downtime must be active
        # no notification must be found in the broks
duration = 600
now = time.time()
        # downtime valid for the next 10 minutes
test_contact = self.sched.contacts.find_by_name('test_contact')
cmd = "[%lu] SCHEDULE_CONTACT_DOWNTIME;test_contact;%d;%d;lausser;blablub" % (now, now, now + duration)
self.sched.run_external_command(cmd)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# Change the notif interval, so we can notify as soon as we want
svc.notification_interval = 0.001
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
#time.sleep(20)
        # We loop; the downtime will be checked and activated
self.scheduler_loop(1, [[svc, 0, 'OK'], [host, 0, 'UP']])
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;STARTED')
self.show_and_clear_logs()
print("downtime was scheduled. check its activity and the comment")
self.assertEqual(1, len(self.sched.contact_downtimes))
self.assertEqual(1, len(test_contact.downtimes))
self.assertIn(test_contact.downtimes[0], self.sched.contact_downtimes.values())
self.assertTrue(test_contact.downtimes[0].is_in_effect)
self.assertFalse(test_contact.downtimes[0].can_be_deleted)
time.sleep(1)
        # OK, we defined the downtime like we should; now check it does its job:
        # no notification must be raised during a downtime for this contact
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
# We should NOT see any service notification
self.assert_no_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
downtime_id = test_contact.downtimes[0].id
# OK, Now we cancel this downtime, we do not need it anymore
cmd = "[%lu] DEL_CONTACT_DOWNTIME;%d" % (now, downtime_id)
self.sched.run_external_command(cmd)
        # We check that the downtime is tagged as ready to be removed
self.assertTrue(test_contact.downtimes[0].can_be_deleted)
# We really delete it
self.scheduler_loop(1, [])
# So we should be out now, with a log
self.assert_any_log_match('CONTACT DOWNTIME ALERT.*;CANCELLED')
self.show_and_clear_logs()
print("Downtime was cancelled")
self.assertEqual(0, len(self.sched.contact_downtimes))
self.assertEqual(0, len(test_contact.downtimes))
time.sleep(1)
        # Now we want this contact to be really notified!
        # The downtime was cancelled, so notifications for this contact must
        # be raised again
self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
self.assert_any_log_match('SERVICE NOTIFICATION.*;CRITICAL')
self.show_and_clear_logs()
if __name__ == '__main__':
unittest.main()
| 7,796 | Python | .tac | 151 | 43.748344 | 111 | 0.661536 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,598 | shinken_nocontacts.cfg | shinken-solutions_shinken/test/etc/shinken_nocontacts.cfg |
accept_passive_host_checks=1
accept_passive_service_checks=1
additional_freshness_latency=15
admin_email=shinken@localhost
admin_pager=shinken@localhost
auto_reschedule_checks=0
auto_rescheduling_interval=30
auto_rescheduling_window=180
cached_host_check_horizon=15
cached_service_check_horizon=15
cfg_file=nocontacts/hosts.cfg
cfg_file=nocontacts/services.cfg
cfg_file=standard/contacts.cfg
cfg_file=standard/commands.cfg
cfg_file=standard/timeperiods.cfg
cfg_file=standard/hostgroups.cfg
cfg_file=standard/servicegroups.cfg
cfg_file=standard/shinken-specific.cfg
check_external_commands=1
check_for_orphaned_hosts=1
check_for_orphaned_services=1
check_host_freshness=0
check_result_path=var/spool/checkresults
check_result_reaper_frequency=10
check_service_freshness=1
command_check_interval=-1
command_file=var/shinken.cmd
daemon_dumps_core=0
date_format=iso8601
debug_file=var/shinken.debug
debug_level=112
debug_verbosity=1
enable_embedded_perl=0
enable_environment_macros=1
enable_event_handlers=1
enable_flap_detection=0
enable_notifications=1
enable_predictive_host_dependency_checks=1
enable_predictive_service_dependency_checks=1
event_broker_options=-1
event_handler_timeout=30
execute_host_checks=1
execute_service_checks=1
external_command_buffer_slots=4096
high_host_flap_threshold=20
high_service_flap_threshold=20
host_check_timeout=30
host_freshness_check_interval=60
host_inter_check_delay_method=s
illegal_macro_output_chars=`~\$&|'"<>
illegal_object_name_chars=`~!\$%^&*|'"<>?,()=
interval_length=60
lock_file=var/shinken.pid
log_archive_path=var/archives
log_event_handlers=1
log_external_commands=1
log_file=var/shinken.log
log_host_retries=1
log_initial_states=1
log_notifications=1
log_passive_checks=1
log_rotation_method=d
log_service_retries=1
low_host_flap_threshold=5
low_service_flap_threshold=5
max_check_result_file_age=3600
max_check_result_reaper_time=30
max_concurrent_checks=0
max_debug_file_size=1000000
max_host_check_spread=30
max_service_check_spread=30
shinken_group=shinken
shinken_user=shinken
notification_timeout=30
object_cache_file=var/objects.cache
obsess_over_hosts=0
obsess_over_services=0
ocsp_timeout=5
#p1_file=/tmp/test_shinken/plugins/p1.pl
p1_file=/usr/local/shinken/bin/p1.pl
passive_host_checks_are_soft=0
perfdata_timeout=5
precached_object_file=var/objects.precache
process_performance_data=1
resource_file=resource.cfg
retain_state_information=1
retained_contact_host_attribute_mask=0
retained_contact_service_attribute_mask=0
retained_host_attribute_mask=0
retained_process_host_attribute_mask=0
retained_process_service_attribute_mask=0
retained_service_attribute_mask=0
retention_update_interval=60
service_check_timeout=60
service_freshness_check_interval=60
service_inter_check_delay_method=s
service_interleave_factor=s
##shinken_group=shinken
##shinken_user=shinken
#shinken_group=shinken
#shinken_user=shinken
sleep_time=0.25
soft_state_dependencies=0
state_retention_file=var/retention.dat
status_file=var/status.dat
status_update_interval=5
temp_file=tmp/shinken.tmp
temp_path=var/tmp
translate_passive_host_checks=0
use_aggressive_host_checking=0
use_embedded_perl_implicitly=0
use_large_installation_tweaks=0
use_regexp_matching=0
use_retained_program_state=1
use_retained_scheduling_info=1
use_syslog=0
use_true_regexp_matching=0
enable_problem_impacts_states_change=1
| 3,343 | Python | .tac | 118 | 27.330508 | 45 | 0.856744 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |
| 6,599 | shinken_bad_contact_call.cfg | shinken-solutions_shinken/test/etc/shinken_bad_contact_call.cfg |
accept_passive_host_checks=1
accept_passive_service_checks=1
additional_freshness_latency=15
admin_email=shinken@localhost
admin_pager=shinken@localhost
auto_reschedule_checks=0
auto_rescheduling_interval=30
auto_rescheduling_window=180
cached_host_check_horizon=15
cached_service_check_horizon=15
cfg_file=bad_contact_call/hosts.cfg
cfg_file=bad_contact_call/services.cfg
cfg_file=standard/contacts.cfg
cfg_file=standard/commands.cfg
cfg_file=standard/timeperiods.cfg
cfg_file=standard/hostgroups.cfg
cfg_file=bad_contact_call/servicegroups.cfg
cfg_file=bad_contact_call/shinken-specific.cfg
check_external_commands=1
check_for_orphaned_hosts=1
check_for_orphaned_services=1
check_host_freshness=0
check_result_path=var/spool/checkresults
check_result_reaper_frequency=10
check_service_freshness=1
command_check_interval=-1
command_file=var/shinken.cmd
daemon_dumps_core=0
date_format=iso8601
debug_file=var/shinken.debug
debug_level=112
debug_verbosity=1
enable_embedded_perl=0
enable_environment_macros=1
enable_event_handlers=1
enable_flap_detection=0
enable_notifications=1
enable_predictive_host_dependency_checks=1
enable_predictive_service_dependency_checks=1
event_broker_options=-1
event_handler_timeout=30
execute_host_checks=1
execute_service_checks=1
external_command_buffer_slots=4096
high_host_flap_threshold=20
high_service_flap_threshold=20
host_check_timeout=30
host_freshness_check_interval=60
host_inter_check_delay_method=s
illegal_macro_output_chars=`~\$&|'"<>
illegal_object_name_chars=`~!\$%^&*|'"<>?,()=
interval_length=60
lock_file=var/shinken.pid
log_archive_path=var/archives
log_event_handlers=1
log_external_commands=1
log_file=var/shinken.log
log_host_retries=1
log_initial_states=1
log_notifications=1
log_passive_checks=1
log_rotation_method=d
log_service_retries=1
low_host_flap_threshold=5
low_service_flap_threshold=5
max_check_result_file_age=3600
max_check_result_reaper_time=30
max_concurrent_checks=0
max_debug_file_size=1000000
max_host_check_spread=30
max_service_check_spread=30
shinken_group=shinken
shinken_user=shinken
notification_timeout=30
object_cache_file=var/objects.cache
obsess_over_hosts=0
obsess_over_services=0
ocsp_timeout=5
#p1_file=/tmp/test_shinken/plugins/p1.pl
p1_file=/usr/local/shinken/bin/p1.pl
passive_host_checks_are_soft=0
perfdata_timeout=5
precached_object_file=var/objects.precache
process_performance_data=0
resource_file=resource.cfg
retain_state_information=1
retained_contact_host_attribute_mask=0
retained_contact_service_attribute_mask=0
retained_host_attribute_mask=0
retained_process_host_attribute_mask=0
retained_process_service_attribute_mask=0
retained_service_attribute_mask=0
retention_update_interval=60
service_check_timeout=60
service_freshness_check_interval=60
service_inter_check_delay_method=s
service_interleave_factor=s
##shinken_group=shinken
##shinken_user=shinken
#shinken_group=shinken
#shinken_user=shinken
sleep_time=0.25
soft_state_dependencies=0
state_retention_file=var/retention.dat
status_file=var/status.dat
status_update_interval=5
temp_file=tmp/shinken.tmp
temp_path=var/tmp
translate_passive_host_checks=0
use_aggressive_host_checking=0
use_embedded_perl_implicitly=0
use_large_installation_tweaks=0
use_regexp_matching=0
use_retained_program_state=1
use_retained_scheduling_info=1
use_syslog=0
use_true_regexp_matching=0
enable_problem_impacts_states_change=1
| 3,370 | Python | .tac | 118 | 27.567797 | 46 | 0.855518 | shinken-solutions/shinken | 1,133 | 337 | 226 | AGPL-3.0 | 9/5/2024, 5:09:53 PM (Europe/Amsterdam) |