Dataset schema (per-column dtype and observed range):

| column | dtype | range / classes |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | length 4 to 119 |
| file_path | string | length 14 to 227 |
| content | string | length 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | length 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
**id 6,000** · file_name: `colorcal.py` · file_path: `psychopy_psychopy/psychopy/hardware/crs/colorcal.py`

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from psychopy.tools.pkgtools import PluginStub
class ColorCAL(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/ColorCAL"
):
pass
```

size: 422 · language: Python · extension: .py · total_lines: 12 · avg_line_length: 32.25 · max_line_length: 79 · alphanum_fraction: 0.74321
repo_name: psychopy/psychopy · repo_stars: 1,662 · repo_forks: 900 · repo_open_issues: 218 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)
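`ColorCAL` above is only a stub: the working implementation ships in the `psychopy-crs` plugin, and `PluginStub` exists to point users at it. The real `psychopy.tools.pkgtools.PluginStub` is not included in this row, so the following is a minimal sketch of how such a base class could consume the `plugin` and `doclink` class keywords; the `__init_subclass__` mechanism here is an assumption for illustration, not PsychoPy's actual implementation.

```python
class PluginStub:
    """Sketch of a PluginStub-like base (assumed, not PsychoPy's code)."""

    def __init_subclass__(cls, plugin=None, doclink=None, **kwargs):
        # class keywords (plugin=..., doclink=...) land here when a subclass
        # such as ColorCAL is defined
        super().__init_subclass__(**kwargs)
        cls._plugin = plugin
        cls._doclink = doclink

    def __init__(self, *args, **kwargs):
        # instantiating a stub only tells the user what to install
        raise ModuleNotFoundError(
            f"{type(self).__name__} requires the `{self._plugin}` package. "
            f"Install it and restart the session. Docs: {self._doclink}")
```

With a base like this, instantiating a stub subclass fails with an actionable install hint rather than an opaque error.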
**id 6,001** · file_name: `optical.py` · file_path: `psychopy_psychopy/psychopy/hardware/crs/optical.py`

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2012 Valentin Haenel <valentin.haenel@gmx.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from psychopy.tools.pkgtools import PluginStub
class OptiCAL(
PluginStub,
plugin="psychopy-crs",
doclink="https://psychopy.github.io/psychopy-crs/coder/OptiCAL"
):
pass
```

size: 1,361 · language: Python · extension: .py · total_lines: 28 · avg_line_length: 46.821429 · max_line_length: 79 · alphanum_fraction: 0.780286
repo_name: psychopy/psychopy · repo_stars: 1,662 · repo_forks: 900 · repo_open_issues: 218 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)
**id 6,002** · file_name: `__init__.py` · file_path: `psychopy_psychopy/psychopy/hardware/crs/__init__.py`

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Cambridge Research Systems hardware.
These are optional components that can be obtained by installing the
`psychopy-crs` extension into the current environment.
"""
import psychopy.logging as logging
try:
from .bits import (
BitsSharp,
BitsPlusPlus,
DisplayPlusPlus,
DisplayPlusPlusTouch)
from .optical import OptiCAL
from .colorcal import ColorCAL
except (ModuleNotFoundError, ImportError, NameError):
logging.error(
"Support for Cambridge Research Systems hardware is not available this "
"session. Please install `psychopy-crs` and restart the session to "
"enable support.")
else:
    # Monkey-patch metadata into the CRS classes if required attributes are missing
if not hasattr(OptiCAL, "longName"):
setattr(OptiCAL, "longName", "CRS OptiCal")
if not hasattr(OptiCAL, "driverFor"):
setattr(OptiCAL, "driverFor", ["optical"])
if not hasattr(ColorCAL, "longName"):
setattr(ColorCAL, "longName", "CRS ColorCAL")
if not hasattr(ColorCAL, "driverFor"):
setattr(ColorCAL, "driverFor", ["colorcal"])
if __name__ == "__main__":
pass
```

size: 1,406 · language: Python · extension: .py · total_lines: 35 · avg_line_length: 35.2 · max_line_length: 80 · alphanum_fraction: 0.711029
repo_name: psychopy/psychopy · repo_stars: 1,662 · repo_forks: 900 · repo_open_issues: 218 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)
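Row 6,002 above shows the pattern that makes such stubs safe to ship: the package imports the plugin-provided classes inside a `try` block, logs a readable error when the plugin is absent, and otherwise patches in any metadata attributes (`longName`, `driverFor`) that the plugin did not define. A stripped-down sketch of the same guarded-import pattern, using a hypothetical `fakeplugin` module and `Widget` class:

```python
import logging

try:
    from fakeplugin import Widget  # hypothetical optional dependency
except (ModuleNotFoundError, ImportError):
    logging.error(
        "Widget support is unavailable; install `fakeplugin` and restart.")
else:
    # runs only if the import succeeded: fill in missing metadata defaults
    if not hasattr(Widget, "longName"):
        Widget.longName = "Fake Widget"
    if not hasattr(Widget, "driverFor"):
        Widget.driverFor = ["widget"]
```

The `else` clause ensures the attribute patching only runs when the import actually succeeded.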
**id 6,003** · file_name: `__init__.py` · file_path: `psychopy_psychopy/psychopy/hardware/joystick/__init__.py`

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Control joysticks and gamepads from within PsychoPy.
You do need a window (and you need to be flipping it) for the joystick to be
updated.
Known issues:
- currently under pyglet the joystick axes initialise to a value of zero
and stay like this until the first time that axis moves
- currently pygame (1.9.1) spits out lots of debug messages about the
joystick and these can't be turned off :-/
Typical usage::
from psychopy.hardware import joystick
from psychopy import visual
joystick.backend='pyglet' # must match the Window
win = visual.Window([400,400], winType='pyglet')
nJoys = joystick.getNumJoysticks() # to check if we have any
id = 0
joy = joystick.Joystick(id) # id must be <= nJoys - 1
nAxes = joy.getNumAxes() # for interest
while True: # while presenting stimuli
joy.getX()
# ...
win.flip() # flipping implicitly updates the joystick info
"""
try:
import pygame.joystick
havePygame = True
except Exception:
havePygame = False
try:
from pyglet import input as pyglet_input # pyglet 1.2+
from pyglet import app as pyglet_app
havePyglet = True
except Exception:
havePyglet = False
try:
import glfw
haveGLFW = True
except ImportError:
print("failed to import GLFW.")
haveGLFW = False
from psychopy import logging, visual
backend = 'pyglet' # 'pyglet' or 'pygame'
def getNumJoysticks():
"""Return a count of the number of joysticks available."""
if backend == 'pyglet':
return len(pyglet_input.get_joysticks())
elif backend == 'glfw':
n_joys = 0
        # +1 because range() excludes its stop value (JOYSTICK_LAST itself)
        for joy in range(glfw.JOYSTICK_1, glfw.JOYSTICK_LAST + 1):
if glfw.joystick_present(joy):
n_joys += 1
return n_joys
else:
pygame.joystick.init()
return pygame.joystick.get_count()
if havePyglet:
class PygletDispatcher:
def dispatch_events(self):
pyglet_app.platform_event_loop.step(timeout=0.001)
pyglet_dispatcher = PygletDispatcher()
class Joystick:
def __init__(self, id):
"""An object to control a multi-axis joystick or gamepad.
        .. note::
You do need to be flipping frames (or dispatching events manually)
in order for the values of the joystick to be updated.
:Known issues:
Currently under pyglet backends the axis values initialise to zero
rather than reading the current true value. This gets fixed on the
first change to each axis.
"""
self.id = id
if backend == 'pyglet':
joys = pyglet_input.get_joysticks()
if id >= len(joys):
logging.error("You don't have that many joysticks attached "
"(remember that the first joystick has id=0 "
"etc...)")
else:
self._device = joys[id]
try:
self._device.open()
                except pyglet_input.DeviceOpenException:
                    pass  # opening can fail if the device is already in use
self.name = self._device.device.name
if len(visual.openWindows) == 0:
logging.error(
"You need to open a window before creating your joystick")
else:
for win in visual.openWindows:
win()._eventDispatchers.append(pyglet_dispatcher)
elif backend == 'glfw':
# We can create a joystick anytime after glfwInit() is called, but
# there should be a window open first.
# Joystick events are processed when flipping the associated window.
if not glfw.init():
logging.error("GLFW could not be initialized. Exiting.")
# get all available joysticks, GLFW supports up to 16.
joys = []
            # +1 because range() excludes its stop value (JOYSTICK_LAST itself)
            for joy in range(glfw.JOYSTICK_1, glfw.JOYSTICK_LAST + 1):
if glfw.joystick_present(joy):
joys.append(joy)
# error checks
if not joys: # if the list is empty, no joysticks were found
error_msg = ("No joysticks were found by the GLFW runtime. "
"Check connections and try again.")
logging.error(error_msg)
raise RuntimeError(error_msg)
elif id not in joys:
error_msg = ("You don't have that many joysticks attached "
"(remember that the first joystick has id=0 "
"etc...)")
logging.error(error_msg)
raise RuntimeError(error_msg)
self._device = id # just need the ID for GLFW
self.name = glfw.get_joystick_name(self._device).decode("utf-8")
if len(visual.openWindows) == 0:
logging.error(
"You need to open a window before creating your joystick")
else:
for win in visual.openWindows:
# sending the raw ID to the window.
win()._eventDispatchers.append(self._device)
else:
pygame.joystick.init()
self._device = pygame.joystick.Joystick(id)
self._device.init()
self.name = self._device.get_name()
def getName(self):
"""Return the manufacturer-defined name describing the device."""
return self.name
def getNumButtons(self):
"""Return the number of digital buttons on the device."""
if backend == 'pyglet':
return len(self._device.buttons)
elif backend == 'glfw':
_, count = glfw.get_joystick_buttons(self._device)
return count
else:
return self._device.get_numbuttons()
def getButton(self, buttonId):
"""Get the state of a given button.
buttonId should be a value from 0 to the number of buttons-1
"""
if backend == 'pyglet':
return self._device.buttons[buttonId]
elif backend == 'glfw':
bs, _ = glfw.get_joystick_buttons(self._device)
return bs[buttonId]
else:
return self._device.get_button(buttonId)
def getAllButtons(self):
"""Get the state of all buttons as a list."""
if backend == 'pyglet':
return self._device.buttons
elif backend == 'glfw':
bs, count = glfw.get_joystick_buttons(self._device)
return [bs[i] for i in range(count)]
else:
bs = []
for id in range(self._device.get_numbuttons()):
bs.append(self._device.get_button(id))
return bs
def getAllHats(self):
"""Get the current values of all available hats as a list of tuples.
Each value is a tuple (x, y) where x and y can be -1, 0, +1
"""
hats = []
if backend == 'pyglet':
for ctrl in self._device.device.get_controls():
                if ctrl.name is not None and 'hat' in ctrl.name:
hats.append((self._device.hat_x, self._device.hat_y))
elif backend == 'glfw':
# GLFW treats hats as buttons
pass
else:
for n in range(self._device.get_numhats()):
hats.append(self._device.get_hat(n))
return hats
def getNumHats(self):
"""Get the number of hats on this joystick.
The GLFW backend makes no distinction between hats and buttons. Calling
'getNumHats()' will return 0.
"""
if backend == 'pyglet':
return len(self.getAllHats())
elif backend == 'glfw':
return 0
else:
return self._device.get_numhats()
def getHat(self, hatId=0):
"""Get the position of a particular hat.
The position returned is an (x, y) tuple where x and y
can be -1, 0 or +1
"""
if backend == 'pyglet':
if hatId == 0:
return self._device.hat
else:
return self.getAllHats()[hatId]
elif backend == 'glfw':
# does nothing, hats are buttons in GLFW
pass
else:
return self._device.get_hat(hatId)
def getX(self):
"""Return the X axis value (equivalent to joystick.getAxis(0))."""
if backend == 'pyglet':
return self._device.x
elif backend == 'glfw':
return self.getAxis(0)
else:
return self._device.get_axis(0)
def getY(self):
"""Return the Y axis value (equivalent to joystick.getAxis(1))."""
if backend == 'pyglet':
return self._device.y
elif backend == 'glfw':
return self.getAxis(1)
else:
return self._device.get_axis(1)
def getZ(self):
"""Return the Z axis value (equivalent to joystick.getAxis(2))."""
if backend == 'pyglet':
return self._device.z
elif backend == 'glfw':
return self.getAxis(2)
else:
return self._device.get_axis(2)
def getAllAxes(self):
"""Get a list of all current axis values."""
axes = []
if backend == 'pyglet':
names = ['x', 'y', 'z', 'rx', 'ry', 'rz', ]
for axName in names:
if hasattr(self._device, axName):
axes.append(getattr(self._device, axName))
elif backend == 'glfw':
_axes, count = glfw.get_joystick_axes(self._device)
for i in range(count):
axes.append(_axes[i])
else:
for id in range(self._device.get_numaxes()):
axes.append(self._device.get_axis(id))
return axes
def getNumAxes(self):
"""Return the number of joystick axes found.
"""
if backend == 'pyglet':
return len(self.getAllAxes())
elif backend == 'glfw':
_, count = glfw.get_joystick_axes(self._device)
return count
else:
return self._device.get_numaxes()
def getAxis(self, axisId):
"""Get the value of an axis by an integer id.
(from 0 to number of axes - 1)
"""
if backend == 'pyglet':
val = self.getAllAxes()[axisId]
if val is None:
val = 0
return val
elif backend == 'glfw':
val, _ = glfw.get_joystick_axes(self._device)
return val[axisId]
else:
return self._device.get_axis(axisId)
class XboxController(Joystick):
"""Joystick template class for the XBox 360 controller.
Usage:
xbctrl = XboxController(0) # joystick ID
y_btn_state = xbctrl.y # get the state of the 'Y' button
"""
def __init__(self, id, *args, **kwargs):
super(XboxController, self).__init__(id)
# validate if this is an Xbox controller by its reported name
if self.name.find("Xbox 360") == -1:
logging.warning("The connected controller does not appear "
"compatible with the 'XboxController' template. "
"Unexpected input behaviour may result!")
if backend != 'glfw':
logging.error("Controller templates are only supported when using "
"the GLFW window backend. You must also set "
"joystick.backend='glfw' prior to creating a "
"joystick.")
# button mapping for the XBox controller
self._button_mapping = {'a': 0,
'b': 1,
'x': 2,
'y': 3,
'left_shoulder': 4,
'right_shoulder': 5,
'back': 6,
'start': 7,
'left_stick': 8,
'right_stick': 9,
'up': 10, # hat
'down': 11,
'left': 12,
'right': 13}
# axes groups
self._axes_mapping = {'left_thumbstick': (0, 1),
'right_thumbstick': (2, 3),
'triggers': (4, 5),
'dpad': (6, 7)}
@property
def a(self):
return self.get_a()
def get_a(self):
"""Get the 'A' button state.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['a'])
@property
def b(self):
return self.get_b()
def get_b(self):
"""Get the 'B' button state.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['b'])
@property
def x(self):
return self.get_x()
def get_x(self):
"""Get the 'X' button state.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['x'])
@property
def y(self):
return self.get_y()
def get_y(self):
"""Get the 'Y' button state.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['y'])
@property
def left_shoulder(self):
return self.get_left_shoulder()
def get_left_shoulder(self):
"""Get left 'shoulder' trigger state.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['left_shoulder'])
@property
def right_shoulder(self):
return self.get_right_shoulder()
def get_right_shoulder(self):
"""Get right 'shoulder' trigger state.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['right_shoulder'])
@property
def back(self):
return self.get_back()
def get_back(self):
"""Get 'back' button state (button to the right of the left joystick).
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['back'])
@property
def start(self):
return self.get_start()
def get_start(self):
"""Get 'start' button state (button to the left of the 'X' button).
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['start'])
@property
def hat_axis(self):
return self.get_hat_axis()
def get_hat_axis(self):
"""Get the states of the hat (sometimes called the 'directional pad').
The hat can only indicate direction but not displacement.
This function reports hat values in the same way as a joystick so it may
be used interchangeably with existing analog joystick code.
Returns a tuple (X,Y) indicating which direction the hat is pressed
between -1.0 and +1.0. Positive values indicate presses in the right or
up direction.
:return: tuple, zero centered X, Y values.
"""
# get button states
button_states = self.getAllButtons()
up = button_states[self._button_mapping['up']]
dn = button_states[self._button_mapping['down']]
lf = button_states[self._button_mapping['left']]
rt = button_states[self._button_mapping['right']]
# convert button states to 'analog' values
return -1.0 * lf + rt, -1.0 * dn + up
@property
def left_thumbstick(self):
return self.get_left_thumbstick()
def get_left_thumbstick(self):
"""Get the state of the left joystick button; activated by pressing
down on the stick.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['left_stick'])
@property
def right_thumbstick(self):
return self.get_right_thumbstick()
def get_right_thumbstick(self):
"""Get the state of the right joystick button; activated by pressing
down on the stick.
:return: bool, True if pressed down
"""
return self.getButton(self._button_mapping['right_stick'])
def get_named_buttons(self, button_names):
"""Get the states of multiple buttons using names. A list of button
states is returned for each string in list 'names'.
:param button_names: tuple or list of button names
:return:
"""
button_states = []
for button in button_names:
button_states.append(self.getButton(self._button_mapping[button]))
return button_states
@property
def left_thumbstick_axis(self):
return self.get_left_thumbstick_axis()
def get_left_thumbstick_axis(self):
"""Get the axis displacement values of the left thumbstick.
Returns a tuple (X,Y) indicating thumbstick displacement between -1.0
and +1.0. Positive values indicate the stick is displaced right or up.
:return: tuple, zero centered X, Y values.
"""
ax, ay = self._axes_mapping['left_thumbstick']
# we sometimes get values slightly outside the range of -1.0 < x < 1.0,
# so clip them to give the user what they expect
ax_val = self._clip_range(self.getAxis(ax))
ay_val = self._clip_range(self.getAxis(ay))
return ax_val, ay_val
@property
def right_thumbstick_axis(self):
return self.get_right_thumbstick_axis()
def get_right_thumbstick_axis(self):
"""Get the axis displacement values of the right thumbstick.
Returns a tuple (X,Y) indicating thumbstick displacement between -1.0
and +1.0. Positive values indicate the stick is displaced right or up.
:return: tuple, zero centered X, Y values.
"""
ax, ay = self._axes_mapping['right_thumbstick']
ax_val = self._clip_range(self.getAxis(ax))
ay_val = self._clip_range(self.getAxis(ay))
return ax_val, ay_val
@property
def trigger_axis(self):
return self.get_trigger_axis()
def get_trigger_axis(self):
"""Get the axis displacement values of both index triggers.
Returns a tuple (L,R) indicating index trigger displacement between -1.0
and +1.0. Values increase from -1.0 to 1.0 the further a trigger is
pushed.
:return: tuple, zero centered L, R values.
"""
al, ar = self._axes_mapping['triggers']
al_val = self._clip_range(self.getAxis(al))
ar_val = self._clip_range(self.getAxis(ar))
return al_val, ar_val
def _clip_range(self, val):
"""Clip the range of a value between -1.0 and +1.0. Needed for joystick
axes.
:param val:
:return:
"""
if -1.0 > val:
val = -1.0
if val > 1.0:
val = 1.0
return val
```

size: 19,109 · language: Python · extension: .py · total_lines: 474 · avg_line_length: 29.512658 · max_line_length: 80 · alphanum_fraction: 0.568658
repo_name: psychopy/psychopy · repo_stars: 1,662 · repo_forks: 900 · repo_open_issues: 218 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:29 PM (Europe/Amsterdam)
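The joystick module above is polled rather than event-driven: axis and button values only refresh when the associated window flips (or events are dispatched manually). A usage sketch along the lines of the module docstring, assuming the pyglet backend and at least one attached device:

```python
from psychopy import visual
from psychopy.hardware import joystick

joystick.backend = 'pyglet'        # must match the Window's winType
win = visual.Window([400, 400], winType='pyglet')

if joystick.getNumJoysticks() == 0:
    raise RuntimeError("No joystick detected.")

joy = joystick.Joystick(0)         # the first device has id=0
for frameN in range(600):          # poll for ~10 s at 60 Hz
    x, y = joy.getX(), joy.getY()  # values refresh on each flip
    if any(joy.getAllButtons()):
        break                      # stop on any button press
    win.flip()                     # flipping dispatches joystick events
win.close()
```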
**id 6,004** · file_name: `__init__.py` · file_path: `psychopy_psychopy/psychopy/hardware/camera/__init__.py`

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for reading and writing camera streams.
A camera may be used to document participant responses on video or used by the
experimenter to create movie stimuli or instructions.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'VIDEO_DEVICE_ROOT_LINUX',
'CAMERA_UNKNOWN_VALUE',
'CAMERA_NULL_VALUE',
# 'CAMERA_MODE_VIDEO',
# 'CAMERA_MODE_CV',
# 'CAMERA_MODE_PHOTO',
'CAMERA_TEMP_FILE_VIDEO',
'CAMERA_TEMP_FILE_AUDIO',
'CAMERA_API_AVFOUNDATION',
'CAMERA_API_DIRECTSHOW',
'CAMERA_API_VIDEO4LINUX2',
'CAMERA_API_ANY',
'CAMERA_API_UNKNOWN',
'CAMERA_API_NULL',
'CAMERA_LIB_FFPYPLAYER',
'CAMERA_LIB_OPENCV',
'CAMERA_LIB_UNKNOWN',
'CAMERA_LIB_NULL',
'CameraError',
'CameraNotReadyError',
'CameraNotFoundError',
'CameraFormatNotSupportedError',
'CameraFrameRateNotSupportedError',
'CameraFrameSizeNotSupportedError',
'FormatNotFoundError',
'PlayerNotAvailableError',
'CameraInterfaceFFmpeg',
'CameraInterfaceOpenCV',
'Camera',
'CameraInfo',
'getCameras',
'getCameraDescriptions',
'getOpenCameras',
'closeAllOpenCameras',
'renderVideo'
]
import platform
import inspect
import os
import os.path
import sys
import math
import uuid
import threading
import queue
import time
import numpy as np
from psychopy.constants import NOT_STARTED
from psychopy.hardware import DeviceManager
from psychopy.visual.movies.frame import MovieFrame, NULL_MOVIE_FRAME_INFO
from psychopy.sound.microphone import Microphone
from psychopy.hardware.microphone import MicrophoneDevice
from psychopy.tools import systemtools as st
import psychopy.tools.movietools as movietools
import psychopy.logging as logging
from psychopy.localization import _translate
# ------------------------------------------------------------------------------
# Constants
#
VIDEO_DEVICE_ROOT_LINUX = '/dev'
CAMERA_UNKNOWN_VALUE = u'Unknown' # fields where we couldn't get a value
CAMERA_NULL_VALUE = u'Null' # fields where we couldn't get a value
# camera operating modes
# CAMERA_MODE_VIDEO = u'video'
# CAMERA_MODE_CV = u'cv'
# CAMERA_MODE_PHOTO = u'photo'
# camera status
CAMERA_STATUS_OK = 'ok'
CAMERA_STATUS_PAUSED = 'paused'
CAMERA_STATUS_EOF = 'eof'
# camera API flags, these specify which API camera settings were queried with
CAMERA_API_AVFOUNDATION = u'AVFoundation' # mac
CAMERA_API_DIRECTSHOW = u'DirectShow' # windows
CAMERA_API_VIDEO4LINUX2 = u'Video4Linux2' # linux
CAMERA_API_ANY = u'Any' # any API (OpenCV only)
CAMERA_API_UNKNOWN = u'Unknown' # unknown API
CAMERA_API_NULL = u'Null' # empty field
# camera libraries for playback and recording
CAMERA_LIB_FFPYPLAYER = u'FFPyPlayer'
CAMERA_LIB_OPENCV = u'OpenCV'
CAMERA_LIB_UNKNOWN = u'Unknown'
CAMERA_LIB_NULL = u'Null'
# special values
CAMERA_FRAMERATE_NOMINAL_NTSC = '30.000030'
CAMERA_FRAMERATE_NTSC = 30.000030
# FourCC and pixel format mappings, mostly used with AVFoundation to determine
# the FFMPEG decoder which is most suitable for it. Please expand this if you
# know any more!
pixelFormatTbl = {
'yuvs': 'yuyv422', # 4:2:2
'420v': 'nv12', # 4:2:0
'2vuy': 'uyvy422' # QuickTime 4:2:2
}
# Camera standards to help with selection. Some standalone cameras sometimes
# support an insane number of formats; this will help narrow them down.
standardResolutions = {
'vga': (640, 480),
'svga': (800, 600),
'xga': (1024, 768),
'wxga': (1280, 768),
'wxga+': (1440, 900),
'sxga': (1280, 1024),
'wsxga+': (1680, 1050),
'uxga': (1600, 1200),
'wuxga': (1920, 1200),
'wqxga': (2560, 1600),
'wquxga': (3840, 2400),
'720p': (1280, 720), # also known as HD
'1080p': (1920, 1080),
'2160p': (3840, 2160),
'uhd': (3840, 2160),
'dci': (4096, 2160)
}
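# For example, a camera mode reporting a frame size of (3840, 2160) matches
# both the '2160p' and 'uhd' entries in this table:
#     [k for k, v in standardResolutions.items() if v == (3840, 2160)]
#     # -> ['2160p', 'uhd']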
# ------------------------------------------------------------------------------
# Exceptions
#
class CameraError(Exception):
"""Base class for errors around the camera."""
class CameraNotReadyError(CameraError):
"""Camera is not ready."""
class CameraNotFoundError(CameraError):
"""Raised when a camera cannot be found on the system."""
class CameraFormatNotSupportedError(CameraError):
"""Raised when a camera cannot use the settings requested by the user."""
class CameraFrameRateNotSupportedError(CameraFormatNotSupportedError):
"""Raised when a camera cannot use the frame rate settings requested by the
user."""
class CameraFrameSizeNotSupportedError(CameraFormatNotSupportedError):
"""Raised when a camera cannot use the frame size settings requested by the
user."""
class FormatNotFoundError(CameraError):
"""Cannot find a suitable pixel format for the camera."""
class PlayerNotAvailableError(Exception):
"""Raised when a player object is not available but is required."""
# ------------------------------------------------------------------------------
# Classes
#
class CameraInfo:
"""Information about a specific operating mode for a camera attached to the
system.
Parameters
----------
index : int
Index of the camera. This is the enumeration for the camera which is
used to identify and select it by the `cameraLib`. This value may differ
between operating systems and the `cameraLib` being used.
name : str
Camera name retrieved by the OS. This may be a human-readable name
        (e.g., DirectShow on Windows), an index on MacOS, or a path (e.g.,
`/dev/video0` on Linux). If the `cameraLib` does not support this
feature, then this value will be generated.
frameSize : ArrayLike
Resolution of the frame `(w, h)` in pixels.
frameRate : ArrayLike
Allowable framerate for this camera mode.
pixelFormat : str
Pixel format for the stream. If `u'Null'`, then `codecFormat` is being
used to configure the camera.
codecFormat : str
Codec format for the stream. If `u'Null'`, then `pixelFormat` is being
used to configure the camera. Usually this value is used for high-def
stream formats.
cameraLib : str
Library used to access the camera. This can be either, 'ffpyplayer',
'opencv'.
cameraAPI : str
API used to access the camera. This relates to the external interface
being used by `cameraLib` to access the camera. This value can be:
'AVFoundation', 'DirectShow' or 'Video4Linux2'.
"""
__slots__ = [
'_index',
'_name',
'_frameSize',
'_frameRate',
'_pixelFormat',
'_codecFormat',
'_cameraLib',
'_cameraAPI' # API in use, e.g. DirectShow on Windows
]
def __init__(self,
index=-1,
name=CAMERA_NULL_VALUE,
frameSize=(-1, -1),
frameRate=-1.0,
pixelFormat=CAMERA_UNKNOWN_VALUE,
codecFormat=CAMERA_UNKNOWN_VALUE,
cameraLib=CAMERA_NULL_VALUE,
cameraAPI=CAMERA_API_NULL):
self.index = index
self.name = name
self.frameSize = frameSize
self.frameRate = frameRate
self.pixelFormat = pixelFormat
self.codecFormat = codecFormat
self.cameraLib = cameraLib
self.cameraAPI = cameraAPI
def __repr__(self):
return (f"CameraInfo(index={repr(self.index)}, "
f"name={repr(self.name)}, "
f"frameSize={repr(self.frameSize)}, "
f"frameRate={self.frameRate}, "
f"pixelFormat={repr(self.pixelFormat)}, "
f"codecFormat={repr(self.codecFormat)}, "
f"cameraLib={repr(self.cameraLib)}, "
f"cameraAPI={repr(self.cameraAPI)})")
def __str__(self):
return self.description()
@property
def index(self):
"""Camera index (`int`). This is the enumerated index of this camera.
"""
return self._index
@index.setter
def index(self, value):
self._index = int(value)
@property
def name(self):
"""Camera name (`str`). This is the camera name retrieved by the OS.
"""
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def frameSize(self):
"""Resolution (w, h) in pixels (`ArrayLike` or `None`).
"""
return self._frameSize
@frameSize.setter
def frameSize(self, value):
if value is None:
self._frameSize = None
return
assert len(value) == 2, "Value for `frameSize` must have length 2."
assert all([isinstance(i, int) for i in value]), (
"Values for `frameSize` must be integers.")
self._frameSize = value
@property
def frameRate(self):
"""Frame rate (`float`) or range (`ArrayLike`).
Depends on the backend being used. If a range is provided, then the
first value is the maximum and the second value is the minimum frame
rate.
"""
return self._frameRate
@frameRate.setter
def frameRate(self, value):
# assert len(value) == 2, "Value for `frameRateRange` must have length 2."
# assert all([isinstance(i, int) for i in value]), (
# "Values for `frameRateRange` must be integers.")
# assert value[0] <= value[1], (
# "Value for `frameRateRange` must be `min` <= `max`.")
self._frameRate = value
@property
def pixelFormat(self):
"""Video pixel format (`str`). An empty string indicates this field is
not initialized.
"""
return self._pixelFormat
@pixelFormat.setter
def pixelFormat(self, value):
self._pixelFormat = str(value)
@property
def codecFormat(self):
"""Codec format, may be used instead of `pixelFormat` for some
configurations. Default is `''`.
"""
return self._codecFormat
@codecFormat.setter
def codecFormat(self, value):
self._codecFormat = str(value)
@property
def cameraLib(self):
"""Camera library these settings are targeted towards (`str`).
"""
return self._cameraLib
@cameraLib.setter
def cameraLib(self, value):
self._cameraLib = str(value)
@property
def cameraAPI(self):
"""Camera API in use to obtain this information (`str`).
"""
return self._cameraAPI
@cameraAPI.setter
def cameraAPI(self, value):
self._cameraAPI = str(value)
def frameSizeAsFormattedString(self):
"""Get image size as as formatted string.
Returns
-------
str
Size formatted as `'WxH'` (e.g. `'480x320'`).
"""
return '{width}x{height}'.format(
width=self.frameSize[0],
height=self.frameSize[1])
def description(self):
"""Get a description as a string.
For all backends, this value is guaranteed to be valid after the camera
has been opened. Some backends may be able to provide this information
before the camera is opened.
Returns
-------
str
Description of the camera format as a human readable string.
"""
codecFormat = self._codecFormat
pixelFormat = self._pixelFormat
codec = codecFormat if not pixelFormat else pixelFormat
if self.frameSize is None:
frameSize = (-1, -1)
else:
frameSize = self.frameSize
return "[{name}] {width}x{height}@{frameRate}fps, {codec}".format(
#index=self.index,
name=self.name,
width=str(frameSize[0]),
height=str(frameSize[1]),
frameRate=str(self.frameRate),
codec=codec
)
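# Given the format string above, for example:
#     CameraInfo(index=0, name='camera:0', frameSize=(1280, 720),
#                frameRate=30.0, pixelFormat='bgr24').description()
# returns "[camera:0] 1280x720@30.0fps, bgr24".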
class CameraInterface:
"""Base class providing an interface with a camera attached to the system.
This interface handles the opening, closing, and reading of camera streams.
Subclasses provide a specific implementation for a camera interface.
Calls to any instance methods should be asynchronous and non-blocking,
returning immediately with the same data as before if no new frame data is
available. This is to ensure that the main thread is not blocked by the
camera interface and can continue to process other events.
Parameters
----------
device : Any
Camera device to open a stream with. The type of this value is platform
dependent. Calling `start()` will open a stream with this device.
Afterwards, `getRecentFrame()` can be called to get the most recent
frame from the camera.
"""
    # Default values for class variables; these are read-only and should not
    # be changed at runtime.
_cameraLib = u'Null'
_frameIndex = 0
_lastPTS = 0.0 # presentation timestamp of the last frame
_supportedPlatforms = ['linux', 'windows', 'darwin']
_device = None
_lastFrame = None
_isReady = False # `True` if the camera is 'hot' and yielding frames
def __init__(self, device):
self._device = device
self._mic = None
@staticmethod
def getCameras():
"""Get a list of devices this interface can open.
Returns
-------
list
List of objects which represent cameras that can be opened by this
interface. Pass any of these values to `device` to open a stream.
"""
return []
@property
def device(self):
"""Camera device this interface is using (`Any`).
"""
return self._device
@property
def frameCount(self):
"""Number of new frames read from the camera since initialization
(`int`).
"""
return self._frameCount
@property
def streamTime(self):
"""Current stream time in seconds (`float`). This time increases
monotonically from startup.
"""
return self._streamTime
def lastFrame(self):
"""The last frame read from the camera. If `None`, no frames have been
read yet.
"""
return self._lastFrame
def _assertMediaPlayer(self):
"""Assert that the media player is available.
Returns
-------
bool
`True` if the media player is available.
"""
return False
def open(self):
"""Open the camera stream.
"""
pass
def isOpen(self):
"""Check if the camera stream is open.
Returns
-------
bool
`True` if the camera stream is open.
"""
return False
def enable(self):
"""Enable passing camera frames to the main thread.
"""
pass
def disable(self):
"""Disable passing camera frames to the main thread.
"""
pass
def close(self):
"""Close the camera stream.
"""
pass
def getMetadata(self):
"""Get metadata about the camera stream.
Returns
-------
dict
Dictionary containing metadata about the camera stream. Returns an
empty dictionary if no metadata is available.
"""
return {}
def _enqueueFrame(self):
"""Enqueue a frame from the camera stream.
"""
pass
def update(self):
"""Update the camera stream.
"""
pass
def getRecentFrame(self):
"""Get the most recent frame from the camera stream.
Returns
-------
numpy.ndarray
Most recent frame from the camera stream. Returns `None` if no
frames are available.
"""
return NULL_MOVIE_FRAME_INFO
class CameraInterfaceFFmpeg(CameraInterface):
"""Camera interface using FFmpeg (ffpyplayer) to open and read camera
streams.
Parameters
----------
device : CameraInfo
Camera device to open a stream with. Calling `start()` will open a
stream with this device. Afterwards, `getRecentFrame()` can be called
to get the most recent frame from the camera.
mic : MicrophoneInterface or None
Microphone interface to use for audio recording. If `None`, no audio
recording is performed.
"""
_cameraLib = u'ffpyplayer'
def __init__(self, device, mic=None):
super().__init__(device=device)
self._bufferSecs = 0.5 # number of seconds to buffer
self._cameraInfo = device
self._mic = mic # microphone interface
self._frameQueue = queue.Queue()
self._enableEvent = threading.Event()
self._enableEvent.clear()
self._exitEvent = threading.Event()
self._exitEvent.clear()
self._syncBarrier = None
self._recordBarrier = None # created in `open()`
self._playerThread = None
def _assertMediaPlayer(self):
return self._playerThread is not None
def _getCameraInfo(self):
"""Get camera information in the format expected by FFmpeg.
"""
pass
def getCameras():
"""Get a list of devices this interface can open.
Returns
-------
list
List of objects which represent cameras that can be opened by this
interface. Pass any of these values to `device` to open a stream.
"""
global _cameraGetterFuncTbl
systemName = platform.system() # get the system name
# lookup the function for the given platform
getCamerasFunc = _cameraGetterFuncTbl.get(systemName, None)
if getCamerasFunc is None: # if unsupported
raise OSError(
"Cannot get cameras, unsupported platform '{}'.".format(
systemName))
return getCamerasFunc()
@property
def frameRate(self):
"""Frame rate of the camera stream (`float`).
"""
return self._cameraInfo.frameRate
@property
def frameSize(self):
"""Frame size of the camera stream (`tuple`).
"""
return self._cameraInfo.frameSize
@property
def framesWaiting(self):
"""Get the number of frames currently buffered (`int`).
Returns the number of frames which have been pulled from the stream and
are waiting to be processed. This value is decremented by calls to
`_enqueueFrame()`.
"""
return self._frameQueue.qsize()
def isOpen(self):
"""Check if the camera stream is open (`bool`).
"""
if self._playerThread is not None:
return self._playerThread.is_alive()
return False
def open(self):
"""Open the camera stream and begin decoding frames (if available).
The value of `lastFrame` will be updated as new frames from the camera
arrive.
"""
if self._playerThread is not None:
raise RuntimeError('Cannot open `MediaPlayer`, already opened.')
self._exitEvent.clear() # signal the thread to stop
def _frameGetterAsync(videoCapture, frameQueue, exitEvent, recordEvent,
warmUpBarrier, recordingBarrier, audioCapture):
"""Get frames from the camera stream asynchronously.
Parameters
----------
videoCapture : ffpyplayer.player.MediaPlayer
FFmpeg media player object. This object will be under direct
control of this function.
frameQueue : queue.Queue
Queue to put frames into. The queue has an unlimited size, so
be careful with memory use. This queue should be flushed when
camera thread is paused.
exitEvent : threading.Event
Event used to signal the thread to stop.
recordEvent : threading.Event
Event used to signal the thread to pass frames along to the main
thread.
warmUpBarrier : threading.Barrier
                Barrier used to hold the thread until camera capture is ready.
recordingBarrier : threading.Barrier
Barrier which is used to synchronize audio and video recording.
This ensures that the audio device is ready before buffering
frames captured by the camera.
audioCapture : psychopy.sound.Microphone or None
Microphone object to use for audio capture. This will be used to
synchronize the audio and video streams. If `None`, no audio
will be captured.
"""
# warmup the stream, wait for metadata
ptsStart = 0.0 # may be used in the future
while True:
frame, val = videoCapture.get_frame()
if frame is not None:
ptsStart = videoCapture.get_pts()
break
time.sleep(0.001)
# if we have a valid frame, determine the polling rate
metadata = videoCapture.get_metadata()
numer, divisor = metadata['frame_rate']
# poll interval is half the frame period, this makes sure we don't
# miss frames while not wasting CPU cycles
pollInterval = (1.0 / float(numer / divisor)) * 0.5
# holds main-thread execution until its ready for frames
# frameQueue.put((frame, val, metadata)) # put the first frame
warmUpBarrier.wait() # wait for main thread to be ready
# start capturing frames in background thread
isRecording = False
lastAbsTime = -1.0 # presentation timestamp of the last frame
while not exitEvent.is_set(): # quit if signaled
# pull a frame from the stream, we keep this running 'hot' so
# that we don't miss frames, we just discard them if we don't
# need them
frame, val = videoCapture.get_frame(force_refresh=False)
if val == 'eof': # thread should exit if stream is done
break
elif val == 'paused':
continue
elif frame is None:
continue
else:
# don't queue frames unless they are newer than the last
if isRecording:
thisFrameAbsTime = videoCapture.get_pts()
if lastAbsTime < thisFrameAbsTime:
frameQueue.put((frame, val, metadata))
lastAbsTime = thisFrameAbsTime
if recordEvent.is_set() and not isRecording:
if audioCapture is not None:
audioCapture.start(waitForStart=1)
recordingBarrier.wait()
isRecording = True
elif not recordEvent.is_set() and isRecording:
if audioCapture is not None:
audioCapture.stop(blockUntilStopped=1)
recordingBarrier.wait()
isRecording = False
if not isRecording:
time.sleep(pollInterval)
continue
if audioCapture is not None:
if audioCapture.isRecording:
audioCapture.poll()
time.sleep(pollInterval)
videoCapture.close_player()
if audioCapture is not None:
audioCapture.stop(blockUntilStopped=1)
# thread is dead when we get here
# configure the camera stream reader
ff_opts = {} # ffmpeg options
lib_opts = {} # ffpyplayer options
_camera = CAMERA_NULL_VALUE
_frameRate = CAMERA_NULL_VALUE
_cameraInfo = self._cameraInfo
# setup commands for FFMPEG
if _cameraInfo.cameraAPI == CAMERA_API_DIRECTSHOW: # windows
ff_opts['f'] = 'dshow'
_camera = 'video={}'.format(_cameraInfo.name)
_frameRate = _cameraInfo.frameRate
if _cameraInfo.pixelFormat:
ff_opts['pixel_format'] = _cameraInfo.pixelFormat
if _cameraInfo.codecFormat:
ff_opts['vcodec'] = _cameraInfo.codecFormat
elif _cameraInfo.cameraAPI == CAMERA_API_AVFOUNDATION: # darwin
ff_opts['f'] = 'avfoundation'
ff_opts['i'] = _camera = self._cameraInfo.name
# handle pixel formats using FourCC
global pixelFormatTbl
ffmpegPixFmt = pixelFormatTbl.get(_cameraInfo.pixelFormat, None)
if ffmpegPixFmt is None:
raise FormatNotFoundError(
"Cannot find suitable FFMPEG pixel format for '{}'. Try a "
"different format or camera.".format(
_cameraInfo.pixelFormat))
_cameraInfo.pixelFormat = ffmpegPixFmt
# this needs to be exactly specified if using NTSC
if math.isclose(CAMERA_FRAMERATE_NTSC, _cameraInfo.frameRate):
_frameRate = CAMERA_FRAMERATE_NOMINAL_NTSC
else:
_frameRate = str(_cameraInfo.frameRate)
# need these since hardware acceleration is not possible on Mac yet
lib_opts['fflags'] = 'nobuffer'
lib_opts['flags'] = 'low_delay'
lib_opts['pixel_format'] = _cameraInfo.pixelFormat
ff_opts['framedrop'] = True
ff_opts['fast'] = True
elif _cameraInfo.cameraAPI == CAMERA_API_VIDEO4LINUX2:
raise OSError(
"Sorry, camera does not support Linux at this time. However, "
"it will in future versions.")
else:
raise RuntimeError("Unsupported camera API specified.")
# set library options
camWidth = _cameraInfo.frameSize[0]
camHeight = _cameraInfo.frameSize[1]
# configure the real-time buffer size
_bufferSize = camWidth * camHeight * 3 * self._bufferSecs
# common settings across libraries
lib_opts['rtbufsize'] = str(int(_bufferSize))
lib_opts['video_size'] = _cameraInfo.frameSizeAsFormattedString()
lib_opts['framerate'] = str(_frameRate)
self._warmupBarrier = threading.Barrier(2)
self._recordBarrier = threading.Barrier(2)
# open the media player
from ffpyplayer.player import MediaPlayer
cap = MediaPlayer(_camera, ff_opts=ff_opts, lib_opts=lib_opts)
# open a stream thread and pause wait until ready
self._playerThread = threading.Thread(
target=_frameGetterAsync,
args=(cap,
self._frameQueue,
self._exitEvent,
self._enableEvent,
self._warmupBarrier,
self._recordBarrier,
self._mic))
self._playerThread.daemon=True
self._playerThread.start()
self._warmupBarrier.wait()
# pass off the player to the thread which will process the stream
self._enqueueFrame() # pull metadata from first frame
def _enqueueFrame(self):
"""Grab the latest frame from the stream.
Returns
-------
bool
`True` if a frame has been enqueued. Returns `False` if the camera
has not acquired a new frame yet.
"""
self._assertMediaPlayer()
try:
frameData = self._frameQueue.get_nowait()
except queue.Empty:
return False
frame, val, metadata = frameData # update the frame
if val == CAMERA_STATUS_EOF: # handle end of stream
return False
elif val == CAMERA_STATUS_PAUSED: # handle when paused
return False
elif frame is None: # handle when no frame is available
return False
frameImage, pts = frame # otherwise, unpack the frame
# if we have a new frame, update the frame information
videoBuffer = frameImage.to_bytearray()[0]
videoFrameArray = np.frombuffer(videoBuffer, dtype=np.uint8)
# provide the last frame
self._lastFrame = MovieFrame(
frameIndex=self._frameIndex,
absTime=pts,
# displayTime=self._recentMetadata['frame_size'],
size=frameImage.get_size(),
colorData=videoFrameArray,
audioChannels=0,
audioSamples=None,
metadata=metadata,
movieLib=self._cameraLib,
userData=None)
return True
def close(self):
"""Close the camera stream and release resources.
This blocks until the camera stream thread is no longer alive.
"""
if self._playerThread is None:
raise RuntimeError('Cannot close `MediaPlayer`, already closed.')
self._exitEvent.set() # signal the thread to stop
self._playerThread.join() # wait for the thread to stop
self._playerThread = None
@property
def isEnabled(self):
"""`True` if the camera is enabled.
"""
return self._enableEvent.is_set()
def enable(self, state=True):
"""Start passing frames to the frame queue.
        This method returns once the video and audio streams have both started
        or stopped recording.
Parameters
----------
state : bool
`True` to enable recording frames to the queue, `False` to disable.
On state change, the audio interface will be started or stopped.
"""
if state:
self._enableEvent.set()
else:
self._enableEvent.clear()
self._recordBarrier.wait()
self._enqueueFrame()
def disable(self):
"""Stop passing frames to the frame queue.
Calling this is equivalent to calling `enable(False)`.
"""
self.enable(False)
def getFrames(self):
"""Get all frames from the camera stream which are waiting to be
processed.
Returns
-------
list
List of `MovieFrame` objects. The most recent frame is the last one
in the list.
"""
self._assertMediaPlayer()
frames = []
while self._enqueueFrame():
frames.append(self._lastFrame)
return frames
def getRecentFrame(self):
"""Get the most recent frame captured from the camera, discarding all
others.
Returns
-------
MovieFrame
The most recent frame from the stream.
"""
while self._enqueueFrame():
pass
return self._lastFrame
class CameraInterfaceOpenCV(CameraInterface):
"""Camera interface using OpenCV to open and read camera streams.
Parameters
----------
device : int
Camera device to open a stream with. This value is platform dependent.
mic : MicrophoneInterface or None
Microphone interface to use for audio recording. If `None`, no audio
recording is performed.
"""
_cameraLib = u'opencv'
def __init__(self, device, mic=None):
super().__init__(device)
try:
import cv2 # just import to check if it's available
except ImportError:
raise ImportError(
"Could not import `cv2`. Please install OpenCV2 to use this "
"camera interface.")
self._cameraInfo = device
self._mic = mic # microphone interface
self._frameQueue = queue.Queue()
self._enableEvent = threading.Event()
self._exitEvent = threading.Event()
self._warmUpBarrier = None
self._recordBarrier = None
def _assertMediaPlayer(self):
"""Assert that the media player thread is running.
"""
return self._playerThread is not None
@staticmethod
def getCameras(maxCameraEnum=16):
"""Get information about available cameras.
OpenCV is not capable of enumerating cameras and getting information
about them. Therefore, we must open a stream with each camera index
and query the information from the stream. This process is quite slow
on systems with many cameras. It's best to run this function once and
save the results for later use if the camera configuration is not
expected to change.
Parameters
----------
maxCameraEnum : int
Maximum number of cameras to check. This is the maximum camera index
to check. For example, if `maxCameraEnum` is 16, then cameras 0-15
will be checked.
Returns
-------
dict
Mapping containing information about each camera. The keys are the
camera index, and the values are `CameraInfo` objects.
"""
import cv2
# recommended camera drivers for each platform
cameraPlatformDrivers = {
'Linux': (cv2.CAP_V4L2, CAMERA_API_VIDEO4LINUX2),
'Windows': (cv2.CAP_DSHOW, CAMERA_API_DIRECTSHOW),
'Darwin': (cv2.CAP_AVFOUNDATION, CAMERA_API_AVFOUNDATION)
}
# select the camera interface for the platform
cameraDriver, cameraAPI = cameraPlatformDrivers.get(
platform.system(), (cv2.CAP_ANY, CAMERA_API_ANY))
logging.info(
'Searching for connected cameras, this may take a while...')
cameras = {}
for cameraIndex in range(maxCameraEnum):
# open a camera
thisCamera = cv2.VideoCapture(cameraIndex, cameraDriver)
# if the camera is not opened, we're done
if not thisCamera.isOpened():
break
# get information about camera capabilities
frameRate = thisCamera.get(cv2.CAP_PROP_FPS)
frameSize = (
int(thisCamera.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(thisCamera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
genName = 'camera:{}'.format(cameraIndex)
cameraInfo = CameraInfo(
index=cameraIndex,
name=genName,
frameSize=frameSize or (-1, -1),
frameRate=frameRate or -1.0,
pixelFormat='bgr24', # always BGR with 8 bpc for OpenCV
cameraLib=CameraInterfaceOpenCV._cameraLib,
cameraAPI=cameraAPI
)
cameras.update({genName: [cameraInfo]})
thisCamera.release()
logging.info('Found {} cameras.'.format(len(cameras)))
return cameras
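    # Enumeration opens a live capture on every index it probes, so as the
    # docstring suggests, call this once and reuse the result:
    #     cams = CameraInterfaceOpenCV.getCameras()  # slow scan
    #     firstCam = cams['camera:0'][0]             # a CameraInfo instance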
@property
def framesWaiting(self):
"""Get the number of frames currently buffered (`int`).
Returns the number of frames which have been pulled from the stream and
are waiting to be processed. This value is decremented by calls to
`_enqueueFrame()`.
"""
return self._frameQueue.qsize()
@property
def frameRate(self):
"""Get the frame rate of the camera stream (`float`).
"""
if self._cameraInfo is None:
return -1.0
return self._cameraInfo.frameRate
@property
def frameSize(self):
"""Get the frame size of the camera stream (`tuple`).
"""
if self._cameraInfo is None:
return (-1, -1)
return self._cameraInfo.frameSize
def isOpen(self):
"""Check if the camera stream is open (`bool`).
"""
if self._playerThread is not None:
return self._playerThread.is_alive()
return False
def open(self):
"""Open the camera stream and start reading frames using OpenCV2.
"""
import cv2
def _frameGetterAsync(videoCapture, frameQueue, exitEvent, recordEvent,
warmUpBarrier, recordingBarrier, audioCapture):
"""Get frames asynchronously from the camera stream.
Parameters
----------
videoCapture : cv2.VideoCapture
Handle for the video capture object. This is opened outside the
thread and passed in.
frameQueue : queue.Queue
Queue to store frames in.
exitEvent : threading.Event
Event to signal when the thread should stop.
recordEvent : threading.Event
Event used to signal the thread to pass frames along to the main
thread.
warmUpBarrier : threading.Barrier
                Barrier used to hold the thread until camera capture is ready.
recordingBarrier : threading.Barrier
Barrier which is used to synchronize audio and video recording.
This ensures that the audio device is ready before buffering
frames captured by the camera.
audioCapture : psychopy.sound.Microphone or None
Microphone object to use for audio capture. This will be used to
synchronize the audio and video streams. If `None`, no audio
will be captured.
"""
# poll interval is half the frame period, this makes sure we don't
# miss frames while not wasting CPU cycles
# fps = videoCapture.get(cv2.CAP_PROP_FPS)
# if fps > 0.0:
# pollInterval = (1.0 / fps) * 0.5
# else:
# pollInterval = 1 / 60.0
# if the camera is opened, wait until the main thread is ready to
# take frames
warmUpBarrier.wait()
# start capturing frames
isRecording = False
while not exitEvent.is_set():
# Capture frame-by-frame
ret, frame = videoCapture.read()
# if frame is read correctly ret is True
if not ret: # eol or something else
# val = 'eof'
break
else:
# don't queue frames unless they are newer than the last
if isRecording:
# color conversion is done in the thread here
colorData = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# colorData = frame
frameQueue.put((colorData, 0.0, None))
# check if we should start or stop recording
if recordEvent.is_set() and not isRecording:
if audioCapture is not None:
audioCapture.start(waitForStart=1)
recordingBarrier.wait()
isRecording = True
elif not recordEvent.is_set() and isRecording:
if audioCapture is not None:
audioCapture.stop(blockUntilStopped=1)
recordingBarrier.wait()
isRecording = False
if not isRecording:
# time.sleep(pollInterval)
continue
if audioCapture is not None:
if audioCapture.isRecording:
audioCapture.poll()
# when everything done, release the capture device
videoCapture.release()
if audioCapture is not None: # stop audio capture
audioCapture.stop(blockUntilStopped=1)
# thread is dead if we get here
# barriers used for synchronizing
parties = 2 # main + recording threads
self._warmUpBarrier = threading.Barrier(parties) # camera is ready
self._recordBarrier = threading.Barrier(parties) # audio/video is ready
# drivers for the given camera API
cameraDrivers = {
CAMERA_API_ANY: cv2.CAP_ANY,
CAMERA_API_VIDEO4LINUX2: cv2.CAP_V4L2,
CAMERA_API_DIRECTSHOW: cv2.CAP_DSHOW,
CAMERA_API_AVFOUNDATION: cv2.CAP_AVFOUNDATION
}
_cameraInfo = self._cameraInfo
# create the camera capture object, we keep this internal to the thread
# so that we can control when it is released
cap = cv2.VideoCapture(
_cameraInfo.index,
cameraDrivers[_cameraInfo.cameraAPI])
# check if the camera is opened
if not cap.isOpened():
raise RuntimeError("Cannot open camera using `cv2`")
# if the user didn't specify a frame rate or size, use the defaults
# pulled from the camera
usingDefaults = False
if _cameraInfo.frameRate is None:
_cameraInfo.frameRate = cap.get(cv2.CAP_PROP_FPS)
usingDefaults = True
if _cameraInfo.frameSize is None:
_cameraInfo.frameSize = (
int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
usingDefaults = True
if not usingDefaults:
# set frame rate and size and check if they were set correctly
cap.set(cv2.CAP_PROP_FPS, _cameraInfo.frameRate)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, _cameraInfo.frameSize[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, _cameraInfo.frameSize[1])
if cap.get(cv2.CAP_PROP_FPS) != _cameraInfo.frameRate:
raise CameraFormatNotSupportedError(
"Unsupported frame rate (%s), try %s instead." % (
_cameraInfo.frameRate, cap.get(cv2.CAP_PROP_FPS)))
frameSizeMismatch = (
cap.get(cv2.CAP_PROP_FRAME_WIDTH) != _cameraInfo.frameSize[0] or
cap.get(cv2.CAP_PROP_FRAME_HEIGHT) != _cameraInfo.frameSize[1])
if frameSizeMismatch:
raise CameraFormatNotSupportedError(
"Unsupported frame size: %s" % str(_cameraInfo.frameSize))
# open a stream and pause it until ready
self._playerThread = threading.Thread(
target=_frameGetterAsync,
args=(cap,
self._frameQueue,
self._exitEvent,
self._enableEvent,
self._warmUpBarrier,
self._recordBarrier,
self._mic))
self._playerThread.daemon=True
self._playerThread.start()
self._warmUpBarrier.wait() # wait until the camera is ready
# pass off the player to the thread which will process the stream
self._enqueueFrame() # pull metadata from first frame
def _enqueueFrame(self):
"""Grab the latest frame from the stream.
Returns
-------
bool
`True` if a frame has been enqueued. Returns `False` if the camera
has not acquired a new frame yet.
"""
self._assertMediaPlayer()
try:
frameData = self._frameQueue.get_nowait()
except queue.Empty:
return False
frame, val, _ = frameData # update the frame
if val == 'eof': # handle end of stream
return False
elif val == 'paused': # handle when paused, not used for OpenCV yet
return False
elif frame is None: # handle when no frame is available
return False
frameImage = frame # otherwise, unpack the frame
# if we have a new frame, update the frame information
# videoBuffer = frameImage.to_bytearray()[0]
videoFrameArray = np.ascontiguousarray(
frameImage.flatten(), dtype=np.uint8)
# provide the last frame
self._lastFrame = MovieFrame(
frameIndex=self._frameIndex,
absTime=0.0,
# displayTime=self._recentMetadata['frame_size'],
size=self._cameraInfo.frameSize,
colorFormat='rgb24', # converted in thread
colorData=videoFrameArray,
audioChannels=0,
audioSamples=None,
metadata=None,
movieLib=self._cameraLib,
userData=None)
return True
def close(self):
"""Close the camera stream and release resources.
"""
self._exitEvent.set() # signal the thread to stop
self._playerThread.join() # hold the thread until it stops
self._playerThread = None
@property
def isEnabled(self):
"""`True` if the camera is enabled.
"""
return self._enableEvent.is_set()
def enable(self, state=True):
"""Start passing frames to the frame queue.
        This method returns once the video and audio streams have both started
        or stopped recording. If no audio stream is being recorded, this method
        returns more quickly.
Parameters
----------
state : bool
`True` to enable recording frames to the queue, `False` to disable.
On state change, the audio interface will be started or stopped.
"""
if state:
self._enableEvent.set()
else:
self._enableEvent.clear()
self._recordBarrier.wait()
self._enqueueFrame()
def disable(self):
"""Stop passing frames to the frame queue.
Calling this is equivalent to calling `enable(False)`.
"""
self.enable(False)
def getFrames(self):
"""Get all frames from the camera stream which are waiting to be
processed.
Returns
-------
list
List of `MovieFrame` objects. The most recent frame is the last one
in the list.
"""
self._assertMediaPlayer()
frames = []
while self._enqueueFrame():
frames.append(self._lastFrame)
return frames
def getRecentFrame(self):
"""Get the most recent frame captured from the camera, discarding all
others.
Returns
-------
MovieFrame
The most recent frame from the stream.
"""
while self._enqueueFrame():
pass
return self._lastFrame
# keep track of camera devices that are opened
_openCameras = {}
class Camera:
"""Class for displaying and recording video from a USB/PCI connected camera.
This class is capable of opening, recording, and saving camera video streams
to disk. Camera stream reading/writing is done in a separate thread,
allowing capture to occur in the background while the main thread is free to
perform other tasks. This allows for capture to occur at higher frame rates
than the display refresh rate. Audio recording is also supported if a
microphone interface is provided, where recording will be synchronized with
the video stream (as best as possible). Video and audio can be saved to disk
either as a single file or as separate files.
GNU/Linux is supported only by the OpenCV backend (`cameraLib='opencv'`).
Parameters
----------
device : str or int
Camera to open a stream with. If the ID is not valid, an error will be
raised when `open()` is called. Value can be a string or number. String
values are platform-dependent: a DirectShow URI or camera name on
Windows, or a camera name/index on MacOS. Specifying a number (>=0) is a
platform-independent means of selecting a camera. PsychoPy enumerates
possible camera devices and makes them selectable without explicitly
having the name of the cameras attached to the system. Use caution when
specifying an integer, as the same index may not reference the same
camera every time.
mic : :class:`~psychopy.sound.microphone.Microphone` or None
Microphone to record audio samples from during recording. The microphone
input device must not be in use when `record()` is called. The audio
track will be merged with the video upon calling `save()`. Make sure
that `Microphone.maxRecordingSize` is specified to a reasonable value to
prevent the audio track from being truncated. Specifying a microphone
adds some latency to starting and stopping camera recording due to the
added overhead involved with synchronizing the audio and video streams.
frameRate : int or None
Frame rate to record the camera stream at. If `None`, the camera's
default frame rate will be used.
frameSize : tuple or None
Size (width, height) of the camera stream frames to record. If `None`,
the camera's default frame size will be used.
cameraLib : str
Interface library (backend) to use for accessing the camera. May either
be `ffpyplayer` or `opencv`. If `None`, the library recommended by the
PsychoPy developers will be used. Switching camera
libraries could help resolve issues with camera compatibility. More
camera libraries may be installed via extension packages.
bufferSecs : float
Size of the real-time camera stream buffer specified in seconds (only
valid on Windows and MacOS). This is not the same as the recording
buffer size. This option might not be available for all camera
libraries.
win : :class:`~psychopy.visual.Window` or None
Optional window associated with this camera. Some functionality may
require an OpenGL context for presenting frames to the screen. If you
are not planning to display the camera stream, this parameter can be
safely ignored.
name : str
Label for the camera for logging purposes.
Examples
--------
Opening a camera stream and closing it::
camera = Camera(device=0)
camera.open() # exception here on invalid camera
camera.close()
Recording 5 seconds of video and saving it to disk::
cam = Camera(0)
cam.open()
cam.record() # starts recording
while cam.recordingTime < 5.0: # record for 5 seconds
if event.getKeys('q'):
break
cam.update()
cam.stop() # stops recording
cam.save('myVideo.mp4')
cam.close()
Providing a microphone as follows enables audio recording::
mic = Microphone(0)
cam = Camera(0, mic=mic)
Overriding the default frame rate and size (if `cameraLib` supports it)::
cam = Camera(0, frameRate=30, frameSize=(640, 480), cameraLib=u'opencv')
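Polling the camera for the most recent frame during a trial (a sketch;
assumes the stream has been opened with `open()`)::

    cam.update()  # pull the newest data from the capture thread
    frame = cam.getVideoFrame()  # most recent `MovieFrame`
    if frame is not None:
        pass  # do something with the frame here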
"""
def __init__(self, device=0, mic=None, cameraLib=u'ffpyplayer',
frameRate=None, frameSize=None, bufferSecs=4, win=None,
name='cam'):
# add attributes for setters
self.__dict__.update(
{'_device': None,
'_captureThread': None,
'_mic': None,
'_outFile': None,
'_mode': u'video',
'_frameRate': None,
'_frameRateFrac': None,
'_size': None,
'_cameraLib': u'',
'_recentMetadata': None})  # avoids AttributeError in `getMetadata()`
# ----------------------------------------------------------------------
# Process camera settings
#
# camera library in use
self._cameraLib = cameraLib
if self._cameraLib == u'opencv':
if device in (None, "None", "none", "Default", "default"):
device = 0 # use the first enumerated camera
# handle all possible input for `frameRate` and `frameSize`
if frameRate is None:
pass # no change
elif isinstance(frameRate, str):
if frameRate in ("None", "none", "Default", "default"):
frameRate = None
elif frameRate.lower() == 'ntsc':
frameRate = CAMERA_FRAMERATE_NTSC
else:
try: # try and convert to float
frameRate = float(frameRate)
except ValueError:
raise ValueError(
"`frameRate` must be a number, string or None")
# catch the value converted to float and process it
if isinstance(frameRate, (int, float)):
if frameRate <= 0:
raise ValueError("`frameRate` must be a positive number")
if frameSize is None:
pass # use the camera default
elif isinstance(frameSize, str):
if frameSize in ("None", "none", "Default", "default"):
frameSize = None
elif len(frameSize.split('x')) == 2:
frameSize = tuple(map(int, frameSize.split('x')))
elif frameSize.upper() in movietools.VIDEO_RESOLUTIONS.keys():
frameSize = movietools.VIDEO_RESOLUTIONS[frameSize.upper()]
else:
raise ValueError("`frameSize` specified incorrectly")
elif isinstance(frameSize, (tuple, list)):
if len(frameSize) != 2:
raise ValueError("`frameSize` must be a 2-tuple or 2-list")
frameSize = tuple(map(int, frameSize))
else:
raise ValueError("`frameSize` specified incorrectly")
# recommended camera drivers for each platform
cameraPlatformDrivers = {
'Linux': CAMERA_API_VIDEO4LINUX2,
'Windows': CAMERA_API_DIRECTSHOW,
'Darwin': CAMERA_API_AVFOUNDATION
}
# get the recommended camera driver for the current platform
cameraAPI = cameraPlatformDrivers[platform.system()]
self._cameraInfo = CameraInfo(
index=device,
frameRate=frameRate, # dummy value
frameSize=frameSize, # dummy value
pixelFormat='bgr24',
cameraLib=cameraLib,
cameraAPI=cameraAPI)
self._device = self._cameraInfo.description()
elif self._cameraLib == u'ffpyplayer':
supportedCameraSettings = CameraInterfaceFFmpeg.getCameras()
# create a mapping of supported camera formats
_formatMapping = dict()
for _, formats in supportedCameraSettings.items():
for _format in formats:
desc = _format.description()
_formatMapping[desc] = _format
# sort formats by resolution then frame rate
orderedFormats = list(_formatMapping.values())
orderedFormats.sort(key=lambda obj: obj.frameRate, reverse=True)
orderedFormats.sort(key=lambda obj: np.prod(obj.frameSize),
reverse=True)
# list of devices
devList = list(_formatMapping)
if not devList: # no cameras found if list is empty
raise CameraNotFoundError('No cameras found on the system!')
# Get best device
bestDevice = _formatMapping[devList[-1]]
for mode in orderedFormats:
sameFrameRate = mode.frameRate == frameRate or frameRate is None
sameFrameSize = mode.frameSize == frameSize or frameSize is None
if sameFrameRate and sameFrameSize:
bestDevice = mode
break
# if given just device name, use frameRate and frameSize to match it
# to a mode
if device in supportedCameraSettings:
match = None
for mode in supportedCameraSettings[device]:
sameFrameRate = \
mode.frameRate == frameRate or frameRate is None
sameFrameSize = \
mode.frameSize == frameSize or frameSize is None
if sameFrameRate and sameFrameSize:
match = mode
if match is not None:
device = match
else:
# if no match found, find closest
byWidth = sorted(
supportedCameraSettings[device],
key=lambda mode: abs(frameSize[0] - mode.frameSize[0])
)
byHeight = sorted(
supportedCameraSettings[device],
key=lambda mode: abs(frameSize[1] - mode.frameSize[1])
)
byFrameRate = sorted(
supportedCameraSettings[device],
key=lambda mode: abs(frameRate - mode.frameRate) if frameRate else 0
)
deltas = [
byWidth.index(mode) + byHeight.index(mode) + byFrameRate.index(mode)
for mode in supportedCameraSettings[device]
]
i = deltas.index(min(deltas))
closest = supportedCameraSettings[device][i]
# log warning that settings won't match requested
logging.warn(_translate(
"Device {device} does not support frame rate of "
"{frameRate} and frame size of {frameSize}, using "
"closest supported format: {desc}"
).format(device=device,
frameRate=frameRate,
frameSize=frameSize,
desc=closest.description()))
# use closest
device = closest
# self._origDevSpecifier = device # what the user provided
self._device = None # device identifier
# alias device None or Default to the best available device
if device in (None, "None", "none", "Default", "default"):
self._device = bestDevice.description()
elif isinstance(device, CameraInfo):
if self._cameraLib != device.cameraLib:
raise CameraFormatNotSupportedError(
'Wrong configuration for camera library!')
self._device = device.description()
else:
# resolve getting the camera identifier
if isinstance(device, int): # get camera if integer
try:
self._device = devList[device]
except IndexError:
raise CameraNotFoundError(
'Cannot find camera at index={}'.format(device))
elif isinstance(device, str):
self._device = device
else:
raise TypeError(
f"Incorrect type for `camera`, expected `int` or `str` but received {repr(device)}")
# get the camera information
if self._device in _formatMapping:
self._cameraInfo = _formatMapping[self._device]
else:
# raise error if couldn't find matching camera info
raise CameraFormatNotSupportedError(
f'Specified camera format {repr(self._device)} is not supported.')
# # operating mode
# if mode not in (CAMERA_MODE_VIDEO, CAMERA_MODE_CV, CAMERA_MODE_PHOTO):
# raise ValueError(
# "Invalid value for parameter `mode`, expected one of `'video'` "
# "`'cv'` or `'photo'`.")
# self._mode = mode
_requestedMic = mic
# if not given a Microphone or MicrophoneDevice, get it from DeviceManager
if mic is not None and not isinstance(mic, (Microphone, MicrophoneDevice)):
    mic = DeviceManager.getDevice(mic)
    # if not known by name, try the originally requested value as an index
    if mic is None:
        mic = DeviceManager.getDeviceBy(
            "index", _requestedMic, deviceClass="microphone")
    # if not known by name or index, raise error
    if mic is None:
        raise SystemError(f"Could not find microphone {_requestedMic}")
# media player instance and current recording state
self._player = None # media player instance
self.status = NOT_STARTED
self._isRecording = False
self._bufferSecs = float(bufferSecs)
self._lastFrame = None # use None to avoid imports for ImageStim
# microphone instance, this is controlled by the camera interface and
# is not meant to be used by the user
self.mic = mic
# other information
self.name = name
# timestamp data
self._streamTime = 0.0
# store win (unused but needs to be set/got safely for parity with JS)
self.win = win
# movie writer instance, this runs in a separate thread
self._movieWriter = None
# if we begin receiving frames, change this flag to `True`
self._captureThread = None
# self._audioThread = None
self._captureFrames = [] # array for storing frames
# thread for polling the microphone
self._audioTrack = None # audio track from the recent recording
# used to sync threads spawned by this class, created on `open()`
self._syncBarrier = None
# keep track of the last video file saved
self._lastVideoFile = None
def authorize(self):
"""Get permission to access the camera. Not implemented locally yet.
"""
pass # NOP
@property
def isReady(self):
"""Is the camera ready (`bool`)?
The camera is ready when the following conditions are met. First, we've
created a player interface and opened it. Second, we have received
metadata about the stream. At this point we can assume that the camera
is 'hot' and the stream is being read.
This is a legacy property used to support older versions of PsychoPy.
The `isOpened` property should be used instead.
"""
return self.isStarted
@property
def frameSize(self):
"""Size of the video frame obtained from recent metadata (`float` or
`None`).
Only valid after an `open()` and subsequent `_enqueueFrame()` call as
metadata needs to be obtained from the stream. Returns `None` if not
valid.
"""
if self._cameraInfo is None:
return None
return self._cameraInfo.frameSize
@property
def frameRate(self):
"""Frame rate of the video stream (`float` or `None`).
Only valid after an `open()` and subsequent `_enqueueFrame()` call as
metadata needs to be obtained from the stream. Returns `None` if not
valid.
"""
if self._cameraInfo is None:
return None
return self._cameraInfo.frameRate
def _assertCameraReady(self):
"""Assert that the camera is ready. Raises a `CameraNotReadyError` if
the camera is not ready.
"""
if not self.isReady:
raise CameraNotReadyError("Camera is not ready.")
@property
def isRecording(self):
"""`True` if the video is presently recording (`bool`)."""
# Status flags as properties are pretty useful for users since they are
# self documenting and prevent the user from touching the status flag
# attribute directly.
#
return self._isRecording
@property
def isStarted(self):
"""`True` if the stream has started (`bool`). This status is given after
`open()` has been called on this object.
"""
if self._captureThread is None:
return False
return self._captureThread.isOpen()
@property
def isNotStarted(self):
"""`True` if the stream may not have started yet (`bool`). This status
is given before `open()` or after `close()` has been called on this
object.
"""
return not self.isStarted
@property
def isStopped(self):
"""`True` if the recording has stopped (`bool`). This does not mean that
the stream has stopped, `getVideoFrame()` will still yield frames until
`close()` is called.
"""
return not self._isRecording
@property
def metadata(self):
"""Video metadata retrieved during the last frame update
(`MovieMetadata`).
"""
return self.getMetadata()
def getMetadata(self):
"""Get stream metadata.
Returns
-------
MovieMetadata
Metadata about the video stream, retrieved during the last frame
update (`_enqueueFrame` call).
"""
return self._recentMetadata
# @property
# def mode(self):
# """Operating mode in use for this camera.
# """
# return self._mode
_getCamerasCache = {}
@staticmethod
def getCameras(cameraLib=None):
"""Get information about installed cameras on this system.
Returns
-------
dict
Mapping of camera information objects.
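Examples
--------
Listing cameras available through the FFmpeg backend (a sketch)::

    cameras = Camera.getCameras(cameraLib='ffpyplayer')
    for name, formats in cameras.items():
        print(name, [fmt.description() for fmt in formats])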
"""
# not pluggable yet, needs to be made available via extensions
if cameraLib == 'opencv':
if 'opencv' not in Camera._getCamerasCache:
Camera._getCamerasCache['opencv'] = \
CameraInterfaceOpenCV.getCameras()
return Camera._getCamerasCache['opencv']
elif cameraLib == 'ffpyplayer':
if 'ffpyplayer' not in Camera._getCamerasCache:
Camera._getCamerasCache['ffpyplayer'] = \
CameraInterfaceFFmpeg.getCameras()
return Camera._getCamerasCache['ffpyplayer']
else:
raise ValueError("Invalid value for parameter `cameraLib`")
@staticmethod
def getAvailableDevices():
devices = []
for dev in st.getCameras().values():  # iterate over format lists, not keys
for spec in dev:
devices.append({
'device': spec['index'],
'frameRate': spec['frameRate'],
'frameSize': spec['frameSize'],
})
return devices
@staticmethod
def getCameraDescriptions(collapse=False):
"""Get a mapping or list of camera descriptions.
Camera descriptions are a compact way of representing camera settings
and formats. Description strings can be used to specify which camera
device and format to use with it to the `Camera` class.
Descriptions have the following format (example)::
'[Live! Cam Sync 1080p] 160x120@30fps, mjpeg'
This shows a specific camera format for the 'Live! Cam Sync 1080p'
webcam which supports 160x120 frame size at 30 frames per second. The
last value is the codec or pixel format used to decode the stream.
Different pixel formats and codecs vary in performance.
Parameters
----------
collapse : bool
Return camera information as string descriptions instead of
`CameraInfo` objects. This provides a more compact way of
representing camera formats in a (reasonably) human-readable format.
Returns
-------
dict or list
Mapping (`dict`) of camera descriptions, where keys are camera names
(`str`) and values are a `list` of format description strings
associated with the camera. If `collapse=True`, all descriptions
will be returned in a single flat list. This might be more useful
for specifying camera formats from a single GUI list control.
"""
return getCameraDescriptions(collapse=collapse)
@property
def device(self):
"""Camera to use (`str` or `None`).
String specifying the name of the camera to open a stream with. This
must be set prior to calling `start()`. If the name is not valid, an
error will be raised when `start()` is called.
"""
return self._device
@device.setter
def device(self, value):
if value in (None, "None", "none", "Default", "default"):
value = 0
self._device = value
@property
def _hasPlayer(self):
"""`True` if we have an active media player instance.
"""
# deprecated - remove in future versions and use `isStarted` instead
return self.isStarted
@property
def mic(self):
"""Microphone to record audio samples from during recording
(:class:`~psychopy.sound.microphone.Microphone` or `None`).
If `None`, no audio will be recorded. Cannot be set after opening a
camera stream.
"""
return self._mic
@mic.setter
def mic(self, value):
if self.isStarted:
raise CameraError("Cannot set microphone after starting camera.")
self._mic = value
@property
def _hasAudio(self):
"""`True` if we have a microphone object for audio recording.
"""
return self._mic is not None
@property
def win(self):
"""Window which frames are being presented (`psychopy.visual.Window` or
`None`).
"""
return self._win
@win.setter
def win(self, value):
self._win = value
@property
def frameCount(self):
"""Number of frames captured in the present recording (`int`).
"""
if not self._isRecording:
return 0
totalFramesBuffered = (
len(self._captureFrames) + self._captureThread.framesWaiting)
return totalFramesBuffered
@property
def streamTime(self):
"""Current stream time in seconds (`float`). This time increases
monotonically from startup.
This is `-1.0` if there is no active stream running or if the backend
does not support this feature.
"""
if self.isStarted and hasattr(self._captureThread, "streamTime"):
return self._captureThread.streamTime
return -1.0
@property
def recordingTime(self):
"""Current recording timestamp (`float`).
This returns the timestamp of the last frame captured in the recording.
This value increases monotonically from the last `record()` call. It
will reset once `stop()` is called. This value is invalid outside
`record()` and `stop()` calls.
"""
if not self._isRecording:
return 0.0
frameInterval = 1.0 / float(self._captureThread.frameRate)
return self.frameCount * frameInterval
@property
def recordingBytes(self):
"""Current size of the recording in bytes (`int`).
"""
if not self._isRecording:
return 0
return self._captureThread.recordingBytes
def _assertMediaPlayer(self):
"""Assert that we have a media player instance open.
This will raise a `RuntimeError` if there is no player open. Use this
function to ensure that a player is present before running subsequent
code.
"""
if self._captureThread is not None:
return
raise PlayerNotAvailableError('Media player not initialized.')
def _enqueueFrame(self):
"""Pull waiting frames from the capture thread.
This function will pull frames from the capture thread and add them to
the buffer. The last frame in the buffer will be set as the most recent
frame (`lastFrame`).
Returns
-------
bool
`True` if a frame has been enqueued. Returns `False` if the camera
is not ready or if the stream was closed.
"""
if self._captureThread is None:
return False
newFrames = self._captureThread.getFrames()
if not newFrames:
return False
# add frames to the buffer
self._captureFrames.extend(newFrames)
# set the last frame in the buffer as the most recent
self._lastFrame = self._captureFrames[-1]
return True
def open(self):
"""Open the camera stream and begin decoding frames (if available).
This function returns when the camera is ready to start getting
frames.
Call `record()` to start recording frames to memory. Captured frames
can be saved to disk using `save()`.
"""
if self._hasPlayer:
raise RuntimeError('Cannot open `MediaPlayer`, already opened.')
# Camera interface to use, these are hard coded but support for each is
# provided by an extension.
desc = self._cameraInfo.description()
if self._cameraLib == u'ffpyplayer':
logging.debug(
"Opening camera stream using FFmpeg. (device={})".format(desc))
self._captureThread = CameraInterfaceFFmpeg(
device=self._cameraInfo,
mic=self._mic)
elif self._cameraLib == u'opencv':
logging.debug(
"Opening camera stream using OpenCV. (device={})".format(desc))
self._captureThread = CameraInterfaceOpenCV(
device=self._cameraInfo,
mic=self._mic)
else:
raise ValueError(
"Invalid value for parameter `cameraLib`, expected one of "
"`'ffpyplayer'` or `'opencv'`.")
self._captureThread.open()
# def snapshot(self):
# """Take a photo with the camera. The camera must be in `'photo'` mode
# to use this method.
# """
# pass
def record(self):
"""Start recording frames.
This function will start recording frames and audio (if available). The
value of `lastFrame` will be updated as new frames arrive and the
`frameCount` will increase. You can access image data for the most
recent frame to be captured using `lastFrame`.
If this is called before `open()` the camera stream will be opened
automatically. This is not recommended as it may incur a longer than
expected delay in the recording start time.
Warnings
--------
If a recording has been previously made without calling `save()` it will
be discarded if `record()` is called again.
"""
if self.isNotStarted:
self.open() # open the camera stream if we call record() first
logging.warning(
"Called `Camera.record()` before opening the camera stream, "
"opening now. This is not recommended as it may incur a longer "
"than expected delay in the recording start time."
)
self._audioTrack = None
self._lastFrame = None
# start recording audio if available
if self._mic is not None:
logging.debug(
"Microphone interface available, starting audio recording.")
else:
logging.debug(
"No microphone interface provided, not recording audio.")
self._captureThread.enable() # start passing frames to queue
self._enqueueFrame()
self._isRecording = True
def stop(self):
"""Stop recording frames and audio (if available).
"""
if self._captureThread is None: # do nothing if not open
return
if not self._captureThread.isOpen():
raise RuntimeError("Cannot stop recording, stream is not open.")
self._captureThread.disable() # stop passing frames to queue
self._enqueueFrame()
# stop audio recording if `mic` is available
if self._mic is not None:
self._audioTrack = self._mic.getRecording()
self._isRecording = False
def close(self):
"""Close the camera.
This will close the camera stream and free up any resources used by the
device. If the camera is currently recording, this will stop the
recording, but will not discard any frames. You may still call `save()`
to save the frames to disk.
"""
if self._captureThread is None: # nop
return
if not self._captureThread.isOpen():
raise RuntimeError("Cannot close stream, stream is not open.")
if self._isRecording:
logging.warning(
"Closing camera stream while recording, stopping recording "
"first.")
self.stop()
self._captureThread.close()
self._captureThread = None
def save(self, filename, useThreads=True, mergeAudio=True,
encoderLib=None, encoderOpts=None):
"""Save the last recording to file.
This will write frames to `filename` acquired since the last call of
`record()` and subsequent `stop()`. If `record()` is called again before
`save()`, the previous recording will be deleted and lost.
This is a slow operation and will block for some time depending on the
length of the video. This can be sped up by setting `useThreads=True`.
Parameters
----------
filename : str
File to save the resulting video to, should include the extension.
useThreads : bool
Use threading where possible to speed up the saving process. If
`True`, the video will be saved and composited in a separate thread
and this function will return quickly. If `False`, the video will
be saved and composited in the main thread and this function will
block until the video is saved. Default is `True`.
mergeAudio : bool
Merge the audio track from the microphone with the video. If `True`,
the audio track will be merged with the video. If `False`, the
audio track will be saved to a separate file. Default is `True`.
encoderLib : str or None
Encoder library to use for saving the video. This can be either
`'ffpyplayer'` or `'opencv'`. If `None`, the same library that was
used to open the camera stream will be used. Default is `None`.
encoderOpts : dict
Options to pass to the encoder. This is a dictionary of options
specific to the encoder library being used. See the documentation
for `~psychopy.tools.movietools.MovieFileWriter` for more details.
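Examples
--------
Saving the last recording with the audio track merged in (a sketch)::

    cam.stop()
    cam.save('myVideo.mp4', mergeAudio=True)
    print(cam.getLastClip())  # path of the file just written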
"""
if self._isRecording:
raise RuntimeError(
"Attempting to call `save()` before calling `stop()`.")
# check if a file exists at the given path, if so, delete it
if os.path.exists(filename):
msg = (
"Video file '{}' already exists, overwriting.".format(filename))
logging.warning(msg)
os.remove(filename)
# determine the `encoderLib` to use
if encoderLib is None:
encoderLib = self._cameraLib
logging.debug(
"Using encoder library '{}' to save video.".format(encoderLib))
# check if the encoder library name string is valid
if encoderLib not in ('ffpyplayer', 'opencv'):
raise ValueError(
"Invalid value for parameter `encoderLib`, expected one of "
"`'ffpyplayer'` or `'opencv'`.")
# check if we have an audio track to save
hasAudio = self._audioTrack is not None
# create a temporary file names for the video and audio
if hasAudio:
if mergeAudio:
tempPrefix = (uuid.uuid4().hex)[:16] # 16 char prefix
videoFileName = "{}_video.mp4".format(tempPrefix)
audioFileName = "{}_audio.wav".format(tempPrefix)
else:
videoFileName = audioFileName = filename
audioFileName += '.wav'
else:
videoFileName = filename
audioFileName = None
# make sure filenames are absolute paths
videoFileName = os.path.abspath(videoFileName)
if audioFileName is not None:
audioFileName = os.path.abspath(audioFileName)
# flush outstanding frames from the camera queue
self._enqueueFrame()
# create a writer for the video stream; this file contains no audio yet
logging.debug("Saving video to file: {}".format(videoFileName))
self._movieWriter = movietools.MovieFileWriter(
filename=videoFileName,
size=self._cameraInfo.frameSize, # match camera params
fps=self._cameraInfo.frameRate,
codec=None, # mp4
pixelFormat='rgb24',
encoderLib=encoderLib,
encoderOpts=encoderOpts)
self._movieWriter.open() # blocks main thread until opened and ready
# flush remaining frames to the writer thread, this is really fast since
# frames are not copied and don't require much conversion
for frame in self._captureFrames:
self._movieWriter.addFrame(frame.colorData)
# push all frames to the queue for the movie recorder
self._movieWriter.close() # thread-safe call
self._movieWriter = None
# save audio track if available
if hasAudio:
logging.debug(
"Saving audio track to file: {}".format(audioFileName))
self._audioTrack.save(audioFileName, 'wav')
# merge audio and video tracks
if mergeAudio:
logging.debug("Merging audio and video tracks.")
movietools.addAudioToMovie(
filename, # file after merging
videoFileName,
audioFileName,
useThreads=useThreads,
removeFiles=True)  # remove the temporary files after merging
self._lastVideoFile = filename # remember the last video we saved
def _upload(self):
"""Upload video file to an online repository. Not implemented locally,
needed for auto translate to JS.
"""
pass # NOP
def _download(self):
"""Download video file to an online repository. Not implemented locally,
needed for auto translate to JS.
"""
pass # NOP
@property
def lastClip(self):
"""File path to the last recording (`str` or `None`).
This value is only valid if a previous recording has been saved
successfully (`save()` was called), otherwise it will be set to `None`.
"""
return self.getLastClip()
def getLastClip(self):
"""File path to the last saved recording.
This value is only valid if a previous recording has been saved to disk
(`save()` was called).
Returns
-------
str or None
Path to the file the most recent call to `save()` created. Returns
`None` if no file is ready.
"""
return self._lastVideoFile
@property
def lastFrame(self):
"""Most recent frame pulled from the camera (`VideoFrame`) since the
last call of `getVideoFrame`.
"""
return self._lastFrame
def update(self):
"""Acquire the newest data from the camera stream. If the `Camera`
object is not being monitored by an `ImageStim`, this must be explicitly
called.
"""
self._assertMediaPlayer()
self._enqueueFrame()
def getVideoFrame(self):
"""Pull the most recent frame from the stream (if available).
Returns
-------
MovieFrame
Most recent video frame. Returns `NULL_MOVIE_FRAME_INFO` if no
frame was available, or we timed out.
"""
self.update()
return self._lastFrame
def __del__(self):
"""Try to cleanly close the camera and output file.
"""
if hasattr(self, '_captureThread'):
if self._captureThread is not None:
try:
self._captureThread.close()
except AttributeError:
pass
# close the microphone during teardown too
if hasattr(self, '_mic'):
if self._mic is not None:
try:
self._mic.close()
except AttributeError:
pass
DeviceManager.registerClassAlias("camera", "psychopy.hardware.camera.Camera")
# ------------------------------------------------------------------------------
# Functions
#
def _getCameraInfoMacOS():
"""Get a list of capabilities associated with a camera attached to the
system.
This is used by `getCameraInfo()` for querying camera details on MacOS.
Don't call this function directly unless testing.
Returns
-------
list of CameraInfo
List of camera descriptors.
"""
if platform.system() != 'Darwin':
raise OSError(
"Cannot query cameras with this function, platform not 'Darwin'.")
# import objc # may be needed in the future for more advanced stuff
import AVFoundation as avf # only works on MacOS
import CoreMedia as cm
# get a list of capture devices
allDevices = avf.AVCaptureDevice.devices()
# get video devices
videoDevices = {}
devIdx = 0
for device in allDevices:
devFormats = device.formats()
if devFormats[0].mediaType() != 'vide': # not a video device
continue
# camera details
cameraName = device.localizedName()
# found video formats
supportedFormats = []
for _format in devFormats:
# get the format description object
formatDesc = _format.formatDescription()
# get dimensions in pixels of the video format
dimensions = cm.CMVideoFormatDescriptionGetDimensions(formatDesc)
frameHeight = dimensions.height
frameWidth = dimensions.width
# Extract the codec in use, pretty useless since FFMPEG uses its
# own conventions, we'll need to map these ourselves to those
# values
codecType = cm.CMFormatDescriptionGetMediaSubType(formatDesc)
# Convert codec code to a FourCC code using the following byte
# operations.
#
# fourCC = ((codecCode >> 24) & 0xff,
# (codecCode >> 16) & 0xff,
# (codecCode >> 8) & 0xff,
# codecCode & 0xff)
#
pixelFormat4CC = ''.join(
[chr((codecType >> bits) & 0xff) for bits in (24, 16, 8, 0)])
# Get the range of supported framerate, use the largest since the
# ranges are rarely variable within a format.
frameRateRange = _format.videoSupportedFrameRateRanges()[0]
frameRateMax = frameRateRange.maxFrameRate()
# frameRateMin = frameRateRange.minFrameRate() # don't use for now
# Create a new camera descriptor
thisCamInfo = CameraInfo(
index=devIdx,
name=cameraName,
pixelFormat=pixelFormat4CC, # macs only use pixel format
codecFormat=CAMERA_NULL_VALUE,
frameSize=(int(frameWidth), int(frameHeight)),
frameRate=frameRateMax,
cameraAPI=CAMERA_API_AVFOUNDATION,
cameraLib="ffpyplayer",
)
supportedFormats.append(thisCamInfo)
devIdx += 1
# add to output dictionary
videoDevices[cameraName] = supportedFormats
return videoDevices
def _getCameraInfoWindows():
"""Get a list of capabilities for the specified associated with a camera
attached to the system.
This is used by `getCameraInfo()` for querying camera details on Windows.
Don't call this function directly unless testing.
Returns
-------
list of CameraInfo
List of camera descriptors.
"""
if platform.system() != 'Windows':
raise OSError(
"Cannot query cameras with this function, platform not 'Windows'.")
# FFPyPlayer can query the OS via DirectShow for Windows cameras
from ffpyplayer.tools import list_dshow_devices
videoDevs, _, names = list_dshow_devices()
# get all the supported modes for the camera
videoDevices = {}
# iterate over names
devIndex = 0
for devURI in videoDevs.keys():
supportedFormats = []
cameraName = names[devURI]
for _format in videoDevs[devURI]:
pixelFormat, codecFormat, frameSize, frameRateRng = _format
_, frameRateMax = frameRateRng
temp = CameraInfo(
index=devIndex,
name=cameraName,
pixelFormat=pixelFormat,
codecFormat=codecFormat,
frameSize=frameSize,
frameRate=frameRateMax,
cameraAPI=CAMERA_API_DIRECTSHOW,
cameraLib="ffpyplayer",
)
supportedFormats.append(temp)
devIndex += 1
videoDevices[names[devURI]] = supportedFormats
return videoDevices
# Mapping for platform specific camera getter functions used by `getCameras`.
_cameraGetterFuncTbl = {
'Darwin': _getCameraInfoMacOS,
'Windows': _getCameraInfoWindows
}
def getCameras():
"""Get information about installed cameras and their formats on this system.
Use `getCameraDescriptions` to get a mapping or list of human-readable
camera formats.
Returns
-------
dict
Mapping where camera names (`str`) are keys and values are an array of
`CameraInfo` objects.
"""
systemName = platform.system() # get the system name
# lookup the function for the given platform
getCamerasFunc = _cameraGetterFuncTbl.get(systemName, None)
if getCamerasFunc is None: # if unsupported
raise OSError(
"Cannot get cameras, unsupported platform '{}'.".format(
systemName))
return getCamerasFunc()
def getCameraDescriptions(collapse=False):
"""Get a mapping or list of camera descriptions.
Camera descriptions are a compact way of representing camera settings and
formats. Description strings can be used to specify which camera device and
format to use with it to the `Camera` class.
Descriptions have the following format (example)::
'[Live! Cam Sync 1080p] 160x120@30fps, mjpeg'
This shows a specific camera format for the 'Live! Cam Sync 1080p' webcam
which supports 160x120 frame size at 30 frames per second. The last value
is the codec or pixel format used to decode the stream. Different pixel
formats and codecs vary in performance.
Parameters
----------
collapse : bool
Return camera information as string descriptions instead of `CameraInfo`
objects. This provides a more compact way of representing camera formats
in a (reasonably) human-readable format.
Returns
-------
dict or list
Mapping (`dict`) of camera descriptions, where keys are camera names
(`str`) and values are a `list` of format description strings associated
with the camera. If `collapse=True`, all descriptions will be returned
in a single flat list. This might be more useful for specifying camera
formats from a single GUI list control.
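Examples
--------
Printing all camera format descriptions as a flat list (a sketch)::

    for desc in getCameraDescriptions(collapse=True):
        print(desc)  # e.g. '[Live! Cam Sync 1080p] 160x120@30fps, mjpeg'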
"""
connectedCameras = getCameras()
cameraDescriptions = {}
for devName, formats in connectedCameras.items():
cameraDescriptions[devName] = [
_format.description() for _format in formats]
if not collapse:
return cameraDescriptions
# collapse to a list if requested
collapsedList = []
for _, formatDescs in cameraDescriptions.items():
collapsedList.extend(formatDescs)
return collapsedList
def getFormatsForDevice(device):
"""Get a list of formats available for the given device.
Parameters
----------
device : str or int
Name or index of the device
Returns
-------
list
List of formats, specified as strings in the format
`{width}x{height}@{frame rate}fps`
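Examples
--------
Querying the formats for a camera by name (a sketch; the device name
shown is hypothetical)::

    formats = getFormatsForDevice('Live! Cam Sync 1080p')
    # e.g. ['640x480@30fps', '1280x720@30fps']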
"""
# get all devices
connectedCameras = getCameras()
# get formats for this device
formats = connectedCameras.get(device, [])
# sanitize
formats = [f"{_format.frameSize[0]}x{_format.frameSize[1]}@{_format.frameRate}fps" for _format in formats]
return formats
def getAllCameraInterfaces():
"""Get a list of all camera interfaces supported by the system.
Returns
-------
dict
Mapping of camera interface class names and references to the class.
"""
# get all classes in this module
classes = inspect.getmembers(sys.modules[__name__], inspect.isclass)
# filter for classes that are camera interfaces
cameraInterfaces = {}
for name, cls in classes:
if issubclass(cls, CameraInterface):
cameraInterfaces[name] = cls
return cameraInterfaces
def getOpenCameras():
"""Get a list of all open cameras.
Returns
-------
list
List of references to open camera objects.
"""
global _openCameras
return _openCameras.copy()
def closeAllOpenCameras():
"""Close all open cameras.
This closes all open cameras and releases any resources associated with
them. This should only be called before exiting the application or after you
are done using the cameras.
This is automatically called when the application exits to cleanly free up
resources, as it is registered with `atexit` when the module is imported.
Returns
-------
int
Number of cameras closed. Useful for debugging to ensure all cameras
were closed.
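Examples
--------
Cleaning up at the end of a session (a sketch)::

    nClosed = closeAllOpenCameras()
    print('Closed {} camera(s).'.format(nClosed))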
"""
global _openCameras
numCameras = len(_openCameras)
for cam in _openCameras.values():  # iterate over camera objects, not keys
cam.close()
_openCameras.clear()
return numCameras
def renderVideo(outputFile, videoFile, audioFile=None, removeFiles=False):
"""Render a video.
Combine visual and audio streams into a single movie file. This is used
mainly for compositing video and audio data for the camera. Video and audio
should have roughly the same duration.
This is a legacy function used originally for compositing video and audio
data from the camera. It is not used anymore internally, but is kept here
for reference and may be removed in the future. If you need to composite
video and audio data, use `movietools.addAudioToMovie` instead.
Parameters
----------
outputFile : str
Filename to write the movie to. Should have the extension of the file
too.
videoFile : str
Video file path.
audioFile : str or None
Audio file path. If not provided the movie file will simply be copied
to `outFile`.
removeFiles : bool
If `True`, the video (`videoFile`) and audio (`audioFile`) files will be
deleted after the movie is rendered.
Returns
-------
int
Size of the resulting file in bytes.
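Examples
--------
Compositing separate video and audio files (a sketch; the file names are
hypothetical)::

    nBytes = renderVideo('out.mp4', 'video.mp4', audioFile='audio.wav')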
"""
# if no audio file, just copy the video file
if audioFile is None:
import shutil
shutil.copyfile(videoFile, outputFile)
if removeFiles:
os.remove(videoFile) # delete the old movie file
return os.path.getsize(outputFile)
# merge video and audio, now using the new `movietools` module
movietools.addAudioToMovie(
    outputFile,  # file after merging
    videoFile,
    audioFile,
    useThreads=False,  # didn't use this before
    removeFiles=removeFiles)
return os.path.getsize(outputFile)
if __name__ == "__main__":
pass
| 96,633 | Python | .py | 2,255 | 32.231929 | 110 | 0.606997 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
6,005 | __init__.py | psychopy_psychopy/psychopy/hardware/photometer/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for using photometers.
This module serves as the entry point for plugin classes implementing
third-party photometer interfaces. All installed interfaces are discoverable
by calling the :func:`getAllPhotometers()` function.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'addPhotometer',
'getAllPhotometers',
'getAllPhotometerClasses'
]
from psychopy.tools.pkgtools import PluginStub
# Special handling for legacy classes which have been offloaded to optional
# packages. This will change to allow more flexibility in the future to avoid
# updating this package for additions to these sub-packages. We'll need a
# photometer type to do that, but for now we're doing it like this.
from psychopy.hardware.crs.colorcal import ColorCAL
from psychopy.hardware.crs.optical import OptiCAL
# Photo Research Inc. spectroradiometers
from psychopy.hardware.pr import PR655, PR650
# Konica Minolta light-measuring devices
from psychopy.hardware.minolta import LS100, CS100A
# Gamma scientific devices
from psychopy.hardware.gammasci import S470
# photometer interfaces will be stored here after being registered
photometerInterfaces = {}
def addPhotometer(cls):
"""Register a photometer interface class.
Once a photometer class is registered, it will be discoverable when
:func:`getAllPhotometers()` is called. This function is also used by the
plugin interface to add new interfaces at runtime.
This function will automatically overwrite any interface registered with
the same `driverFor` name.
Parameters
----------
cls : Any
Class specifying a photometer interface.
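Examples
--------
Registering a minimal interface (a sketch; `MyPhotometer` is a
hypothetical class)::

    class MyPhotometer:
        driverFor = ['mp100', 'mp200']  # model names this class drives

    addPhotometer(MyPhotometer)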
"""
global photometerInterfaces
# photometers interfaces are IDed by the model they interface with
if not hasattr(cls, 'driverFor') or cls.driverFor is None:
raise AttributeError(
"Photometer interface class does not define member `driverFor` and "
"cannot be added.")
# add interface references to dictionary
if isinstance(cls.driverFor, (list, tuple)):
# multiple devices sharing the same interface
for devModel in cls.driverFor:
if not isinstance(devModel, str): # items must be all strings
raise TypeError(
"Invalid item type for array `driverFor`. Items must all "
"have type `str`.")
photometerInterfaces[devModel] = cls
elif isinstance(cls.driverFor, str):
devModel = cls.driverFor
photometerInterfaces[devModel] = cls
else:
raise TypeError(
"Invalid type for `driverFor` member specified. Must be either "
"`str`, `tuple` or `list`.")
def getAllPhotometers():
"""Gets all available photometers.
The returned photometers may vary depending on which drivers are installed.
Standalone PsychoPy ships with libraries for all supported photometers.
Returns
-------
dict
A mapping of all photometer classes. Where the keys (`str`) are model
names the interface works with and the values are references to the
unbound interface class associated with it. Keys can have the same value
if the interface is common to multiple devices.
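Examples
--------
Looking up an interface class by model name (a sketch; the available keys
depend on which drivers are installed)::

    photometers = getAllPhotometers()
    PhotometerCls = photometers.get('PR655')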
"""
# Given that we need to preserve legacy namespaces for the time being, we
# need to import supported photometer classes from their extant namespaces.
# In the future, all photometer classes will be identified by possessing a
# common base class and being a member of this module. This is much like
# how Builder components are discovered.
# build a dictionary with names
foundPhotometers = {}
# Classes from extant namespaces. Even though these are optional, we need
# to respect the namespaces for now.
optionalPhotometers = (
'ColorCAL', 'OptiCAL', 'S470', 'PR650', 'PR655', 'LS100', 'CS100A')
incPhotomList = []
for photName in optionalPhotometers:
try:
photClass = globals()[photName]
except KeyError:  # name lookup in `globals()` raises `KeyError`
continue
if issubclass(photClass, PluginStub):
continue
incPhotomList.append(photClass)
# iterate over all classes and register them as if they were plugins
for photom in incPhotomList:
addPhotometer(photom)
# Merge with classes from plugins. Duplicate names will be overwritten by
# the plugins.
foundPhotometers.update(photometerInterfaces)
return foundPhotometers.copy()
def getAllPhotometerClasses():
"""Get unique photometer interface classes presently available.
This is used to preserve compatibility with the legacy
:func:`~psychopy.hardware.getAllPhotometers()` function call.
Returns
-------
list
Discovered unique photometer classes.
"""
# iterate over known photometers
photometers = getAllPhotometers()
if not photometers: # do nothing if no photometers found
return []
interfaceIDs = []  # store unique IDs for interfaces
# Remove items that are duplicated, i.e. multiple IDs that have a common
# interface.
knownInterfaces = []
for cls in photometers.values():
clsID = id(cls)
if clsID in interfaceIDs: # already added
continue
interfaceIDs.append(clsID)
knownInterfaces.append(cls)
return knownInterfaces
if __name__ == "__main__":
pass
| 5,626 | Python | .py | 129 | 37.596899 | 80 | 0.717112 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
6,006 | __init__.py | psychopy_psychopy/psychopy/hardware/bbtk/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Interfaces for Black Box Toolkit Ltd. devices.
These are optional components that can be obtained by installing the
`psychopy-bbtk` extension into the current environment.
"""
import psychopy.logging as logging
try:
from psychopy_bbtk import BlackBoxToolkit
except (ModuleNotFoundError, ImportError):
logging.error(
"Support for Black Box Toolkit hardware is not available this session. "
"Please install `psychopy-bbtk` and restart the session to enable "
"support.")
except Exception as e: # misc errors during module loading
logging.error(
"Error encountered while loading `psychopy-bbtk`. Check logs for more "
"information.")
if __name__ == "__main__":
pass
| 957 | Python | .py | 23 | 37.913043 | 80 | 0.734914 | psychopy/psychopy | 1,662 | 900 | 218 | GPL-3.0 | 9/5/2024, 5:09:29 PM (Europe/Amsterdam) |
6,007 | ffpyplayer_player.py | psychopy_psychopy/psychopy/visual/movies/players/ffpyplayer_player.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes for movie player interfaces.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'FFPyPlayer'
]
import sys
from ffpyplayer.player import MediaPlayer # very first thing to import
import time
import psychopy.logging as logging
import math
import numpy as np
import threading
import queue
from psychopy.core import getTime
from ._base import BaseMoviePlayer
from ..metadata import MovieMetadata
from ..frame import MovieFrame, NULL_MOVIE_FRAME_INFO
from psychopy.constants import (
FINISHED, NOT_STARTED, PAUSED, PLAYING, STOPPED, STOPPING, INVALID, SEEKING)
from psychopy.tools.filetools import pathToString
import atexit
# Options that PsychoPy devs picked to provide better performance. These can
# be overridden, but doing so might result in undefined behavior.
DEFAULT_FF_OPTS = {
'sync': 'audio', # sync to audio
'paused': True, # start paused
'autoexit': False, # don't exit ffmpeg automatically
'loop': 0 # 0 == loop indefinitely
}
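# A player would typically be created with these defaults merged with any
# user overrides, e.g. (a sketch; `filename` is hypothetical):
#
#     ffOpts = dict(DEFAULT_FF_OPTS)
#     ffOpts.update({'paused': False})  # override: start playing at once
#     player = MediaPlayer(filename, ff_opts=ffOpts)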
# default queue size for the stream reader
DEFAULT_FRAME_QUEUE_SIZE = 1
# event to close all opened movie reader threads
_evtCleanUpMovieEvent = threading.Event()
_evtCleanUpMovieEvent.clear()
# Cleanup routines for threads. This allows the app to crash gracefully rather
# than locking up on error.
def _closeMovieThreads():
"""Callback function when the application exits which cleans up movie
threads. When this function is called, any outstanding movie threads will
be closed automatically. This must not be called at any other point of the
program.
"""
global _evtCleanUpMovieEvent
_evtCleanUpMovieEvent.set()
atexit.register(_closeMovieThreads) # register the function
class StreamStatus:
"""Descriptor class for stream status.
This class is used to report the current status of the movie stream at the
time the movie frame was obtained.
Parameters
----------
status : int
Status flag for the stream.
streamTime : float
Current stream (movie) time in seconds. Resets after a loop has
completed.
frameIndex : int
Current frame index, increases monotonically as a movie plays and resets
when finished or beginning another loop.
loopCount : int
If looping is enabled, this value increases by 1 each time the movie
loops. Initial value is 0.
"""
__slots__ = ['_status',
'_streamTime',
'_frameIndex',
'_loopCount']
def __init__(self,
status=NOT_STARTED,
streamTime=0.0,
frameIndex=-1,
loopCount=-1):
self._status = int(status)
self._streamTime = float(streamTime)
self._frameIndex = frameIndex
self._loopCount = loopCount
@property
def status(self):
"""Status flag for the stream (`int`).
"""
return self._status
@property
def streamTime(self):
"""Current stream time in seconds (`float`).
This value increases monotonically as the movie plays and resets after
a loop has completed.
"""
return self._streamTime
@property
def frameIndex(self):
"""Current frame in the stream (`float`).
This value increases monotonically as the movie plays. The first frame
has an index of 0.
"""
return self._frameIndex
@property
def loopCount(self):
"""Number of times the movie has looped (`float`).
This value increases monotonically as the movie plays. This is
incremented when the movie finishes.
"""
return self._loopCount
class StreamData:
"""Descriptor class for movie stream data.
Instances of this class are produced by the movie stream reader thread
which contains metadata about the stream, frame image data (i.e. pixel
values), and the stream status.
Parameters
----------
metadata : MovieMetadata
Stream metadata.
frameImage : object
Video frame image data.
streamStatus : StreamStatus
Video stream status.
cameraLib : str
Camera library in use to process the stream.
"""
__slots__ = ['_metadata',
'_frameImage',
'_streamStatus',
'_cameraLib']
def __init__(self, metadata, frameImage, streamStatus, cameraLib):
self._metadata = metadata
self._frameImage = frameImage
self._streamStatus = streamStatus
self._cameraLib = cameraLib
@property
def metadata(self):
"""Stream metadata at the time the video frame was acquired
(`MovieMetadata`).
"""
return self._metadata
@metadata.setter
def metadata(self, value):
if not isinstance(value, MovieMetadata) and value is not None:
raise TypeError("Incorrect type for property `metadata`, expected "
"`MovieMetadata` or `None`.")
self._metadata = value
@property
def frameImage(self):
"""Frame image data from the codec (`ffpyplayer.pic.Image`).
"""
return self._frameImage
@frameImage.setter
def frameImage(self, value):
self._frameImage = value
@property
def streamStatus(self):
"""Stream status (`StreamStatus`).
"""
return self._streamStatus
@streamStatus.setter
def streamStatus(self, value):
if not isinstance(value, StreamStatus) and value is not None:
raise TypeError("Incorrect type for property `streamStatus`, "
"expected `StreamStatus` or `None`.")
self._streamStatus = value
@property
def cameraLib(self):
"""Camera library in use to obtain the stream (`str`). Value is
blank if `metadata` is `None`.
"""
if self._metadata is not None:
return self._metadata.movieLib
return u''
class MovieStreamThreadFFPyPlayer(threading.Thread):
"""Class for reading movie streams asynchronously.
The rate at which frames are read is controlled dynamically based on values
within stream metadata. This will ensure that CPU load is kept to a minimum,
only polling for new frames at the rate they are being made available.
Parameters
----------
player : `ffpyplayer.player.MediaPlayer`
Media player instance, should be configured and initialized. Note that
player instance methods might not be thread-safe after handing off the
object to this thread.
bufferFrames : int
Number of frames to buffer. Sets the frame queue size for the thread.
Use a queue size >1 for video recorded with a framerate above 60Hz.
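Examples
--------
Creating and starting a reader thread for a configured player (a sketch;
`player` is an existing `MediaPlayer`)::

    tStream = MovieStreamThreadFFPyPlayer(player, bufferFrames=4)
    tStream.start()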
"""
def __init__(self, player, bufferFrames=DEFAULT_FRAME_QUEUE_SIZE):
threading.Thread.__init__(self)
# Make this thread daemonic since we don't yet have a way of tracking
# them down. Since we're only reading resources, it's unlikely that
# we'll break or corrupt something. Good practice is to call `stop()`
# before exiting, this thread will join as usual and cleanly free up
# any resources.
self.daemon = True
self._player = player # player interface to FFMPEG
self._frameQueue = queue.Queue(maxsize=bufferFrames)
self._cmdQueue = queue.Queue() # queue for player commands
# some values the user might want
self._status = NOT_STARTED
self._streamTime = 0.0
self._isIdling = False
self._isFinished = False
# Locks for syncing the player and main application thread
self._warmUpLock = threading.Lock()
self._warmUpLock.acquire(blocking=False)
def run(self):
"""Main sub-routine for this thread.
When the thread is running, data about captured frames are put into the
`frameQueue` as `(metadata, img, pts)`. If the queue is empty, that
means the main application thread is running faster than the encoder
can get frames. Recommended behaviour in such cases is to return the
last valid frame when the queue is empty.
"""
global _evtCleanUpMovieEvent
if self._player is None:
return # exit thread if no player
# these should stay within the scope of this subroutine
frameInterval = 0.004 # frame interval, start at 4ms (250Hz)
frameData = None # frame data from the reader
val = '' # status value from reader
statusFlag = NOT_STARTED # status flag for stream reader state
frameIndex = -1 # frame index, 0 == first frame
loopCount = 0 # number of movie loops so far
mustShutdown = False # player thread should shut down
# Subroutines for various player functions -----------------------------
def seekTo(player, ptsTarget, maxAttempts=16):
"""Seek to a position in the video. Return the frame at that
position.
Parameters
----------
player : `MediaPlayer`
Handle to player.
ptsTarget : float
Location in the movie to seek to. Must be a positive number.
maxAttempts : int
Number of attempts to converge.
Returns
-------
tuple
Frame data and value from the `MediaPlayer` after seeking to the
position.
"""
wasPaused = player.get_pause()
player.set_pause(False)
player.set_mute(True)
# issue seek command to the player
player.seek(ptsTarget, relative=False, accurate=True)
# wait until we are at the seek position
n = 0
ptsLast = float(2 ** 32)
while n < maxAttempts: # converge on position
frameData_, val_ = player.get_frame(show=True)
if frameData_ is None:
time.sleep(0.0025)
n += 1
continue
_, pts_ = frameData_
ptsClock = player.get_pts()
# Check if the PTS is the same as the last attempt, if so
# we are likely not going to converge on some other value.
if math.isclose(ptsClock, ptsLast):
break
# If the PTS is different than the last one, check if it's
# close to the target.
if math.isclose(pts_, ptsTarget) and math.isclose(
ptsClock, ptsTarget):
break
ptsLast = ptsClock
n += 1
else:
frameData_, val_ = None, ''
player.set_mute(False)
player.set_pause(wasPaused)
return frameData_, val_
def calcFrameIndex(pts_, frameInterval_):
"""Calculate the frame index from the presentation time stamp and
frame interval.
Parameters
----------
pts_ : float
Presentation timestamp.
frameInterval_ : float
Frame interval of the movie in seconds.
Returns
-------
int
Frame index.
"""
return int(math.floor(pts_ / frameInterval_)) - 1
# ----------------------------------------------------------------------
# Initialization
#
# Warmup the reader and get the first frame, this will be presented when
# the player is first initialized, we should block until this process
# completes using a lock object. To get the first frame we start the
# video, acquire the frame, then seek to the beginning. The frame will
# remain in the queue until accessed. The first frame is important since
# it is needed to configure the texture buffers in the rendering thread.
#
# We need to start playback to access the first frame. This can be done
# "silently" by muting the audio and playing the video for a single
# frame. We then seek back to the beginning and pause the video. This
# will ensure the first frame is presented.
#
self._player.set_mute(True)
self._player.set_pause(False)
# consume frames until we get a valid one, need its metadata
while frameData is None or val == 'not ready':
frameData, val = self._player.get_frame(show=True)
# end of the file? ... at this point? something went wrong ...
if val == 'eof':
break
time.sleep(frameInterval) # sleep a bit to avoid mashing the CPU
# Obtain metadata from the frame now that we have a flowing stream. This
# data is needed by the main thread to process to configure additional
# resources needed to present the video.
metadata = self._player.get_metadata()
# Compute the frame interval that will be used, this is dynamically set
# to reduce the amount of CPU load when obtaining new frames. Aliasing
# may occur sometimes, possibly looking like a frame is being skipped,
# but we're not sure if this actually happens in practice.
frameRate = metadata['frame_rate']
numer, denom = frameRate
try:
frameInterval = 1.0 / (numer / float(denom))
except ZeroDivisionError:
# likely won't happen since we always get a valid frame before
# reaching here, but you never know ...
raise RuntimeError(
"Cannot play movie. Failed to acquire metadata from video "
"stream!")
# Get the movie duration, needed to determine when we get to the end of
# movie. We need to reset some params when there. This is in seconds.
duration = metadata['duration']
# Get color and timestamp data from the returned frame object, this will
# be encapsulated in a `StreamData` object and passed back to the main
# thread with status information.
colorData, pts = frameData
# Build up the object which we'll pass to the application thread. Stream
# status information hold timestamp and playback information.
streamStatus = StreamStatus(
status=statusFlag, # current status flag, should be `NOT_STARTED`
streamTime=pts) # frame timestamp
# Put the frame in the frame queue so the main thread can read access it
# safely. The main thread should hold onto any frame it gets when the
# queue is empty.
if self._frameQueue.full():
raise RuntimeError(
"Movie decoder frame queue is full and it really shouldn't be "
"at this point.")
# Object to pass video frame data back to the application thread for
# presentation or processing.
lastFrame = StreamData(
metadata,
colorData,
streamStatus,
u'ffpyplayer')
# Pass the object to the main thread using the frame queue.
self._frameQueue.put(lastFrame) # put frame data in here
# Rewind back to the beginning of the file, we should have the first
# frame and metadata from the file by now.
self._player.set_pause(True) # start paused
self._player.set_mute(False)
# frameData, val = seekTo(self._player, 0.0)
# set the volume again because it doesn't seem to remember it
self._player.set_volume(self._player.get_volume())
# Release the lock to unblock the parent thread once we have the first
# frame and valid metadata from the stream. After this returns the
# main thread should call `getRecentFrame` to get the frame data.
self._warmUpLock.release()
# ----------------------------------------------------------------------
# Playback
#
# Main playback loop, this will continually pull frames from the stream
# and push them into the frame queue. The user can pause and resume
# playback. Avoid blocking anything outside the use of timers to prevent
# stalling the thread.
#
while 1:
# pull a new frame
frameData, val = self._player.get_frame()
# if no frame, just pause the thread and restart the loop
if val == 'eof': # end of stream/file
self._isFinished = self._isIdling = True
time.sleep(frameInterval)
elif frameData is None or val == 'paused': # paused or not ready
self._isIdling = True
self._isFinished = False
time.sleep(frameInterval)
else: # playing
self._isIdling = self._isFinished = False
colorData, pts = frameData # got a valid frame
# updated last valid frame data
lastFrame = StreamData(
metadata,
colorData,
StreamStatus(
status=statusFlag, # might remove
streamTime=pts,
frameIndex=calcFrameIndex(pts, frameInterval),
loopCount=loopCount),
u'ffpyplayer')
                # if the next frame would reach the end of the movie, count a loop
if pts + frameInterval * 1.5 >= duration:
loopCount += 1 # inc number of loops
if isinstance(val, float):
time.sleep(val) # time to sleep
else:
time.sleep(frameInterval)
# If the queue is full, just discard the frame and get the
# next one to allow us to catch up.
try:
self._frameQueue.put_nowait(lastFrame)
except queue.Full:
pass # do nothing
# ------------------------------------------------------------------
# Process playback controls
#
# Check the command queue for playback commands. Process all
# commands in the queue before progressing. A command is a tuple put
# into the queue where the first value is the op-code and the second
# is the value:
#
# OPCODE, VALUE = COMMAND
#
# The op-code is a string specifying the command to execute, while
# the value can be any object needed to carry out the command.
# Possible opcodes and their values are shown in the table below:
#
# OPCODE | VALUE | DESCRIPTION
# ------------+--------------------+------------------------------
# 'volume' | float (0.0 -> 1.0) | Set the volume
# 'mute' | bool | Enable/disable sound
# 'play' | None | Play a stream
# 'pause' | None | Pause a stream
# 'stop' | None | Pause and restart
# 'seek' | pts, bool | Seek to a movie position
# 'shutdown' | None | Kill the thread
#
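            # For example (hypothetical values), the `seek()` method on the
            # main thread enqueues a command and blocks until it is handled:
            #
            #     self._cmdQueue.put(('seek', (10.0, False)))
            #     self._cmdQueue.join()  # returns after `task_done()` below
            #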
needsWait = False
if not self._cmdQueue.empty():
cmdOpCode, cmdVal = self._cmdQueue.get_nowait()
# process the command
if cmdOpCode == 'volume': # set the volume
self._player.set_volume(float(cmdVal))
needsWait = True
elif cmdOpCode == 'mute':
self._player.set_mute(bool(cmdVal))
needsWait = True
elif cmdOpCode == 'play':
self._player.set_mute(False)
self._player.set_pause(False)
elif cmdOpCode == 'pause':
self._player.set_mute(True)
self._player.set_pause(True)
elif cmdOpCode == 'seek':
seekToPts, seekRel = cmdVal
self._player.seek(
seekToPts,
relative=seekRel,
accurate=True)
time.sleep(0.1) # long wait for seeking
elif cmdOpCode == 'stop': # stop playback, return to start
self._player.set_mute(True)
self._player.seek(
-1.0, # seek to beginning
relative=False,
accurate=True)
self._player.set_pause(True)
loopCount = 0 # reset loop count
time.sleep(0.1)
elif cmdOpCode == 'shutdown': # shutdown the player
mustShutdown = True
# signal to the main thread that the command has been processed
if not mustShutdown:
self._cmdQueue.task_done()
else:
break
# if the command needs some additional processing time
if needsWait:
time.sleep(frameInterval)
# close the player when the thread exits
self._player.close_player()
self._cmdQueue.task_done()
@property
def isFinished(self):
"""Is the movie done playing (`bool`)? This is `True` if the movie
stream is at EOF.
"""
return self._isFinished
@property
def isIdling(self):
"""Is the movie reader thread "idling" (`bool`)? If `True`, the movie is
finished playing and no frames are being polled from FFMPEG.
"""
return self._isIdling
@property
def isReady(self):
"""`True` if the stream reader is ready (`bool`).
"""
return not self._warmUpLock.locked()
def begin(self):
"""Call this to start the thread and begin reading frames. This will
block until we get a valid frame.
"""
self.start() # start the thread, will begin decoding frames
# hold until the lock is released when the thread gets a valid frame
        # this will prevent the main loop from executing until we're ready
self._warmUpLock.acquire(blocking=True)
def play(self):
"""Start playing the video from the stream.
"""
cmd = ('play', None)
self._cmdQueue.put(cmd)
self._cmdQueue.join()
def pause(self):
"""Stop recording frames to the output file.
"""
cmd = ('pause', None)
self._cmdQueue.put(cmd)
self._cmdQueue.join()
def seek(self, pts, relative=False):
"""Seek to a position in the video.
"""
cmd = ('seek', (pts, relative))
self._cmdQueue.put(cmd)
self._cmdQueue.join()
def stop(self):
"""Stop playback, reset the movie to the beginning.
"""
cmd = ('stop', None)
self._cmdQueue.put(cmd)
self._cmdQueue.join()
def shutdown(self):
"""Shutdown the movie reader thread.
"""
cmd = ('shutdown', None)
self._cmdQueue.put(cmd)
self._cmdQueue.join()
def isDone(self):
"""Check if the video is done playing.
Returns
-------
bool
Is the video done?
"""
return not self.is_alive()
def getVolume(self):
"""Get the current volume level."""
if self._player is not None:
return self._player.get_volume()
return 0.0
def setVolume(self, volume):
"""Set the volume for the video.
Parameters
----------
volume : float
New volume level, ranging between 0 and 1.
"""
cmd = ('volume', volume)
self._cmdQueue.put(cmd)
self._cmdQueue.join()
def setMute(self, mute):
"""Set the volume for the video.
Parameters
----------
mute : bool
Mute state. If `True`, audio will be muted.
"""
cmd = ('mute', mute)
self._cmdQueue.put(cmd)
self._cmdQueue.join()
def getRecentFrame(self):
"""Get the most recent frame data from the feed (`tuple`).
Returns
-------
tuple or None
Frame data formatted as `(metadata, frameData, val)`. The `metadata`
is a `dict`, `frameData` is a `tuple` with format (`colorData`,
`pts`) and `val` is a `str` returned by the
`MediaPlayer.get_frame()` method. Returns `None` if there is no
frame data.
"""
if self._frameQueue.empty():
return None
# hold only last frame and return that instead of None?
return self._frameQueue.get()
class FFPyPlayer(BaseMoviePlayer):
"""Interface class for the FFPyPlayer library for use with `MovieStim`.
This class also serves as the reference implementation for classes which
interface with movie codec libraries for use with `MovieStim`. Creating new
player classes which closely replicate the behaviour of this one should
allow them to smoothly plug into `MovieStim`.
"""
_movieLib = 'ffpyplayer'
def __init__(self, parent):
self._filename = u""
self.parent = parent
# handle to `ffpyplayer`
self._handle = None
# thread for reading frames asynchronously
self._tStream = None
# data from stream thread
self._lastFrame = NULL_MOVIE_FRAME_INFO
self._frameIndex = -1
self._loopCount = 0
self._metadata = None # metadata from the stream
self._lastPlayerOpts = DEFAULT_FF_OPTS.copy()
self._lastPlayerOpts['out_fmt'] = 'bgra'
# options from the parent
if self.parent.loop: # infinite loop
self._lastPlayerOpts['loop'] = 0
else:
self._lastPlayerOpts['loop'] = 1 # play once
if hasattr(self.parent, '_noAudio'):
self._lastPlayerOpts['an'] = self.parent._noAudio
# status flags
self._status = NOT_STARTED
def start(self, log=True):
"""Initialize and start the decoder. This method will return when a
valid frame is made available.
"""
# clear queued data from previous streams
self._lastFrame = None
self._frameIndex = -1
# open the media player
handle = MediaPlayer(self._filename, ff_opts=self._lastPlayerOpts)
handle.set_pause(True)
# Pull the first frame to get metadata. NB - `_enqueueFrame` should be
# able to do this but the logic in there depends on having access to
# metadata first. That may be rewritten at some point to reduce all of
        # this to just a single `_enqueueFrame` call.
#
self._status = NOT_STARTED
# hand off the player interface to the thread
self._tStream = MovieStreamThreadFFPyPlayer(handle)
self._tStream.begin()
# make sure we have metadata
self.update()
def load(self, pathToMovie):
"""Load a movie file from disk.
Parameters
----------
pathToMovie : str
Path to movie file, stream (URI) or camera. Must be a format that
FFMPEG supports.
"""
# set the file path
self._filename = pathToString(pathToMovie)
# Check if the player is already started. Close it and load a new
# instance if so.
if self._tStream is not None: # player already started
# make sure it's the correct type
# if not isinstance(self._handle, MediaPlayer):
# raise TypeError(
# 'Incorrect type for `FFMovieStim._player`, expected '
# '`ffpyplayer.player.MediaPlayer`. Got type `{}` '
# 'instead.'.format(type(self._handle).__name__))
# close the player and reset
self.unload()
# self._selectWindow(self.win) # free buffers here !!!
self.start()
self._status = NOT_STARTED
def unload(self):
"""Unload the video stream and reset.
"""
self._tStream.shutdown()
self._tStream.join() # wait until thread exits
self._tStream = None
# if self._handle is not None:
# self._handle.close_player()
# self._handle = None # reset
self._filename = u""
self._frameIndex = -1
self._handle = None # reset
# @property
# def handle(self):
# """Handle to the `MediaPlayer` object exposed by FFPyPlayer. If `None`,
# no media player object has yet been initialized.
# """
# return self._handle
@property
def isLoaded(self):
return self._tStream is not None
@property
def metadata(self):
"""Most recent metadata (`MovieMetadata`).
"""
return self.getMetadata()
def getMetadata(self):
"""Get metadata from the movie stream.
Returns
-------
MovieMetadata
Movie metadata object. If no movie is loaded, `NULL_MOVIE_METADATA`
is returned. At a minimum, fields `duration`, `size`, and
`frameRate` are populated if a valid movie has been previously
loaded.
"""
self._assertMediaPlayer()
metadata = self._metadata
# write metadata to the fields of a `MovieMetadata` object
toReturn = MovieMetadata(
mediaPath=self._filename,
title=metadata['title'],
duration=metadata['duration'],
frameRate=metadata['frame_rate'],
size=metadata['src_vid_size'],
pixelFormat=metadata['src_pix_fmt'],
movieLib=self._movieLib,
userData=None
)
return toReturn
def _assertMediaPlayer(self):
"""Ensure the media player instance is available. Raises a
`RuntimeError` if no movie is loaded.
"""
if self._tStream is not None:
return # nop if we're good
raise RuntimeError(
"Calling this class method requires a successful call to "
"`load` first.")
@property
def status(self):
"""Player status flag (`int`).
"""
return self._status
@property
def isPlaying(self):
"""`True` if the video is presently playing (`bool`)."""
# Status flags as properties are pretty useful for users since they are
# self documenting and prevent the user from touching the status flag
# attribute directly.
#
return self.status == PLAYING
@property
def isNotStarted(self):
"""`True` if the video has not be started yet (`bool`). This status is
given after a video is loaded and play has yet to be called.
"""
return self.status == NOT_STARTED
@property
def isStopped(self):
"""`True` if the movie has been stopped.
"""
return self.status == STOPPED
@property
def isPaused(self):
"""`True` if the movie has been paused.
"""
self._assertMediaPlayer()
return self._status == PAUSED
@property
def isFinished(self):
"""`True` if the video is finished (`bool`).
"""
        # distinct from `STOPPED`; set once the stream reaches the end of file
return self._status == FINISHED
def play(self, log=False):
"""Start or continue a paused movie from current position.
Parameters
----------
log : bool
Log the play event.
Returns
-------
int or None
Frame index playback started at. Should always be `0` if starting at
the beginning of the video. Returns `None` if the player has not
been initialized.
"""
self._assertMediaPlayer()
self._tStream.play()
self._status = PLAYING
def stop(self, log=False):
"""Stop the current point in the movie (sound will stop, current frame
will not advance). Once stopped the movie cannot be restarted - it must
be loaded again.
Use `pause()` instead if you may need to restart the movie.
Parameters
----------
log : bool
Log the stop event.
"""
self._tStream.stop()
self._status = STOPPED
def pause(self, log=False):
"""Pause the current point in the movie. The image of the last frame
will persist on-screen until `play()` or `stop()` are called.
Parameters
----------
log : bool
Log this event.
"""
self._assertMediaPlayer()
self._tStream.pause()
self._enqueueFrame()
self._status = PAUSED
return False
def seek(self, timestamp, log=False):
"""Seek to a particular timestamp in the movie.
Parameters
----------
timestamp : float
Time in seconds.
log : bool
Log the seek event.
"""
self._assertMediaPlayer()
self._tStream.seek(timestamp, relative=False)
self._enqueueFrame()
def rewind(self, seconds=5, log=False):
"""Rewind the video.
Parameters
----------
seconds : float
Time in seconds to rewind from the current position. Default is 5
seconds.
log : bool
Log this event.
Returns
-------
float
Timestamp after rewinding the video.
"""
self._assertMediaPlayer()
self._tStream.seek(-seconds, relative=True)
def fastForward(self, seconds=5, log=False):
"""Fast-forward the video.
Parameters
----------
seconds : float
Time in seconds to fast forward from the current position. Default
is 5 seconds.
log : bool
Log this event.
"""
self._assertMediaPlayer()
self._tStream.seek(seconds, relative=True)
def replay(self, autoStart=False, log=False):
"""Replay the movie from the beginning.
Parameters
----------
autoStart : bool
Start playback immediately. If `False`, you must call `play()`
afterwards to initiate playback.
log : bool
Log this event.
"""
self._assertMediaPlayer()
self.pause(log=log)
self.seek(0.0, log=log)
if autoStart:
self.play(log=log)
def restart(self, autoStart=True, log=False):
"""Restart the movie from the beginning.
Parameters
----------
autoStart : bool
Start playback immediately. If `False`, you must call `play()`
afterwards to initiate playback.
log : bool
Log this event.
Notes
-----
* This tears down the current media player instance and creates a new
one. Similar to calling `stop()` and `loadMovie()`. Use `seek(0.0)` if
you would like to restart the movie without reloading.
"""
        lastMovieFile = self._filename
        self.load(lastMovieFile)
        if autoStart:
            self.play(log=log)
# --------------------------------------------------------------------------
# Audio stream control methods
#
# @property
# def muted(self):
# """`True` if the stream audio is muted (`bool`).
# """
# return self._handle.get_mute() # thread-safe?
#
# @muted.setter
# def muted(self, value):
# self._tStream.setMute(value)
def volumeUp(self, amount):
"""Increase the volume by a fixed amount.
Parameters
----------
amount : float or int
Amount to increase the volume relative to the current volume.
"""
self._assertMediaPlayer()
# get the current volume from the player
self.volume = self.volume + amount
return self.volume
def volumeDown(self, amount):
"""Decrease the volume by a fixed amount.
Parameters
----------
amount : float or int
Amount to decrease the volume relative to the current volume.
"""
self._assertMediaPlayer()
# get the current volume from the player
self.volume = self.volume - amount
return self.volume
@property
def volume(self):
"""Volume for the audio track for this movie (`int` or `float`).
"""
self._assertMediaPlayer()
return self._tStream.getVolume()
@volume.setter
def volume(self, value):
self._assertMediaPlayer()
self._tStream.setVolume(max(min(value, 1.0), 0.0))
@property
def loopCount(self):
"""Number of loops completed since playback started (`int`). This value
is reset when either `stop` or `loadMovie` is called.
"""
return self._loopCount
# --------------------------------------------------------------------------
# Timing related methods
#
# The methods here are used to handle timing, such as converting between
# movie and experiment timestamps.
#
@property
def pts(self):
"""Presentation timestamp for the current movie frame in seconds
(`float`).
The value for this either comes from the decoder or some other time
source. This should be synchronized to the start of the audio track. A
value of `-1.0` is invalid.
"""
if self._tStream is None:
return -1.0
return self._lastFrame.absTime
def getStartAbsTime(self):
"""Get the absolute experiment time in seconds the movie starts at
(`float`).
This value reflects the time which the movie would have started if
played continuously from the start. Seeking and pausing the movie causes
this value to change.
Returns
-------
float
Start time of the movie in absolute experiment time.
"""
self._assertMediaPlayer()
return getTime() - self._lastFrame.absTime
def movieToAbsTime(self, movieTime):
"""Convert a movie timestamp to absolute experiment timestamp.
Parameters
----------
movieTime : float
Movie timestamp to convert to absolute experiment time.
Returns
-------
float
Timestamp in experiment time which is coincident with the provided
`movieTime` timestamp. The returned value should usually be precise
down to about five decimal places.
"""
self._assertMediaPlayer()
# type checks on parameters
        if not isinstance(movieTime, (int, float)):
raise TypeError(
"Value for parameter `movieTime` must have type `float` or "
"`int`.")
return self.getStartAbsTime() + movieTime
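    # Worked example (sketch): if the current frame's `absTime` is 10.0 s,
    # `getStartAbsTime()` equals `getTime() - 10.0`, so `movieToAbsTime(12.5)`
    # maps to `getTime() + 2.5` in experiment time.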
def absToMovieTime(self, absTime):
"""Convert absolute experiment timestamp to a movie timestamp.
Parameters
----------
absTime : float
Absolute experiment time to convert to movie time.
Returns
-------
float
Movie time referenced to absolute experiment time. If the value is
            negative then the provided `absTime` happens before the beginning of the
movie from the current time stamp. The returned value should usually
be precise down to about five decimal places.
"""
self._assertMediaPlayer()
# type checks on parameters
        if not isinstance(absTime, (int, float)):
raise TypeError(
"Value for parameter `absTime` must have type `float` or "
"`int`.")
return absTime - self.getStartAbsTime()
def movieTimeFromFrameIndex(self, frameIdx):
"""Get the movie time a specific a frame with a given index is
scheduled to be presented.
This is used to handle logic for seeking through a video feed (if
permitted by the player).
Parameters
----------
frameIdx : int
Frame index. Negative values are accepted but they will return
negative timestamps.
"""
self._assertMediaPlayer()
        return frameIdx * self.metadata.frameInterval
def frameIndexFromMovieTime(self, movieTime):
"""Get the frame index of a given movie time.
Parameters
----------
movieTime : float
Timestamp in movie time to convert to a frame index.
Returns
-------
int
Frame index that should be presented at the specified movie time.
"""
self._assertMediaPlayer()
        return math.floor(movieTime / self.metadata.frameInterval)
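    # Worked example (sketch): at 25 fps the frame interval is 0.04 s, so
    # `frameIndexFromMovieTime(1.0)` gives `floor(1.0 / 0.04)` == 25.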
@property
def isSeekable(self):
"""Is seeking allowed for the video stream (`bool`)? If `False` then
`frameIndex` will increase monotonically.
"""
return False # fixed for now
@property
def frameInterval(self):
"""Duration a single frame is to be presented in seconds (`float`). This
        is derived from the framerate information in the metadata. If no movie
is loaded, the returned value will be invalid.
"""
return self.metadata.frameInterval
@property
def frameIndex(self):
"""Current frame index (`int`).
Index of the current frame in the stream. If playing from a file or any
other seekable source, this value may not increase monotonically with
time. A value of `-1` is invalid, meaning either the video is not
started or there is some issue with the stream.
"""
return self._lastFrame.frameIndex
def getPercentageComplete(self):
"""Provides a value between 0.0 and 100.0, indicating the amount of the
movie that has been already played (`float`).
"""
duration = self.metadata.duration
return (self.pts / duration) * 100.0
# --------------------------------------------------------------------------
# Methods for getting video frames from the encoder
#
def _enqueueFrame(self):
"""Grab the latest frame from the stream.
Returns
-------
bool
            `True` if a frame has been enqueued. Returns `False` if the stream
            is not ready or was closed.
"""
self._assertMediaPlayer()
# If the queue is empty, the decoder thread has not yielded a new frame
# since the last call.
enqueuedFrame = self._tStream.getRecentFrame()
if enqueuedFrame is None:
return False
# Unpack the data we got back ...
# Note - Bit messy here, we should just hold onto the `enqueuedFrame`
# instance and reference its fields from properties. Keeping like this
# for now.
frameImage = enqueuedFrame.frameImage
streamStatus = enqueuedFrame.streamStatus
self._metadata = enqueuedFrame.metadata
self._frameIndex = streamStatus.frameIndex
self._loopCount = streamStatus.loopCount
# status information
        self._streamTime = streamStatus.streamTime  # stream time from the decoder
# if we have a new frame, update the frame information
videoBuffer = frameImage.to_memoryview()[0].memview
videoFrameArray = np.frombuffer(videoBuffer, dtype=np.uint8)
# provide the last frame
self._lastFrame = MovieFrame(
frameIndex=self._frameIndex,
absTime=self._streamTime,
displayTime=self.metadata.frameInterval,
size=frameImage.get_size(),
colorData=videoFrameArray,
audioChannels=0, # not populated yet ...
audioSamples=None,
metadata=self.metadata,
movieLib=u'ffpyplayer',
userData=None,
keepAlive=frameImage)
return True
def update(self):
"""Update this player.
        This gets the latest data from the video stream and updates the player
accordingly. This should be called at a higher frequency than the frame
rate of the movie to avoid frame skips.
"""
self._assertMediaPlayer()
# check if the stream reader thread is present and alive, if not the
# movie is finished
self._enqueueFrame()
if self._tStream.isFinished: # are we done?
self._status = FINISHED
def getMovieFrame(self):
"""Get the movie frame scheduled to be displayed at the current time.
Returns
-------
`~psychopy.visual.movies.frame.MovieFrame`
Current movie frame.
"""
self.update()
return self._lastFrame
def __del__(self):
"""Cleanup when unloading.
"""
global _evtCleanUpMovieEvent
if hasattr(self, '_tStream'):
if self._tStream is not None:
if not _evtCleanUpMovieEvent.is_set():
self._tStream.shutdown()
self._tStream.join()
if __name__ == "__main__":
pass
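    # --- Minimal usage sketch (not part of the library) ---
    # Hypothetical demo of the player interface; assumes `ffpyplayer` is
    # installed and that a file named 'movie.mp4' exists. `FFPyPlayer`
    # normally receives a `MovieStim` as its parent, so a stand-in object
    # providing only the attribute the constructor reads is used here.
    class _DemoParent:
        loop = False  # play the movie once, not on a loop
    player = FFPyPlayer(_DemoParent())
    player.load('movie.mp4')  # starts the decoder thread, waits for frame 1
    player.play()
    while not player.isFinished:
        frame = player.getMovieFrame()  # latest `MovieFrame` for presentation
        time.sleep(0.01)  # poll faster than the movie frame rate
    player.unload()  # shut down the decoder thread cleanly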
| 45,899
|
Python
|
.pyp
| 1,123
| 30.816563
| 81
| 0.586287
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
6,008
|
test_component_compile_python.py
|
psychopy_psychopy/psychopy/tests/test_experiment/test_component_compile_python.py
|
import os
import shutil
from pathlib import Path
from tempfile import mkdtemp
from psychopy.experiment import getAllComponents, Experiment
from psychopy.tests.utils import compareTextFiles, TESTS_DATA_PATH
from psychopy.scripts import psyexpCompile
from psychopy import constants
class _TestBoilerplateMixin:
"""
Mixin for tests of classes in the PsychoPy library to check they are able to work with the compiled code from
Builder.
"""
obj = None
def test_input_params(self):
"""
All classes called from boilerplate should accept name and autoLog as input params
"""
if self.obj is None:
return
# Define list of names which need to be accepted by init
required = (
"name",
"autoLog"
)
# Get names of input variables
varnames = type(self.obj).__init__.__code__.co_varnames
# Make sure required names are accepted
for name in required:
assert name in varnames, (
f"{type(self.obj)} init function should accept {name}, but could not be found in list of kw args."
)
def test_status(self):
"""
All classes called from boilerplate should have a settable status attribute which accepts psychopy constants
"""
if self.obj is None:
return
# Check that status can be NOT_STARTED without error
self.obj.status = constants.NOT_STARTED
# Check that status can be STARTED without error
self.obj.status = constants.STARTED
# Check that status can be FINISHED without error
self.obj.status = constants.FINISHED
# Set back to NOT_STARTED for other tests
self.obj.status = constants.NOT_STARTED
# Define classes to skip depth tests on
depthExceptions = ("NoneType", "PanoramaStim")
# Error string for how to mark depth exempt
exemptInstr = (
"If this component is a special case, you can mark it as exempt by adding its class name to the "
"`depthExceptions` variable in this test."
)
def test_can_accept_depth(self):
# Get class name
compName = type(self.obj).__name__
# Skip if exception
if compName in self.depthExceptions:
return
# Get accepted varnames for init function
varnames = type(self.obj).__init__.__code__.co_varnames
# Check whether depth is in there
assert "depth" in varnames, (
f"Init function for class {compName} cannot accept `depth` as an input, only accepts:\n"
f"{varnames}\n"
f"Any component drawn to the screen should be given a `depth` on init. {self.exemptInstr}\n"
)
def test_depth_attr(self):
# Get class name
compName = type(self.obj).__name__
# Skip if exception
if compName in self.depthExceptions:
return
# Check that created object has a depth
assert hasattr(self.obj, "depth"), (
f"Could not find depth attribute in {compName}.\n"
f"\n"
f"Any component drawn to the screen should have a `depth` attribute. {self.exemptInstr}\n"
)
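# A hypothetical sketch of how the mixin is intended to be applied: a concrete
# test class provides `self.obj` in its setup and inherits the checks above.
#
# class TestTextStimBoilerplate(_TestBoilerplateMixin):
#     def setup_method(self):
#         self.obj = visual.TextStim(win, name="text", autoLog=False)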
class TestComponentCompilerPython():
"""A class for testing the Python code compiler for all components"""
def setup_method(self):
self.temp_dir = mkdtemp()
self.allComp = getAllComponents(fetchIcons=False)
        self.exp = Experiment()  # fresh Experiment for each test
self.exp.addRoutine('trial')
self.exp.flow.addRoutine(self.exp.routines['trial'], pos=0)
# Create correctScript subdir for holding correct scripts
if not os.path.isdir(os.path.join(TESTS_DATA_PATH, "correctScript", "python")):
os.mkdir(os.path.join(TESTS_DATA_PATH, "correctScript", "python"))
def teardown_method(self):
shutil.rmtree(self.temp_dir)
def test_all_components(self):
"""Test all component code outputs, except for Settings and Unknown"""
for compName in self.allComp:
if compName not in ['SettingsComponent', 'UnknownComponent']:
# reset exp
self.reset_experiment()
# Add components
self.add_components(compName)
# Create output script
self.create_component_output(compName)
# Get correct script path
# correctPath = os.path.join(TESTS_DATA_PATH, "correctScript", "python", 'correct{}.py'.format(compName))
# Compare files, raising assertions on fails above tolerance (%)
# try:
# compareTextFiles('new{}.py'.format(compName), correctPath, tolerance=5)
# except IOError as err:
# compareTextFiles('new{}.py'.format(compName), correctPath, tolerance=5)
def reset_experiment(self):
"""Resets the exp object for each component"""
self.exp = Experiment()
self.exp.addRoutine('trial')
self.exp.flow.addRoutine(self.exp.routines['trial'], pos=0)
def add_components(self, compName):
"""Add components to routine"""
thisComp = self.allComp[compName](parentName='trial', exp=self.exp)
if compName == 'StaticComponent':
# Create another component to trigger param updates for static
textStim = self.allComp['TextComponent'](parentName='trial', exp=self.exp)
textStim.params['color'].allowedUpdates.append('set during: trial.ISI')
textStim.params['color'].updates = 'set during: trial.ISI'
self.exp.routines['trial'].addComponent(textStim)
# Create static component
thisComp.addComponentUpdate('trial', 'text', 'color')
thisComp.params['code'].val = "customStaticCode = True" # Add the custom code
self.exp.routines['trial'].addComponent(thisComp)
else:
self.exp.routines['trial'].addComponent(thisComp)
def create_component_output(self, compName):
"""Create the Python script"""
pyFilePath = os.path.join(self.temp_dir, 'new{}.py'.format(compName))
psyexpCompile.compileScript(infile=self.exp, outfile=pyFilePath)
def test_component_type_in_experiment(self):
for compName, compObj in self.allComp.items():
if (compName not in [
'SettingsComponent', 'UnknownComponent',
'UnknownPluginComponent', 'RoutineSettingsComponent'
] and "PsychoPy" in compObj.targets):
# reset exp
self.reset_experiment()
# Add components
self.add_components(compName)
# Check component in exp
component = compName.split('Component')[0]
assert self.exp.getComponentFromType(component), (
f"Could not find component of type {compName} in: {self.exp.flow}"
)
| 7,002
|
Python
|
.pyt
| 148
| 37.195946
| 121
| 0.631564
|
psychopy/psychopy
| 1,662
| 900
| 218
|
GPL-3.0
|
9/5/2024, 5:09:29 PM (Europe/Amsterdam)
|
6,009
|
wsgi.py
|
amonapp_amon/wsgi.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "amon.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 162
|
Python
|
.py
| 4
| 39.5
| 64
| 0.822785
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,010
|
manage.py
|
amonapp_amon/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "amon.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 247
|
Python
|
.py
| 7
| 32.142857
| 68
| 0.734177
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,011
|
settings.py
|
amonapp_amon/amon/settings.py
|
import sys
import logging
import socket
import os
import hashlib
import yaml
from amon.utils.parsehost import parsehost
# each dirname() call climbs one directory higher
APPS_ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.dirname(APPS_ROOT)
TESTING = True if 'test' in sys.argv else False
TRAVIS = True if os.getenv('TRAVIS') else False
DEBUG = False
TEMPLATE_DEBUG = DEBUG
TIME_ZONE = 'UTC'
DATE_FORMAT = "d/m/Y"
DATETIME_FORMAT = "d/m/Y H:i"
DATE_FORMAT_ISO = "%d/%m/%Y"
SITE_ID = 1
USE_I18N = False
USE_L10N = False
APPEND_SLASH = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_AGE = 86400
SECRET_KEY_UNIQUE = (socket.gethostname() + u'(71%ck467tyf=ty$c81r#96*!sy5bjg235^78y)&u4vpy1$b$^').encode()
SECRET_KEY = hashlib.md5(SECRET_KEY_UNIQUE).hexdigest()
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
APPS_ROOT + '/templates',
APPS_ROOT + '/templates/notifications/_alerts/emails',
APPS_ROOT + '/templates/notifications/_alerts/thirdparty',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.tz',
'django.template.context_processors.request',
"django.template.context_processors.static",
'django.contrib.messages.context_processors.messages',
'amon.apps.charts.context_processors.charts_global_variables',
],
'builtins': [
'amon.templatetags.url',
'amon.templatetags.date',
'amon.templatetags.setvar',
'amon.templatetags.mongoid',
'amon.templatetags.helpers',
'amon.templatetags.math',
'amon.templatetags.metrics',
'amon.templatetags.baseurl',
'amon.templatetags.formhelpers',
'amon.templatetags.charts',
'amon.templatetags.plugins'
],
},
}]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'amon.apps.account.middleware.AccountMiddleware',
]
ROOT_URLCONF = 'amon.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'timezone_field',
'kronos',
'amon.apps.organizations',
'amon.apps.users',
'amon.templatetags',
'amon.apps.metrics',
'amon.apps.checks',
# 'amon.apps.dashboards',
# 'amon.apps.servers',
# 'amon.apps.alerts',
# 'amon.apps.cloudservers',
# 'amon.apps.notifications',
# 'amon.apps.healthchecks',
# 'amon.apps.charts',
# 'amon.apps.account',
)
REST_FRAMEWORK = {
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
)
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--verbosity=3',
'--with-yanc',
'--with-timer',
'--stop',
# '--with-coverage',
# '--cover-inclusive',
'-x'
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'amon.apps.account.backends.EmailAuthBackend',
]
AUTH_USER_MODEL = 'users.AmonUser'
API_RESULTS = {
"ok": 200,
"not-found": 404,
"created": 201,
"server-error": 500,
"conflict": 409,
"forbidden": 403,
'unprocessable': 422
}
LOGIN_URL = '/account/login/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
###########################
#
# LOCAL SETTINGS
#
###########################
LOGFILE = '/var/log/amon/amonapp.log'
LOGFILE_REQUESTS = '/var/log/amon/amon_requests.log'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/opt/amon/amon.sqlite',
}
}
config_path = "/opt/amon/amon.yml"
# Overwrite for the test suite
if TESTING:
logging.disable(logging.CRITICAL)
DATABASES = {
'default': {
'NAME': os.path.join(PROJECT_ROOT, 'amon_testing.sqlite'),
'ENGINE': 'django.db.backends.sqlite3',
},
}
config_path = os.path.join(PROJECT_ROOT, 'amon.yml')
LOGFILE = os.path.join(PROJECT_ROOT, 'amoapp.log')
LOGFILE_REQUESTS = os.path.join(PROJECT_ROOT, 'amoapp_requests.log')
config = {} # Don't trigger exceptions if the config file is empty
try:
with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
except yaml.YAMLError as exc:
print(exc)
except Exception as exc:
print(exc)
HOST = config.get('host', '127.0.0.1')
STATIC_URL = config.get('static_url', None)
host_struct = parsehost(HOST)
ALLOWED_HOSTS = [
host_struct.hostname,
"127.0.0.1",
"localhost",
"amon.localhost",
"*.amon.cx"
]
HOST = host_struct.host
HOSTNAME = host_struct.hostname
if STATIC_URL is None:
STATIC_URL = '{0}/static/'.format(HOST)
SSL = config.get('ssl', None)
# Global retention period in days, overwrites settings set from the web interface
KEEP_DATA = config.get('keep_data', None)
# SMTP Settings - optionally store these in a config file
smtp = config.get('smtp', {})
EMAIL_USE_TLS = smtp.get('use_tls', False)
EMAIL_HOST = smtp.get('host', 'localhost')
EMAIL_PORT = smtp.get('port', 25)
EMAIL_HOST_USER = smtp.get('username', '')
EMAIL_HOST_PASSWORD = smtp.get('password', '')
DEFAULT_FROM_EMAIL = smtp.get('sent_from', EMAIL_HOST_USER)
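# A hypothetical amon.yml illustrating the keys read above (all values are
# examples only):
#
#   host: https://monitor.example.com
#   static_url: https://static.example.com/static/
#   ssl: true
#   keep_data: 30
#   smtp:
#     use_tls: true
#     host: smtp.example.com
#     port: 587
#     username: amon
#     password: secret
#     sent_from: alerts@example.com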
if SSL or host_struct.scheme == 'https':
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
logging.getLogger("requests").setLevel(logging.WARNING)
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
"simple": {
"format": "%(levelname)s %(message)s"
},
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple"
},
'request_handler': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOGFILE_REQUESTS,
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter': 'simple',
},
'default': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOGFILE,
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter': 'verbose',
},
},
"loggers": {
"": {
"handlers": ["console", "default"],
"level": "DEBUG",
},
'django': {
'handlers': ['default', "console"],
'level': 'ERROR',
'propagate': False,
},
"django.request": {
"handlers": ["request_handler"],
'level': 'DEBUG',
"propagate": True,
},
}
}
try:
# Overwrite all settings with dev
from amon.local_settings import *
except ImportError:
pass
| 7,608
|
Python
|
.py
| 245
| 25.220408
| 111
| 0.623099
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,012
|
urls.py
|
amonapp_amon/amon/urls.py
|
from django.conf.urls import include, url
from django.views.generic import RedirectView
urlpatterns = [
url(r'^$', RedirectView.as_view(pattern_name='login')),
# url(r'^install/', include('amon.apps.install.urls')),
# url(r'^account/', include('amon.apps.account.urls')),
url(r'^api/', include('amon.apps.api.urls')),
# App
# url(r'^cloud-servers/', include('amon.apps.cloudservers.urls')),
# url(r"^servers/", include('amon.apps.servers.urls')),
# url(r"^servers/map/", include('amon.apps.map.urls')),
# url(r"^checks/", include('amon.apps.healthchecks.urls')),
# url(r"^system/", include('amon.apps.system.urls')),
# url(r"^processes/", include('amon.apps.processes.urls')),
# url(r"^alerts/", include('amon.apps.alerts.urls')),
# url(r"^plugins/", include('amon.apps.plugins.urls')),
# url(r"^charts/", include('amon.apps.charts.urls')),
# url(r"^dashboards/", include('amon.apps.dashboards.urls')),
# url(r"^settings/", include('amon.apps.settings.urls')),
# url(r"^settings/notifications/", include('amon.apps.notifications.urls')),
# url(r"^tags/", include('amon.apps.tags.urls')),
# url(r"^bookmarks/", include('amon.apps.bookmarks.urls')),
# url(r"^users/", include('amon.apps.users.urls')),
]
| 1,289
|
Python
|
.py
| 24
| 49.083333
| 80
| 0.649444
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,013
|
wsgi.py
|
amonapp_amon/amon/wsgi.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "amon.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 163
|
Python
|
.py
| 4
| 39.5
| 64
| 0.822785
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,014
|
mongoid.py
|
amonapp_amon/amon/templatetags/mongoid.py
|
from django import template
register = template.Library()
@register.filter("mongo_id")
def mongo_id(value):
try:
mongo_id = str(value['_id'])
except:
mongo_id = "doesnotexist"
return mongo_id
@register.filter("to_str")
def to_str(value):
return str(value)
| 301
|
Python
|
.py
| 12
| 20.333333
| 36
| 0.677536
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,015
|
plugins.py
|
amonapp_amon/amon/templatetags/plugins.py
|
from django import template
from amon.utils.filesize import size
register = template.Library()
@register.filter
def format_plugin_value(value, column):
if column in ['size', 'totalIndexSize', 'indexes', 'total', 'bytes']:
try:
value = size(value)
except Exception as e:
pass
return value
@register.filter
def format_plugin_header(value):
try:
value = value.replace("_", " ").title()
except:
pass
return value
@register.filter
def counter_to_int(value):
if isinstance(value, float):
value = int(value)
return value
| 638
|
Python
|
.py
| 23
| 21.347826
| 73
| 0.658662
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,016
|
url.py
|
amonapp_amon/amon/templatetags/url.py
|
from django import template
from django.urls import resolve
register = template.Library()
@register.simple_tag(takes_context=True)
def active(context, url_name, return_value=' active', **kwargs):
matches = current_url_equals(context, url_name, **kwargs)
return return_value if matches else ''
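# Template usage (sketch; 'dashboard' is a hypothetical url name):
#   <li class="{% active 'dashboard' %}">...</li>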
def current_url_equals(context, url_name, **kwargs):
resolved = False
try:
resolved = resolve(context.get('request').path)
except:
pass
matches = resolved and resolved.url_name == url_name
if matches and kwargs:
for key in kwargs:
kwarg = kwargs.get(key)
resolved_kwarg = resolved.kwargs.get(key)
if not resolved_kwarg or kwarg != resolved_kwarg:
return False
return matches
| 782
|
Python
|
.py
| 21
| 30.809524
| 64
| 0.676821
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,017
|
setvar.py
|
amonapp_amon/amon/templatetags/setvar.py
|
from django import template
register = template.Library()
class SetVarNode(template.Node):
def __init__(self, var_name, var_value):
self.var_name = var_name
self.var_value = var_value
def render(self, context):
try:
value = template.Variable(self.var_value).resolve(context)
except template.VariableDoesNotExist:
value = ""
context[self.var_name] = value
return u""
def set_var(parser, token):
"""
{% set <var_name> = <var_value> %}
"""
parts = token.split_contents()
if len(parts) < 4:
raise template.TemplateSyntaxError("'set' tag must be of the form: {% set <var_name> = <var_value> %}")
return SetVarNode(parts[1], parts[3])
register.tag('set', set_var)
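# Template usage (sketch; variables are hypothetical):
#   {% set greeting = user.first_name %} ... {{ greeting }}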
| 786
|
Python
|
.py
| 22
| 29.045455
| 113
| 0.619868
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,018
|
date.py
|
amonapp_amon/amon/templatetags/date.py
|
from datetime import datetime
from django import template
from collections import OrderedDict
import re
from amon.utils.dates import (
dateformat_local,
datetimeformat_local,
dateformat,
timeformat,
day_local,
time_local,
dateformatcharts_local
)
register = template.Library()
def date_to_js(value, format='%Y, %m, %d, %H, %M'):
# Converts unixtime to a javascript Date list
_ = datetime.utcfromtimestamp(value)
js_time_list = _.strftime(format).split(',')
    # Subtract one month: in JS, January is 0, February is 1, etc.
js_time_list[1] = str(int(js_time_list[1])-1)
return ",".join(js_time_list)
def extract_days_from_unixdate(value, days):
day = 86400 # 1 day in seconds
return value-(day*days)
def add_days_to_unixdate(value, days):
day = 86400 # 1 day in seconds
return value+(day*days)
def days_remaining(value):
today = datetime.utcnow()
    try:
        remaining = value - today.date()
remaining_days = remaining.days
except:
remaining_days = None
return remaining_days
"""Convert seconds to human readable interval back and forth."""
interval_dict = OrderedDict([("Y", 365*86400), # 1 year
("M", 30*86400), # 1 month
("W", 7*86400), # 1 week
("D", 86400), # 1 day
(" hours", 3600), # 1 hour
(" minutes", 60), # 1 minute
("s", 1)]) # 1 second
@register.filter
def seconds_to_human(seconds):
"""Convert seconds to human readable format like 1M.
:param seconds: Seconds to convert
:type seconds: int
    :rtype: str
:return: Human readable string
"""
seconds = int(seconds)
string = ""
for unit, value in interval_dict.items():
        subres = seconds // value  # whole number of this unit
if subres:
seconds -= value * subres
string += str(subres) + unit
return string
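# Worked example: seconds_to_human(90061) consumes the units in order,
# 1 day + 1 hour + 1 minute + 1 second -> '1D1 hours1 minutes1s'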
register.filter('time', timeformat)
register.filter('date_to_js', date_to_js)
register.filter('date', dateformat)
register.filter('date_local', dateformat_local)
register.filter('day_local', day_local)
register.filter('time_local', time_local)
register.filter('datetime_local', datetimeformat_local)
register.filter('datetimecharts_local', dateformatcharts_local)
register.filter('extract_days_from_unixdate', extract_days_from_unixdate)
register.filter('add_days_to_unixdate', add_days_to_unixdate)
register.filter('days_remaining', days_remaining)
| 2,624
|
Python
|
.py
| 71
| 30.450704
| 73
| 0.640853
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,019
|
metrics.py
|
amonapp_amon/amon/templatetags/metrics.py
|
from __future__ import division
from django import template
from amon.utils.filesize import size
register = template.Library()
@register.filter
def kb_to_mb(value):
mb = float(value)/1000
mb = "{0:.2f}".format(mb)
return mb
@register.filter
def seconds_to_minutes(value):
minutes = value/60
return minutes
# Used in alerts/all
@register.filter
def metric_type_value(value, metric):
result = ""
metric = metric.lower()
if metric == 'memory':
result = 'MB'
elif metric == 'down':
result = 'Down between'
else:
result = value
return result
@register.filter
def bytes_to_mb(value):
return size(value)
| 691
|
Python
|
.py
| 28
| 20.321429
| 37
| 0.683801
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,020
|
__init__.py
|
amonapp_amon/amon/templatetags/__init__.py
|
# from django.template.base import add_to_builtins
# from amon.settings import AUTOLOAD_TAGS
# for tag in AUTOLOAD_TAGS:
# add_to_builtins(tag)
| 155
|
Python
|
.py
| 4
| 36
| 50
| 0.770833
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,021
|
charts.py
|
amonapp_amon/amon/templatetags/charts.py
|
from django import template
register = template.Library()
@register.filter
def yaxis(value):
yaxis_value = ''
if type(value) is str:
value = value.lower()
if value == 'cpu':
yaxis_value = '%'
elif value in ['memory', 'disk']:
yaxis_value = 'MB'
elif value in ['io','network']:
yaxis_value = 'KB'
if type(value) is dict:
unit = value.get('unit', '')
        unit = '' if unit is None else unit
yaxis_value = unit
return yaxis_value
| 524
|
Python
|
.py
| 18
| 22.5
| 43
| 0.581633
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,022
|
baseurl.py
|
amonapp_amon/amon/templatetags/baseurl.py
|
from django.urls import reverse
from django.conf import settings
from django import template
from django.utils.encoding import smart_str
register = template.Library()
import re
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
class URLNode(template.Node):
def __init__(self, view_name, args, kwargs):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
def render(self, context):
kwargs = dict([(smart_str(k,'ascii'), v.resolve(context))
for k, v in self.kwargs.items()])
url = reverse(self.view_name, kwargs=kwargs)
domain_url = settings.HOST
if domain_url.endswith('/'):
domain_url = domain_url[:-1]
_url = "{base}{url}".format(base=domain_url, url=url)
return _url
def base_url(parser, token):
bits = token.split_contents()
view_name = bits[1]
view_name = view_name.strip('\'"')
args = []
kwargs = {}
    for bit in bits[2:]:  # skip the tag name and the view name
match = kwarg_re.match(bit)
if not match:
raise template.TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(view_name, args, kwargs)
register.tag('base_url', base_url)
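# Template usage (sketch; 'server_detail' is a hypothetical url name):
#   {% base_url 'server_detail' server_id=server.id %}
# renders settings.HOST plus the reversed path, e.g.
# https://monitor.example.com/servers/5/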
| 1,396
|
Python
|
.py
| 38
| 29.078947
| 80
| 0.622642
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,023
|
formhelpers.py
|
amonapp_amon/amon/templatetags/formhelpers.py
|
from django.template import Library
register = Library()
def silence_without_field(fn):
def wrapped(field, attr):
if not field:
return ""
return fn(field, attr)
return wrapped
def _process_field_attributes(field, attr, process):
# split attribute name and value from 'attr:value' string
params = attr.split(':', 1)
attribute = params[0]
value = params[1] if len(params) == 2 else ''
# decorate field.as_widget method with updated attributes
old_as_widget = field.as_widget
def as_widget(self, widget=None, attrs=None, only_initial=False):
attrs = attrs or {}
process(widget or self.field.widget, attrs, attribute, value)
return old_as_widget(widget, attrs, only_initial)
bound_method = type(old_as_widget)
try:
field.as_widget = bound_method(as_widget, field, field.__class__)
except TypeError: # python 3
field.as_widget = bound_method(as_widget, field)
return field
@register.filter
def fieldtype(obj):
return obj.__class__.__name__.lower()
@register.filter("attr")
@silence_without_field
def set_attr(field, attr):
def process(widget, attrs, attribute, value):
attrs[attribute] = value
return _process_field_attributes(field, attr, process)
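# Template usage (sketch): {{ form.email|attr:"placeholder:Your email" }}
# splits on the first ':' and renders the widget with placeholder="Your email".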
@register.filter("append_attr")
@silence_without_field
def append_attr(field, attr):
def process(widget, attrs, attribute, value):
if attrs.get(attribute):
attrs[attribute] += ' ' + value
elif widget.attrs.get(attribute):
attrs[attribute] = widget.attrs[attribute] + ' ' + value
else:
attrs[attribute] = value
return _process_field_attributes(field, attr, process)
@register.filter("add_class")
@silence_without_field
def add_class(field, css_class):
return append_attr(field, 'class:' + css_class)
@register.filter("add_error_class")
@silence_without_field
def add_error_class(field, css_class):
if hasattr(field, 'errors') and field.errors:
return add_class(field, css_class)
return field
@register.filter("set_data")
@silence_without_field
def set_data(field, data):
return set_attr(field, 'data-' + data)
| 2,208
|
Python
|
.py
| 59
| 32.118644
| 73
| 0.685002
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,024
|
math.py
|
amonapp_amon/amon/templatetags/math.py
|
from __future__ import division
from django import template
register = template.Library()
@register.filter
def sum(element, second_element):
result = float(element)+float(second_element)
return result
@register.filter
def substract(element, second_element):
result = float(element)-float(second_element)
return result
@register.filter
def sum_int(element, second_element):
result = element+second_element
return result
@register.filter
def substract_int(element, second_element):
result = 0
if type(element) == int and type(second_element) == int:
result = element-second_element
return result
@register.filter
def percentage_of(element, second_element):
percentage = '{0:.2f}'.format((element / second_element * 100))
return percentage
# CSS Helper function, 20% to 0.2, 80% to 0.8
@register.filter
def percent_to_opacity(value):
    # P% of X is (P/100) * X, e.g. 10% of 150 is (10/100) * 150 = 15;
    # here X is 1, so 20 maps to 0.2 and 80 to 0.8
    value = float(value)
    result = value / 100
return result
@register.filter
def divide(value, arg): return int(value) / int(arg)
| 1,165
|
Python
|
.py
| 34
| 30.882353
| 120
| 0.720036
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,025
|
helpers.py
|
amonapp_amon/amon/templatetags/helpers.py
|
import re
from django import template
register = template.Library()
from amon.utils.charts import get_disk_unit
@register.filter
def disk_unit(server):
unit = get_disk_unit(server=server)
return unit
@register.filter
def get_distro(distro):
try:
name = distro.get('name', '').lower()
except:
name = ''
return name
@register.filter
def parse_ignored_list(list_with_values):
result = []
if list_with_values:
for v in list_with_values:
try:
name, cpu, memory = v.split('::')
process_dict = {'name': name, 'cpu': cpu, 'memory': memory}
result.append(process_dict)
except:
pass
return result
def to_int(value):
    number = re.compile(r'(\d+)')
try:
_int = number.search(value).group(1)
except:
_int = 0
return int(_int)
# Removes the letters from a string
# From 24.5MB -> 24.5 -> used in the progress width
def clean_string(variable):
    if isinstance(variable, (int, float)):
        variable = float(variable)
return variable
else:
value_regex = re.compile(r'\d+[\.,]\d+')
extracted_value = value_regex.findall(variable)
if len(extracted_value) > 0:
extracted_value = extracted_value[0]
            extracted_value = extracted_value.replace(",", ".")
extracted_value = float(extracted_value)
else:
extracted_value = 0
return extracted_value
# Used in the charts, where a disk drive could be with several slashes
def clean_slashes(string):
return re.sub('[^A-Za-z0-9]+', '', string).strip().lower()
def get_key(dictionary, key):
try:
value = dictionary[key]
except:
value = ''
return value
# Used in Alerts - Not Sending Data
@register.filter
def add_spaces(value):
if value != 'CPU':
return re.sub(r"(?<=\w)([A-Z])", r" \1", value)
return value
def dehumanize(value):
values_dict = {
"more_than": ">",
"less_than": "<",
"minute": "1 minute",
"week": "1 week",
"month": "1 month",
"all": "All",
"five_minutes": "5 minutes",
"fifteen_minutes": "15 minutes"
}
try:
_value = values_dict[value]
except:
_value = ''
return _value
def empty_if_none(value):
if value in ['None', 'none', None, False, 'False']:
return ""
return value
class StripspacesNode(template.base.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
return re.sub(r'\s+', '', (self.nodelist.render(context).strip()))
def stripspaces(parser, token):
nodelist = parser.parse(('endstripspaces',))
parser.delete_first_token()
return StripspacesNode(nodelist)
def percentage(value, total):
try:
return "%.2f%%" % ((float(value) / float(total)) * 100)
except ValueError:
return ''
register.tag('stripspaces', stripspaces)
register.filter('to_int', to_int)
register.filter('clean_slashes', clean_slashes)
register.filter('clean_string', clean_string)
register.filter('get_key', get_key)
register.filter('dehumanize', dehumanize)
register.filter('empty_if_none', empty_if_none)
register.filter('percentage', percentage)
| 3,518
|
Python
|
.py
| 109
| 25.357798
| 83
| 0.613106
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,026
|
daterange.py
|
amonapp_amon/amon/utils/daterange.py
|
# -*- coding: utf-8 -*-
"""
Example Usage
=============
>>> import datetime
>>> start = datetime.date(2009, 6, 21)
>>> g1 = daterange(start)
>>> g1.next()
datetime.date(2009, 6, 21)
>>> g1.next()
datetime.date(2009, 6, 22)
>>> g1.next()
datetime.date(2009, 6, 23)
>>> g1.next()
datetime.date(2009, 6, 24)
>>> g1.next()
datetime.date(2009, 6, 25)
>>> g1.next()
datetime.date(2009, 6, 26)
>>> g2 = daterange(start, to=datetime.date(2009, 6, 25))
>>> g2.next()
datetime.date(2009, 6, 21)
>>> g2.next()
datetime.date(2009, 6, 22)
>>> g2.next()
datetime.date(2009, 6, 23)
>>> g2.next()
datetime.date(2009, 6, 24)
>>> g2.next()
datetime.date(2009, 6, 25)
>>> g2.next()
Traceback (most recent call last):
...
StopIteration
>>> g3 = daterange(start, step='2 days')
>>> g3.next()
datetime.date(2009, 6, 21)
>>> g3.next()
datetime.date(2009, 6, 23)
>>> g3.next()
datetime.date(2009, 6, 25)
>>> g3.next()
datetime.date(2009, 6, 27)
>>> g4 = daterange(start, to=datetime.date(2009, 6, 25), step='2 days')
>>> g4.next()
datetime.date(2009, 6, 21)
>>> g4.next()
datetime.date(2009, 6, 23)
>>> g4.next()
datetime.date(2009, 6, 25)
>>> g4.next()
Traceback (most recent call last):
...
StopIteration
"""
import datetime
import re
def daterange(date, to=None, step=datetime.timedelta(days=1)):
"""
    Similar to the built-in ``range()``, only for datetime objects.
If called with just a ``datetime`` object, it will keep yielding values
forever, starting with that date/time and counting in steps of 1 day.
    If the ``to`` keyword is provided, it will count up to and including
that date/time (again, in steps of 1 day by default).
If the ``step`` keyword is provided, this will be used as the step size
instead of the default of 1 day. It should be either an instance of
``datetime.timedelta``, an integer, a string representing an integer, or
a string representing a ``delta()`` value (consult the documentation for
``delta()`` for more information). If it is an integer (or string thereof)
then it will be interpreted as a number of days. If it is not a simple
integer string, then it will be passed to ``delta()`` to get an instance
of ``datetime.timedelta()``.
Note that, due to the similar interfaces of both objects, this function
will accept both ``datetime.datetime`` and ``datetime.date`` objects. If
a date is given, then the values yielded will be dates themselves. A
caveat is in order here: if you provide a date, the step should have at
least a ‘days’ component; otherwise the same date will be yielded forever.
"""
if to is None:
condition = lambda d: True
else:
condition = lambda d: (d <= to)
    if isinstance(step, int):
# By default, integers are interpreted in days. For more granular
# steps, use a `datetime.timedelta()` instance.
step = datetime.timedelta(days=step)
    elif isinstance(step, str):
        # If the string is a plain integer, interpret it as a number of days
if re.match(r'^(\d+)$', str(step)):
step = datetime.timedelta(days=int(step))
else:
try:
step = delta(step)
except ValueError:
pass
if not isinstance(step, datetime.timedelta):
raise TypeError('Invalid step value: %r' % (step,))
# The main generation loop.
while condition(date):
yield date
date += step
class delta(object):
"""
Build instances of ``datetime.timedelta`` using short, friendly strings.
``delta()`` allows you to build instances of ``datetime.timedelta`` in
fewer characters and with more readability by using short strings instead
of a long sequence of keyword arguments.
A typical (but very precise) spec string looks like this:
'1 day, 4 hours, 5 minutes, 3 seconds, 120 microseconds'
``datetime.timedelta`` doesn’t allow deltas containing months or years,
because of the differences between different months, leap years, etc., so
this function doesn’t support them either.
The parser is very simple; it takes a series of comma-separated values,
each of which represents a number of units of time (such as one day,
four hours, five minutes, et cetera). These ‘specifiers’ consist of a
number and a unit of time, optionally separated by whitespace. The units
of time accepted are (case-insensitive):
* Days ('d', 'day', 'days')
* Hours ('h', 'hr', 'hrs', 'hour', 'hours')
* Minutes ('m', 'min', 'mins', 'minute', 'minutes')
* Seconds ('s', 'sec', 'secs', 'second', 'seconds')
* Microseconds ('ms', 'microsec', 'microsecs' 'microsecond',
'microseconds')
If an illegal specifier is present, the parser will raise a ValueError.
This utility is provided as a class, but acts as a function (using the
``__new__`` method). This is so that the names and aliases for units are
stored on the class object itself: as ``UNIT_NAMES``, which is a mapping
of names to aliases, and ``UNIT_ALIASES``, the converse.
"""
UNIT_NAMES = {
## unit_name: unit_aliases
'days': 'd day'.split(),
'hours': 'h hr hrs hour'.split(),
'minutes': 'm min mins minute'.split(),
'seconds': 's sec secs second'.split(),
'microseconds': 'ms microsec microsecs microsecond'.split(),
}
# Turn `UNIT_NAMES` inside-out, so that unit aliases point to canonical
# unit names.
UNIT_ALIASES = {}
for cname, aliases in UNIT_NAMES.items():
for alias in aliases:
UNIT_ALIASES[alias] = cname
# Make the canonical unit name point to itself.
UNIT_ALIASES[cname] = cname
def __new__(cls, string):
specifiers = (specifier.strip() for specifier in string.split(','))
kwargs = {}
for specifier in specifiers:
match = re.match(r'^(\d+)\s*(\w+)$', specifier)
if not match:
raise ValueError('Invalid delta specifier: %r' % (specifier,))
number, unit_alias = match.groups()
number, unit_alias = int(number), unit_alias.lower()
unit_cname = cls.UNIT_ALIASES.get(unit_alias)
if not unit_cname:
raise ValueError('Invalid unit: %r' % (unit_alias,))
kwargs[unit_cname] = kwargs.get(unit_cname, 0) + number
return datetime.timedelta(**kwargs)
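# A minimal usage sketch (the values below are illustrative, not part of the
# original module):
#
#   delta('1 day, 4 hours') == datetime.timedelta(days=1, hours=4)   # True
#   delta('90 mins') == datetime.timedelta(minutes=90)               # True
#   delta('1 fortnight')                                             # raises ValueError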
if __name__ == '__main__':
import doctest
doctest.testmod()
| 6,639 | Python | .py | 162 | 34.716049 | 78 | 0.647854 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,027 | dates.py | amonapp_amon/amon/utils/dates.py |
from __future__ import division
import calendar
import pytz
from datetime import datetime, time, timedelta
from pytz import timezone
from operator import itemgetter
# Returns the UTC unix timestamp for 'period' seconds (default 1800) before now
def utc_delta_from_now(period=1800):
now = datetime.utcnow()
now_unix = datetime_to_unixtime(now)
delta = now_unix-period
return delta
def expire_date(days=30):
date_now = datetime.utcnow()
expires_at = date_now + timedelta(days=days)
return expires_at
def localtime_utc_timedelta(tz='UTC'):
local_timezone = timezone(tz)
local_time = datetime.now(local_timezone)
    is_dst = False  # set below if the local timezone is currently in DST
if local_time.dst():
is_dst = True
naive_local_time = local_time.replace(tzinfo=None)
# Return 0 for UTC
if tz == 'UTC':
return ('positive', 0)
# timedelta betweeen the local timezone and UTC
td = local_timezone.utcoffset(naive_local_time, is_dst=is_dst)
    offset = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.0**6
if offset < 0:
# Negative timedelta is actually an UTC+ timezone
offset = -offset
offset_list = ('negative', int(offset))
else:
offset_list = ('positive', int(offset))
return offset_list
# Converts a time string like '17:46' to a datetime object for today in the given timezone; returns a unix timestamp when to_utc is set
def time_to_utc_today(timestring, format="%H:%M", tz='UTC', to_utc=None):
formated_time = datestring_to_utc_datetime(timestring, format=format)
timezone = pytz.timezone(tz)
today = datetime.now(timezone).date()
time_datetime_obj = datetime.combine(today, time(formated_time.hour, formated_time.minute))
if to_utc:
time_datetime_obj = datetime_to_unixtime(time_datetime_obj)
return time_datetime_obj
# Converts date strings like '31.07.2011-17:46' to a UTC datetime object using the given format and local timezone
def datestring_to_utc_datetime(datestring, format="%d.%m.%Y-%H:%M", tz='UTC'):
_datetime = datetime.strptime(datestring, format)
local_timezone = timezone(tz)
# Adjust for Daylight savings time
local_datetime = local_timezone.localize(_datetime)
utc_datetime = local_datetime.astimezone(pytz.UTC)
return utc_datetime
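# A quick usage sketch (illustrative values, default format "%d.%m.%Y-%H:%M"):
#
#   dt = datestring_to_utc_datetime('31.07.2011-17:46')   # tz-aware UTC datetime
#   datetime_to_unixtime(dt)                               # -> 1312134360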
def unixtime_to_midnight(unixtime):
dt = datetime.fromtimestamp(unixtime)
midnight = time(0)
return datetime.combine(dt.date(), midnight)
def unixtime_to_datetime_utc(unixtime):
return datetime.fromtimestamp(unixtime, tz=pytz.UTC)
def unixtime_to_datetime(unixtime):
return datetime.fromtimestamp(unixtime)
# Internal function, always pass UTC date objects
# Converts datetime objects to unix integers
def datetime_to_unixtime(datetime):
return int(calendar.timegm(datetime.timetuple()))
# Converts date string to unix UTC time
def datestring_to_utc_unixtime(datestring):
datetime_object = datestring_to_utc_datetime(datestring)
return datetime_to_unixtime(datetime_object)
def utc_unixtime_to_localtime(unixtime, tz='UTC'):
local_timezone = timezone(tz)
unixtime = float(unixtime)
utc = pytz.UTC
utc_datetime = utc.localize(datetime.utcfromtimestamp(unixtime))
local_datetime = utc_datetime.astimezone(local_timezone)
local_unix_datetime = datetime_to_unixtime(local_datetime)
local_unix_datetime = int(local_unix_datetime)
return local_unix_datetime
# Used in the collector, saves all the data in UTC
def unix_utc_now():
d = datetime.utcnow()
_unix = calendar.timegm(d.utctimetuple())
return _unix
def utc_now_to_localtime(tz='UTC'):
now = unix_utc_now()
local_unix_time = utc_unixtime_to_localtime(now, tz)
local_unix_time = int(local_unix_time)
return local_unix_time
def dateformat(value, format='%d.%m.%Y'):
# Converts unix time to a readable date format
try:
_ = datetime.utcfromtimestamp(value)
return _.strftime(format)
except:
return None
# Used in the alert emails
def time_local(value, tz='UTC', format='%H:%M'):
value = utc_unixtime_to_localtime(value, tz=tz)
return dateformat(value, format)
# Used in the alert emails
def day_local(value, tz='UTC', format='%d.%m.%Y'):
value = utc_unixtime_to_localtime(value, tz=tz)
return dateformat(value, format)
# Localized unix timestamp
def dateformat_local(value, format='%d-%m-%Y-%H:%M'):
result = None
try:
value = utc_unixtime_to_localtime(value)
except:
value = None
if value:
result = dateformat(value, format)
return result
def dateformatcharts_local(value, tz='UTC',format="%d.%m.%Y-%H:%M"):
result = None
try:
value = utc_unixtime_to_localtime(value, tz=tz)
except:
value = None
if value:
result = dateformat(value, format)
return result
# Localized unix timestamp
def datetimeformat_local(value, tz='UTC', format='%d.%m.%Y-%H:%M:%S'):
result = None
try:
value = utc_unixtime_to_localtime(value, tz=tz)
except:
value = None
if value:
result = dateformat(value, format)
return result
def timeformat(value, format='%H:%M'):
# Converts unix time to a readable 24 hour-minute format
_ = datetime.utcfromtimestamp(value)
return _.strftime(format)
def timedelta_total_seconds(td):
return int((td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6)
def timezone_choices():
TIMEZONE_CHOICES = []
for timezone in pytz.common_timezones:
now = datetime.now(pytz.timezone(timezone))
offset = now.strftime("%z")
TIMEZONE_CHOICES.append({"timezone": timezone,
"offset": int(offset),
"value": "(GMT{0}) {1}".format(offset, timezone)})
sorted_timezones = sorted(TIMEZONE_CHOICES, key=itemgetter('offset'))
return sorted_timezones
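# Each entry looks like (illustrative):
#   {"timezone": "Europe/Amsterdam", "offset": 200, "value": "(GMT+0200) Europe/Amsterdam"}
# Note that "offset" is the raw "%z" string parsed as an integer ("+0200" -> 200);
# it sorts correctly but is not a number of minutes.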
| 5,926 | Python | .py | 148 | 34.581081 | 102 | 0.699364 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,028 | security.py | amonapp_amon/amon/utils/security.py |
import base64
import hashlib
from Crypto.Cipher import AES
from Crypto import Random
from amon.settings import SECRET_KEY
class AESCipher(object):
"""
    A classic AES cipher (CBC mode). It handles plaintext of any length and a
    secret key of any length, thanks to block padding and key hashing. A small
    unicode-to-bytes converter keeps the input types consistent.
"""
def __init__(self):
self.bs = 32
self.key = hashlib.sha256(AESCipher.str_to_bytes(SECRET_KEY)).digest()
@staticmethod
def str_to_bytes(data):
u_type = type(b''.decode('utf8'))
if isinstance(data, u_type):
return data.encode('utf8')
return data
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * AESCipher.str_to_bytes(chr(self.bs - len(s) % self.bs))
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s) - 1:])]
def encrypt(self, raw):
raw = self._pad(AESCipher.str_to_bytes(raw))
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw)).decode('utf-8')
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
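# A minimal usage sketch (requires PyCrypto/PyCryptodome and a configured
# amon.settings.SECRET_KEY):
#
#   cipher = AESCipher()
#   token = cipher.encrypt('s3cret value')   # base64 text, fresh random IV per call
#   cipher.decrypt(token)                    # -> 's3cret value'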
| 1,359 | Python | .py | 34 | 33.411765 | 105 | 0.639058 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,029 | haiku.py | amonapp_amon/amon/utils/haiku.py |
import random
def generate_haiku_name():
adjs = [
"autumn", "hidden", "bitter", "misty", "silent", "empty", "dry", "dark",
"summer", "icy", "delicate", "quiet", "white", "cool", "spring", "winter",
"patient", "twilight", "dawn", "crimson", "wispy", "weathered", "blue",
"billowing", "broken", "cold", "damp", "falling", "frosty", "green",
"long", "late", "lingering", "bold", "little", "morning", "muddy", "old",
"red", "rough", "still", "small", "sparkling", "throbbing", "shy",
"wandering", "withered", "wild", "black", "young", "holy", "solitary",
"fragrant", "aged", "snowy", "proud", "floral", "restless", "divine",
"polished", "ancient", "purple", "lively", "nameless", "teal", "charming",
"lush", "tropical", "stunning", "thriving", "fluffy", "gentle", "enigmatic"
]
nouns = [
"waterfall", "river", "breeze", "moon", "rain", "wind", "sea", "morning",
"snow", "lake", "sunset", "pine", "shadow", "leaf", "dawn", "glitter",
"forest", "hill", "cloud", "meadow", "sun", "glade", "bird", "brook",
"butterfly", "bush", "dew", "dust", "field", "fire", "flower", "firefly",
"feather", "grass", "haze", "mountain", "night", "pond", "darkness",
"snowflake", "silence", "sound", "sky", "shape", "surf", "thunder",
"violet", "water", "wildflower", "wave", "water", "resonance", "sun",
"wood", "dream", "cherry", "tree", "fog", "frost", "voice", "paper",
"frog", "smoke", "star"
]
adj = random.choice(adjs)
noun = random.choice(nouns)
    # randint() is inclusive at both ends, so cap at 9999 to keep the suffix 4 digits
    rand_num = '{0:04}'.format(random.randint(1000, 9999))
haiku_name = "{0}-{1}-{2}".format(adj, noun, rand_num)
return haiku_name
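# Example output (illustrative): "misty-waterfall-4821"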
| 1,790 | Python | .py | 30 | 50.933333 | 85 | 0.531393 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,030 | __init__.py | amonapp_amon/amon/utils/__init__.py |
import random
import string
def generate_random_string(size=6, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
class AmonStruct(object):
pass
| 210 | Python | .py | 6 | 32.166667 | 81 | 0.776119 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,031 | filesize.py | amonapp_amon/amon/utils/filesize.py |
traditional = [
(1024 ** 5, 'P'),
(1024 ** 4, 'T'),
(1024 ** 3, 'G'),
(1024 ** 2, 'M'),
(1024 ** 1, 'K'),
(1024 ** 0, 'B'),
]
alternative = [
(1024 ** 5, ' PB'),
(1024 ** 4, ' TB'),
(1024 ** 3, ' GB'),
(1024 ** 2, ' MB'),
(1024 ** 1, ' KB'),
(1024 ** 0, (' byte', ' bytes')),
]
verbose = [
(1024 ** 5, (' petabyte', ' petabytes')),
(1024 ** 4, (' terabyte', ' terabytes')),
(1024 ** 3, (' gigabyte', ' gigabytes')),
(1024 ** 2, (' megabyte', ' megabytes')),
(1024 ** 1, (' kilobyte', ' kilobytes')),
(1024 ** 0, (' byte', ' bytes')),
]
iec = [
(1024 ** 5, 'Pi'),
(1024 ** 4, 'Ti'),
(1024 ** 3, 'Gi'),
(1024 ** 2, 'Mi'),
(1024 ** 1, 'Ki'),
(1024 ** 0, ''),
]
si = [
(1000 ** 5, 'P'),
(1000 ** 4, 'T'),
(1000 ** 3, 'G'),
(1000 ** 2, 'M'),
(1000 ** 1, 'K'),
(1000 ** 0, 'B'),
]
def size(bytes, system=alternative):
"""Human-readable file size.
    Using the traditional system, where a factor of 1024 is used (note that
    the default system is ``alternative``, so it is passed explicitly here)::
    >>> size(10, system=traditional)
    '10B'
    >>> size(100, system=traditional)
    '100B'
    >>> size(1000, system=traditional)
    '1000B'
    >>> size(2000, system=traditional)
    '1K'
    >>> size(10000, system=traditional)
    '9K'
    >>> size(20000, system=traditional)
    '19K'
    >>> size(100000, system=traditional)
    '97K'
    >>> size(200000, system=traditional)
    '195K'
    >>> size(1000000, system=traditional)
    '976K'
    >>> size(2000000, system=traditional)
    '1M'
Using the SI system, with a factor 1000::
>>> size(10, system=si)
'10B'
>>> size(100, system=si)
'100B'
>>> size(1000, system=si)
'1K'
>>> size(2000, system=si)
'2K'
>>> size(10000, system=si)
'10K'
>>> size(20000, system=si)
'20K'
>>> size(100000, system=si)
'100K'
>>> size(200000, system=si)
'200K'
>>> size(1000000, system=si)
'1M'
>>> size(2000000, system=si)
'2M'
"""
for factor, suffix in system:
if bytes >= factor:
break
amount = int(bytes/factor)
if isinstance(suffix, tuple):
singular, multiple = suffix
if amount == 1:
suffix = singular
else:
suffix = multiple
return str(amount) + suffix
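# The other unit tables work the same way, e.g. (illustrative):
#   size(1, system=verbose)     # -> '1 byte'
#   size(2048, system=verbose)  # -> '2 kilobytes'
#   size(2048, system=iec)      # -> '2Ki'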
| 2,166 | Python | .py | 96 | 16.979167 | 66 | 0.457926 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,032 | parsehost.py | amonapp_amon/amon/utils/parsehost.py |
from urllib.parse import urlparse
class AmonStruct(object):
pass
def parsehost(host=None):
parsed_url = urlparse(host)
result = AmonStruct()
    result.hostname = parsed_url.hostname if parsed_url.hostname is not None else parsed_url.path
    result.scheme = 'http' if parsed_url.scheme == '' else parsed_url.scheme
    result.port = 80 if parsed_url.port is None else parsed_url.port
    if result.scheme == 'https' and parsed_url.port is None:
        result.port = 443
result.host = "{scheme}://{hostname}".format(
scheme=result.scheme,
hostname=result.hostname
)
return result
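# Usage (matches the tests in tests/test_parsehost.py):
#   parsehost('amon.cx').host          # -> 'http://amon.cx'
#   parsehost('https://amon.cx').port  # -> 443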
| 636 | Python | .py | 16 | 33.5625 | 93 | 0.69218 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,033 | charts.py | amonapp_amon/amon/utils/charts.py |
import random
base_colors = [
"#4B97DE",
"#1fb25a",
"#a489d8",
"#F2832A",
'#8dc48b',
'#15b5c1',
"#ffbc14"]
colors = base_colors * 5
def select_colors(index, type=None, random_color=None):
if random_color:
selected_color = random.choice(colors)
else:
selected_color = colors[index]
result = {"color": selected_color}
return result
# def hex_to_rgb(value, opacity=1):
# value = value.lstrip('#')
# lv = len(value)
# return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
# def rgb_to_hex(rgb):
# return '#%02x%02x%02x' % rgb
def chart_type(type=None):
if type in ['memory','disk']:
chart_type = 'area'
else:
chart_type = 'line'
return chart_type
def get_disk_unit(server=None):
unit = 'MB'
try:
distro = server.get('distro', {})
name = distro.get('name', '')
except:
name = "linux"
if 'windows' in name.lower():
unit = 'GB'
return unit
| 1,088 | Python | .py | 42 | 21.071429 | 80 | 0.594461 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,034 | generators.py | amonapp_amon/amon/utils/generators.py |
import random
import string
def random_id_generator(size=32, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
| 170 | Python | .py | 4 | 40.5 | 79 | 0.771084 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,035 | test_security.py | amonapp_amon/amon/utils/tests/test_security.py |
import unittest
from nose.tools import eq_
from amon.utils.security import AESCipher
from faker import Factory
fake = Factory.create()
class EncryptDecryptTest(unittest.TestCase):
def setUp(self):
self.aes_cipher = AESCipher()
def test_encrypt_decrypt(self):
for i in range(0, 100):
name = fake.name()
encrypted_string = self.aes_cipher.encrypt(name)
decrypted_message = self.aes_cipher.decrypt(encrypted_string)
print(decrypted_message)
eq_(name, decrypted_message)
for i in range(0, 100):
text = fake.text(max_nb_chars=200)
encrypted_string = self.aes_cipher.encrypt(text)
decrypted_message = self.aes_cipher.decrypt(encrypted_string)
eq_(text, decrypted_message)
        for i in range(0, 100):
            text = fake.text(max_nb_chars=200)
            encrypted_string = self.aes_cipher.encrypt(text)
            decrypted_message = self.aes_cipher.decrypt(encrypted_string)
            eq_(text, decrypted_message)
| 1,085 | Python | .py | 25 | 33.6 | 73 | 0.648649 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,036 | test_parsehost.py | amonapp_amon/amon/utils/tests/test_parsehost.py |
import unittest
from nose.tools import eq_
from amon.utils.parsehost import parsehost
class ParseHostTest(unittest.TestCase):
def test_parsehost(self):
# Test domain
result = parsehost('http://amon.cx')
assert result.__dict__ == {'scheme': 'http', 'host': 'http://amon.cx', 'hostname': 'amon.cx', 'port': 80}
# Test domain
result = parsehost('amon.cx')
assert result.__dict__ == {'scheme': 'http', 'host': 'http://amon.cx', 'hostname': 'amon.cx', 'port': 80}
# Test HTTPS
result = parsehost('https://amon.cx')
assert result.__dict__ == {'scheme': 'https', 'host': 'https://amon.cx', 'hostname': 'amon.cx', 'port': 443}
# Test Subdomain
result = parsehost('https://simplistic.amon.cx')
assert result.__dict__ == {'scheme': 'https', 'host': 'https://simplistic.amon.cx', 'hostname': 'simplistic.amon.cx', 'port': 443}
# Test Subdomain
result = parsehost('http://simplistic.amon.cx')
assert result.__dict__ == {'scheme': 'http', 'host': 'http://simplistic.amon.cx', 'hostname': 'simplistic.amon.cx', 'port': 80}
# Test Custom port
result = parsehost('http://simplistic.amon.cx:900')
assert result.__dict__ == {'scheme': 'http', 'host': 'http://simplistic.amon.cx', 'hostname': 'simplistic.amon.cx', 'port': 900}
| 1,378 | Python | .py | 23 | 51.565217 | 138 | 0.601949 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,037 | models.py | amonapp_amon/amon/apps/_healthchecks/models.py |
import hashlib
from amon.apps.core.basemodel import BaseModel
from amon.utils.dates import unix_utc_now
from amon.utils import AmonStruct
from operator import itemgetter
from amon.apps.servers.models import server_model
from datetime import datetime, timedelta
class HealthChecksResultsModel(BaseModel):
def __init__(self):
super(HealthChecksResultsModel, self).__init__()
self.collection = self.mongo.get_collection("health_checks_data")
def save(self, data=None, server=None):
now = unix_utc_now()
date_now = datetime.utcnow()
expires_at = date_now + timedelta(days=2)
for i, check in enumerate(data):
command = check.get('command')
check_id = health_checks_model.save(
server=server,
command=command
)
check_id = self.object_id(check_id)
exit_codes = {0: "ok", 1: "warning", 2: "critical"}
try:
status = exit_codes[check["exit_code"]]
except:
status = "unknown"
error = check.get('error')
output = check.get('output', "").strip()
params = {
'check_id': check_id,
'time': now,
'output': output,
'status': status,
'error': error,
'expires_at': expires_at,
}
health_checks_data_id = self.collection.insert(params)
self.collection.ensure_index([('expires_at', 1)], expireAfterSeconds=0)
self.collection.ensure_index([('time', self.desc)])
self.collection.ensure_index([('check_id', self.desc)])
last_check = {
'time': now,
'output': output,
'status': status,
'error': error
}
health_checks_model.save_last_result(check_id=check_id, last_check=last_check, timestamp=now)
data[i]['health_checks_data_id'] = health_checks_data_id
return data
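# The agent payload handled by save() above looks like this (illustrative,
# mirrors the fixtures in tests/models_test.py):
#   [{'command': 'check-disk-usage.rb -w 80 -c 90', 'exit_code': 1,
#     'output': 'CheckDisk WARNING: ...'}]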
class HealthChecksModel(BaseModel):
def __init__(self):
super(HealthChecksModel, self).__init__()
self.collection = self.mongo.get_collection('health_checks')
self.data_collection = self.mongo.get_collection("health_checks_data")
def save_last_result(self, check_id=None, last_check=None, timestamp=None):
self.collection.update({"_id": check_id}, {"$set": {"last_check": last_check, "time": timestamp}})
def save(self, command=None, server=None):
command_unique_id = hashlib.md5(command.encode()).hexdigest()
command_split = command.split(" ")
if len(command_split) > 1:
command_params = " ".join(command_split[1:])
command_string = command_split[0]
else:
command_params = False
command_string = command_split[0]
params = {'unique_id': command_unique_id, 'server_id': server['_id']}
result = self.collection.find_one(params)
if result is None:
data = {
'unique_id': command_unique_id,
'server_id': server['_id'],
"command": command_string,
'params': command_params
}
check_id = self.collection.insert(data)
self.collection.ensure_index([('unique_id', self.desc)])
self.collection.ensure_index([('params', self.desc)])
self.collection.ensure_index([('command', self.desc)])
self.collection.ensure_index([('server_id', self.desc)])
else:
check_id = result.get('_id')
return check_id
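    # e.g. "check-disk-usage.rb -w 80 -c 90" is stored as
    # command="check-disk-usage.rb", params="-w 80 -c 90"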
def sort_and_filter(self, sort_by=None, filter_by=None):
flat_list = []
sorted_result = []
all_checks = []
all_servers = server_model.get_all()
for check in self.get_all():
last_check = check.get('last_check')
try:
server = server_model.get_by_id(check['server_id'])
except:
server = None
check['server'] = server
# Append result only for existing servers
            if server is not None:
                flat_list.append(check)
            if sort_by is None and filter_by is None:
                if server is not None:
                    all_checks.append(check)
sort_priority = {'critical': 1, 'warning': 2, 'ok': 3, 'unknown': 4}
count_statuses = {'critical': 0, 'warning': 0, 'ok': 0, 'unknown': 0}
for r in flat_list:
result = r.get("last_check", {})
if result:
check_status = result.get('status')
try:
                count_statuses[check_status] += 1
except:
pass
if filter_by:
            reordered_list = []
            for el in flat_list:
                check_status = el.get('last_check', {}).get('status')
                if check_status == filter_by:
                    reordered_list.append(el)
            sorted_result = sorted(reordered_list, key=lambda d: d.get('last_check', {}).get('status'))
if sort_by:
# ?sort_by=host&filter_by=critical
if filter_by:
flat_list = sorted_result
if sort_by == 'status':
                reordered_list = []
                for el in flat_list:
                    try:
                        el['priority'] = sort_priority.get(el.get('last_check', {}).get('status'))
                    except:
                        pass
                    reordered_list.append(el)
                sorted_result = sorted(reordered_list, key=itemgetter('priority'))
elif sort_by == 'host':
sorted_result = sorted(flat_list, key=lambda d: d.get('server', {}).get('name'))
result = AmonStruct()
result.all_checks = all_checks
result.sorted_result = sorted_result
result.count_statuses = count_statuses
result.flat_list = flat_list
return result
def delete(self, check_id=None):
check_id = self.object_id(check_id)
self.collection.remove(check_id)
class HealthChecksAPIModel(BaseModel):
def __init__(self):
super(HealthChecksAPIModel, self).__init__()
self.collection = self.mongo.get_collection('health_checks')
def get_commands_for_server(self, server_id=None):
server_id = self.object_id(server_id)
result = self.collection.find({'server_id': server_id})
return result
def get_unique_commands(self):
all_commands = self.collection.find()
unique_commands_list = []
for c in all_commands:
command = c.get('command')
unique_commands_list.append(command)
result = list(set(unique_commands_list))
return result
def get_params_for_command(self, command_string=None):
unique_params_list = []
query = self.collection.find({'command': command_string})
for r in query:
params = r.get('params')
            if params is not False:
unique_params_list.append(params)
result = list(set(unique_params_list))
return result
health_checks_api_model = HealthChecksAPIModel()
health_checks_model = HealthChecksModel()
health_checks_results_model = HealthChecksResultsModel()
| 7,683 | Python | .py | 170 | 31.876471 | 109 | 0.549431 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,038 | urls.py | amonapp_amon/amon/apps/_healthchecks/urls.py |
from django.conf.urls import url
from amon.apps.healthchecks import views
urlpatterns = (
url(r'^$', views.view, name='healthchecks_view'),
url(r'^delete/(?P<check_id>\w+)/$', views.delete, name='delete_healthcheck'),
)
| 230 | Python | .py | 6 | 35.666667 | 81 | 0.707207 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,039 | views.py | amonapp_amon/amon/apps/_healthchecks/views.py |
from amon.apps.core.views import *
from amon.utils.dates import unix_utc_now
from amon.apps.healthchecks.models import health_checks_model
@login_required
def view(request):
now = unix_utc_now()
sort_by = request.GET.get('sort_by')
filter_by = request.GET.get('filter_by')
result = health_checks_model.sort_and_filter(sort_by=sort_by, filter_by=filter_by)
return render(request, 'healthchecks/view.html', {
"all_checks": result.all_checks,
"now": now,
"sort_by": sort_by,
"filter_by": filter_by,
"sorted_result": result.sorted_result,
"flat_list": result.flat_list,
"count_statuses": result.count_statuses,
})
@login_required
def delete(request, check_id=None):
health_checks_model.delete(check_id)
return redirect(reverse('healthchecks_view'))
| 844 | Python | .py | 22 | 32.863636 | 86 | 0.690506 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,040 | models_test.py | amonapp_amon/amon/apps/_healthchecks/tests/models_test.py |
import unittest
import hashlib
from datetime import datetime
from amon.apps.healthchecks.models import (
health_checks_model,
health_checks_results_model,
health_checks_api_model
)
from amon.apps.servers.models import server_model
from amon.utils.dates import unix_utc_now
class HealthChecksResultsModelTest(unittest.TestCase):
def setUp(self):
server_model.collection.insert({"name": "test"})
self.server = server_model.collection.find_one()
def tearDown(self):
self._cleanup()
def _cleanup(self):
health_checks_model.collection.remove()
health_checks_results_model.collection.remove()
server_model.collection.remove()
def save_test(self):
self._cleanup()
data = [
{u'output': u'CheckDisk WARNING: / 83.35% bytes usage (29 GiB/35 GiB)\n', u'command': u'check-disk-usage.rb -w 80 -c 90', u'exit_code': 1}
]
formated_data = health_checks_results_model.save(data=data, server=self.server)
for d in formated_data:
assert set(d.keys()) == set(['output', 'command', 'exit_code', 'health_checks_data_id'])
assert health_checks_results_model.collection.find().count() == 1
assert health_checks_model.collection.find().count() == 1
result = health_checks_model.collection.find_one()
assert result['command'] == "check-disk-usage.rb"
assert result['params'] == "-w 80 -c 90"
assert result['unique_id'] == hashlib.md5("check-disk-usage.rb -w 80 -c 90".encode()).hexdigest()
assert result['last_check']['status'] == 'warning'
self._cleanup()
for i in range(50):
health_checks_results_model.save(data=data, server=self.server)
assert health_checks_results_model.collection.find().count() == 50
assert health_checks_model.collection.find().count() == 1
result = health_checks_model.collection.find_one()
class HealthChecksModelTest(unittest.TestCase):
def _cleanup(self):
health_checks_model.collection.remove()
health_checks_results_model.collection.remove()
server_model.collection.remove()
def test_sort_and_filter(self):
self._cleanup()
server_model.collection.insert({"name": "check_sort_and_filter_default"})
server = server_model.collection.find_one()
for i in range(0, 10):
data = [{
'command': "check_sort_and_filter_default.rb",
'exit_code': 1,
'output': 'CheckDisk WARNING: / 83.35% bytes usage (29 GiB/35 GiB)'
}]
health_checks_results_model.save(data=data, server=server)
result = health_checks_model.sort_and_filter()
assert len(result.all_checks) == 1
assert result.all_checks[0]['last_check']
assert result.all_checks[0]['last_check']['status'] == 'warning'
self._cleanup()
for i in range(0, 10):
server_id = server_model.collection.insert({"name": "{0}_server_check_sort_and_filter_by_host".format(i)})
server = server_model.get_by_id(server_id)
# exit_codes = {0: "ok", 1: "warning", 2: "critical"}
exit_code = 2 if i <= 5 else 2
exit_code = 1 if i > 5 else exit_code
for j in range(0, 100):
data = [{
'command': '{0}_check_sort_and_filter_by_host.rb'.format(i),
'exit_code': exit_code,
'output': 'CheckBanner OK: port 22 open'
}]
health_checks_results_model.save(data=data, server=server)
result = health_checks_model.sort_and_filter(sort_by='status')
assert len(result.sorted_result) == 10
for i in range(0, 10):
status = 'critical' if i <= 5 else 'ok'
status = 'warning' if i > 5 else status
assert result.sorted_result[i]['last_check']['status'] == status
result = health_checks_model.sort_and_filter(sort_by='host')
assert len(result.sorted_result) == 10
for i in range(0, 10):
assert result.sorted_result[i]['server']['name'] == "{0}_server_check_sort_and_filter_by_host".format(i)
result = health_checks_model.sort_and_filter(sort_by='host', filter_by='critical')
assert len(result.sorted_result) == 6
result = health_checks_model.sort_and_filter(sort_by='host', filter_by='warning')
assert len(result.sorted_result) == 4
def test_save(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
command = "testmehere"
for i in range(0, 10):
health_checks_model.save(command=command, server=server)
assert health_checks_model.collection.find().count() == 1
def test_delete(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
command = "testmehere"
for i in range(0, 5):
health_checks_model.save(command=command, server=server)
result = health_checks_model.collection.count()
check = health_checks_model.collection.find_one()
assert result == 1
health_checks_model.delete(check_id=check['_id'])
result = health_checks_model.collection.count()
assert result == 0
def tearDown(self):
health_checks_model.collection.remove()
class HealthChecksAPIModelTest(unittest.TestCase):
def _cleanup(self):
health_checks_model.collection.remove()
health_checks_results_model.collection.remove()
server_model.collection.remove()
def test_get_commands_for_server(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
command = "testmehere -w 10"
for i in range(0, 10):
health_checks_model.save(command=command, server=server)
second_command = "testmeagain -c 10"
for i in range(0, 5):
health_checks_model.save(command=second_command, server=server)
result = health_checks_api_model.get_commands_for_server(server_id=server['_id'])
assert result.count() == 2
def test_get_unique_commands(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
for i in range(0, 10):
command = "testcommand{0} -w {0} -c {0}".format(i)
health_checks_model.save(command=command, server=server)
result = health_checks_api_model.get_unique_commands()
assert len(result) == 10
def test_get_params_for_command(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
for i in range(0, 10):
command = "testcommand -w {0} -c {0}".format(i)
health_checks_model.save(command=command, server=server)
# Duplicate - still has to return only 10 unique params
for i in range(0, 10):
command = "testcommand -w {0} -c {0}".format(i)
health_checks_model.save(command=command, server=server)
result = health_checks_api_model.get_params_for_command(command_string="testcommand")
assert len(result) == 10
| 7,785 | Python | .py | 151 | 41.238411 | 150 | 0.628125 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,041 | models.py | amonapp_amon/amon/apps/dashboards/models.py |
from operator import itemgetter
from django.urls import reverse
from amon.apps.core.basemodel import BaseModel
from amon.apps.api.utils import generate_api_key
from amon.utils.dates import unix_utc_now
from amon.templatetags.charts import yaxis
from amon.apps.processes.models import process_model
from amon.apps.servers.models import server_model
from amon.apps.system.models import system_model
from amon.apps.plugins.models import plugin_model
from amon.apps.devices.models import volumes_model, interfaces_model
from amon.apps.tags.models import tags_model
from amon.apps.healthchecks.models import health_checks_model
class DashboardModel(BaseModel):
def __init__(self):
super(DashboardModel, self).__init__()
self.collection = self.mongo.get_collection('dashboards')
def create(self, data=None):
result = self.insert(data)
return result
def get_all(self, account_id=None):
result = None
if account_id:
params = {'account_id': account_id}
result = super(DashboardModel, self).get(params=params)
return result
class DashboardMetricsModel(BaseModel):
def __init__(self):
super(DashboardMetricsModel, self).__init__()
self.collection = self.mongo.get_collection('dashboard_metrics')
def get_or_create_metric(self, data=None):
result = None
process_id = data.get('process_id', '')
plugin_id = data.get('plugin_id')
metric_id = data.get('metric_id')
healthcheck_id = data.get('healthcheck_id')
dashboard_id = data.get('dashboard_id')
metric_type = data.get('metric_type') # Global metrics all have this
data['unique_id'] = generate_api_key()
data['metric_type'] = 'system' if process_id == '' else 'process'
data['metric_type'] = 'plugin' if plugin_id else data['metric_type']
data['metric_type'] = 'healthcheck' if healthcheck_id else data['metric_type']
# Check for global metrics here
if metric_type in ['system_global', 'process_global', 'plugin_global']:
data['metric_type'] = metric_type
data = self.remove_keys(data, ['server_id', 'process_id', 'plugin_id'])
data = self.keys_to_mongoid(data=data, keys=['server_id', 'process_id',
'dashboard_id', 'plugin_id', 'metric_id', 'gauge_id', 'healthcheck_id'])
if dashboard_id:
result = super(DashboardMetricsModel, self).get_or_create(data)
self.collection.ensure_index([('account_id', self.desc)], background=True)
self.collection.ensure_index([('server_id', self.desc)], background=True)
self.collection.ensure_index([('process_id', self.desc)], background=True)
self.collection.ensure_index([('plugin_id', self.desc)], background=True)
self.collection.ensure_index([('healthcheck_id', self.desc)], background=True)
return result
# Global
def get_all_metrics(self, account=None):
data = []
process_checks = ['cpu', 'memory']
system_additional_checks = {'memory': 'used_percent', 'disk': 'percent'}
system_keys = system_model.keys.copy()
try:
system_keys.pop('windows_cpu')
system_keys.pop('memory')
system_keys.pop('disk')
except:
pass
for check, metric_list in system_keys.items():
for metric in metric_list:
key = metric.key
# Overwrite keys for better visual presentation
if check == 'network':
key = 'inbound' if metric.key == 'i' else 'outbound'
name = "{0}.{1}".format(check, key)
_id = "check:{0}.key:{1}.metric_type:system_global".format(check, metric.key)
data.append([_id, name, 'System metrics'])
for check, key in system_additional_checks.items():
name = "{0}.percent".format(check)
_id = "check:{0}.key:{1}.metric_type:system_global".format(check, key)
data.append([_id, name, 'System metrics'])
for p in process_model.get_all_unique():
for check in process_checks:
name = "{0}.{1}".format(p, check)
_id = "check:{0}.key:{1}.metric_type:process_global".format(check, p)
data.append([_id, name, 'Process Metrics'])
for el in plugin_model.get_all_unique_gauge_keys_list():
append = True
try:
plugin, gauge, key = el.split('.')
except:
append = False
if append:
_id = "plugin:{0}.gauge:{1}.key:{2}.metric_type:plugin_global".format(plugin, gauge, key)
name = "{0}.{1}.{2}".format(plugin, gauge, key)
data.append([_id, name, 'Plugin Metrics'])
return data
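    # Each entry is [_id, display_name, group]; the _id encodes the metric as
    # dot-separated key:value pairs, e.g. (illustrative)
    # "check:loadavg.key:minute.metric_type:system_global"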
def get_server_metrics(self, account_id=None, server_id=None):
data = []
constants = ['cpu', 'memory', 'loadavg', 'network']
process_charts = ['cpu', 'memory']
processes = process_model.get_all_for_server(server_id)
plugins = plugin_model.get_for_server(server_id=server_id)
volumes = volumes_model.get_all_for_server(server_id=server_id)
interfaces = interfaces_model.get_all_for_server(server_id=server_id)
for v in volumes:
name = "Disk.{0}".format(v['name'])
_id = "server_id:{0}.check:disk.device_id:{1}".format(server_id, v['_id'])
data.append([_id, name, 'System metrics'])
for v in interfaces:
name = "Adapter.{0}".format(v['name'])
_id = "server_id:{0}.check:network.device_id:{1}".format(server_id, v['_id'])
data.append([_id, name, 'System metrics'])
for check in constants:
name = "{0}".format(check.title())
_id = "server_id:{0}.check:{1}".format(server_id, check)
data.append([_id, name, 'System metrics'])
for p in processes:
for check in process_charts:
name = "{0}.{1}".format(p['name'], check)
_id = "server_id:{0}.process_id:{1}.check:{2}".format(server_id, p['_id'], check)
data.append([_id, name, 'Process Metrics'])
for p in plugins:
gauges = plugin_model.get_gauges_cursor(plugin=p)
for g in gauges:
name = "{0}.{1}".format(p.get('name'), g.get('name'))
_id = "server_id:{0}.plugin_id:{1}.gauge_id:{2}.check:plugin".format(server_id, p['_id'], g['_id'])
data.append([_id, name, 'Plugin Metrics'])
data = sorted(data, key=itemgetter(1))
return data
def get_all(self, account_id=None, dashboard_id=None, public=None):
result_list = []
query = []
params = {'dashboard_id': dashboard_id}
params = self.keys_to_mongoid(data=params, keys=['dashboard_id'])
if dashboard_id:
query = super(DashboardMetricsModel, self).get(params=params)
utc_now = unix_utc_now()
for metric in query:
mongo_id = metric.get('_id')
server_id = metric.get('server_id')
metric_type = metric.get('metric_type')
unique_id = metric.get('unique_id')
check = metric.get('check')
order = metric.get('order', 0)
tags = metric.get('tags', [])
tags_list = tags_model.get_list_of_tags(tags_list=tags, to_dict=True)
server = server_model.get_by_id(server_id)
process = process_model.get_by_id(metric.get('process_id'))
plugin = plugin_model.get_by_id(metric.get('plugin_id'))
gauge = plugin_model.get_gauge_by_id(gauge_id=metric.get('gauge_id'))
volume = volumes_model.get_by_id(metric.get('device_id'))
interface = interfaces_model.get_by_id(metric.get('device_id'))
healthcheck_metric = health_checks_model.get_by_id(metric.get('healthcheck_id'))
append = False
unit = yaxis(check)
            if metric_type == 'system_global' and check in ('memory', 'disk'):
                unit = '%'
if public:
url = reverse('public_dashboard_metric', kwargs={"metric_id": mongo_id})
else:
url = reverse('dashboard_metric', kwargs={"metric_id": mongo_id})
result = {
'id': mongo_id,
'unique_id': unique_id,
'metric_type': metric_type,
'url': url,
'utcnow': utc_now,
'name': '',
'unit': unit,
'tags': tags_list,
'order': order
}
if server:
result.update({'server_id': server_id, 'type': 'server_metric','server_name' :server.get('name')})
if metric_type == 'system':
result['name'] = "{0}".format(check)
if volume:
result['name'] = u"{0}.{1}".format(result['name'], volume['name'])
if interface:
result['name'] = u"{0}.{1}".format(result['name'], interface['name'])
append = True
elif metric_type == 'process' and process:
process_name = process.get('name')
result['name'] = u"{0}.{1}".format(process_name, check)
result['process_id'] = process['_id']
append = True
elif metric_type == 'plugin' and plugin and gauge:
result['name'] = u"{0}.{1}".format(plugin.get('name'), gauge.get('name'))
result['plugin_id'] = plugin['_id']
result['gauge_id'] = gauge['_id']
append = True
result['name'] = u"{0}.{1}".format(server.get('name'), result['name'])
elif healthcheck_metric:
result['healthcheck'] = healthcheck_metric
result['healthcheck_id'] = healthcheck_metric.get('_id')
try:
del result['healthcheck']['_id']
del result['healthcheck']['server_id']
del result['healthcheck']['tags']
del result['healthcheck']['file_id'] # Custom scripts
except:
pass
result['type'] = 'healthcheck'
append = True
else:
key = metric.get('key')
# Overwrite keys for better visual presentation
if check == 'network':
key = 'inbound' if key == 'i' else 'outbound'
result['name'] = u"{0}.{1}".format(check, key)
append = True
if metric_type == 'plugin_global':
result['name'] = u'{0}.{1}.{2}'.format(metric.get('plugin'), metric.get('gauge'), metric.get('key'))
append = True
result = self.mongoid_to_str(result, ['server_id', 'id', 'process_id', 'plugin_id', 'metric_id', 'gauge_id', 'healthcheck_id',])
if append:
result_list.append(result)
        sorted_list = sorted(result_list, key=itemgetter('order'))
return sorted_list
def get_all_grouped_by_server_name(self, account_id=None, dashboard_id=None):
all_metrics = self.get_all(account_id=account_id, dashboard_id=dashboard_id)
server_ids = [m.get('server_id') for m in all_metrics if m.get('server_id')] # Don't add app metrics here
server_ids = list(set(server_ids))
metrics_dict = {'server_metrics': {}, "app_metrics": [], "global_metrics": []}
server_dict = {}
for i in server_ids:
server_dict[i] = {
'metrics': [],
'name': ""
}
for metric in all_metrics:
server_id = metric.get('server_id', False)
server_name = metric.get('server_name')
metric_type = metric.get('metric_type')
if metric_type == 'application':
metrics_dict['app_metrics'].append(metric)
elif metric_type in ['system_global', 'process_global', 'plugin_global']:
metrics_dict['global_metrics'].append(metric)
elif server_id:
server_dict[server_id]['metrics'].append(metric)
server_dict[server_id]['name'] = server_name
metrics_dict['server_metrics'] = server_dict
return metrics_dict
def delete_all(self, account_id=None, dashboard_id=None):
params = {'account_id': account_id, 'dashboard_id': dashboard_id}
params = self.keys_to_mongoid(data=params, keys=['dashboard_id'])
if account_id and dashboard_id:
self.collection.remove(params)
def update_order(self, dashboard_id=None, new_order=None):
        if isinstance(new_order, list):
for order, elem in enumerate(new_order):
_id = self.object_id(elem)
self.collection.update({"_id": _id}, {"$set": {'order': order}}, upsert=True)
self.collection.ensure_index([('order', self.desc)], background=True)
dashboard_model = DashboardModel()
dashboard_metrics_model = DashboardMetricsModel()
| 13,523 | Python | .py | 260 | 39.273077 | 140 | 0.561949 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,042 | urls.py | amonapp_amon/amon/apps/dashboards/urls.py |
from django.conf.urls import url
from amon.apps.dashboards import views
from amon.apps.dashboards import api
urlpatterns = (
url(r'^$', views.index, name='dashboards'),
url(r'^create/$', views.create_dashboard, name='create_dashboard'),
url(r'^edit/(?P<dashboard_id>\w+)/$', views.edit_dashboard, name='edit_dashboard'),
url(r'^reorder/(?P<dashboard_id>\w+)/$', views.reorder_dashboard, name='reorder_dashboard'),
url(r'^view/(?P<dashboard_id>\w+)/$', views.view_dashboard, name='view_dashboard'),
url(r'^delete/(?P<dashboard_id>\w+)/$', views.delete_dashboard, name='delete_dashboard'),
# Ajax
url(r'^a/edit_dashboard/(?P<dashboard_id>\w+)/$', api.edit_dashboard, name='ajax_dashboard_edit'),
url(r'^a/reorder_metrics/(?P<dashboard_id>\w+)/$', api.reorder_metrics, name='ajax_dashboard_reorder_metrics'),
url(r'^a/add_metric/(?P<dashboard_id>\w+)/$', api.add_metric, name='ajax_dashboard_add_metric'),
url(r'^a/remove_metric/$', api.remove_metric, name='ajax_dashboard_remove_metric'),
url(r'^a/get_all_metrics/(?P<dashboard_id>\w+)/$', api.get_all_metrics, name='ajax_dashboard_get_all_metrics'),
url(r'^a/get_server_metrics/$', api.get_server_metrics, name='ajax_dashboard_get_server_metrics'),
# Metric views
url(r'^chart/(?P<metric_id>\w+)/$', api.dashboard_metric, name='dashboard_metric'),
# Public
url(r'^public/charts/(?P<metric_id>\w+)/$', api.public_dashboard_metric, name='public_dashboard_metric'),
url(r'^(?P<account_id>\w+)/(?P<dashboard_id>\w+)/$', views.public_dashboard, name='public_dashboard'),
)
| 1,600 | Python | .py | 23 | 64.956522 | 115 | 0.677522 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,043 | api.py | amonapp_amon/amon/apps/dashboards/api.py |
from amon.apps.servers.models import server_model
from django.contrib.auth.decorators import login_required
from amon.apps.dashboards.models import dashboard_model, dashboard_metrics_model
from rest_framework.decorators import api_view
from rest_framework.response import Response
from amon.apps.system.views import get_system_data_after, get_global_system_data_after
from amon.apps.processes.views import get_process_data_after, get_global_process_data_after
from amon.apps.plugins.views import get_plugin_data_after, get_global_plugin_data_after
@api_view(['GET'])
def public_dashboard_metric(request, metric_id=None):
timestamp = request.GET.get('timestamp')
dashboard_metric = dashboard_metrics_model.get_by_id(metric_id)
server_id = dashboard_metric.get('server_id')
process_id = dashboard_metric.get('process_id')
plugin_id = dashboard_metric.get('plugin_id')
metric_type = dashboard_metric.get('metric_type')
check = dashboard_metric.get('check')
    # Tag filtering: the global metric types below only aggregate over matching servers
tags = dashboard_metric.get('tags', [])
filtered_servers = server_model.get_all()
if len(tags) > 0:
filtered_servers = server_model.get_with_tags(tags=tags)
response = {}
if metric_type == 'system':
response = get_system_data_after(timestamp=timestamp, server_id=server_id,
check=check)
elif metric_type == 'process':
response = get_process_data_after(timestamp=timestamp, server_id=server_id,
process_id=process_id, check=check)
elif metric_type == 'plugin':
gauge_id = dashboard_metric.get('gauge_id')
response = get_plugin_data_after(timestamp=timestamp,
plugin_id=plugin_id, gauge_id=gauge_id)
elif metric_type == 'system_global':
key = dashboard_metric.get('key') # loadavg.minute
response = get_global_system_data_after(timestamp=timestamp, check=check, key=key, filtered_servers=filtered_servers)
elif metric_type == 'process_global':
key = dashboard_metric.get('key') # loadavg.minute
response = get_global_process_data_after(timestamp=timestamp, check=check, key=key, filtered_servers=filtered_servers)
elif metric_type == 'plugin_global':
response = get_global_plugin_data_after(timestamp=timestamp, metric=dashboard_metric, filtered_servers=filtered_servers)
return Response(response)
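# Reached via the 'public_dashboard_metric' route in urls.py, e.g. (illustrative):
#   GET .../public/charts/<metric_id>/?timestamp=1438936491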
@login_required
@api_view(['GET'])
def dashboard_metric(request, metric_id=None):
return public_dashboard_metric(request, metric_id=metric_id)
@login_required
@api_view(['POST'])
def edit_dashboard(request, dashboard_id):
data = request.data
dashboard_model.update(data, dashboard_id)
response = {
'response': [],
}
return Response(response)
@login_required
@api_view(['POST'])
def add_metric(request, dashboard_id):
data = request.data
check = data.get('check')
metric_type = data.get('metric_type')
valid_checks = ['cpu', 'memory', 'loadavg', 'network', 'disk', 'plugin', 'metric', 'healthcheck']
result = None
add_metric = False
# Check if the metric/server exists and belongs to the same account
if check in valid_checks:
add_metric = True
if metric_type in ['process_global', 'plugin_global', 'system_global']:
add_metric = True
if add_metric:
data['account_id'] = request.account_id
data['dashboard_id'] = dashboard_id
result = dashboard_metrics_model.get_or_create_metric(data=data)
response = 'Created' if result else 'Error'
response = {
'response': response,
}
return Response(response)
@login_required
@api_view(['POST'])
def reorder_metrics(request, dashboard_id):
data = request.data
new_order = data.get('new_order')
result = None
    if isinstance(new_order, list):
dashboard_metrics_model.update_order(dashboard_id=dashboard_id, new_order=new_order)
result = True
response = 'Metrics Order Updated' if result else 'Error'
response = {
'response': response,
}
return Response(response)
@login_required
@api_view(['POST'])
def remove_metric(request):
data = request.data
metric_id = data.get('metric_id')
    dashboard_metrics_model.delete(metric_id)
    response = {
        'response': 'OK',
    }
return Response(response)
@login_required
@api_view(['GET'])
def get_all_metrics(request, dashboard_id):
all_metrics = dashboard_metrics_model.get_all(account_id=request.account_id, dashboard_id=dashboard_id)
response = {
'data': all_metrics,
}
return Response(response)
# Used in edit dashboard, select server - dropdown menu
@login_required
@api_view(['POST', 'GET'])
def get_server_metrics(request):
data = request.data
server_id = data.get('server_id')
if server_id == 'all':
metrics = dashboard_metrics_model.get_all_metrics()
else:
metrics = dashboard_metrics_model.get_server_metrics(account_id=request.account_id, server_id=server_id)
response = {
'data': metrics,
}
return Response(response)
| 5,138 | Python | .py | 125 | 35.736 | 128 | 0.700948 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,044 | tests.py | amonapp_amon/amon/apps/dashboards/tests.py |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
| 383 | Python | .py | 12 | 27.583333 | 79 | 0.705722 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,045 | views.py | amonapp_amon/amon/apps/dashboards/views.py |
from django.shortcuts import render
from django.contrib import messages
from django.shortcuts import redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.http import Http404
from amon.apps.dashboards.models import dashboard_model, dashboard_metrics_model
from amon.apps.servers.models import server_model
from amon.apps.healthchecks.models import health_checks_model
from amon.utils.dates import (
datetime_to_unixtime,
)
@login_required
def index(request):
dashboards = dashboard_model.get_all(account_id=request.account_id)
dashboards_data = []
if dashboards.clone().count() > 0:
for d in dashboards:
metrics = dashboard_metrics_model.get_all_grouped_by_server_name(account_id=request.account_id, dashboard_id=d['_id'])
dashboards_data.append({
'metrics': metrics,
'dashboard': d
})
return render(request, 'dashboards/all.html', {
"dashboards_data": dashboards_data
})
@login_required
def create_dashboard(request):
data = {'name': '', 'account_id': request.account_id}
dashboard_id = dashboard_model.create(data=data)
url = reverse('edit_dashboard', kwargs={'dashboard_id': dashboard_id})
return redirect(url)
def _fill_metrics_arrays(all_metrics=None):
charts_list = []
health_checks_list = []
for m in all_metrics:
metric_type = m.get('type')
if metric_type == 'healthcheck':
check_id = m.get('healthcheck_id')
server = None
if check_id:
check = health_checks_model.get_by_id(check_id)
server = server_model.get_by_id(check.get('server_id', None))
                if server is not None:
check['server'] = server
health_checks_list.append(check)
else:
charts_list.append(m)
return {
'charts': charts_list,
'health_checks': health_checks_list
}
def public_dashboard(request, account_id, dashboard_id):
enddate = request.GET.get('enddate')
duration = request.GET.get('duration', 1800)
duration = int(duration)
now_unix = datetime_to_unixtime(request.now)
max_date = now_unix * 1000
if enddate:
date_to = int(enddate)
else:
date_to = now_unix
date_from = date_to - int(duration)
dashboard = dashboard_model.get_by_id(dashboard_id)
public = dashboard.get('shared', False)
if public == False:
raise Http404
all_metrics = dashboard_metrics_model.get_all(
dashboard_id=dashboard_id,
public=public)
all_existing_server_ids = server_model.get_all_ids()
metrics_array = _fill_metrics_arrays(all_metrics=all_metrics)
return render(request, 'dashboards/public.html', {
"account_id": account_id,
"duration": duration,
"health_checks": metrics_array['health_checks'],
"selected_charts": metrics_array['charts'],
"date_from" : date_from,
"date_to" : date_to,
"now": now_unix,
"max_date": max_date,
"dashboard": dashboard,
"enddate": enddate,
"all_existing_server_ids": all_existing_server_ids
})
@login_required
def view_dashboard(request, dashboard_id):
enddate = request.GET.get('enddate')
duration = request.GET.get('duration', 1800)
duration = int(duration)
now_unix = datetime_to_unixtime(request.now)
max_date = now_unix * 1000
if enddate:
date_to = int(enddate)
else:
date_to = now_unix
date_from = date_to - int(duration)
all_dashboards = dashboard_model.get_all(account_id=request.account_id)
all_existing_server_ids = server_model.get_all_ids()
dashboard = dashboard_model.get_by_id(dashboard_id)
all_metrics = dashboard_metrics_model.get_all(account_id=request.account_id, dashboard_id=dashboard_id)
metrics_array = _fill_metrics_arrays(all_metrics=all_metrics)
if len(all_metrics) == 0:
messages.add_message(request, messages.INFO, 'To view this dashboard add at least 1 metric')
return redirect(reverse('edit_dashboard', kwargs={'dashboard_id': dashboard_id}))
return render(request, 'dashboards/view.html', {
"all_dashboards": all_dashboards,
"duration": duration,
"selected_charts": metrics_array['charts'],
"health_checks": metrics_array['health_checks'],
"date_from" : date_from,
"date_to" : date_to,
"now": now_unix,
"max_date": max_date,
"dashboard": dashboard,
"enddate": enddate,
"all_existing_server_ids": all_existing_server_ids
})
@login_required
def delete_dashboard(request, dashboard_id):
dashboard_metrics_model.delete_all(account_id=request.account_id, dashboard_id=dashboard_id)
dashboard_model.delete(dashboard_id)
url = reverse('dashboards')
return redirect(url)
@login_required
def edit_dashboard(request, dashboard_id):
dashboard = dashboard_model.get_by_id(dashboard_id)
all_servers = server_model.get_all(account_id=request.account_id)
all_healthchecks = health_checks_model.get_all()
return render(request, 'dashboards/edit.html', {
"dashboard": dashboard,
"all_servers": all_servers,
"all_healthchecks": all_healthchecks
})
@login_required
def reorder_dashboard(request, dashboard_id):
dashboard = dashboard_model.get_by_id(dashboard_id)
return render(request, 'dashboards/reorder.html', {
"dashboard": dashboard,
})
| 5,598 | Python | .py | 140 | 33.157143 | 130 | 0.668886 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,046 | models_test.py | amonapp_amon/amon/apps/dashboards/tests/models_test.py |
import unittest
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.dashboards.models import dashboard_model, dashboard_metrics_model
from amon.apps.plugins.models import plugin_model
class DashboardModelTest(unittest.TestCase):
def setUp(self):
User.objects.all().delete()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.account_id = 1
self.collection = dashboard_model.collection
self.servers_collection = dashboard_model.mongo.get_collection('servers')
self.servers_collection.insert({"name" : "test"})
self.server = self.servers_collection.find_one()
self.server_id = self.server['_id']
def tearDown(self):
self.servers_collection.remove()
self.collection.remove()
User.objects.all().delete()
def _cleanup(self):
self.collection.remove()
def get_all_test(self):
self._cleanup()
for i in range(5):
self.collection.insert({'test': 1, 'server': i})
result = dashboard_model.get_all()
eq_(result, None)
for i in range(5):
self.collection.insert({'test': 1, 'account_id': i})
result = dashboard_model.get_all(account_id=1)
eq_(result.count(), 1)
self._cleanup()
def create_test(self):
self._cleanup()
result = dashboard_model.create({'test': 1})
assert len(str(result)) == 24 # object_id
self._cleanup()
class DashboardMetricsModelTest(unittest.TestCase):
def setUp(self):
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.account_id = 1
self.collection = dashboard_metrics_model.collection
self.servers_collection = dashboard_metrics_model.mongo.get_collection('servers')
self.servers_collection.insert({"name" : "testserver"})
self.server = self.servers_collection.find_one()
self.server_id = self.server['_id']
dashboard_model.collection.insert({"name" : "testdashboard"})
self.dashboard = dashboard_model.collection.find_one()
self.dashboard_id = self.dashboard['_id']
self.process_collection = dashboard_metrics_model.mongo.get_collection('processes')
self.process_collection.insert({"name" : "testprocess"})
self.process = self.process_collection.find_one()
self.process_id = self.process['_id']
self.plugins_collection = dashboard_metrics_model.mongo.get_collection('plugins')
self.plugins_collection.insert({"name" : "testplugin"})
self.plugin = self.plugins_collection.find_one()
self.plugin_id = self.plugin['_id']
# Will populate the keys
data = {'t': 1, 'count.count_key': 2, 'second.second_key': 4, 'more.more_key': 5 }
plugin_model.save_gauges(plugin=self.plugin, data=data, time=1)
self.metrics_collection = dashboard_metrics_model.mongo.get_collection('metrics')
self.metrics_collection.insert({"name" : "testmetric", "type": "gauge"})
self.metric = self.metrics_collection.find_one()
self.metric_id = self.metric['_id']
def tearDown(self):
self.collection.remove()
dashboard_model.collection.remove()
self.servers_collection.remove()
self.process_collection.remove()
self.plugins_collection.remove()
plugin_model.gauge_collection.remove()
User.objects.all().delete()
def _cleanup(self):
dashboard_model.collection.remove()
self.collection.remove()
def get_all_metrics_test(self):
self.process_collection.remove()
plugin_model.collection.remove()
for i in range(0, 2):
self.process_collection.insert({"name" : "test-{0}".format(i), 'server': self.server_id})
plugin = plugin_model.get_or_create(name='test1', server_id=self.server['_id'])
data = {'t': 1, 'count.count_key': 2, 'second.second_key': 4, 'more.more_key': 5 }
plugin_model.save_gauges(plugin=plugin, data=data, time=1)
result = dashboard_metrics_model.get_all_metrics()
for r in result:
params = r[0].split('.')
final_params = dict(x.split(':') for x in params)
assert final_params['metric_type']
metric_type = final_params['metric_type']
if metric_type == 'system_global':
assert final_params['check'] in ['disk', 'memory', 'network', 'loadavg', 'cpu']
if final_params['check'] == 'network':
assert final_params['key'] in ['i', 'o']
if final_params['check'] == 'disk':
assert final_params['key'] == 'percent'
if final_params['check'] == 'memory':
assert final_params['key'] == 'used_percent'
if final_params['check'] == 'cpu':
assert final_params['key'] in ['idle', 'system', 'user', 'iowait', 'steal']
elif metric_type == 'process_global':
assert final_params['key'] in ['test-0', 'test-1']
assert final_params['check'] in ['cpu', 'memory']
elif metric_type == 'plugin_global':
assert final_params['gauge'] in ['count', 'second', 'more']
assert final_params['key'] in ['count_key', 'second_key', 'more_key']
def get_server_metrics_test(self):
result = dashboard_metrics_model.get_server_metrics(server_id=self.server_id)
eq_(len(result), 4) # Cpu, Memory, Loadavg, Network
self.process_collection.insert({"name" : "test", 'server': self.server_id})
result = dashboard_metrics_model.get_server_metrics(server_id=self.server_id)
eq_(len(result), 6) # Cpu, Memory, Loadavg, test:cpu, test:memory, Network
self.process_collection.remove({'server': self.server_id})
def get_or_create_metric_test(self):
self._cleanup()
dashboard_metrics_model.get_or_create_metric({'metric': 'boo', 'server_id': self.server_id})
result = self.collection.find().count()
eq_(result, 0) # Don't save without dashboard_id
dashboard_metrics_model.get_or_create_metric({'metric': 'boo', 'server_id': self.server_id,
'dashboard_id': self.dashboard_id})
result = self.collection.find_one()
assert 'unique_id' in result.keys()
assert 'server_id' in result.keys()
# Todo - save only valid metrics
self._cleanup()
dashboard_metrics_model.get_or_create_metric({'check': 'systemd', 'server_id': 'all',
'process_id': 'all',
'key': 'cpu',
'metric_type': 'process_global',
'dashboard_id': self.dashboard_id})
result = self.collection.find().count()
eq_(result, 1) # Save global process metrics
result = self.collection.find_one()
assert result['metric_type'] == 'process_global'
assert result['key'] == 'cpu'
assert result['check'] == 'systemd'
self._cleanup()
dashboard_metrics_model.get_or_create_metric({'check': 'memory', 'server_id': 'all',
'key': 'percent',
'metric_type': 'system_global',
'dashboard_id': self.dashboard_id})
result = self.collection.find().count()
eq_(result, 1) # Save global system metrics
result = self.collection.find_one()
assert result['metric_type'] == 'system_global'
assert result['key'] == 'percent'
assert result['check'] == 'memory'
self._cleanup()
dashboard_metrics_model.get_or_create_metric({'check': 'memory', 'server_id': 'all',
'key': 'percent',
'metric_type': 'system_global',
'dashboard_id': self.dashboard_id})
result = self.collection.find().count()
eq_(result, 1) # Save global plugin metrics
result = self.collection.find_one()
assert result['metric_type'] == 'system_global'
assert result['key'] == 'percent'
assert result['check'] == 'memory'
def get_all_test(self):
self._cleanup()
# Test system metrics
for i in range(5):
dashboard_metrics_model.get_or_create_metric({'check': 'cpu', 'server_id': self.server_id,
'dashboard_id': self.dashboard_id, 'account_id': self.account_id})
result = dashboard_metrics_model.get_all()
eq_(len(result), 0) # Don't get anything without account/dashboard id
result = dashboard_metrics_model.get_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
eq_(len(result), 5) # Returns a list with values
for r in result:
self.assertCountEqual(set(r.keys()),
set(['tags', 'url', 'unit', 'utcnow', 'type', 'server_id', 'name', 'id', 'unique_id', 'metric_type', 'server_name', 'order']))
assert r['type'] == 'server_metric'
assert r['metric_type'] == 'system'
assert r['server_name'] == self.server['name']
dashboard_metrics_model.collection.remove()
# Test process metrics
for i in range(5):
dashboard_metrics_model.get_or_create_metric({'check': 'cpu', 'server_id': self.server_id,
'dashboard_id': self.dashboard_id, 'account_id': self.account_id,
'process_id': self.process_id})
result = dashboard_metrics_model.get_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
eq_(len(result), 5) # Returns a list with values
for r in result:
self.assertCountEqual(set(r.keys()),
set(['tags', 'url', 'unit', 'utcnow', 'server_id', 'type', 'name', 'id', 'unique_id','metric_type', 'process_id', 'server_name', 'order']))
assert r['type'] == 'server_metric'
assert r['metric_type'] == 'process'
dashboard_metrics_model.collection.remove()
plugin_model.gauge_collection.remove()
gauge = plugin_model.get_or_create_gauge_by_name(plugin=self.plugin, name='count')
# Test plugin metrics
for i in range(5):
dashboard_metrics_model.get_or_create_metric({
'gauge_id': gauge['_id'],
'server_id': self.server_id,
'dashboard_id': self.dashboard_id,
'account_id': self.account_id,
'plugin_id': self.plugin_id,
})
result = dashboard_metrics_model.get_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
eq_(len(result), 5) # Returns a list with values
for r in result:
self.assertCountEqual(set(r.keys()), set(['url', 'unit', 'utcnow','server_id', 'name', 'id', 'unique_id',
'metric_type', 'plugin_id', 'type', 'tags',
'gauge_id', 'server_name', 'order']))
assert r['type'] == 'server_metric'
assert r['metric_type'] == 'plugin'
assert r['name'] == "{0}.{1}.{2}".format(self.server['name'], self.plugin['name'], gauge['name'])
plugin_model.gauge_collection.remove()
self._cleanup()
def get_all_grouped_by_server_name_test(self):
self._cleanup()
dashboard_metrics_model.collection.remove()
# Test system metrics
for i in range(5):
dashboard_metrics_model.get_or_create_metric({
'check': 'cpu',
'server_id': self.server_id,
'dashboard_id': self.dashboard_id,
'account_id': self.account_id})
result = dashboard_metrics_model.get_all_grouped_by_server_name(account_id=self.account_id, dashboard_id=self.dashboard_id)
server_metrics = result['server_metrics']
eq_(len(server_metrics), 1)
for i, v in server_metrics.items():
eq_(len(v['metrics']), 5)
assert i == str(self.server_id)
assert v['name'] == self.server['name']
dashboard_metrics_model.collection.remove()
self._cleanup()
def delete_all_test(self):
self._cleanup()
for i in range(5):
dashboard_metrics_model.get_or_create_metric({
'metric': 'boo',
'server_id': self.server_id,
'dashboard_id': self.dashboard_id,
'account_id': self.account_id}
)
result = dashboard_metrics_model.get_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
eq_(len(result), 5) # Returns a list with values
dashboard_metrics_model.delete_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
result = dashboard_metrics_model.get_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
        eq_(len(result), 0)  # Returns an empty list after delete_all
self._cleanup()
    def reorder_test(self):
self._cleanup()
for i in range(5):
dashboard_metrics_model.get_or_create_metric({
'metric': 'order-{0}'.format(i),
'server_id': self.server_id,
'dashboard_id': self.dashboard_id,
'account_id': self.account_id}
)
result = dashboard_metrics_model.get_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
ordered_ids = [x['id'] for x in result]
dashboard_metrics_model.update_order(dashboard_id=self.dashboard_id, new_order=ordered_ids)
result = dashboard_metrics_model.get_all(account_id=self.account_id, dashboard_id=self.dashboard_id)
new_order = []
for r in result:
new_order.append(r['id'])
assert ordered_ids == new_order
self._cleanup()
| 14,160
|
Python
|
.py
| 262
| 41.969466
| 155
| 0.602434
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,047
|
api_test.py
|
amonapp_amon/amon/apps/dashboards/tests/api_test.py
|
import json
from django.test.client import Client
from django.urls import reverse
from django.test import TestCase
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.dashboards.models import dashboard_model, dashboard_metrics_model
from amon.apps.plugins.models import plugin_model
class TestDashboardAPI(TestCase):
def setUp(self):
User.objects.all().delete()
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.c.login(username='foo@test.com', password='qwerty')
self.account_id = 1
self.collection = dashboard_model.collection
self.servers_collection = dashboard_model.mongo.get_collection('servers')
self.servers_collection.insert({"name" : "testserver", 'account_id': self.account_id})
self.server = self.servers_collection.find_one()
self.server_id = self.server['_id']
self.server_id_str = str(self.server['_id'])
self.collection.insert({"name" : "testdashboard"})
self.dashboard = self.collection.find_one()
self.dashboard_id = self.dashboard['_id']
self.process_collection = dashboard_metrics_model.mongo.get_collection('processes')
self.process_collection.remove()
self.process_collection.insert({"name" : "testprocess"})
self.process = self.process_collection.find_one()
self.process_id = self.process['_id']
self.plugins_collection = dashboard_metrics_model.mongo.get_collection('plugins')
self.plugins_collection.remove()
self.plugins_collection.insert({"name" : "testplugin"})
self.plugin = self.plugins_collection.find_one()
self.plugin_id = self.plugin['_id']
self.metrics_collection = dashboard_metrics_model.mongo.get_collection('metrics')
self.metrics_collection.remove()
self.metrics_collection.insert({"name" : "testmetric" , 'account_id': self.account_id, "type": 'gauge'})
self.metric = self.metrics_collection.find_one()
self.metric_id = self.metric['_id']
def tearDown(self):
self.servers_collection.remove()
self.collection.remove()
self.process_collection.remove()
self.plugins_collection.remove()
self.metrics_collection.remove()
self.c.logout()
self.user.delete()
def _cleanup(self):
self.collection.remove()
dashboard_metrics_model.collection.remove()
def test_add_metric(self):
self._cleanup()
url = reverse('ajax_dashboard_add_metric', kwargs={'dashboard_id': self.dashboard_id})
# Check adding different metrics
# System metrics
data = {"check": "cpu" , "server_id": "{0}".format(self.server_id_str),
"account_id": self.account_id }
response = self.c.post(url, data=json.dumps(data), content_type='application/json')
assert response.status_code == 200
json_string = response.content.decode('utf-8')
result_data = json.loads(json_string)
assert result_data['response'] == 'Created'
result = dashboard_metrics_model.collection.find_one()
assert result['server_id'] == self.server_id
assert result['dashboard_id'] == self.dashboard_id
assert result['check'] == 'cpu'
# Process metrics
dashboard_metrics_model.collection.remove()
data = {"check": "memory" ,
"server_id": "{0}".format(self.server_id_str),
"process_id": "{0}".format(str(self.process_id)),
"account_id": self.account_id
}
response = self.c.post(url, data=json.dumps(data), content_type='application/json')
assert response.status_code == 200
json_string = response.content.decode('utf-8')
result_data = json.loads(json_string)
assert result_data['response'] == 'Created'
result = dashboard_metrics_model.collection.find_one()
assert result['server_id'] == self.server_id
assert result['process_id'] == self.process_id
assert result['dashboard_id'] == self.dashboard_id
assert result['check'] == 'memory'
# Plugin metrics
dashboard_metrics_model.collection.remove()
plugin_model.gauge_collection.remove()
gauge = plugin_model.get_or_create_gauge_by_name(plugin=self.plugin, name='count')
data = {"check": "plugin" ,
"gauge_id": "{0}".format(str(gauge['_id'])),
"server_id": "{0}".format(self.server_id_str),
"plugin_id": "{0}".format(str(self.plugin_id)),
"account_id": self.account_id
}
response = self.c.post(url, data=json.dumps(data),
content_type='application/json')
assert response.status_code == 200
json_string = response.content.decode('utf-8')
result_data = json.loads(json_string)
assert result_data['response'] == 'Created'
result = dashboard_metrics_model.collection.find_one()
assert result['server_id'] == self.server_id
assert result['gauge_id'] == gauge['_id']
assert result['plugin_id'] == self.plugin_id
assert result['dashboard_id'] == self.dashboard_id
assert result['check'] == 'plugin'
self._cleanup()
def test_remove_metric(self):
dashboard_metrics_model.collection.remove()
data = {"check": "metric" ,
"metric_id": "{0}".format(str(self.metric['_id'])),
"account_id": self.account_id
}
url = reverse('ajax_dashboard_add_metric', kwargs={'dashboard_id': self.dashboard_id})
response = self.c.post(url, data=json.dumps(data), content_type='application/json')
assert dashboard_metrics_model.collection.find().count() == 1
result = dashboard_metrics_model.collection.find_one()
url = reverse('ajax_dashboard_remove_metric')
data = {
"metric_id": "{0}".format(str(result['_id'])),
}
response = self.c.post(url, data=json.dumps(data), content_type='application/json')
assert response.status_code == 200
assert dashboard_metrics_model.collection.find().count() == 0
def test_get_all_metrics(self):
self._cleanup()
url = reverse('ajax_dashboard_add_metric', kwargs={'dashboard_id': self.dashboard_id})
dashboard_metrics_model.collection.remove()
data = {
"check": "metric" ,
"metric_type": "application",
"metric_id": "{0}".format(str(self.metric['_id'])),
"account_id": self.account_id
}
response = self.c.post(url, data=json.dumps(data),
content_type='application/json')
plugin_model.gauge_collection.remove()
gauge = plugin_model.get_or_create_gauge_by_name(plugin=self.plugin, name='count')
data = {"check": "plugin" ,
"plugin_id": "{0}".format(str(self.plugin_id)),
"gauge_id": "{0}".format(str(gauge['_id'])),
"server_id": "{0}".format(self.server_id_str),
"account_id": self.account_id
}
response = self.c.post(url, data=json.dumps(data),
content_type='application/json')
data = {"check": "memory" ,
"server_id": "{0}".format(self.server_id_str),
"process_id": "{0}".format(str(self.process_id)),
"account_id": self.account_id
}
response = self.c.post(url, data=json.dumps(data),
content_type='application/json')
url = reverse('ajax_dashboard_get_all_metrics',
kwargs={'dashboard_id': self.dashboard_id})
result = self.c.get(url)
assert result.status_code == 200
for r in result.data['data']:
metric_type = r.get('metric_type')
type = r.get('type')
if metric_type == 'plugin':
assert r['name'] == "{0}.{1}.{2}".format(self.server['name'], self.plugin['name'], gauge['name'])
assert r['gauge_id'] == str(gauge['_id'])
assert r['plugin_id'] == str(self.plugin['_id'])
elif metric_type == 'process':
assert r['name'] == "{0}.{1}.{2}".format(self.server['name'], self.process['name'], 'memory')
assert r['process_id'] == str(self.process['_id'])
assert r['server_id'] == str(self.server['_id'])
# def test_public_dashboard_metrics(self):
# assert False
| 8,870
|
Python
|
.py
| 170
| 40.711765
| 113
| 0.612973
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,048
|
views_test.py
|
amonapp_amon/amon/apps/dashboards/tests/views_test.py
|
from django.test.client import Client
from django.urls import reverse
from django.test import TestCase
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.dashboards.models import dashboard_model
class TestDashboardUrls(TestCase):
def setUp(self):
User.objects.all().delete()
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.c.login(username='foo@test.com', password='qwerty')
self.account_id = 1
self.collection = dashboard_model.collection
self.servers_collection = dashboard_model.mongo.get_collection('servers')
self.servers_collection.insert({"name" : "test"})
self.server = self.servers_collection.find_one()
self.server_id = self.server['_id']
def tearDown(self):
self.servers_collection.remove()
self.collection.remove()
self.c.logout()
self.user.delete()
def _cleanup(self):
self.collection.remove()
def test_dashboards(self):
self._cleanup()
for i in range(5):
self.collection.insert({'test': 1, 'server': i, 'account_id':self.account_id})
url = reverse('dashboards')
response = self.c.get(url)
assert response.status_code == 200
assert len(response.context['dashboards_data']) == 5
# Test the urls
self._cleanup()
def test_edit_dashboard(self):
self._cleanup()
dashboard_id = self.collection.insert({'server': 'test', 'account_id':self.account_id})
url = reverse('edit_dashboard', kwargs={'dashboard_id': dashboard_id})
response = self.c.get(url)
assert response.status_code == 200
self._cleanup()
def test_delete_dashboard(self):
self._cleanup()
dashboard_id = self.collection.insert({'server': 'test', 'account_id':self.account_id})
assert self.collection.find().count() == 1
url = reverse('delete_dashboard', kwargs={'dashboard_id': dashboard_id})
response = self.c.get(url)
assert response.status_code == 302
assert self.collection.find().count() == 0
self._cleanup()
def test_public_dashboard(self):
self._cleanup()
dashboard_id = self.collection.insert({'server': 'test', 'account_id':self.account_id})
url = reverse('public_dashboard', kwargs={'dashboard_id': dashboard_id, 'account_id': self.account_id})
response = self.c.get(url)
# Default - not shared
assert response.status_code == 404
dashboard_model.update({'shared': True}, dashboard_id)
response = self.c.get(url)
assert response.status_code == 200
self._cleanup()
| 2,902
|
Python
|
.py
| 63
| 36.349206
| 115
| 0.642175
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,049
|
models.py
|
amonapp_amon/amon/apps/_tags/models.py
|
from operator import itemgetter
from amon.apps.core.basemodel import BaseModel
class TagsModel(BaseModel):
def __init__(self):
super(TagsModel, self).__init__()
self.collection = self.mongo.get_collection('tags')
def get_for_group(self, group_id):
group_id = self.object_id(group_id)
result = self.collection.find({'group_id': group_id})
return result
def get_list_of_tags(self, tags_list=None, to_dict=None):
result = []
tags = [self.object_id(x) for x in tags_list]
query = self.collection.find({'_id': {"$in": tags}})
for tag in query:
group = tag_groups_model.get_by_id(tag.get('group_id'))
if group:
tag['full_name'] = "{0}.{1}".format(group.get('name', ''), tag.get('name'))
else:
tag['full_name'] = "{0}".format(tag.get('name'))
tag['text'] = tag['full_name'] # Used for the dropdown menu
            if to_dict:
tag = self.mongoid_to_str(tag, keys=['_id', 'group_id'])
tag['id'] = tag['_id'] # Used in the dropdown
result.append(tag)
return result
def create_and_return_ids(self, tags=None):
tags_list = []
# {'rds': 'value'}
if type(tags) is dict:
for group, tag in tags.items():
group_id = tag_groups_model.get_or_create_by_name(group)
_id = self.get_or_create(name=tag, group_id=group_id)
tags_list.append(_id)
# ['tag', 'onemore', 'provider:digitalocean']
if type(tags) is list:
for t in tags:
# provider:digitalocean
try:
group, tag = t.split(":")
# tag
except ValueError:
tag = t
group = False
if group:
group_id = tag_groups_model.get_or_create_by_name(group)
_id = self.get_or_create(name=tag, group_id=group_id)
else:
_id = self.get_or_create_by_name(name=tag)
tags_list.append(_id)
return tags_list
def get_by_id(self, id):
result = super(TagsModel, self).get_by_id(id)
if result:
group_id = result.get('group_id')
if group_id:
result['group'] = tag_groups_model.get_by_id(group_id)
return result
def get_all(self):
result_list = []
result = super(TagsModel, self).get_all()
for r in result:
group_id = r.get('group_id')
if group_id:
r['group'] = tag_groups_model.get_by_id(group_id)
result_list.append(r)
        try:
            result_list = sorted(result_list, key=itemgetter('group'))
        except (KeyError, TypeError):
            # Tags without a group can't be compared; leave the list unsorted
            pass
return result_list
def get_or_create(self, name=None, group_id=None):
params = {'name': name}
if group_id:
group_id = self.object_id(group_id)
params['group_id'] = group_id
result = self.collection.find_one(params)
        if result is None:
_id = self.collection.insert(params)
else:
_id = result['_id']
self.collection.ensure_index([('name', self.desc)], background=True)
return _id
def get_or_create_by_name(self, name=None):
cleanup_name = name.strip()
# Don't create empty tags
if len(cleanup_name) < 2:
return
result = self.collection.find_one({'name': cleanup_name})
        if result is None:
_id = self.collection.insert({'name': cleanup_name})
else:
_id = result['_id']
self.collection.ensure_index([('name', self.desc)], background=True)
return _id
def update(self, data, id):
data = self.keys_to_mongoid(data, ['group_id'])
super(TagsModel, self).update(data, id)
def get_tags_ids(self, tags_string=None):
tags_list = tags_string.split(',')
result = []
for t in tags_list:
            try:
                tag_id = self.get_by_id(t)['_id']
            except Exception:
                # Not an existing tag id - create the tag by name instead
                tag_id = self.get_or_create_by_name(name=t)
result.append(tag_id)
return result
class TagGroupsModel(BaseModel):
def __init__(self):
super(TagGroupsModel, self).__init__()
self.collection = self.mongo.get_collection('tag_groups')
def get_or_create_by_name(self, name=None):
cleanup_name = name.strip()
# Don't create empty tags
if len(cleanup_name) < 2:
return
result = self.collection.find_one({'name': cleanup_name})
        if result is None:
_id = self.collection.insert({'name': cleanup_name})
else:
_id = result['_id']
self.collection.ensure_index([('name', self.desc)], background=True)
return _id
tag_groups_model = TagGroupsModel()
tags_model = TagsModel()
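# --- Illustrative sketch (not part of the module) ---
# The "group:tag" parsing rule used by create_and_return_ids above, shown as
# a pure function so it can be exercised without a MongoDB connection. The
# helper name is hypothetical.
def _split_raw_tag(raw):
    # "provider:digitalocean" -> ("provider", "digitalocean"); "nyc1" -> (None, "nyc1")
    # Anything with more than one ":" raises ValueError and is kept whole,
    # exactly as in create_and_return_ids.
    try:
        group, tag = raw.split(":")
    except ValueError:
        return None, raw
    return group, tag
# _split_raw_tag("provider:digitalocean") == ("provider", "digitalocean")
# _split_raw_tag("nyc1") == (None, "nyc1")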
| 5,204
|
Python
|
.py
| 126
| 28.928571
| 91
| 0.53876
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,050
|
urls.py
|
amonapp_amon/amon/apps/_tags/urls.py
|
from django.conf.urls import url
from django.views.generic import TemplateView
from amon.apps.tags import api
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="tags/view.html"), name='tags'),
url(r'^$', TemplateView.as_view(template_name="tags/view.html"), name='view_tags'),
url(r'^groups/$', TemplateView.as_view(template_name="tags/groups.html"), name='tag_groups'),
]
api_patterns = [
url(
r'^a/get_tags$',
api.ajax_get_tags,
name='api_tags_get_tags'
),
    # Returns a list of tag dicts for a comma-separated query string: url?tags=tag_id,tag_id
url(
r'^a/get_tags_list$',
api.ajax_get_tags_list,
name='api_tags_get_tags_list'
),
# Tags assigned to servers
url(
r'^a/get_server_tags$',
api.ajax_get_only_server_tags,
name='api_tags_only_server_tags'
),
# Tags assigned to individual server
url(
r'^a/get_tags_for_server/(?P<server_id>\w+)/$',
api.ajax_get_tags_for_server,
name='api_tags_get_tags_for_server'
),
]
urlpatterns = urlpatterns + api_patterns
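# Example request shapes for the ajax endpoints above (ids are hypothetical):
#   GET /a/get_tags?q=provider                  -> tags whose display text contains "provider"
#   GET /a/get_tags_list?tags=5a1f...,5a20...   -> full tag dicts for those ids
#   GET /a/get_tags_for_server/<server_id>/     -> tags assigned to one server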
| 1,118
|
Python
|
.py
| 34
| 26.970588
| 97
| 0.633396
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,051
|
api.py
|
amonapp_amon/amon/apps/_tags/api.py
|
from operator import itemgetter
from django.contrib.auth.decorators import login_required
from rest_framework.decorators import api_view
from rest_framework.response import Response
from amon.apps.tags.models import tags_model
from amon.apps.servers.models import server_model
def _tag_dict__repr__(tag=None):
result = {}
group_name = tag.get('group', {}).get('name', '')
separator = ":" if group_name else ""
text = "{0}{1}{2}".format(group_name, separator, tag.get('name'))
tag_id = tag.get("_id", False)
if tag_id:
result = {
'id': str(tag_id),
"text": text,
'group': group_name,
}
return result
# AJAX
@login_required
@api_view(['GET'])
def ajax_get_tags(request):
all_tags = tags_model.get_all()
q = request.GET.get('q')
result = []
for tag in all_tags:
append = True
tag_dict = _tag_dict__repr__(tag=tag)
if q:
append = False
text = tag_dict.get('text', "")
lookup = text.find(q)
if lookup != -1:
append = True
if append is True:
result.append(tag_dict)
result = sorted(result, key=itemgetter('group'))
return Response(result)
# AJAX
@login_required
@api_view(['GET'])
def ajax_get_tags_list(request):
    tags = request.GET.get('tags', '')
tags_list = [x.strip() for x in tags.split(',')]
result = tags_model.get_list_of_tags(tags_list=tags_list, to_dict=True)
return Response(result)
# AJAX
@login_required
@api_view(['GET'])
def ajax_get_tags_for_server(request, server_id=None):
result = []
server = server_model.get_by_id(server_id)
server_tags = server_model.get_tags(server=server)
for tag in server_tags:
tag_dict = _tag_dict__repr__(tag=tag)
if len(tag_dict) > 0:
result.append(tag_dict)
result = sorted(result, key=itemgetter('group'))
return Response(result)
# AJAX
@login_required
@api_view(['GET'])
def ajax_get_only_server_tags(request):
all_servers = server_model.get_all()
filtered_tags = []
for s in all_servers:
server_tags = s.get('tags', [])
tags_list = [x for x in server_tags]
if len(tags_list) > 0:
filtered_tags.extend(tags_list)
# Filter by tag_id and leave only unique tags
filtered_tags = dict((v['_id'], v) for v in filtered_tags).values()
result = []
for tag in filtered_tags:
tag_dict = _tag_dict__repr__(tag=tag)
if len(tag_dict) > 0:
result.append(tag_dict)
result = sorted(result, key=itemgetter('group'))
return Response(result)
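# --- Minimal sketch of the de-duplication idiom in ajax_get_only_server_tags ---
# Keying a dict by tag '_id' keeps exactly one entry per id; the sample data
# below is made up.
_tags = [{'_id': 1, 'name': 'nyc1'}, {'_id': 1, 'name': 'nyc1'}, {'_id': 2, 'name': 'ssd'}]
_unique = list(dict((t['_id'], t) for t in _tags).values())
# len(_unique) == 2  (one entry per id)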
| 2,686
|
Python
|
.py
| 80
| 27.2625
| 75
| 0.620031
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,052
|
models_test.py
|
amonapp_amon/amon/apps/_tags/tests/models_test.py
|
import unittest
from nose.tools import eq_
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.tags.models import tags_model, tag_groups_model
class TagsModelTest(unittest.TestCase):
def setUp(self):
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.account_id = 1
def tearDown(self):
self.user.delete()
User.objects.all().delete()
def _cleanup(self):
tags_model.collection.remove()
tag_groups_model.collection.remove()
# def get_for_group_test(self):
# assert False
def get_list_of_tags_test(self):
self._cleanup()
tags = {'rds': 'value', 'ebs': 'volume'}
first_result = tags_model.create_and_return_ids(tags)
result = tags_model.get_list_of_tags(first_result)
for r in result:
assert r['full_name'] in ['rds.value', 'ebs.volume']
result = tags_model.get_list_of_tags(first_result, to_dict=True)
for r in result:
assert type(r['group_id']) is str
def create_and_return_ids_test(self):
self._cleanup()
# Group
tags = {'rds': 'value', 'ebs': 'volume'}
first_result = tags_model.create_and_return_ids(tags)
assert len(first_result) == 2
assert tag_groups_model.collection.find().count() == 2
second_result = tags_model.create_and_return_ids(tags)
assert len(second_result) == 2
assert tag_groups_model.collection.find().count() == 2
assert first_result == second_result
self._cleanup()
# List
tags = ['rds', 'ebs:value']
first_result = tags_model.create_and_return_ids(tags)
assert len(first_result) == 2
assert tag_groups_model.collection.find().count() == 1
second_result = tags_model.create_and_return_ids(tags)
assert len(second_result) == 2
assert tag_groups_model.collection.find().count() == 1
assert first_result == second_result
def get_or_create_test(self):
self._cleanup()
result = tags_model.get_or_create(name='testmeagain')
assert tags_model.collection.find().count() == 1
result = tags_model.get_or_create(name='testmeagain')
assert tags_model.collection.find().count() == 1
# def get_or_create_by_name_test(self):
# assert False
| 2,483
|
Python
|
.py
| 56
| 35.035714
| 85
| 0.63133
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,053
|
api_test.py
|
amonapp_amon/amon/apps/_tags/tests/api_test.py
|
import json
from django.test.client import Client
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from amon.apps.tags.models import tags_model
from amon.apps.servers.models import server_model
User = get_user_model()
class TestTagsApi(TestCase):
def setUp(self):
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.c.login(username='foo@test.com', password='qwerty')
def tearDown(self):
self.c.logout()
self.user.delete()
def _cleanup(self):
server_model.collection.remove()
tags_model.collection.remove()
def test_ajax_get_tags(self):
self._cleanup()
tags = {'rds': 'value', 'ebs': 'volume'}
tags_model.create_and_return_ids(tags)
url = reverse('api_tags_get_tags')
response = self.c.get(url)
response = json.loads(response.content.decode('utf-8'))
assert len(response) == 2
for i in response:
assert i['group'] in ['rds', 'ebs']
assert i['text'] in ['rds:value', 'ebs:volume']
def test_ajax_get_tags_list(self):
self._cleanup()
tags = {'rds': 'value', 'ebs': 'volume'}
result = tags_model.create_and_return_ids(tags)
assert len(result) == 2
tags_ids_to_string = ','.join(map(str, result))
url = reverse('api_tags_get_tags_list')
url = "{0}?tags={1}".format(url, tags_ids_to_string)
response = self.c.get(url)
response = json.loads(response.content.decode('utf-8'))
assert len(response) == 2
for i in response:
assert i['name'] in ['value', 'volume']
assert i['full_name'] in ['rds.value', 'ebs.volume']
def test_ajax_get_tags_for_server(self):
self._cleanup()
def test_ajax_get_only_server_tags(self):
self._cleanup()
# Create unassigned tags
tags = {'rds': 'value', 'ebs': 'volume'}
tags_model.create_and_return_ids(tags)
# Create a server with tags
data = {'name': 'testserver_one', 'key': 'd3vopqnzdnm677keoq3ggsgkg5dw94xg', 'tags': ['provider:digitalocean', "nyc1"]}
url = reverse('api_servers_create')
response = self.c.post(url, json.dumps(data), content_type='application/json')
# Create a server with tags
data = {'name': 'testserver_one_one', 'key': 'd3vopqnzdnm677keoq3ggsgkg5dw94xg', 'tags': ['provider:digitalocean', "nyc1"]}
url = reverse('api_servers_create')
response = self.c.post(url, json.dumps(data), content_type='application/json')
# Create a server with no tags
data = {'name': 'testserver_two', 'key': 'd3vopqnzdnm677keoq3ggsgkg5dw94dg',}
url = reverse('api_servers_create')
response = self.c.post(url, json.dumps(data), content_type='application/json')
url = reverse('api_tags_only_server_tags')
response = self.c.get(url)
response = json.loads(response.content.decode('utf-8'))
# provider:digitalocean, nyc1
assert len(response) == 2
for r in response:
assert r['text'] in ['nyc1', 'provider:digitalocean']
| 3,298
|
Python
|
.py
| 70
| 38.3
| 131
| 0.626068
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,054
|
models.py
|
amonapp_amon/amon/apps/users/models.py
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin
)
from django.utils import timezone
from datetime import datetime, timedelta
from amon.utils.generators import random_id_generator
class AmonUserManager(BaseUserManager):
def create_user(self, email=None, password=None):
"""
Creates and saves a User with the given email, and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
)
user.set_password(password)
user.is_active = True
user.save()
return user
    def create_superuser(self, email, password=None):
        user = self.create_user(email, password=password)
        user.is_admin = True
        user.is_superuser = True
        user.save()
        return user
class AmonUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
date_joined = models.DateTimeField(('date joined'), default=timezone.now)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
organizations = models.ManyToManyField('organizations.Organization')
USERNAME_FIELD = 'email'
objects = AmonUserManager()
def get_short_name(self):
return self.email
def get_username(self):
return self.email
def __str__(self):
return "Email: {0}".format(self.email)
class Meta:
verbose_name = 'User'
class ResetPasswordCode(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='user_password_reset_code',
on_delete=models.CASCADE
)
code = models.CharField(max_length=128)
expires_on = models.DateTimeField(auto_now=False, auto_now_add=False)
date_created = models.DateTimeField(('date created'), default=timezone.now)
@staticmethod
def generate_password_reset_token(user):
activation_code = ResetPasswordCode.objects.create(
user=user,
code=random_id_generator(size=64),
expires_on=datetime.utcnow() + timedelta(days=1)
)
return activation_code
def __str__(self):
return "Email: {0} / Code: {1}".format(self.user.email, self.code)
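# --- Hedged sketch (not part of the model) ---
# ResetPasswordCode stores expires_on one day ahead but defines no expiry
# predicate; a check along these lines (hypothetical helper, using the
# datetime import at the top of this module) would reject stale codes.
def _code_is_expired(expires_on, now=None):
    now = now or datetime.utcnow()
    return now > expires_on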
| 2,502
|
Python
|
.py
| 64
| 31.0625
| 80
| 0.667499
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,055
|
emails.py
|
amonapp_amon/amon/apps/users/emails.py
|
from django.template.loader import render_to_string
from amon.apps.notifications.mail.sender import _send_email
from django.conf import settings
def send_invitation_email(invite):
if invite:
recipients_list = [invite['email']]
subject = 'You have been invited to join Amon.'
html_content = render_to_string('users/emails/invite.html', {
'invite': invite,
"domain_url": settings.HOST
})
_send_email(subject=subject,
recipients_list=recipients_list,
html_content=html_content)
def send_revoked_email(user=None):
if user:
email = user['email']
subject = 'Your access to {0} has been revoked.'.format(settings.HOST)
html_content = render_to_string('users/emails/revoked.html', {
'user': user,
"domain_url": settings.HOST
})
_send_email(subject=subject,
recipients_list=[email],
html_content=html_content)
| 1,058
|
Python
|
.py
| 25
| 31.04
| 79
| 0.626543
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,056
|
urls.py
|
amonapp_amon/amon/apps/users/urls.py
|
from django.conf.urls import url
from amon.apps.users import views
# urlpatterns = [
# url(r'^$', views.view_users, name='view_users'),
# url(r'^revoke_access/(?P<user_id>\w+)/$', views.revoke_access, name='users_revoke_access'),
# url(r'^remove_pending/(?P<invitation_id>\w+)/$', views.remove_pending, name='users_remove_pending'),
# url(r'^invite/confirm/(?P<invitation_code>\w+)/$', views.confirm_invite, name='users_confirm_invite'),
# ]
| 468
|
Python
|
.py
| 8
| 56.25
| 109
| 0.66302
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,057
|
decorators.py
|
amonapp_amon/amon/apps/users/decorators.py
|
from functools import wraps
from django.urls import reverse
from django.shortcuts import redirect
def user_is_admin(view):
@wraps(view)
def inner(request, *args, **kwargs):
        if request.user.is_authenticated:  # a property, not a method, on Django >= 1.10
pass
# if request.role.type != 'Admin':
# return redirect(reverse('login'))
return view(request, *args, **kwargs)
return inner
| 411
|
Python
|
.py
| 12
| 27.333333
| 51
| 0.647959
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,058
|
forms.py
|
amonapp_amon/amon/apps/users/forms.py
|
import hashlib
import uuid
from django import forms
from annoying.functions import get_object_or_None
from django.contrib.auth import get_user_model
from amon.utils.dates import unix_utc_now
from amon.apps.users.models import invite_model
User = get_user_model()
class InviteForm(forms.Form):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(InviteForm, self).__init__(*args, **kwargs)
email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email'}))
def clean_email(self):
cleaned_data = self.cleaned_data
email = cleaned_data['email']
# Check if invitation already exists
email_count = invite_model.collection.find({'email': email}).count()
if email_count > 0:
raise forms.ValidationError('This user has already been invited.')
# Ignore invitations for the same email as the logged in user
if email == self.user.email:
raise forms.ValidationError("You can't invite yourself.")
return cleaned_data['email']
def save(self):
cleaned_data = self.cleaned_data
new_invite_email = cleaned_data['email']
data = {
'email': new_invite_email,
'invited_by': self.user.id,
'sent': unix_utc_now()
}
invitation_code_string = "{0}{1}{2}".format(self.user.id, new_invite_email , unix_utc_now())
encoded_invitation_code = invitation_code_string.encode()
data['invitation_code'] = hashlib.sha224(encoded_invitation_code).hexdigest()
invite_model.create_invitation(data=data)
return data
class InviteNewUserForm(forms.Form):
password = forms.CharField(required=True, widget=(forms.PasswordInput(attrs={'placeholder': 'Password'})))
def __init__(self, *args, **kwargs):
self.invite = kwargs.pop('invite', None)
super(InviteNewUserForm, self).__init__(*args, **kwargs)
def clean_password(self):
password = self.cleaned_data.get('password', None)
if len(password) < 6:
            raise forms.ValidationError('Your password must be at least 6 characters')
return self.cleaned_data['password']
def save(self):
password = self.cleaned_data.get('password')
email = self.invite.get('email')
user = User.objects.create_user(password=password, email=email)
user.is_admin = False
user.is_staff = False
user.is_superuser = False
user.save()
invite_model.delete(self.invite['_id'])
return user
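# --- Standalone sketch of the invitation-code derivation in InviteForm.save() ---
# Runnable without Django; the sample values are made up. A sha224 hexdigest
# is always 56 hex characters. The helper name is hypothetical.
def _make_invitation_code(user_id, email, ts):
    raw = "{0}{1}{2}".format(user_id, email, ts).encode()
    return hashlib.sha224(raw).hexdigest()
# _make_invitation_code(1, 'foo1@test.com', 1500000000) -> 56-char hex string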
| 2,772
|
Python
|
.py
| 57
| 37.859649
| 111
| 0.6433
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,059
|
views.py
|
amonapp_amon/amon/apps/users/views.py
|
# from django.shortcuts import redirect
# from django.urls import reverse
# from django.shortcuts import render
# from django.contrib.auth.decorators import login_required
# from django.contrib import messages
# from django.http import Http404
# from django.conf import settings
# from django.contrib.auth.decorators import user_passes_test
# from django.contrib.auth import get_user_model
# # from amon.apps.users.forms import InviteForm, InviteNewUserForm
# # from amon.apps.users.models import invite_model
# # from amon.apps.users.emails import send_invitation_email, send_revoked_email
# User = get_user_model()
# @user_passes_test(lambda u: u.is_superuser)
# @login_required
# def view_users(request):
# if request.method == 'POST':
# form = InviteForm(request.POST, user=request.user)
# if form.is_valid():
# new_invite = form.save()
# try:
# send_invitation_email(new_invite)
# messages.add_message(request, messages.INFO, 'Invitation sent.')
# except Exception as e:
# messages.add_message(request, messages.ERROR, "Error sending email invite. Please check your SMTP settings.")
# return redirect(reverse('view_users'))
# else:
# form = InviteForm()
# active_users = User.objects.filter(is_superuser=False)
# pending = invite_model.get_all()
# return render(request, 'users/view.html', {
# "form": form,
# "active_users": active_users,
# "pending": pending,
# })
# @user_passes_test(lambda u: u.is_superuser)
# @login_required
# def revoke_access(request, user_id=None):
# user = User.objects.get(id=user_id)
# send_revoked_email(user=user)
# user.delete()
# messages.add_message(request, messages.INFO, 'Access revoked.')
# return redirect(reverse('view_users'))
# @user_passes_test(lambda u: u.is_superuser)
# @login_required
# def remove_pending(request, invitation_id=None):
# invite_model.delete(invitation_id)
# messages.add_message(request, messages.INFO, 'Invitation removed.')
# return redirect(reverse('view_users'))
# def confirm_invite(request, invitation_code=None):
# invite = invite_model.collection.find_one({'invitation_code': invitation_code})
# if invite is None:
# raise Http404
# if request.method == 'POST':
# form = InviteNewUserForm(request.POST, invite=invite)
# if form.is_valid():
# form.save()
# return redirect(reverse('login'))
# else:
# form = InviteNewUserForm(invite=invite)
# return render(request, 'users/confirm_invite.html', {
# "form": form,
# "invite": invite,
# "domain_url": settings.HOST
# })
| 2,919
|
Python
|
.py
| 65
| 41.276923
| 128
| 0.654308
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,060
|
0002_amonuser_organizations.py
|
amonapp_amon/amon/apps/users/migrations/0002_amonuser_organizations.py
|
# Generated by Django 2.0.2 on 2018-02-19 20:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organizations', '__first__'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='amonuser',
name='organizations',
field=models.ManyToManyField(to='organizations.Organization'),
),
]
| 444
|
Python
|
.py
| 14
| 24.357143
| 74
| 0.616471
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,061
|
0001_initial.py
|
amonapp_amon/amon/apps/users/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-05 10:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='AmonUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'User',
},
),
]
| 1,939
|
Python
|
.py
| 31
| 51.806452
| 266
| 0.641956
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,062
|
0003_resetpasswordcode.py
|
amonapp_amon/amon/apps/users/migrations/0003_resetpasswordcode.py
|
# Generated by Django 2.0.2 on 2018-02-21 19:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0002_amonuser_organizations'),
]
operations = [
migrations.CreateModel(
name='ResetPasswordCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=128)),
('expires_on', models.DateTimeField()),
('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_password_reset_code', to=settings.AUTH_USER_MODEL)),
],
),
]
| 958
|
Python
|
.py
| 21
| 36.952381
| 159
| 0.651288
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,063
|
forms_test.py
|
amonapp_amon/amon/apps/users/tests/forms_test.py
|
from django.test.client import Client
from django.urls import reverse
from django.test import TestCase
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.users.forms import InviteForm
from amon.apps.users.models import invite_model
class TestInvite(TestCase):
def setUp(self):
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.c.login(username='foo@test.com', password='qwerty')
def tearDown(self):
self.c.logout()
self.user.delete()
invite_model.collection.remove()
def test_invite(self):
form_data = {'email': 'foo@test.com'}
form = InviteForm(data=form_data, user=self.user)
self.assertEqual(form.is_valid(), False) # Can't invite yourself error
form_data = {'email': 'foo1@test.com'}
form = InviteForm(data=form_data, user=self.user)
self.assertEqual(form.is_valid(), True)
form.save()
form_data = {'email': 'foo1@test.com'}
form = InviteForm(data=form_data, user=self.user,)
self.assertEqual(form.is_valid(), False) # Duplicate invitation
result = invite_model.collection.find().count()
assert result == 1
class TestUser(TestCase):
    # Assumed fixtures: the tests below use self.c, self.email and
    # self._cleanup(), none of which were defined in this file. Note that
    # forgotten_pass_tokens_model is also referenced below without an import;
    # its module path is not visible in this snapshot.
    def setUp(self):
        self.c = Client()
        self.email = 'reset@test.com'
    def _cleanup(self):
        User.objects.all().delete()
    def test_forgotten_password_form(self):
        self._cleanup()
        url = reverse('forgotten_password')
response = self.c.post(url, {'email': self.email})
assert response.context['form'].errors
# Create user and reset password
self.user = User.objects.create_user(password='qwerty', email=self.email)
response = self.c.post(url, {'email': self.email})
# assert forgotten_pass_tokens_model.collection.find().count() == 1
response = self.c.post(url, {'email': self.email})
# assert forgotten_pass_tokens_model.collection.find().count() == 1
def test_reset_password_form(self):
self._cleanup()
        self.user = User.objects.create_user(password='qwerty', email=self.email)
# Generate token
url = reverse('forgotten_password')
response = self.c.post(url, {'email': self.email})
assert forgotten_pass_tokens_model.collection.find().count() == 1
token = forgotten_pass_tokens_model.collection.find_one()
url = reverse("reset_password", kwargs={'token': token['token']})
response = self.c.post(url, {'password': 'newpass', 'repeat_password': 'newpasssssss'})
assert response.context['form'].errors
url = reverse("reset_password", kwargs={'token': token['token']})
response = self.c.post(url, {'password': 'newpass', 'repeat_password': 'newpass'})
self.assertFalse(self.c.login(email=self.email, password='qwerty'))
self.assertTrue(self.c.login(email=self.email, password='newpass'))
| 3,007
|
Python
|
.py
| 57
| 42.719298
| 96
| 0.648298
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,064
|
views_test.py
|
amonapp_amon/amon/apps/users/tests/views_test.py
|
from django.test.client import Client
from django.urls import reverse
from django.test import TestCase
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.users.models import invite_model
from amon.apps.notifications.mail.models import email_model
class TestUsersViews(TestCase):
def setUp(self):
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.user.is_superuser = True
self.user.save()
self.c.login(username='foo@test.com', password='qwerty')
email_model.save_email_settings({'sent_from': 'test@example.com'})
def tearDown(self):
self.c.logout()
self.user.delete()
User.objects.all().delete()
email_model.collection.remove()
def _cleanup(self):
invite_model.collection.remove()
def test_send_invite(self):
self._cleanup()
email = 'invite@test.com'
url = reverse('view_users')
response = self.c.post(url, {'email':email})
self.assertRedirects(response, reverse('view_users'))
result = invite_model.collection.find().count()
assert result == 1
def test_revoke_access(self):
self._cleanup()
email = 'revoke@test.com'
        new_user = User.objects.create_user(password='qwerty', email=email)
url = reverse('users_revoke_access', kwargs={'user_id': str(new_user.id)})
response = self.c.get(url)
assert User.objects.filter(email__iexact=email).count() == 0
def test_confirm_invite(self):
self._cleanup()
email = 'confirm@test.com'
url = reverse('view_users')
response = self.c.post(url, {'email':email})
result = invite_model.collection.find_one()
url = reverse('users_confirm_invite',
kwargs={'invitation_code': result['invitation_code']})
response = self.c.post(url, {'password':'qwerty'})
invited_user = User.objects.get(email__iexact=email)
assert invited_user
| 2,243
|
Python
|
.py
| 49
| 34.897959
| 86
| 0.633946
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,065
|
views.py
|
amonapp_amon/amon/apps/_core/views.py
|
from django.shortcuts import render_to_response
from django.shortcuts import render
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.shortcuts import redirect
from django.contrib import messages
from django.conf import settings
from django.http import Http404
| 355
|
Python
|
.py
| 9
| 38.555556
| 57
| 0.881844
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,066
|
cron.py
|
amonapp_amon/amon/apps/alerts/cron.py
|
import kronos
import logging
logger = logging.getLogger(__name__)
from amon.apps.alerts.alerter import notsendingdata_alerter
@kronos.register('* * * * *')
def check_not_sending_data_task():
notsendingdata_alerter.check()
logger.debug('Checking for servers that do not send data ...')
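# Usage note (assuming a standard django-kronos setup): the schedule above is
# written to the system crontab with `python manage.py installtasks`, listed
# with `python manage.py printtasks`, and a single run can be triggered by
# hand with `python manage.py runtask check_not_sending_data_task`.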
| 298
|
Python
|
.py
| 8
| 34.625
| 66
| 0.743945
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,067
|
alerter.py
|
amonapp_amon/amon/apps/alerts/alerter.py
|
from amon.apps.alerts.checkers.system import system_alerts
from amon.apps.alerts.checkers.process import process_alerts
from amon.apps.alerts.checkers.plugin import plugin_alerts
from amon.apps.alerts.checkers.healthcheck import healthcheck_alert_checker
from amon.apps.alerts.models import alerts_model
from amon.apps.plugins.models import plugin_model
from amon.apps.processes.models import process_model
from amon.utils.dates import unix_utc_now
class Alerter(object):
def check_tags(self, server=None, rule=None):
valid_rule = True
server_tags = server.get('tags', [])
server_tags = [str(t) for t in server_tags]
tags = rule.get('tags', [])
tags = [str(t) for t in tags]
# Check tags first
if len(server_tags) > 0 and len(tags) > 0:
valid_rule = set(tags).issubset(server_tags)
return valid_rule
class ServerAlerter(Alerter):
def check(self, data, server):
alerts = False
account_id = server.get('account_id', None)
# System alerts
rules = alerts_model.get_alerts(type='system', server=server)
if rules:
alerts = system_alerts.check(data=data, rules=rules, server=server)
if alerts:
alerts_model.save_system_occurence(alerts, server_id=server['_id'])
# Global rules
global_rules = alerts_model.get_global_alerts(account_id=account_id)
if global_rules:
alerts = system_alerts.check(data=data, rules=global_rules, server=server)
if alerts:
alerts_model.save_system_occurence(alerts, server_id=server['_id'])
return alerts # For the test suite
class ProcessAlerter(Alerter):
def check_rule_and_save(self, process_data_dict=None, rule=None, process_id=None, server_id=None):
process_data = next((item for item in process_data_dict if item["p"] == process_id), None)
if process_data:
alert = process_alerts.check(process_data, rule)
if alert:
alerts_model.save_occurence(alert, server_id=server_id)
def check(self, data, server):
        process_data_dict = data.get('data') or []
rules = alerts_model.get_alerts(type='process', server=server)
if len(rules) + len(process_data_dict) > 0:
for rule in rules:
process_id = rule['process']
self.check_rule_and_save(process_id=process_id, rule=rule, process_data_dict=process_data_dict, server_id=server['_id'])
# Global alerts
rules = alerts_model.get_alerts(type='process_global')
if len(rules) + len(process_data_dict) > 0:
all_processes = process_model.get_all_for_server(server['_id'])
for rule in rules:
valid_rule = self.check_tags(server=server, rule=rule)
if valid_rule:
process_name = rule.get('process')
process_id = None
# Check if this server has a process with this name
for p in all_processes.clone():
if p.get('name') == process_name:
process_id = p.get('_id')
if process_id:
self.check_rule_and_save(process_id=process_id, rule=rule, process_data_dict=process_data_dict, server_id=server['_id'])
class PluginAlerter(Alerter):
def check(self, data=None, plugin=None, server=None):
plugin_data = data.get('gauges', None)
rules = alerts_model.get_alerts_for_plugin(plugin=plugin)
if len(rules) > 0:
for rule in rules:
alert = plugin_alerts.check(data=plugin_data, rule=rule)
if alert:
alerts_model.save_occurence(alert)
# Global alerts
rules = alerts_model.get_alerts(type='plugin_global')
if len(rules) > 0:
all_plugins = plugin_model.get_for_server(server_id=server['_id'])
for rule in rules:
valid_rule = self.check_tags(server=server, rule=rule)
if valid_rule:
plugin_name = rule.get('plugin')
plugin_id = None
# Check if this server has a plugin with this name
for p in all_plugins.clone():
if p.get('name') == plugin_name:
plugin_id = p.get('_id')
if plugin_id:
alert = plugin_alerts.check(data=plugin_data, rule=rule)
if alert:
alerts_model.save_occurence(alert, server_id=server['_id'])
class UptimeAlerter(object):
def check(self, data, server):
        process_data_dict = data.get('data') or []
rules = alerts_model.get_alerts(type='uptime', server=server)
if len(rules) + len(process_data_dict) > 0:
for rule in rules:
process_id = rule['process']
process_data = next((item for item in process_data_dict if item["p"] == process_id), None)
# Process is down
if not process_data:
alerts_model.save_uptime_occurence(rule, data=process_data)
class NotSendingDataAlerter(object):
def check(self):
time_now = unix_utc_now()
alerts = alerts_model.get_alerts_not_sending_data()
for alert in alerts:
period = alert.get('period')
for server in alert.get('server_data'):
last_check = server.get('last_check')
# Skip all the servers with no agent installed
                if last_check is not None:
                    since_last_check = time_now - last_check  # typically ~65s: 60s agent sleep + ~5s to collect
                    if since_last_check > (period + 10):  # Trigger alert, with a 10 second buffer
alert['server'] = server
alerts_model.save_notsendingdata_occurence(alert=alert)
class HealthCheckAlerter(object):
def check(self, data=None, server=None):
alerts = alerts_model.get_alerts(type='health_check')
for alert in alerts:
# Data is list
            for d in (data or []):
trigger = healthcheck_alert_checker.check(data=d, rule=alert)
# Will scan all the data, check for relevancy and then check the specific entry
if trigger:
alerts_model.save_healtcheck_occurence(trigger=trigger, server_id=server['_id'])
server_alerter = ServerAlerter()
process_alerter = ProcessAlerter()
uptime_alerter = UptimeAlerter()
plugin_alerter = PluginAlerter()
health_check_alerter = HealthCheckAlerter()
notsendingdata_alerter = NotSendingDataAlerter()
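# --- Pure-Python sketch of Alerter.check_tags above ---
# A rule applies only when every rule tag is also present on the server
# (both sides stringified). The ids below are hypothetical.
_server_tags = ['5a1', '5a2', '5a3']
assert set(['5a1', '5a3']).issubset(_server_tags)      # rule fires
assert not set(['5a1', '5a9']).issubset(_server_tags)  # rule skipped
# When either tag list is empty, check_tags leaves valid_rule True.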
| 6,829
|
Python
|
.py
| 132
| 39.287879
| 144
| 0.602411
|
amonapp/amon
| 1,334
| 108
| 37
|
AGPL-3.0
|
9/5/2024, 5:09:37 PM (Europe/Amsterdam)
|
6,068
|
urls.py
|
amonapp_amon/amon/apps/alerts/urls.py
|
from django.conf.urls import url
from amon.apps.alerts.views import alerts as alert_views
from amon.apps.alerts.views import pause as pause_views
from amon.apps.alerts.views import healthchecks as healthcheck_views
from amon.apps.alerts import api
urlpatterns = [
url(
r'^$',
alert_views.all,
name='alerts'
),
url(
r'^clear_triggers/(?P<alert_id>\w+)/$',
alert_views.clear_triggers,
name='alerts_clear_triggers'
),
url(
r'^delete/(?P<alert_id>\w+)/$',
alert_views.delete_alert,
name='delete_alert'
),
url(
r'^add/$',
alert_views.add_alert,
name='add_alert'
),
url(
r'^edit/(?P<alert_id>\w+)/$',
alert_views.edit_alert,
name='edit_alert'
),
url(
r'^history/(?P<alert_id>\w+)/$',
alert_views.history,
name='global_alert_history'
),
url(
r'^history/system/(?P<alert_id>\w+)/$',
alert_views.history_system,
name='system_alert_history'
),
# Ajax
url(
r'^a/history/(?P<alert_id>\w+)/$',
alert_views.ajax_alert_triggers,
name='ajax_alert_history'
),
# Mute
url(
r'^pause/global/$',
pause_views.mute_servers,
name='alerts_mute_servers'
),
url(
r'^unpause/server/(?P<mute_id>\w+)$',
pause_views.unmute_server,
name='alerts_unmute_server'
),
url(
r'^pause/id/(?P<alert_id>\w+)/$',
pause_views.mute,
name='mute_alert'
),
url(
r'^pause_all/$',
pause_views.mute_all,
name='mute_all_alerts'
),
# HealthCheck alerts
url(
r'^add/healthcheck/$',
healthcheck_views.add_alert,
name='add_healthcheck_alert'
),
url(
r'^edit/healthcheck/(?P<alert_id>\w+)/$',
healthcheck_views.edit_alert,
name='edit_healthcheck_alert'
),
]
ajax_patterns = [
url(
r'^a/get_metrics/$',
api.ajax_get_metrics,
name='api_alerts_get_metrics'
),
url(
r'^a/get_health_check_commands/$',
api.ajax_get_health_check_commands,
name='api_alerts_get_health_check_commands'
),
url(
r'^a/get_health_check_params_for_command/$',
api.ajax_get_health_check_get_params_for_command,
name='api_alerts_get_health_check_params_for_command'
)
]
urlpatterns = urlpatterns + ajax_patterns
| 2,493 | Python | .py | 98 | 18.632653 | 68 | 0.56717 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,069 | api.py | amonapp_amon/amon/apps/alerts/api.py |
from django.contrib.auth.decorators import login_required
from django.conf import settings
from rest_framework.decorators import api_view
from rest_framework.response import Response
from amon.apps.alerts.models import alerts_api_model
from amon.apps.healthchecks.models import health_checks_api_model
@login_required
@api_view(['POST', 'GET'])
def ajax_get_metrics(request):
status = settings.API_RESULTS['ok']
if request.method == 'POST':
data = request.data
server_id = data.get('server_id')
else:
server_id = request.GET.get('server_id')
if server_id == 'all':
result = alerts_api_model.get_global_metrics()
else:
result = alerts_api_model.get_server_metrics(server_id=server_id)
return Response(result)
@login_required
@api_view(['POST', 'GET'])
def ajax_get_health_check_commands(request):
result = []
if request.method == 'POST':
data = request.data
server_id = data.get('server_id')
else:
server_id = request.GET.get('server_id')
if server_id == 'all':
result = health_checks_api_model.get_unique_commands()
else:
cursor = health_checks_api_model.get_commands_for_server(server_id=server_id)
for r in cursor.clone():
params = r.get("params", False)
params = "" if params is False else params
command = "{0} {1}".format(r.get("command"), params)
result.append(command)
return Response(result)
# AJAX
@login_required
@api_view(['POST', 'GET'])
def ajax_get_health_check_get_params_for_command(request):
if request.method == 'POST':
data = request.data
command = data.get('command')
else:
command = request.GET.get('command')
result = health_checks_api_model.get_params_for_command(command_string=command)
return Response(result)
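# --- Illustrative sketch, not part of the original module ---
# How the health-check command strings above are assembled from a stored
# document; the document here is hypothetical and no request/DB is needed.
if __name__ == "__main__":
    _r = {"command": "check-http.rb", "params": "-u https://example.com"}
    params = _r.get("params", False)
    params = "" if params is False else params
    print("{0} {1}".format(_r.get("command"), params))
    # -> 'check-http.rb -u https://example.com'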
| 1,895 | Python | .py | 50 | 31.7 | 85 | 0.673481 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,070 | forms.py | amonapp_amon/amon/apps/alerts/forms.py |
from django import forms
from django.conf import settings
from django.urls import reverse
from amon.apps.servers.models import server_model
from amon.apps.tags.models import tags_model
from amon.apps.alerts.models import alert_mute_servers_model
ABOVE_BELOW_CHOICES = (
('above', 'More than'),
('below', 'Less than')
)
HEALTCHECKS_STATUS_CHOICES = (
('critical', 'CRITICAL'),
('warning', 'WARNING')
)
PERIOD_CHOICES = [
(180, '3 minutes'),
(300, '5 minutes'),
(900, '15 minutes'),
(1800, '30 minutes'),
(3600, '1 hour'),
]
MUTE_CHOICES = [
(1, '1 hour'),
(2, '2 hours'),
(4, '4 hours'),
(8, '8 hours'),
(0, 'Forever'),
]
PROCESS_CHOICES = [
('CPU', 'CPU'),
('Memory', 'Memory'),
('Down', 'Down'),
]
if settings.DEBUG:
PERIOD_CHOICES = [(5, '5 seconds'), (30, '30 seconds'), ] + PERIOD_CHOICES
class AlertForm(forms.Form):
def __init__(self, *args, **kwargs):
self.servers = kwargs.pop('all_servers')
super(AlertForm, self).__init__(*args, **kwargs)
if self.servers:
server_fields = [('',''), ('all','All'),]+[(v['_id'],v['name']) for v in self.servers]
else:
server_fields = [('',''),('all','All'),]
self.fields['server'] = forms.ChoiceField(choices=server_fields)
self.fields['above_below'].widget.attrs.update({'select2-dropdown': '', 'data-size': 120})
self.fields['server'].widget.attrs.update({
'server-dropdown': '',
'data-placeholder': 'Select server',
'data-url': reverse('api_alerts_get_metrics'),
})
self.fields['period'].widget.attrs.update({'select2-dropdown': ''})
metric = forms.CharField(widget=forms.Select())
command = forms.CharField(required=False)
metric_value = forms.IntegerField(min_value=0, initial=0)
above_below = forms.ChoiceField(choices=ABOVE_BELOW_CHOICES)
period = forms.ChoiceField(choices=PERIOD_CHOICES)
class EditAlertForm(AlertForm):
def __init__(self, *args, **kwargs):
super(EditAlertForm, self).__init__(*args, **kwargs)
if self.servers:
server_fields = [('',''), ('all','All'),]+[(v['_id'],v['name']) for v in self.servers]
else:
server_fields = [('',''),('all','All'),]
self.fields['server'] = forms.ChoiceField(choices=server_fields, required=False)
self.fields['server'].widget.attrs.update({
'server-dropdown': '',
})
metric = forms.CharField(widget=forms.Select(), required=False)
class MuteForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MuteForm, self).__init__(*args, **kwargs)
all_servers = server_model.get_all()
if all_servers:
server_fields = [('all','All'),]+[(v['_id'],v['name']) for v in all_servers]
else:
server_fields = [('all','All'),]
self.fields['server'] = forms.ChoiceField(choices=server_fields)
self.fields['server'].widget.attrs.update({'server-dropdown': '', 'data-size': 250 })
all_tags = tags_model.get_all()
if all_tags:
tags_fields = [(v['_id'],"{0}:{1}".format(v.get('group', {}).get('name'), v['name']) ) for v in all_tags]
self.fields['tags'] = forms.MultipleChoiceField(choices=tags_fields, required=False)
self.fields['tags'].widget.attrs.update({'select2-dropdown': '', 'data-size': 400})
period = forms.ChoiceField(choices=MUTE_CHOICES, widget=forms.Select(attrs={'select2-dropdown': '', 'data-size': 150}))
def save(self):
data = self.cleaned_data
alert_mute_servers_model.save(data=data)
class HealthCheckAlertForm(forms.Form):
def __init__(self, *args, **kwargs):
self.servers = kwargs.pop('all_servers')
super(HealthCheckAlertForm, self).__init__(*args, **kwargs)
if self.servers:
server_fields = [('',''), ('all','All'),]+[(v['_id'],v['name']) for v in self.servers]
else:
server_fields = [('',''),('all','All'),]
self.fields['server'] = forms.ChoiceField(choices=server_fields)
self.fields['server'].widget.attrs.update({
'server-dropdown': '',
'data-placeholder': 'Select server',
'data-url': reverse('api_alerts_get_health_check_commands'),
})
self.fields['status'].widget.attrs.update({'select2-dropdown': ''})
self.fields['period'].widget.attrs.update({'select2-dropdown': ''})
status = forms.ChoiceField(choices=HEALTCHECKS_STATUS_CHOICES)
period = forms.ChoiceField(choices=PERIOD_CHOICES)
class EditHealthCheckAlertForm(HealthCheckAlertForm):
def __init__(self, *args, **kwargs):
super(EditHealthCheckAlertForm, self).__init__(*args, **kwargs)
if self.servers:
server_fields = [('',''), ('all','All'),]+[(v['_id'],v['name']) for v in self.servers]
else:
server_fields = [('',''),('all','All'),]
self.fields['server'] = forms.ChoiceField(choices=server_fields, required=False)
self.fields['server'].widget.attrs.update({
'server-dropdown': '',
})
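# --- Illustrative sketch, not part of the original module ---
# How the server <select> choices are built from server documents; the
# documents below are made up and this fragment needs no Django setup.
if __name__ == "__main__":
    _servers = [{'_id': '5f1', 'name': 'web-1'}, {'_id': '5f2', 'name': 'db-1'}]
    _choices = [('', ''), ('all', 'All')] + [(v['_id'], v['name']) for v in _servers]
    print(_choices)
    # -> [('', ''), ('all', 'All'), ('5f1', 'web-1'), ('5f2', 'db-1')]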
| 5,278 | Python | .py | 116 | 37.431034 | 123 | 0.598537 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,071 | plugin.py | amonapp_amon/amon/apps/alerts/checkers/plugin.py |
class PluginAlerts(object):
def __init__(self):
self.alert = {}
def check_value(self, rule=None, incoming_value=None):
trigger = False
if rule['above_below'] == 'above':
if float(incoming_value) > float(rule['metric_value']):
trigger = True
if rule['above_below'] == 'below':
if float(incoming_value) < float(rule['metric_value']):
trigger = True
return trigger
def check(self, data=None, rule=None):
trigger = False
gauge_data = rule.get("gauge_data")
key = rule.get('key')
rule_type = rule.get('rule_type', 'plugin')
if type(gauge_data) is dict and key and rule_type == 'plugin':
gauge_name = gauge_data.get('name')
key_name = u"{0}.{1}".format(gauge_name, key)
incoming_value = data.get(key_name)
if incoming_value:
trigger = self.check_value(rule=rule, incoming_value=incoming_value)
self.alert = {'value': incoming_value, 'alert_id': rule['_id'], 'trigger': trigger}
if rule_type == 'plugin_global':
key_name = u"{0}.{1}".format(rule.get('gauge'), rule.get('key'))
incoming_value = data.get(key_name)
if incoming_value:
trigger = self.check_value(rule=rule, incoming_value=incoming_value)
self.alert = {'value': incoming_value, 'alert_id': rule['_id'], 'trigger': trigger}
return self.alert
plugin_alerts = PluginAlerts()
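# --- Illustrative usage sketch, not part of the original module ---
# A hypothetical 'plugin' rule: check() looks up "<gauge>.<key>" in the
# incoming data dict and compares the value against the rule threshold.
if __name__ == "__main__":
    _rule = {'metric_value': 50, 'above_below': 'above', '_id': 'example',
             'gauge_data': {'name': 'nginx'}, 'key': 'requests'}
    _data = {'nginx.requests': '72.5'}
    print(plugin_alerts.check(data=_data, rule=_rule))
    # -> {'value': '72.5', 'alert_id': 'example', 'trigger': True}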
| 1,575 | Python | .py | 32 | 37.34375 | 99 | 0.568498 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,072 | process.py | amonapp_amon/amon/apps/alerts/checkers/process.py |
class ProcessAlerts(object):
def __init__(self):
self.alert = {}
def check(self, data, rule=None):
value = rule.get("check") if rule.get('check') else rule.get('metric')
value = value.lower()
if value == 'memory':
self.check_memory(rule, data)
if value == 'cpu':
self.check_cpu(rule, data)
return self.alert
def check_memory(self, rule, data):
trigger = False
if rule['above_below'] == 'above':
if float(data['m']) > float(rule['metric_value']):
trigger = True
if rule['above_below'] == 'below':
if float(data['m']) < float(rule['metric_value']):
trigger = True
self.alert = {'value': data['m'], 'alert_id': rule['_id'], 'trigger': trigger}
def check_cpu(self, rule, data):
trigger = False
utilization = float(data['c'])
if rule['above_below'] == 'above':
if float(rule['metric_value']) < utilization:
trigger = True
if rule['above_below'] == 'below':
if float(rule['metric_value']) > utilization:
trigger = True
self.alert = {'value': utilization, 'alert_id': rule['_id'], 'trigger': trigger}
process_alerts = ProcessAlerts()
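# --- Illustrative usage sketch, not part of the original module ---
# Hypothetical rule/data pair: 'm' is process memory in MB, 'c' is CPU %.
if __name__ == "__main__":
    _rule = {'metric_value': 256, 'above_below': 'above',
             '_id': 'example', 'check': 'Memory'}
    _data = {'m': '300.0', 'c': '12.5'}
    print(process_alerts.check(_data, rule=_rule))
    # -> {'value': '300.0', 'alert_id': 'example', 'trigger': True}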
| 1,373 | Python | .py | 31 | 32.129032 | 88 | 0.542587 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,073 | system.py | amonapp_amon/amon/apps/alerts/checkers/system.py |
class SystemAlerts(object):
def __init__(self):
self.alerts = {}
def check(self, data=None, rules=None, server=None):
if rules:
for rule in rules:
if rule['metric'] == 'CPU':
cpu_data = data.get('cpu', None)
if cpu_data:
self.check_cpu(rule, data['cpu'])
elif rule['metric'] == 'Memory':
memory_data = data.get('memory', None)
if memory_data:
self.check_memory(rule, data['memory'])
elif rule['metric'] == 'Loadavg':
load_data = data.get('loadavg', None)
if load_data:
self.check_loadavg(rule, data['loadavg'])
elif rule['metric'] == 'Disk':
disk_data = data.get('disk', None)
if disk_data:
self.check_disk(rule, data['disk'])
elif rule['metric'] in ['Network/inbound', 'Network/outbound']:
network_data = data.get('network', None)
if network_data:
self.check_network(rule, data['network'])
if len(self.alerts) > 0:
alerts = self.alerts
self.alerts = {}
return alerts
else:
return False
def check_memory(self, rule, data):
trigger = False
# Calculate rules with MB
metric_type = rule.get('metric_type')
if rule['metric_type'] == 'MB':
used_memory = float(data['used_mb'])
else:
used_memory = float(data['used_percent'])
if rule['above_below'] == 'above':
if used_memory > float(rule['metric_value']):
trigger = True
if rule['above_below'] == 'below':
if used_memory < float(rule['metric_value']):
trigger = True
alert = {"value": used_memory , "rule": str(rule['_id']),
"metric_type": metric_type,
"trigger": trigger}
        self.alerts.setdefault('memory', []).append(alert)
return True
def check_cpu(self, rule, data):
last = data.get('last', None)
if last:
return False
trigger = False
        # Utilization shows total CPU usage
utilization = float(100) - float(data['idle'])
if rule['above_below'] == 'above':
if float(rule['metric_value']) < utilization:
trigger = True
if rule['above_below'] == 'below':
if float(rule['metric_value']) > utilization:
trigger = True
alert = {"value": utilization , "rule": str(rule['_id']), "trigger": trigger}
        self.alerts.setdefault('cpu', []).append(alert)
return True
def check_loadavg(self, rule, data):
last = data.get('last', None)
if last:
return False
trigger = False
        values = [float(data['minute']), float(data['five_minutes']), float(data['fifteen_minutes'])]
        value_to_compare = float(sum(values)) / len(values) if len(values) > 0 else float('nan')
if rule['above_below'] == 'above':
if float(rule['metric_value']) < value_to_compare:
trigger = True
if rule['above_below'] == 'below':
if float(rule['metric_value']) > value_to_compare:
trigger = True
alert = {"value": value_to_compare ,
"rule": str(rule['_id']),
'trigger': trigger
}
        self.alerts.setdefault('loadavg', []).append(alert)
return True
# Internal - checks a single volume
def _check_volume(self, volume_data, rule, volume):
trigger = False
used = volume_data['percent'] if rule['metric_type'] == "%" else volume_data['used']
metric_type = '%' if rule['metric_type'] == '%' else 'MB'
# Convert the data value to MB
if isinstance(used, str):
if 'G' in used:
used = used.replace('G','')
used = float(used) * 1024
elif 'MB' in used:
used = used.replace('MB','')
elif 'M' in used:
used = used.replace('M', '')
used = float(used)
# Convert the rule value to MB if necessary
if rule['metric_type'] == 'GB':
metric_value = float(rule['metric_value']) * 1024
else:
metric_value = float(rule['metric_value'])
if rule['above_below'] == 'above':
if metric_value < used:
trigger = True
if rule['above_below'] == 'below':
if metric_value > used:
trigger = True
alert = {
"value": used ,
"rule": str(rule['_id']),
'metric_type': metric_type,
'volume': volume,
"trigger": trigger
}
return alert
def check_disk(self, rule, data):
        # New golang agent: [{"name": "sda1", "used": 100, "free": 100}]
        # Reformat to the old shape: {"sda1": {"used": 100, "free": 100}}
if type(data) is list:
data = self._format_golang_device_data(data=data)
last = data.get('last', None)
if last:
return False
volumes = []
single_volume = rule.get('volume', None)
if single_volume:
volumes.append(single_volume)
else:
volumes = data.keys()
if len(volumes) > 0:
            # {"sda1": {"used": ..., "free": ...}}
for volume in volumes:
volume_data = data.get(volume, False)
if volume_data:
alert = self._check_volume(volume_data, rule, volume)
                    self.alerts.setdefault('disk', []).append(alert)
return True
    # Internal - checks a single interface
def _check_interface(self, data, rule, iface):
trigger = False
rule_type = rule.get('metric').lower()
if rule_type == 'network/inbound':
value_to_compare = data.get('inbound')
elif rule_type == 'network/outbound':
value_to_compare = data.get('outbound')
metric_value = float(rule['metric_value'])
value_to_compare = float(value_to_compare)
if rule['above_below'] == 'above':
if metric_value < value_to_compare:
trigger = True
if rule['above_below'] == 'below':
if metric_value > value_to_compare:
trigger = True
alert = {
"value": value_to_compare,
"rule": str(rule['_id']),
'interface': iface,
"trigger": trigger
}
return alert
def check_network(self, rule, data):
        # New golang agent: [{"name": "eth0", "inbound": 100, "outbound": 100}]
        # Reformat to the old shape: {"eth0": {"inbound": 100, "outbound": 100}}
if type(data) is list:
data = self._format_golang_device_data(data=data)
        last = data.get('last', None)  # mirror the 'last' marker check in check_disk
if last:
return False
single_interface = rule.get('interface', None)
interfaces = []
if single_interface:
interfaces.append(single_interface)
else:
interfaces = data.keys()
if len(interfaces) > 0:
            # {"eth1": {"inbound": ..., "outbound": ...}}
for iface in interfaces:
interface_data = data.get(iface, False)
if interface_data:
alert = self._check_interface(interface_data, rule, iface)
                    self.alerts.setdefault('network', []).append(alert)
return True
def _format_golang_device_data(self, data=None):
formatted_data = {}
for device in data:
name = device.get("name")
if name:
formatted_data[name] = device
return formatted_data
system_alerts = SystemAlerts()
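# --- Illustrative usage sketch, not part of the original module ---
# A hypothetical CPU rule fed through check(); utilization is 100 - idle.
if __name__ == "__main__":
    _rules = [{'metric': 'CPU', 'metric_value': 80,
               'above_below': 'above', '_id': 'example'}]
    _data = {'cpu': {'idle': 9.5}}  # 90.5% utilization
    print(system_alerts.check(data=_data, rules=_rules))
    # -> {'cpu': [{'value': 90.5, 'rule': 'example', 'trigger': True}]}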
| 8,895 | Python | .py | 219 | 27.424658 | 101 | 0.497088 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,074 | healthcheck.py | amonapp_amon/amon/apps/alerts/checkers/healthcheck.py |
class HealthCheckAlertChecker(object):
def check(self, data=None, rule=None):
self.alert = False
exit_codes = {0: "ok", 1: "warning", 2: "critical"}
data_command_full = data.get("command", "")
data_command_list = data_command_full.split(" ")
if len(data_command_list) > 0:
data_command = data_command_list[0]
else:
data_command = data_command_full
rule_command = rule.get("command").strip()
rule_command_param = rule.get("param", "")
rule_data_match = False
# Global alert
if len(rule_command_param) == 0:
if rule_command == data_command:
rule_data_match = True
else:
rule_command_param = rule_command_param.strip()
rule_command_full = "{0} {1}".format(rule_command, rule_command_param)
if rule_command_full == data_command_full:
rule_data_match = True
# Now check the conditions
        if rule_data_match:
            exit_code = data.get('exit_code')
            rule_status = rule.get("status")
            # Unknown exit codes fall through to False and never match a rule
            current_status = exit_codes.get(exit_code, False)
if rule_status == current_status:
self.alert = {
'value': current_status,
'alert_id': rule['_id'],
'trigger': True,
'health_checks_data_id': data.get('health_checks_data_id')
}
return self.alert
healthcheck_alert_checker = HealthCheckAlertChecker()
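# --- Illustrative usage sketch, not part of the original module ---
# A hypothetical rule scoped to command + param; exit_code 2 maps to
# "critical", which matches the rule status and raises the trigger.
if __name__ == "__main__":
    _data = {'command': 'check-http.rb -u https://example.com',
             'exit_code': 2, 'health_checks_data_id': None}
    _rule = {'command': 'check-http.rb', 'param': '-u https://example.com',
             'status': 'critical', '_id': 'example'}
    print(healthcheck_alert_checker.check(data=_data, rule=_rule))
    # -> {'value': 'critical', 'alert_id': 'example', 'trigger': True,
    #     'health_checks_data_id': None}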
| 1,667 | Python | .py | 39 | 29.74359 | 82 | 0.539185 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,075 | system_test.py | amonapp_amon/amon/apps/alerts/checkers/tests/system_test.py |
from amon.apps.alerts.checkers.system import SystemAlerts
from nose.tools import eq_
import unittest
class SystemAlertsTest(unittest.TestCase):
def check_memory_test(self):
system_alerts = SystemAlerts()
data = {u'free_mb': 1, u'total_mb': 102, u'used_mb': 101}
rule = {'metric_value': 100, 'above_below': 'above', 'metric_type': 'MB', '_id':'test'}
system_alerts.check_memory(rule, data)
eq_(system_alerts.alerts['memory'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'free_mb': 101, u'total_mb': 102, u'used_mb': 1}
rule = {'metric_value': 2, 'above_below': 'below', 'metric_type': 'MB', "_id": "test"}
system_alerts.check_memory(rule, data)
eq_(system_alerts.alerts['memory'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'free_mb': 49, u'total_mb': 100, u'used_percent': 49}
rule = {'metric_value': 50, 'above_below': 'below', 'metric_type': '%', "_id": "test"}
system_alerts.check_memory(rule, data)
eq_(system_alerts.alerts['memory'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'free_mb': 51, u'total_mb': 100, u'used_percent': 51}
rule = {'metric_value': 50, 'above_below': 'above', 'metric_type': '%', "_id": "test"}
system_alerts.check_memory(rule, data)
eq_(system_alerts.alerts['memory'][0]['trigger'], True)
# Trigger false, still return the data
system_alerts = SystemAlerts()
data = {u'free_mb': 50, u'total_mb': 100, u'used_mb': 50}
rule = {'metric_value': 49, 'above_below': 'below', 'metric_type': 'MB', "_id": "test"}
system_alerts.check_memory(rule, data)
eq_(system_alerts.alerts['memory'][0]['trigger'], False)
def check_cpu_test(self):
system_alerts = SystemAlerts()
data = {u'idle': 89} # utilization 11.00
rule = {'metric_value': 10, 'above_below': 'above', 'metric_type': '%', "_id": "test"}
system_alerts.check_cpu(rule, data)
eq_(system_alerts.alerts['cpu'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'idle': 91} # utilization 9.0
rule = {'metric_value': 10, 'above_below': 'above', 'metric_type': '%', "_id": "test"}
system_alerts.check_cpu(rule, data)
eq_(system_alerts.alerts['cpu'][0]['trigger'], False)
system_alerts = SystemAlerts()
        data = {u'idle': 89} # utilization 11.0
rule = {'metric_value': 10, 'above_below': 'below', 'metric_type': '%', "_id": "test"}
system_alerts.check_cpu(rule, data)
eq_(system_alerts.alerts['cpu'][0]['trigger'], False)
system_alerts = SystemAlerts()
data = {u'idle': 91} # utilization 9.0
rule = {'metric_value': 10, 'above_below': 'below', 'metric_type': '%', "_id": "test"}
system_alerts.check_cpu(rule, data)
eq_(system_alerts.alerts['cpu'][0]['trigger'], True)
def check_disk_test(self):
system_alerts = SystemAlerts()
data = {'sda1': {u'percent': 60, u'used': '6G'}}
rule = {'metric_value': 55, 'above_below': 'above',
'metric_type': '%','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {'sda1': {u'percent': 60, u'used': '6G'}}
rule = {'metric_value': 61, 'above_below': 'below',
'metric_type': '%','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {'sda1': {u'used': '6G'}}
rule = {'metric_value': 5.9, 'above_below': 'above',
'metric_type': 'GB','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {'sda1': {u'used': '6G'}}
rule = {'metric_value': 6.1, 'above_below': 'below',
'metric_type': 'GB','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {'sda1': {u'used': '6G'}} # 6144 MB
rule = {'metric_value': 6143, 'above_below': 'above',
'metric_type': 'MB','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {'sda1': {u'used': '6G'}} # 6144 MB
rule = {'metric_value': 6145, 'above_below': 'below',
'metric_type': 'MB','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], True)
# Check the new golang agent
system_alerts = SystemAlerts()
data = [{"name": "sda1", "used": "6G"}] # 6144 MB
rule = {'metric_value': 6145, 'above_below': 'below',
'metric_type': 'MB','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], True)
# Trigger False, return the data
system_alerts = SystemAlerts()
data = {'sda1': {u'used': '6G'}} # 6144 MB
rule = {'metric_value': 6140, 'above_below': 'below',
'metric_type': 'MB','volume': 'sda1', "_id": "test"}
system_alerts.check_disk(rule, data)
eq_(system_alerts.alerts['disk'][0]['trigger'], False)
# Different volume, No incoming data
system_alerts = SystemAlerts()
data = {'sda1': {u'used': '6G'}} # 6144 MB
rule = {'metric_value': 6140, 'above_below': 'below',
'metric_type': 'MB','volume': 'sda2', "_id": "test"}
system_alerts.check_disk(rule, data)
assert len(system_alerts.alerts) == 0
def check_network_test(self):
system_alerts = SystemAlerts()
data = {u'eth1-inbound': {u'inbound': u'100', u'outbound': u'0.00'}}
rule = {'metric_value': 55, 'above_below': 'above', "_id": "test", "metric": "Network/inbound"}
system_alerts.check_network(rule, data)
eq_(system_alerts.alerts['network'][0]['trigger'], True)
eq_(system_alerts.alerts['network'][0]['interface'], 'eth1-inbound')
system_alerts = SystemAlerts()
data = {u'eth1': {u'inbound': u'45', u'outbound': u'0.00'}}
rule = {'metric_value': 55, 'above_below': 'above', "_id": "test", "metric": "Network/inbound"}
system_alerts.check_network(rule, data)
eq_(system_alerts.alerts['network'][0]['trigger'], False)
system_alerts = SystemAlerts()
data = {u'eth1-outbound': {u'inbound': u'1', u'outbound': u'65'}}
rule = {'metric_value': 55, 'above_below': 'above', "_id": "test", "metric": "Network/outbound"}
system_alerts.check_network(rule, data)
eq_(system_alerts.alerts['network'][0]['trigger'], True)
eq_(system_alerts.alerts['network'][0]['interface'], 'eth1-outbound')
system_alerts = SystemAlerts()
data = {u'eth1': {u'inbound': u'1', u'outbound': u'45'}}
rule = {'metric_value': 55, 'above_below': 'above', "_id": "test", "metric": "Network/outbound"}
system_alerts.check_network(rule, data)
eq_(system_alerts.alerts['network'][0]['trigger'], False)
# Check the new golang agent
system_alerts = SystemAlerts()
data = [{"name": "eth1", u'inbound': u'1', u'outbound': u'45'}]
rule = {'metric_value': 55, 'above_below': 'above', "_id": "test", "metric": "Network/outbound"}
system_alerts.check_network(rule, data)
eq_(system_alerts.alerts['network'][0]['trigger'], False)
# Check the new golang agent
system_alerts = SystemAlerts()
data = [{"name": "eth1", u'inbound': u'1', u'outbound': u'45'}]
rule = {'metric_value': 55, 'above_below': 'above', "_id": "test", "metric": "Network/outbound", "interface": "eth2"}
system_alerts.check_network(rule, data)
assert len(system_alerts.alerts) == 0
def check_loadavg_test(self):
system_alerts = SystemAlerts()
data = {u'minute': 1, u'five_minutes': 1, u'fifteen_minutes': 1}
rule = {'metric_value': 0.9, 'above_below': 'above', 'metric_options': 'minute',"_id": "test"}
system_alerts.check_loadavg(rule, data)
eq_(system_alerts.alerts['loadavg'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'minute': 1, u'five_minutes': 1, u'fifteen_minutes': 1}
rule = {'metric_value': 1.1, 'above_below': 'below', 'metric_options': 'minute',"_id": "test"}
system_alerts.check_loadavg(rule, data)
eq_(system_alerts.alerts['loadavg'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'minute': 1, u'five_minutes': 1, u'fifteen_minutes': 1}
rule = {'metric_value': 0.9, 'above_below': 'above', 'metric_options': 'five_minutes',"_id": "test"}
system_alerts.check_loadavg(rule, data)
eq_(system_alerts.alerts['loadavg'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'minute': 1 , u'five_minutes': 1, u'fifteen_minutes': 1}
rule = {'metric_value': 1.1, 'above_below': 'below', 'metric_options': 'five_minutes',"_id": "test"}
system_alerts.check_loadavg(rule, data)
eq_(system_alerts.alerts['loadavg'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'minute': 1 , u'five_minutes': 1, u'fifteen_minutes': 1}
rule = {'metric_value': 0.9, 'above_below': 'above', 'metric_options': 'fifteen_minutes',"_id": "test"}
system_alerts.check_loadavg(rule, data)
eq_(system_alerts.alerts['loadavg'][0]['trigger'], True)
system_alerts = SystemAlerts()
data = {u'minute': 1 , u'five_minutes': 1, u'fifteen_minutes': 1}
rule = {'metric_value': 1.1, 'above_below': 'below', 'metric_options': 'fifteen_minutes',"_id": "test"}
system_alerts.check_loadavg(rule, data)
eq_(system_alerts.alerts['loadavg'][0]['trigger'], True)
# Trigger false
system_alerts = SystemAlerts()
data = {u'minute': 1 , u'five_minutes': 1, u'fifteen_minutes': 1}
rule = {'metric_value': 0.9, 'above_below': 'below', 'metric_options': 'fifteen_minutes',"_id": "test"}
system_alerts.check_loadavg(rule, data)
eq_(system_alerts.alerts['loadavg'][0]['trigger'], False)
| 10,763 | Python | .py | 182 | 49.752747 | 125 | 0.576963 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,076 | process_test.py | amonapp_amon/amon/apps/alerts/checkers/tests/process_test.py |
from amon.apps.alerts.checkers.process import ProcessAlerts
from nose.tools import eq_
import unittest
class ProcessAlertsTest(unittest.TestCase):
def setUp(self):
self.process_alerts = ProcessAlerts()
def check_memory_test(self):
data = {u'm': u'40.0', u'time': 1327169023}
rule = {'metric_value': 39, 'above_below': 'above', 'metric_type': 'MB','process': 'test', '_id':'test', 'check': 'Memory'}
alert = self.process_alerts.check(data, rule=rule)
eq_(alert['trigger'], True)
data = {u'm': u'39.9', u'time': 1327169023}
rule = {'metric_value': 40, 'above_below': 'below', 'metric_type': 'MB','process': 'test', '_id':'test', 'check': 'Memory'}
alert = self.process_alerts.check(data, rule=rule)
eq_(alert['trigger'], True)
def check_cpu_test(self):
data = {u'c': u'40.0', u'time': 1327169023}
rule = {'metric_value': 39, 'above_below': 'above', 'metric_type': '%','process': 'test', '_id':'test' , 'check': 'CPU'}
alert = self.process_alerts.check(data, rule=rule)
eq_(alert['trigger'], True)
data = { u'c': u'39.99', u'time': 1327169023}
rule = {'metric_value': 40, 'above_below': 'below', 'metric_type': '%','process': 'test', '_id':'test' , 'check': 'CPU'}
alert = self.process_alerts.check(data, rule=rule)
eq_(alert['trigger'], True)
| 1,412 | Python | .py | 24 | 50.958333 | 131 | 0.594457 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,077 | plugin_test.py | amonapp_amon/amon/apps/alerts/checkers/tests/plugin_test.py |
from amon.apps.alerts.checkers.plugin import PluginAlerts
from nose.tools import eq_
import unittest
class PluginAlertsTest(unittest.TestCase):
def setUp(self):
self.plugin_alerts = PluginAlerts()
def check_test(self):
data = {u'myplugin.test_above': u'40.0', u'time': 1327169023}
rule = {'metric_value': 39, 'above_below': 'above', 'gauge_data': {'name': 'myplugin'}, '_id':'test', 'key': 'test_above'}
alert = self.plugin_alerts.check(data, rule=rule)
eq_(alert['trigger'], True)
data = {u'myplugin.test_below': u'39.9', u'time': 1327169023}
rule = {'metric_value': 40, 'above_below': 'below', 'gauge_data': {'name': 'myplugin'}, '_id':'test', 'key': 'test_below'}
alert = self.plugin_alerts.check(data, rule=rule)
eq_(alert['trigger'], True)
| 835 | Python | .py | 15 | 48.933333 | 130 | 0.63145 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,078 | healthcheck_test.py | amonapp_amon/amon/apps/alerts/checkers/tests/healthcheck_test.py |
from amon.apps.alerts.checkers.healthcheck import HealthCheckAlertChecker
from nose.tools import eq_
import unittest
class HealthChecksAlertsTest(unittest.TestCase):
def setUp(self):
self.health_check_alerts = HealthCheckAlertChecker()
def test_check_invalid(self):
# Alert for specific check
        for i in range(1, 3):  # exercise both warning (1) and critical (2)
exit_codes = {0: "ok", 1: "warning", 2: "critical"}
exit_status = exit_codes[i]
data = {
u'output': u'CheckBanner CRITICAL',
u'command': u'check-something.rb',
u'exit_code': i
}
rule = {
"status" : 2,
"param" : "-u https://amon.cx",
"command" : "check-http.rb",
"_id": "test"
}
alert = self.health_check_alerts.check(data=data, rule=rule)
self.assertFalse(alert)
def test_check_valid(self):
# Alert for specific check
        for i in range(1, 3):  # exercise both warning (1) and critical (2)
exit_codes = {0: "ok", 1: "warning", 2: "critical"}
exit_status = exit_codes[i]
data = {
u'output': u'CheckBanner CRITICAL',
u'command': u'check-http.rb -u https://amon.cx',
u'exit_code': i
}
rule = {
"status" : exit_status,
"param" : "-u https://amon.cx",
"command" : "check-http.rb",
"_id": "test"
}
alert = self.health_check_alerts.check(data=data, rule=rule)
eq_(alert['trigger'], True)
# Global alert
        for i in range(1, 3):  # exercise both warning (1) and critical (2)
exit_codes = {0: "ok", 1: "warning", 2: "critical"}
exit_status = exit_codes[i]
data = {
u'output': u'CheckBanner CRITICAL',
u'command': u'check-http.rb',
u'exit_code': i
}
rule = {
"status" : exit_status,
"param" : "",
"command" : "check-http.rb",
"_id": "test"
}
alert = self.health_check_alerts.check(data=data, rule=rule)
eq_(alert['trigger'], True)
data_different_input = {
u'output': u'CheckBanner CRITICAL',
u'command': u'check-http.rb -u something - w else',
u'exit_code': i
}
rule = {
"status" : exit_status,
"param" : "",
"command" : "check-http.rb",
"_id": "test"
}
alert = self.health_check_alerts.check(data=data_different_input, rule=rule)
eq_(alert['trigger'], True)
| 2,817 | Python | .py | 72 | 24.972222 | 89 | 0.4705 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,079 | mute.py | amonapp_amon/amon/apps/alerts/models/mute.py |
from datetime import datetime, timedelta
from amon.apps.core.basemodel import BaseModel
from amon.utils.dates import datetime_to_unixtime
from amon.apps.tags.models import tags_model
from amon.apps.servers.models import server_model
class AlertMuteServersModel(BaseModel):
def __init__(self):
super(AlertMuteServersModel, self).__init__()
self.collection = self.mongo.get_collection('alerts_muted_servers')
def check_if_server_is_muted(self, server=None):
muted = False
cursor = super(AlertMuteServersModel, self).get_all()
server_id = str(server.get('_id'))
server_tags = server.get('tags', [])
server_tags = [str(t) for t in server_tags]
for r in cursor:
tags = r.get('tags', [])
tags = [str(t) for t in tags]
mute_server_id = str(r.get('server'))
# Check tags first
if len(server_tags) > 0 and len(tags) > 0:
muted = set(tags).issubset(server_tags)
# Don't overwrite
if muted == False:
# Check IDS now
muted = True if mute_server_id == server_id else False
# Finally check for global mute, no tags
if mute_server_id == 'all' and len(tags) == 0:
muted = True
return muted
def get_all(self):
result_list = []
result = super(AlertMuteServersModel, self).get_all()
for r in result:
tags = r.get('tags', [])
r['tags'] = [tags_model.get_by_id(x) for x in tags]
r['server'] = server_model.get_by_id(r.get('server'))
result_list.append(r)
return result_list
def save(self, data=None):
period = int(data.get('period'))
if period > 0:
expire_at = datetime.utcnow() + timedelta(hours=period)
data['expires_at_utc'] = datetime_to_unixtime(expire_at)
data['expires_at'] = expire_at
self.collection.insert(data)
self.collection.ensure_index([('expires_at', 1)], expireAfterSeconds=0)
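# --- Illustrative sketch, not part of the original module ---
# The tag test used by check_if_server_is_muted: a mute entry applies when
# its tags are a subset of the server's tags (hypothetical ids below).
if __name__ == "__main__":
    server_tags = ['tag-a', 'tag-b']
    mute_tags = ['tag-a']
    print(set(mute_tags).issubset(server_tags))  # True -> server is muted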
| 2,138 | Python | .py | 47 | 34.595745 | 79 | 0.594103 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,080 | alertshistory.py | amonapp_amon/amon/apps/alerts/models/alertshistory.py |
import math
from datetime import datetime, timedelta
from amon.apps.core.basemodel import BaseModel
from amon.apps.servers.models import server_model
from amon.utils.dates import unix_utc_now
from amon.apps.devices.models import volumes_model
from amon.apps.devices.models import interfaces_model
from amon.apps.healthchecks.models import health_checks_results_model, health_checks_model
class AlertHistoryModel(BaseModel):
def __init__(self):
super(AlertHistoryModel, self).__init__()
self.collection = self.mongo.get_collection('alert_history')
def set_task_id(self, trigger_id=None, task_id=None):
task_id = self.mongo.get_object_id(task_id)
self.collection.update({'_id': self.mongo.get_object_id(trigger_id)}, {"$set": {"task_id": task_id}})
def mark_as_sent(self, trigger_id):
self.collection.update({'_id': self.mongo.get_object_id(trigger_id)}, {"$set": {"sent": True}})
def get_all_unsent(self):
query_params = {'sent': False}
results = self.collection.find(query_params)
data = {
'data': results.clone(),
'count': results.count()
}
return data
def get_unsent(self, server_id=None):
hour_ago = unix_utc_now()-3600
query_params = {'sent': False, "time": {"$gte": int(hour_ago)}}
if server_id:
query_params['server_id'] = server_id
results = self.collection.find(query_params)
data = {
'data': results.clone(),
'count': results.count()
}
return data
# Used in the ajax popup in the alerts screen
def get_notifications_list(self, alert_id=None, server=None, limit=0, skip=0):
notifications_list = []
server_id = None
if server:
server_id = server.get('_id') # Could be none
notifications = self.get_notifications(alert_id=alert_id, server_id=server_id, limit=limit, skip=skip)
if notifications['count'] > 0:
for notification in notifications['data']:
notification_dict = {
"period_from": notification['from'],
"period_to": notification['time'],
"average_value": notification['average_value'],
"id": notification.get('_id')
}
# System notifications specific here
server = server_model.get_by_id(notification.get('server_id'))
if server:
notification_dict['server_id'] = server['_id']
notification_dict['server'] = server['name']
notification_dict['last_check'] = server.get('last_check')
volume = notification.get('volume')
if volume:
notification_dict['volume_data'] = volumes_model.get_by_id(volume)
interface = notification.get('interface')
if interface:
notification_dict['interface_data'] = interfaces_model.get_by_id(interface)
health_checks_data_id = notification.get("health_checks_data_id")
if health_checks_data_id:
health_check_data = health_checks_results_model.get_by_id(health_checks_data_id)
if type(health_check_data) is dict:
health_check_id = health_check_data.get('check_id')
notification_dict['health_check'] = health_checks_model.get_by_id(health_check_id)
notification_dict['health_check_data'] = health_check_data
notifications_list.append(notification_dict)
return notifications_list
def count_notifications(self, alert_id=None, date_after=None):
query_params = {'alert_id': alert_id, 'notify': True}
result = self.collection.find(query_params).count()
return result
def get_notifications(self, alert_id=None, server_id=None, limit=0, skip=0):
query_params = {'alert_id': alert_id, 'notify': True,}
if server_id:
query_params['server_id'] = server_id
results = self.collection.find(query_params).sort([('time', self.desc)]).limit(limit).skip(skip)
data = {
'data': results,
'count': results.count()
}
return data
def get_last_trigger(self, alert_id=None):
query_params = {'alert_id': alert_id, 'notify': True,}
try:
last_trigger = self.collection.find(query_params).sort([('time', self.desc)]).limit(1)[0]
except:
last_trigger = None
return last_trigger
def get_all(self, alert=None, server_id=None, reverse=False):
server_id = self.mongo.get_object_id(server_id)
query_params = {'server_id': server_id,'alert_id': alert['_id']}
sort = self.asc if reverse == True else self.desc
results = self.collection.find(query_params)
data = {
'data': results.clone().sort([('time', sort)]),
'count': results.clone().count()
}
return data
def get_for_period(self, alert=None, server_id=None, date_after=None):
server_id = self.mongo.get_object_id(server_id)
query_params = {
'server_id': server_id,
'alert_id': alert['_id'],
'time': {'$gte' :date_after}
}
results = self.collection.find(query_params)
data = {
'data': results.clone().sort([('time', self.desc)]),
'count': results.clone().count()
}
return data
def clear(self, alert_id=None, server_id=None):
query_params = {}
if alert_id:
query_params['alert_id'] = self.mongo.get_object_id(alert_id)
if server_id:
query_params['server_id'] = self.mongo.get_object_id(server_id)
if len(query_params) > 0:
self.collection.remove(query_params)
def save(self, alert=None, server_id=None, data=None):
query_params = {'alert_id': alert['_id'], "notify": {'$exists': False}}
# Plugins and Health check alerts
if server_id:
query_params['server_id'] = server_id
# Disk alerts
volume = data.get('volume', None)
if volume:
query_params['volume'] = volume
# Network alerts
interface = data.get('interface', None)
if interface:
query_params['interface'] = interface
trigger = data.get('trigger', False)
time = data.get('time')
if trigger == True:
last_notification = self.collection.find_one(query_params)
if last_notification == None:
query_dict = query_params.copy()
del query_dict['notify']
data['value'] = [data['value'], ]
data['from'] = data.get('time')
data.update(query_dict)
self.collection.insert(data)
else:
# Update time
self.collection.update({"_id": last_notification['_id']}, {"$set": {"time": time}, "$push": {"value": data['value']}})
last_notification = self.collection.find_one(query_params)
            # How long has the alert condition persisted?
            time_difference = last_notification['time'] - last_notification['from']
            if time_difference >= alert['period']:
                value = last_notification['value']
                try:
                    average_value = float("{0:.2f}".format(math.fsum(value) / len(value)))
                except Exception:
                    average_value = None
expires_at = datetime.utcnow() + timedelta(days=7)
# Trigger the alert
trigger_data = {
'sent': False,
'notify': True,
'average_value': average_value,
'expires_at': expires_at
}
# Health check alert here, save result id for future reference
health_checks_data_id = data.get('health_checks_data_id', False)
if health_checks_data_id:
trigger_data['health_checks_data_id'] = health_checks_data_id
self.collection.update({"_id": last_notification['_id']}, {"$set": trigger_data, "$unset": {'value': ""}})
# Cleanup old notifications
else:
self.collection.remove(query_params)
self.collection.ensure_index([('notify', self.desc)], background=True)
self.collection.ensure_index([('sent', self.desc)], background=True)
self.collection.ensure_index([('time', self.desc)], background=True)
self.collection.ensure_index([('server_id', self.desc)], background=True)
self.collection.ensure_index([('alert_id', self.desc)], background=True)
self.collection.ensure_index([('expires_at', 1)], expireAfterSeconds=0)
alertshistory_model = AlertHistoryModel()
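# --- Illustrative sketch, not part of the original module ---
# The averaging step inside save(): accumulated trigger values are reduced
# to a two-decimal mean once the alert period elapses (made-up values).
if __name__ == "__main__":
    import math
    value = [60.0, 72.5, 81.0]
    print(float("{0:.2f}".format(math.fsum(value) / len(value))))  # 71.17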
| 9,539 | Python | .py | 186 | 37.231183 | 134 | 0.574053 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,081 | api.py | amonapp_amon/amon/apps/alerts/models/api.py |
from amon.apps.core.basemodel import BaseModel
from amon.apps.processes.models import process_model
from amon.apps.plugins.models import plugin_model
from amon.apps.devices.models import volumes_model
from amon.apps.devices.models import interfaces_model
class AlertsAPIModel(BaseModel):
def __init__(self):
super(AlertsAPIModel, self).__init__()
def get_selected_metric(self, alert=None):
selected_metric = ''
rule_type = alert.get('rule_type')
check = alert.get('metric')
if rule_type in ['process', 'uptime',]:
process = alert.get('process')
selected_metric = "{0}.{1}".format(process['name'], check)
elif rule_type in ['global', 'system']:
selected_metric = check
# Append volumes / interfaces if needed
volume_interface = alert.get('interface', False)
if volume_interface == False:
volume_interface = alert.get('volume', False)
if volume_interface:
selected_metric = "{0}.{1}".format(selected_metric, volume_interface)
elif rule_type == 'plugin':
plugin = alert.get('plugin')
gauge = alert.get('gauge')
key = alert.get('key')
selected_metric = "{0}.{1}.{2}".format(plugin['name'], gauge['name'], key)
elif rule_type == 'process_global':
process = alert.get('process')
selected_metric = "{0}.{1}".format(process, check)
elif rule_type == 'plugin_global':
plugin = alert.get('plugin')
gauge = alert.get('gauge')
key = alert.get('key')
selected_metric = "{0}.{1}.{2}".format(plugin, gauge, key)
return selected_metric
def get_global_metrics(self):
data = []
system_alerts = ['CPU', 'Memory', 'Loadavg', 'Disk', 'Network/inbound', 'Network/outbound', 'Not Sending Data']
process_checks = ['cpu', 'memory', 'down']
for metric in system_alerts:
spaceless_metric = metric.replace(" ", "")
_id = "server:all.metric:{0}.rule_type:global".format(spaceless_metric)
data.append({'value': _id, 'name': metric, 'metric': metric})
for p in process_model.get_all_unique():
for check in process_checks:
name = "{0}.{1}".format(p, check)
_id = "server:all.process:{0}.metric:{1}.rule_type:process_global".format(p, check)
data.append({'value': _id, 'name': name, 'metric': check})
for el in plugin_model.get_all_unique_gauge_keys_list():
append = True
try:
plugin, gauge, key = el.split('.')
except:
append = False
if append:
_id = "server:all.plugin:{0}.gauge:{1}.key:{2}.rule_type:plugin_global".format(plugin, gauge, key)
name = "{0}.{1}.{2}".format(plugin, gauge, key)
data.append({'value': _id, 'name': name, 'metric': 'plugin'})
return data
def get_server_metrics(self, server_id=None):
data = []
system_alerts = ['CPU', 'Memory', 'Loadavg', 'Disk', 'Network/inbound', 'Network/outbound', 'Not Sending Data']
process_alerts = ['CPU', 'Memory', 'Down']
processes = process_model.get_all_for_server(server_id)
plugin_gauges = plugin_model.get_gauge_keys_for_server(server_id)
volumes = volumes_model.get_all_for_server(server_id)
interfaces = interfaces_model.get_all_for_server(server_id)
for metric in system_alerts:
spaceless_metric = metric.replace(" ", "")
_id = "server:{0}.metric:{1}.rule_type:system".format(server_id, spaceless_metric)
data.append({'value': _id, 'name': metric, 'metric': metric})
if metric == 'Disk':
for volume in volumes.clone():
name = "Disk.{0}".format(volume.get('name'))
_id = "server:{0}.metric:Disk.rule_type:system.volume:{1}".format(server_id, volume.get('name'))
data.append({'value': _id,
'name': name,
'metric': metric,
})
if metric.startswith('Network'):
for interface in interfaces.clone():
name = "{0}.{1}".format(metric, interface.get('name'))
_id = "server:{0}.metric:{1}.rule_type:system.interface:{2}".format(server_id, metric, interface.get('name'))
data.append({
'value': _id,
'name': name,
'metric': metric,
})
if processes:
for p in processes:
for metric in process_alerts:
name = "{0}.{1}".format(p['name'], metric)
rule_type = 'process' if metric != 'Down' else 'uptime'
_id = "server:{0}.process:{1}.metric:{2}.rule_type:{3}".format(server_id, p['_id'], metric, rule_type)
data.append({'value': _id, 'name': name, 'metric': metric})
if len(plugin_gauges) > 0:
for g in plugin_gauges:
plugin = g.get('plugin')
gauge = g.get('gauge')
key = g.get('key')
name = "{0}.{1}.{2}".format(plugin['name'], gauge['name'], key)
_id = "server:{0}.plugin:{1}.gauge:{2}.key:{3}.rule_type:plugin".format(
server_id,
plugin['_id'],
gauge['_id'],
key
)
data.append({'value': _id, 'name': name})
return data
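# --- Illustrative sketch, not part of the original module ---
# The metric ids handed to the frontend dropdowns are plain "key:value"
# segments joined with dots (hypothetical server id below).
if __name__ == "__main__":
    server_id, metric = '5f1', 'CPU'
    print("server:{0}.metric:{1}.rule_type:system".format(server_id, metric))
    # -> 'server:5f1.metric:CPU.rule_type:system'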
| 5,794 | Python | .py | 111 | 38.036036 | 129 | 0.530387 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,082 | __init__.py | amonapp_amon/amon/apps/alerts/models/__init__.py |
from amon.apps.alerts.models.alerts import AlertsModel
from amon.apps.alerts.models.alertshistory import AlertHistoryModel
from amon.apps.alerts.models.mute import AlertMuteServersModel
from amon.apps.alerts.models.api import AlertsAPIModel
alerts_model = AlertsModel()
alerts_history_model = AlertHistoryModel()
alert_mute_servers_model = AlertMuteServersModel()
alerts_api_model = AlertsAPIModel()
| 400 | Python | .py | 8 | 49 | 67 | 0.857143 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,083 | alerts.py | amonapp_amon/amon/apps/alerts/models/alerts.py |
from django.conf import settings
from amon.apps.core.basemodel import BaseModel
from amon.apps.notifications.models import notifications_model
from amon.apps.tags.models import tags_model
from amon.apps.processes.models import process_model
from amon.apps.servers.models import server_model
from amon.apps.plugins.models import plugin_model
from amon.apps.devices.models import volumes_model
from amon.apps.devices.models import interfaces_model
from amon.utils.dates import unix_utc_now
from amon.apps.alerts.models.alertshistory import AlertHistoryModel
class AlertsModel(BaseModel):
def __init__(self):
super(AlertsModel, self).__init__()
self.collection = self.mongo.get_collection('alerts')
self.alert_history_model = AlertHistoryModel()
def add_initial_data(self, recipient=None):
count = self.collection.find().count()
if count == 0:
email_recepients = [str(recipient)] if recipient else []
default_alert = {
"above_below": "above",
"email_recepients": email_recepients,
"rule_type": "global",
"server": "all",
"account_id": settings.ACCOUNT_ID,
"period": 300,
}
# Disk alert
disk_alert = {'metric': 'Disk', 'metric_value': 80, 'metric_type': "%"}
disk_alert_dict = dict(list(default_alert.items()) + list(disk_alert.items()))
self.collection.insert(disk_alert_dict)
# Memory alert
memory_alert = {'metric': 'Memory', 'metric_value': 80, 'metric_type': "%"}
memory_alert_dict = dict(list(default_alert.items()) + list(memory_alert.items()))
self.collection.insert(memory_alert_dict)
# CPU alert
cpu_alert = {'metric': 'CPU', 'metric_value': 80, 'metric_type': "%"}
cpu_alert_dict = dict(list(default_alert.items()) + list(cpu_alert.items()))
self.collection.insert(cpu_alert_dict)
def get_process_check(self, collection=None, time=None):
collection = self.mongo.get_collection(collection)
params = {'t': time}
return collection.find_one(params)
def save(self, data):
data['period'] = int(data.get('period', 1))
mongo_keys = ['process', 'plugin', 'gauge', 'custom_metric_id', 'server']
for key in mongo_keys:
value = data.get(key, None)
if value:
try:
data[key] = self.mongo.get_object_id(data[key])
except:
pass
self.collection.insert(data)
self.collection.ensure_index([('metric', self.desc)])
self.collection.ensure_index([('server', self.desc)])
self.collection.ensure_index([('rule_type', self.desc)])
self.collection.ensure_index([('plugin', self.desc)])
self.collection.ensure_index([('account_id', self.desc)])
def _get_notifications(self, alert):
notifications_list = []
notifications = alert.get('notifications', None)
if notifications:
for x in notifications:
split_provider_id = x.split(':') # email:id
if len(split_provider_id) == 2: # New format, ignore old ['hipchat', 'something']
_id = split_provider_id[1]
result = notifications_model.get_by_id(_id)
notifications_list.append(result)
return notifications_list
def _get_tags(self, alert):
tags = []
alert_tags = alert.get('tags', None)
if alert_tags:
tags = [tags_model.get_by_id(x) for x in alert_tags]
return tags
def get_by_id(self, alert_id, recipients_dict=True):
alert_id = self.mongo.get_object_id(alert_id)
alert = self.collection.find_one({"_id": alert_id})
rule_type = alert.get('rule_type')
# Return a full dictionary with recipients instead of list
if recipients_dict is True:
alert['notifications'] = self._get_notifications(alert)
process = alert.get('process')
if process and rule_type != 'process_global':
alert['process'] = process_model.get_by_id(process)
plugin = alert.get('plugin', None)
gauge = alert.get('gauge', None)
if plugin and gauge and rule_type != 'plugin_global':
alert['plugin'] = plugin_model.get_by_id(plugin)
alert['gauge'] = plugin_model.get_gauge_by_id(gauge)
return alert
def update(self, data, id):
object_id = self.mongo.get_object_id(id)
server = data.get('server', None)
if server != 'all':
data['server'] = self.mongo.get_object_id(server)
data['period'] = int(data.get('period'))
self.collection.update({"_id": object_id}, {"$set": data}, upsert=True)
def get_global_alerts_with_notifications(self, all_servers=None, account_id=None, limit=5, include_all_types=None):
rules_list = self.get_global_alerts(account_id=account_id, include_all_types=include_all_types)
rules_with_notifications = []
if len(rules_list) > 0:
for rule in rules_list:
rule['total_triggers'] = self.alert_history_model.count_notifications(alert_id=rule['_id'])
rule['last_trigger'] = self.alert_history_model.get_last_trigger(alert_id=rule['_id'])
rules_with_notifications.append(rule)
return rules_with_notifications
def _get_alert_tags(self, alert):
alert['tags'] = self._get_tags(alert)
return alert
# Used both on the front and in the API
def get_global_alerts(self, account_id=None, include_all_types=None):
params = {"rule_type": {"$in": ["global"]}}
if include_all_types is True:
params['rule_type'] = {"$in": ["global", 'process_global', 'plugin_global']}
alerts = self.collection.find(params).count()
alerts_list = []
if alerts > 0:
alerts = self.collection.find(params)
for alert in alerts:
alert['notifications'] = self._get_notifications(alert)
alert = self._get_alert_tags(alert)
alerts_list.append(alert)
return alerts_list
# Used internally in the alert checker
def get_alerts_for_metric(self, metric=None):
params = {'custom_metric_id': metric.get('_id')}
result = self.collection.find(params)
return result
# Used internally in the alert checker
def get_alerts_for_plugin(self, plugin=None):
rules_list = []
params = {'plugin': plugin.get('_id')}
result = self.collection.find(params)
if result.clone().count() > 0:
for rule in result:
rule['gauge_data'] = plugin_model.get_gauge_by_id(rule['gauge'])
rules_list.append(rule)
return rules_list
# Used for the not sending data only at the moment
def get_alerts_not_sending_data(self, metric=None):
params = {"metric": 'NotSendingData'}
alerts_list = []
result = self.collection.find(params)
if result.clone().count() > 0:
for rule in result:
rule['notifications'] = self._get_notifications(rule)
server = rule.get('server')
if server:
rule['server_data'] = [server_model.get_by_id(server)]
if rule['rule_type'] == 'global':
rule['server_data'] = server_model.get_all()
alerts_list.append(rule)
return alerts_list
def get_alerts(self, type=None, server=None, limit=None):
params = {"rule_type": type}
if server:
params['server'] = server['_id']
rules_list = []
rules = self.collection.find(params).count()
if rules > 0:
rules = self.collection.find(params)
rules_list = []
for rule in rules:
process_id = rule.get('process', None)
if process_id:
rule['process_data'] = process_model.get_by_id(process_id)
plugin_id = rule.get('plugin', None)
gauge_id = rule.get('gauge', None)
if plugin_id and gauge_id:
rule['plugin_data'] = plugin_model.get_by_id(plugin_id)
rule['gauge_data'] = plugin_model.get_gauge_by_id(gauge_id)
if server:
rule['server'] = server
# Check if the rule is for specific server and get the data
else:
rule_server = rule.get('server', False)
server_id = self.object_id(rule_server)
if server_id:
rule['server'] = server_model.get_by_id(rule_server)
tags = rule.get('tags', False)
if tags:
rule = self._get_alert_tags(rule)
rule['notifications'] = self._get_notifications(rule)
rule['last_trigger'] = self.alert_history_model.get_last_trigger(alert_id=rule['_id'])
rule['total_triggers'] = self.alert_history_model.count_notifications(alert_id=rule['_id'])
rules_list.append(rule)
return rules_list
def delete(self, server_id=None, alert_id=None):
self.alert_history_model.clear(server_id=server_id, alert_id=alert_id)
super(AlertsModel, self).delete(alert_id)
def delete_server_alerts(self, server_id):
params = {"server": server_id} # Could be object ID or all
self.collection.remove(params)
def delete_metric_alerts(self, metric_id):
metric_id = self.object_id(metric_id)
self.collection.remove({"custom_metric_id": metric_id})
def save_notsendingdata_occurence(self, alert=None):
time = unix_utc_now()
data = {
"value": 1,
"time": time,
"trigger": True
}
server_id = alert['server']['_id']
self.alert_history_model.save(alert=alert, server_id=server_id, data=data)
def save_healtcheck_occurence(self, trigger=None, server_id=None):
time = trigger.get('time', None)
alert_id = trigger.get('alert_id')
trigger_state = trigger.get('trigger', False)
health_checks_data_id = trigger.get("health_checks_data_id")
# For the test suite, add an option to overwrite time
if time is None:
time = unix_utc_now()
data = {
"value": 1,
"time": time,
"trigger": trigger_state,
"health_checks_data_id": health_checks_data_id # Save a reference to the actual result
}
alert = self.get_by_id(alert_id)
self.alert_history_model.save(alert=alert, server_id=server_id, data=data)
def save_uptime_occurence(self, alert, data=None):
time = unix_utc_now()
data = {
"value": 1,
"time": time,
"trigger": True
}
server_id = alert['server']['_id']
self.alert_history_model.save(alert=alert, server_id=server_id, data=data)
# Custom metrics, plugins, processes
def save_occurence(self, alert, server_id=None):
alert_id = alert.get('alert_id')
alert_on = alert.get('value', None)
trigger = alert.get('trigger', False)
alert_on = "{0:.2f}".format(float(alert_on))
time = alert.get('time', None)
if time is None:
time = unix_utc_now()
data = {
"value": float(alert_on),
"time": time,
"trigger": trigger
}
alert = self.get_by_id(alert_id)
# Global alerts here
if server_id:
self.alert_history_model.save(alert=alert, server_id=server_id, data=data)
else:
self.alert_history_model.save(alert=alert, data=data)
def _server_tags_in_alert(self, server=None, alert=None):
check = True
server_tags = server.get('tags', [])
server_tags = [str(x) for x in server_tags]
alert_tags = alert.get('tags', [])
if len(alert_tags) > 0:
check = any(t in alert_tags for t in server_tags)
return check
# System alerts
def save_system_occurence(self, alerts, server_id=None):
server = server_model.get_by_id(server_id)
# Format: {'cpu': [{'value': 2.6899999999999977, 'rule': '4f55da92925d75158d0001e0'}}]}
for key, values_list in alerts.items():
for value in values_list:
data = {}
save_to_db = False
alert_on = value.get('value', None)
trigger = value.get('trigger', False)
rule_id = value.get('rule', None)
time = value.get('time', None)
if time is None:
time = unix_utc_now()
alert_on = "{0:.2f}".format(float(alert_on))
alert = self.get_by_id(rule_id)
data = {"value": float(alert_on),
"time": time,
"trigger": trigger}
if key == 'disk':
volume = value.get('volume', None)
volume_data = volumes_model.get_by_name(server, volume)
if volume_data:
data['volume'] = volume_data.get('_id', None)
if key == 'network':
interface = value.get('interface', None)
interface_data = interfaces_model.get_by_name(server, interface)
if interface_data:
data['interface'] = interface_data.get('_id', None)
server_id = self.mongo.get_object_id(server_id)
# Check for tagged global alerts
alert_server = alert.get('server')
if alert_server == 'all':
if self._server_tags_in_alert(server=server, alert=alert):
save_to_db = True
else:
save_to_db = True
if save_to_db:
self.alert_history_model.save(alert=alert, server_id=server_id, data=data)
def mute(self, alert_id):
alert_id = self.mongo.get_object_id(alert_id)
result = self.collection.find_one({"_id": alert_id})
current_mute = result.get('mute', None)
toggle = False if current_mute is True else True
self.collection.update({"_id": alert_id}, {"$set": {"mute": toggle}})
def get_mute_state(self, account_id=None, mute=None):
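        # Computes the *next* state for mute_all - True unless every alert is
        # already muted; an explicit `mute` argument takes precedence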
alerts = self.get_all(account_id)
state_list = []
for a in alerts.clone():
state_list.append(a.get('mute', False))
        state = state_list.count(True) < len(state_list)
        state = mute if mute is not None else state
return state
def mute_all(self, account_id=None, mute=None):
alerts = self.get_all(account_id=account_id)
state = self.get_mute_state(account_id=account_id, mute=mute)
for alert in alerts:
self.collection.update({"_id": alert['_id']}, {"$set": {"mute": state}})
def get_all(self, account_id=None):
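        # NB: account_id is accepted but not used - every alert is returned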
return self.collection.find()
| 15,495 | Python | .py | 321 | 36.205607 | 119 | 0.573359 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,084 | mute_model_test.py | amonapp_amon/amon/apps/alerts/models/tests/mute_model_test.py |
import unittest
from amon.apps.alerts.models import alert_mute_servers_model
from amon.apps.servers.models import server_model
from amon.apps.tags.models import tags_model
class MuteModelTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
alert_mute_servers_model.collection.remove()
server_model.collection.remove()
def _cleanup(self):
self.tearDown()
def test_check_if_server_is_muted(self):
self._cleanup()
tags_model.get_or_create_by_name('test_tag')
tag = tags_model.collection.find_one()
server_model.add('name', account_id=1, tags=[tag['_id']])
server = server_model.collection.find_one()
# First check, plain server_id
data = {
'server': server['_id'],
'period': 0
}
alert_mute_servers_model.save(data)
result = alert_mute_servers_model.check_if_server_is_muted(server=server)
assert result == True
# Second check, all servers, no tags
alert_mute_servers_model.collection.remove()
data = {
'server': 'all',
'period': 0
}
alert_mute_servers_model.save(data)
result = alert_mute_servers_model.check_if_server_is_muted(server=server)
assert result == True
# Third check, all servers, different tag
alert_mute_servers_model.collection.remove()
tags_model.get_or_create_by_name('global_tag')
global_tag = tags_model.collection.find_one({'name': 'global_tag'})
data = {
'server': 'all',
'tags': [global_tag['_id']],
'period': 0
}
alert_mute_servers_model.save(data)
result = alert_mute_servers_model.check_if_server_is_muted(server=server)
assert result == False
# Check all servers with server_tag included
alert_mute_servers_model.collection.remove()
data = {
'server': 'all',
'tags': [tag['_id']],
'period': 0
}
alert_mute_servers_model.save(data)
result = alert_mute_servers_model.check_if_server_is_muted(server=server)
assert result == True
| 2,240 | Python | .py | 57 | 29.947368 | 81 | 0.611085 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,085 | api_model_test.py | amonapp_amon/amon/apps/alerts/models/tests/api_model_test.py |
import unittest
from nose.tools import eq_
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.alerts.models import AlertsAPIModel
from amon.apps.processes.models import process_model
from amon.apps.plugins.models import plugin_model
from amon.apps.devices.models import volumes_model, interfaces_model
class AlertsAPIModelTest(unittest.TestCase):
def setUp(self):
User.objects.all().delete()
self.user_email = 'foo@test.com'
self.user = User.objects.create_user(password='qwerty', email=self.user_email)
self.account_id = 1
self.model = AlertsAPIModel()
self.model.mongo.database = 'amontest'
self.collection = self.model.mongo.get_collection('alerts')
self.server_collection = self.model.mongo.get_collection('servers')
self.history_collection = self.model.mongo.get_collection('alert_history')
self.server_collection.insert({"name" : "test",
"key": "test_me",
"account_id": 199999
})
server = self.server_collection.find_one()
self.server_id = server['_id']
def tearDown(self):
self.user.delete()
User.objects.all().delete()
def _cleanup(self):
self.collection.remove()
process_model.collection.remove()
plugin_model.collection.remove()
interfaces_model.collection.remove()
volumes_model.collection.remove()
gauges_collection = plugin_model.gauge_collection.remove()
def get_global_metrics_test(self):
self._cleanup()
process_name = "alertest-process"
plugin_name = "alertest-plugin"
process = process_model.get_or_create(server_id=self.server_id, name=process_name)
plugin = plugin_model.get_or_create(name=plugin_name, server_id=self.server_id)
plugin_data = {
'count.count_first_key': 2,
'second.second_key': 4,
'more.more_key': 5,
'count.count_second_key': 4
}
plugin_model.save_gauges(plugin=plugin, data=plugin_data, time=1)
plugin_gauges_keys = plugin_model.get_gauge_keys_for_server(server_id=self.server_id)
result = self.model.get_global_metrics()
assert len(result) == 14 # 7 system + 3 process(cpu/memory/down) + 4 plugin(for every key)
system_metrics = ['CPU', 'Memory', 'Loadavg', 'Disk', 'Network/inbound', 'Network/outbound', 'Not Sending Data']
process_metrics = ['cpu', 'memory', 'down']
for r in result:
value = r.get('value')
metric_dict = dict(v.split(":") for v in value.split("."))
assert metric_dict['rule_type']
assert metric_dict['rule_type'] in ['global', 'process_global', 'plugin_global']
alert_type = metric_dict.get('rule_type')
            if alert_type == 'global':
                assert r['metric'] in system_metrics
if alert_type == 'process_global':
assert r['metric'] in process_metrics
def get_server_metrics_test(self):
self._cleanup()
process_name = "alertest-process"
plugin_name = "alertest-plugin"
process = process_model.get_or_create(server_id=self.server_id, name=process_name)
plugin = plugin_model.get_or_create(name=plugin_name, server_id=self.server_id)
plugin_data = {
'count.count_first_key': 2,
'second.second_key': 4,
'more.more_key': 5,
'count.count_second_key': 4
}
plugin_model.save_gauges(plugin=plugin, data=plugin_data, time=1)
plugin_gauges_keys = plugin_model.get_gauge_keys_for_server(server_id=self.server_id)
volumes_model.get_or_create(server_id=self.server_id, name='get_server_metrics_volume')
interfaces_model.get_or_create(server_id=self.server_id, name='get_server_metrics_interface')
result = self.model.get_server_metrics(server_id=self.server_id)
system_metrics = ['CPU', 'Memory', 'Loadavg', 'Disk', 'Network/inbound', 'Network/outbound', 'Not Sending Data']
system_values = ["server:{0}.metric:{1}.rule_type:system".format(self.server_id, x.replace(" ", "")) for x in system_metrics]
volumes = volumes_model.get_all_for_server(server_id=self.server_id)
for v in volumes:
value = "server:{0}.metric:Disk.rule_type:system.volume:{1}".format(self.server_id, v.get('name'))
system_values.append(value)
interfaces = interfaces_model.get_all_for_server(server_id=self.server_id)
for i in interfaces:
value = "server:{0}.metric:Network/inbound.rule_type:system.interface:{1}".format(self.server_id, i.get('name'))
system_values.append(value)
value = "server:{0}.metric:Network/outbound.rule_type:system.interface:{1}".format(self.server_id, i.get('name'))
system_values.append(value)
process_metrics = ['CPU', 'Memory']
process_alerts_names = ["{0}.{1}".format(process_name, x.replace(" ", "")) for x in process_metrics]
process_values = ["server:{0}.process:{1}.metric:{2}.rule_type:process".format(self.server_id, process['_id'], x) for x in process_metrics]
process_uptime = ['Down']
process_uptime_alerts_names = ["{0}.{1}".format(process_name, x.replace(" ", "")) for x in process_uptime]
process_uptime_values = ["server:{0}.process:{1}.metric:{2}.rule_type:uptime".format(self.server_id, process['_id'], x) for x in process_uptime]
plugin_alert_names = ["{0}.{1}".format(plugin_name, x) for x in plugin_data.keys()]
plugin_values = []
for plugin_gauge_key in plugin_gauges_keys:
gauge = plugin_gauge_key.get('gauge')
key = plugin_gauge_key.get('key')
_id = "server:{0}.plugin:{1}.gauge:{2}.key:{3}.rule_type:plugin".format(
self.server_id,
plugin['_id'],
gauge['_id'],
key
)
plugin_values.append(_id)
assert len(plugin_values) == 4 # All the keys from plugin_data
for r in result:
value = r.get('value')
metric_dict = dict(v.split(":") for v in value.split("."))
alert_type = metric_dict.get('rule_type')
assert alert_type != None
assert alert_type in ['system', 'process', 'plugin', 'uptime']
if alert_type == 'system':
assert r.get('metric') in system_metrics
assert r.get('value') in system_values
elif alert_type == 'process':
assert r.get('metric') in ['CPU', 'Memory']
assert r.get('name') in process_alerts_names
assert r.get('value') in process_values
elif alert_type == 'uptime':
assert r.get('metric') in ['Down']
assert r.get('name') in process_uptime_alerts_names
assert r.get('value') in process_uptime_values
elif alert_type == 'plugin':
assert r.get('name') in plugin_alert_names
assert r.get('value') in plugin_values
else:
assert False # Should not be here
def get_selected_metric_test(self):
self._cleanup()
example_alert_dict = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "all",
"account_id": 199999,
"period": 300,
"metric": "CPU"
}
self.collection.insert(example_alert_dict)
result = self.collection.find_one()
db_result = self.model.get_selected_metric(alert=result)
eq_(db_result, 'CPU')
self._cleanup()
example_alert_dict = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "ubuntu",
"account_id": 199999,
"period": 300,
"interface": "eth1",
"metric": "Network/inbound"
}
self.collection.insert(example_alert_dict)
result = self.collection.find_one()
db_result = self.model.get_selected_metric(alert=result)
eq_(db_result, 'Network/inbound.eth1')
self._cleanup()
| 8,475 | Python | .py | 164 | 40.158537 | 153 | 0.600539 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,086 | alerts_history_model_test.py | amonapp_amon/amon/apps/alerts/models/tests/alerts_history_model_test.py |
import unittest
from nose.tools import eq_
from amon.apps.alerts.models import AlertHistoryModel
class AlertsHistoryModelTest(unittest.TestCase):
def setUp(self):
self.model = AlertHistoryModel()
self.collection = self.model.mongo.get_collection('alert_history')
self.server_collection = self.model.mongo.get_collection('servers')
self.alerts_collection = self.model.mongo.get_collection('alerts')
self.server_collection.insert({"name" : "test", "key": "test_me"})
self.server = self.server_collection.find_one()
server_id = str(self.server['_id'])
rule = {"server": server_id, "rule_type": 'system', 'metric': 2}
self.alerts_collection.insert(rule)
self.rule = self.alerts_collection.find_one()
def get_all_test(self):
self.collection.remove()
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':1})
results = self.model.get_all(alert=self.rule, server_id=self.server['_id'])
eq_(results['count'], 1)
eq_(results['data'].count(True), 1)
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':2})
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':3})
results = self.model.get_all(alert=self.rule, server_id=self.server['_id'])
eq_(results['count'], 3)
eq_(results['data'].count(True), 3)
self.collection.remove()
def get_for_period_test(self):
self.collection.remove()
for i in range(0, 100):
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':i})
        results = self.model.get_for_period(alert=self.rule, server_id=self.server['_id'],
date_after=50)
eq_(results['count'], 50)
eq_(results['data'].count(True), 50)
self.collection.remove()
def get_last_trigger_test(self):
self.collection.remove()
for i in range(0, 100):
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':i}
)
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':101,
'notify': True}
)
result = self.model.get_last_trigger(alert_id=self.rule['_id'])
assert result['time'] == 101
self.collection.remove()
def clear_test(self):
self.collection.remove()
for i in range(0, 100):
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':i})
self.model.clear(alert_id=self.rule['_id'])
results = self.collection.find().count()
eq_(results, 0)
for i in range(0, 100):
self.collection.insert({'server_id': self.server['_id'],
'alert_id': self.rule['_id'],
'time':i})
self.model.clear(server_id=self.server['_id'])
results = self.collection.find().count()
eq_(results, 0)
self.collection.remove()
def tearDown(self):
self.server_collection.remove()
self.alerts_collection.remove()
| 3,471 | Python | .py | 79 | 33.455696 | 89 | 0.577376 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,087 | alerts_model_test.py | amonapp_amon/amon/apps/alerts/models/tests/alerts_model_test.py |
import unittest
from nose.tools import eq_
from django.contrib.auth import get_user_model
from amon.apps.alerts.models import AlertsModel
from amon.apps.processes.models import process_model
from amon.apps.plugins.models import plugin_model
from amon.apps.servers.models import server_model
from amon.apps.devices.models import volumes_model, interfaces_model
User = get_user_model()
class AlertsModelTest(unittest.TestCase):
def setUp(self):
User.objects.all().delete()
self.user_email = 'foo@test.com'
self.user = User.objects.create_user(password='qwerty', email=self.user_email)
self.account_id = 1
self.model = AlertsModel()
self.model.mongo.database = 'amontest'
self.collection = self.model.mongo.get_collection('alerts')
self.server_collection = self.model.mongo.get_collection('servers')
self.history_collection = self.model.mongo.get_collection('alert_history')
self.server_collection.insert({"name" : "test",
"key": "test_me",
"account_id": 199999
})
server = self.server_collection.find_one()
self.server_id = server['_id']
def tearDown(self):
self.user.delete()
User.objects.all().delete()
def _cleanup(self):
self.collection.remove()
process_model.collection.remove()
plugin_model.collection.remove()
interfaces_model.collection.remove()
volumes_model.collection.remove()
gauges_collection = plugin_model.gauge_collection.remove()
def add_initial_data_test(self):
self._cleanup()
default_alert = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "all",
"period": 300,
"account_id": self.account_id
}
# Add initial data only if this is empty
self.collection.insert(default_alert)
assert self.collection.find().count() == 1
self.model.add_initial_data()
assert self.collection.find().count() == 1
self._cleanup()
assert self.collection.find().count() == 0
self.model.add_initial_data()
assert self.collection.find().count() == 3
self._cleanup()
def get_alerts_for_plugin_test(self):
self._cleanup()
plugin = plugin_model.get_or_create(server_id=self.server_id, name='testplugin')
gauge = plugin_model.get_or_create_gauge_by_name(plugin=plugin, name='gauge')
plugin_alert = {
"above_below": "above",
"rule_type": "plugin",
"server": self.server_id,
"gauge": gauge['_id'],
"plugin": plugin['_id'],
"account_id": self.account_id,
"key": "testkey",
"period": 0,
"metric_value": 5
}
for i in range(0,5):
try:
del plugin_alert['_id']
except:
pass
plugin_alert['period'] = i
plugin_alert['metric_value'] = i+5
self.model.collection.insert(plugin_alert)
result = self.model.get_alerts_for_plugin(plugin=plugin)
assert len(result) == 5
self._cleanup()
def save_alert_test(self):
self.collection.remove()
self.model.save({'rule': "test", 'server': self.server_id})
eq_(self.collection.count(), 1)
def update_test(self):
self.collection.remove()
self.model.save({'rule': "test" , 'server': self.server_id, 'period': 10})
alert = self.collection.find_one()
alert_id = str(alert['_id'])
self.model.update({'rule': 'updated_test', 'period': 10}, alert_id)
alert = self.collection.find_one()
eq_(alert['rule'], 'updated_test')
def mute_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
alert = self.collection.find_one()
alert_id = str(alert['_id'])
self.model.mute(alert_id)
result = self.collection.find_one()
eq_(result["mute"], True)
self.model.mute(alert_id)
result = self.collection.find_one()
eq_(result["mute"], False)
def get_mute_state_test(self):
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": True,"account_id": self.account_id})
result = self.model.get_mute_state(account_id=self.account_id)
eq_(result, False) # A toggle function -> this is the next state
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": False,"account_id": self.account_id})
result = self.model.get_mute_state(account_id=self.account_id)
eq_(result, True) # A toggle function -> this is the next state
def mute_all_test(self):
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": False ,"account_id": self.account_id})
result = self.model.mute_all(account_id=self.account_id)
for r in self.collection.find():
eq_(r['mute'], True)
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": True ,"account_id": self.account_id})
result = self.model.mute_all(account_id=self.account_id)
for r in self.collection.find():
eq_(r['mute'], False)
self.collection.remove()
def get_alerts_test(self):
self.collection.remove()
self.server_collection.remove()
self.server_collection.insert({"name" : "test", "key": "test_me"})
server = self.server_collection.find_one()
rule = { "server": server['_id'], "rule_type": 'system', 'metric': 2, 'period': 10}
self.collection.insert(rule)
rule = { "server": server['_id'], "rule_type": 'system', 'metric': 1, 'period': 10}
self.collection.insert(rule)
rules = self.model.get_alerts(type='system', server=server)
eq_(len(rules), 2)
self.collection.remove()
def delete_alerts_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
rule = self.collection.find_one()
self.model.delete(alert_id=rule['_id'])
result = self.collection.count()
eq_(result,0)
self.collection.remove()
def save_healthcheck_occurence_test(self):
self.history_collection.remove()
self.collection.remove()
def save_occurence_test(self):
self.history_collection.remove()
self.collection.remove()
self.collection.insert({
"rule_type" : "custom_metric_gauge",
"metric_value" : 10,
"metric_type" : "more_than",
"period": 10
})
rule = self.collection.find_one()
rule_id = str(rule['_id'])
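        # With period=10, a notification is recorded once the trigger has been
        # continuously on for the full period - at t=310 and t=321 below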
for i in range(300, 330):
self.model.save_occurence({
'value': 11,
'alert_id': rule_id,
'trigger': True,
'time': i
})
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
assert trigger_result.count() == 2 # 310 and 321
def save_health_check_occurence_test(self):
self.history_collection.remove()
self.server_collection.remove()
self.server_collection.insert({'name': 'test'})
server = self.server_collection.find_one()
self.collection.remove()
self.collection.insert({
"rule_type" : "health_check",
"server": server['_id'],
"command" : "check-http.rb",
"status": "critical",
"period": 10
})
rule = self.collection.find_one()
rule['server'] = server
rule_id = str(rule['_id'])
for i in range(0, 110):
trigger_dict = {
'value': 1,
'alert_id': rule_id,
'trigger': True,
'time': i,
'health_checks_data_id': 'test'
}
self.model.save_healtcheck_occurence(trigger=trigger_dict, server_id=server['_id'])
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 10)
for r in trigger_result.clone():
assert r['from'] in [0, 11, 22, 33, 44, 55, 66, 77, 88, 99]
assert r['time'] in [10, 21, 32, 43, 54, 65, 76, 87, 98, 109]
assert r['health_checks_data_id']
self.history_collection.remove()
# Second test with some of the triggers set to False
for i in range(300, 400):
trigger = True
if i % 2 == 1:
trigger = False
trigger_dict = {
'value': 1,
'alert_id': rule_id,
'trigger': trigger,
'time': i,
'health_checks_data_id': 'test'
}
self.model.save_healtcheck_occurence(trigger=trigger_dict, server_id=server['_id'])
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 0)
self.history_collection.remove()
def save_system_occurence_test(self):
self.history_collection.remove()
self.server_collection.remove()
self.server_collection.insert({'name': 'test'})
server = self.server_collection.find_one()
self.collection.remove()
self.collection.insert({
"rule_type" : "system",
"server": server['_id'],
"metric_type_value" : "%",
"metric_value" : "10",
"metric_type" : "more_than",
"metric" : "CPU",
"period": 10
})
rule = self.collection.find_one()
rule_id = str(rule['_id'])
server_id = rule['server']
for i in range(300, 320):
self.model.save_system_occurence({'cpu':
[{
'value': 11,
'rule': rule_id,
'trigger': True,
'server_id': server_id,
'time': i
}]}, server_id=server_id)
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
        eq_(trigger_result.count(), 1) # Only 1 trigger, at 310
for r in trigger_result.clone():
eq_(r['time'], 310)
eq_(r['from'], 300)
self.history_collection.remove()
# Second test with some of the triggers set to False
for i in range(300, 400):
trigger = True
if i % 2 == 1:
trigger = False
self.model.save_system_occurence({'cpu':
[{
'value': 11,
'rule': rule_id,
'trigger': trigger,
'server': server['_id'],
'time': i
}]}, server_id=server_id)
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 0)
self.history_collection.remove()
# Try with bigger range and multiple triggers
for i in range(300, 333):
self.model.save_system_occurence({'cpu':
[{
'value': 11,
'rule': rule_id,
'trigger': True,
'server': server['_id'],
'time': i
}]}, server_id=server_id)
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 3)
for r in trigger_result.clone():
time_list = [310, 321, 332]
eq_(r['time'] in time_list, True)
self.history_collection.remove()
self.server_collection.remove()
def delete_server_alerts_test(self):
server_model.collection.remove()
self.collection.remove()
server_id = server_model.add('testserver')
self.collection.insert({"rule_type" : "process",})
self.collection.insert({"rule_type" : "system",})
self.collection.insert({"rule_type" : "log", "server": server_id})
self.collection.insert({"rule_type" : "dummy", "server":server_id})
self.collection.insert({"rule_type" : "dummy", "server": server_id})
self.model.delete_server_alerts(server_id)
eq_(self.collection.count(), 2)
self.collection.remove()
def get_by_id_test(self):
self.collection.remove()
server_model.collection.remove()
plugin_model.collection.remove()
server_id = server_model.add('testserver')
plugin = plugin_model.get_or_create(name='testplugin', server_id=server_id)
self.collection.insert({
"rule_type" : "process",
"server": server_id,
"plugin": plugin['_id'],
'sms_recepients': [],
'email_recepients': [],
'webhooks': []}
)
alert = self.collection.find_one()
alert_from_model = self.model.get_by_id(alert['_id'])
assert alert_from_model['plugin'] == plugin['_id']
| 13,755 | Python | .py | 316 | 31.797468 | 99 | 0.561584 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,088 | pause.py | amonapp_amon/amon/apps/alerts/views/pause.py |
from amon.apps.core.views import *
from amon.apps.alerts.models import alerts_model
from amon.apps.alerts.forms import MuteForm
from amon.apps.alerts.models import alert_mute_servers_model
@login_required
def mute(request, alert_id):
alerts_model.mute(alert_id)
return redirect(reverse('alerts'))
@login_required
def mute_all(request):
alerts_model.mute_all(account_id=request.account_id)
return redirect(reverse('alerts'))
@login_required
def mute_servers(request):
all_muted = alert_mute_servers_model.get_all()
if request.method == 'POST':
form = MuteForm(request.POST)
if form.is_valid():
form.save()
return redirect(reverse('alerts_mute_servers'))
else:
form = MuteForm()
return render(request, 'alerts/mute.html', {
"form": form,
"all_muted": all_muted
})
@login_required
def unmute_server(request, mute_id):
alert_mute_servers_model.delete(mute_id)
return redirect(reverse('alerts_mute_servers'))
| 1,038 | Python | .py | 30 | 29.133333 | 60 | 0.703854 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,089 | healthchecks.py | amonapp_amon/amon/apps/alerts/views/healthchecks.py |
from amon.apps.core.views import *
from amon.apps.servers.models import server_model
from amon.apps.alerts.models import alerts_model
from amon.apps.alerts.forms import HealthCheckAlertForm, EditHealthCheckAlertForm
from amon.apps.tags.models import tags_model
from amon.apps.notifications.models import notifications_model
@login_required
def add_alert(request):
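    # command, param, tags and notifications are taken straight from the POST
    # data; only the base fields pass through HealthCheckAlertForm validation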
all_servers = server_model.get_all()
tags = tags_model.get_all()
notifications = notifications_model.get_all_formated()
if request.method == 'POST':
form = HealthCheckAlertForm(request.POST, all_servers=all_servers)
if form.is_valid():
data = form.cleaned_data
form_data = {
"command": request.POST.get('command'),
"param": request.POST.get('param'),
"tags": request.POST.getlist('tags'),
"notifications": request.POST.getlist('notifications'),
"rule_type": "health_check",
}
data.update(form_data)
alerts_model.save(data)
return redirect(reverse('alerts'))
else:
form = HealthCheckAlertForm(all_servers=all_servers)
return render(request, 'alerts/add_healthcheck.html', {
"form": form,
'tags': tags,
'notifications': notifications,
"all_servers": all_servers
})
@login_required
def edit_alert(request, alert_id):
all_servers = server_model.get_all(account_id=request.account_id)
alert = alerts_model.get_by_id(alert_id, recipients_dict=False)
tags = tags_model.get_all()
    server = alert.get('server', None)  # May be a specific server id, or missing for global alerts
    selected_command = " ".join([alert.get("command", ""), alert.get('param', "")])  # 'param' matches the key saved in add_alert
notifications = notifications_model.get_all_formated()
if request.method == 'POST':
form = EditHealthCheckAlertForm(request.POST, all_servers=all_servers)
if form.is_valid():
data = form.cleaned_data
form_data = {
"tags": request.POST.getlist('tags', None),
"status": data.get('status'),
"period": data.get('period'),
"server": server,
"notifications": request.POST.getlist('notifications')
}
alerts_model.update(form_data, alert_id)
return redirect(reverse('alerts'))
else:
form = EditHealthCheckAlertForm(
all_servers=all_servers,
initial={
'period': alert['period'],
'server':server,
"status": alert['status'],
})
return render(request, 'alerts/edit_healthcheck.html', {
"server": server,
'tags': tags,
"alert": alert,
"form": form,
"selected_command": selected_command,
"notifications": notifications,
})
| 2,930 | Python | .py | 70 | 32.128571 | 96 | 0.611719 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,090 | alerts.py | amonapp_amon/amon/apps/alerts/views/alerts.py |
from amon.apps.core.views import *
from amon.apps.servers.models import server_model
from amon.apps.alerts.models import alerts_model, alerts_history_model, alerts_api_model
from amon.apps.alerts.forms import AlertForm, EditAlertForm, MuteForm
from amon.apps.tags.models import tags_model
from amon.apps.notifications.models import notifications_model
@login_required
def all(request):
all_servers = server_model.get_all(account_id=request.account_id)
alerts = []
    if all_servers:
        for server in all_servers:
            types = ['system', 'process', 'uptime', 'plugin']
            for alert_type in types:
                result = alerts_model.get_alerts(type=alert_type, server=server)
                if result:
                    alerts.extend(result)
    global_alerts = alerts_model.get_global_alerts_with_notifications(all_servers=all_servers, account_id=request.account_id, include_all_types=True)
    if global_alerts:
        alerts.extend(global_alerts)
    global_health_check_alerts = alerts_model.get_alerts(type='health_check')
    if global_health_check_alerts:
        alerts.extend(global_health_check_alerts)
return render(request, 'alerts/all.html', {
"alerts": alerts,
"all_servers": all_servers,
"server_metrics": settings.SERVER_METRICS,
"common_metrics": settings.COMMON_METRICS,
"total_alerts": len(alerts),
})
@login_required
def add_alert(request):
all_servers = server_model.get_all(account_id=request.account_id)
tags = tags_model.get_all()
notifications = notifications_model.get_all_formated()
if request.method == 'POST':
form = AlertForm(request.POST, all_servers=all_servers)
if form.is_valid():
data = form.cleaned_data
metric = data.get('metric')
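            # The metric field encodes key:value pairs separated by dots,
            # e.g. "server:all.metric:CPU.rule_type:global"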
metric_dict = dict(item.split(":") for item in metric.split("."))
form_data = {
"metric_type": request.POST.get('metric_type'),
"tags": request.POST.getlist('tags'),
"notifications": request.POST.getlist('notifications'),
"rule_type": metric_dict.get('rule_type'),
"account_id": request.account_id,
}
form_data = dict(list(form_data.items()) + list(metric_dict.items()))
del data['metric']
data.update(form_data)
alerts_model.save(data)
return redirect(reverse('alerts'))
else:
form = AlertForm(all_servers=all_servers)
return render(request, 'alerts/add.html', {
"common_metrics": settings.COMMON_METRICS,
"form": form,
'tags': tags,
'notifications': notifications,
"all_servers": all_servers
})
@login_required
def edit_alert(request, alert_id):
all_servers = server_model.get_all(account_id=request.account_id)
alert = alerts_model.get_by_id(alert_id, recipients_dict=False)
tags = tags_model.get_all()
    server = alert.get('server', None)  # May be a specific server id, or missing for global alerts
notifications = notifications_model.get_all_formated()
selected_metric = alerts_api_model.get_selected_metric(alert=alert)
if request.method == 'POST':
form = EditAlertForm(request.POST, all_servers=all_servers)
if form.is_valid():
data = form.cleaned_data
form_data = {
"tags": request.POST.getlist('tags', None),
"metric_value": data.get('metric_value'),
"above_below": data.get('above_below'),
"period": data.get('period'),
"server": server,
"metric_type": request.POST.get('metric_type'),
"notifications": request.POST.getlist('notifications')
}
alerts_model.update(form_data, alert_id)
return redirect(reverse('alerts'))
else:
form = EditAlertForm(all_servers=all_servers, initial={
'metric_value': alert['metric_value'],
'period': alert['period'],
'server':server,
"above_below": alert['above_below'],
})
# TODO - Fix that angular bug sometime
metric_types = ''
metric = alert.get('metric')
if metric:
metric = metric.lower()
metric_types = ["%"] if metric == 'cpu' else []
metric_types = ["%", "MB"] if metric == 'memory' else metric_types
metric_types = ["%", "MB", "GB"] if metric == 'disk' else metric_types
metric_types = ["KB/s"] if metric in ['network/inbound', 'network/outbound'] else metric_types
return render(request, 'alerts/edit.html', {
"server": server,
'tags': tags,
"alert": alert,
"form": form,
"selected_metric": selected_metric,
"notifications": notifications,
"metric_types": metric_types,
})
@login_required
def delete_alert(request, alert_id):
alert = alerts_model.get_by_id(alert_id)
rule_type = alert.get('rule_type', None)
if rule_type in ['process_global', 'plugin_global', 'global', 'health_check']:
all_servers = server_model.get_all()
if all_servers:
for server in all_servers:
server_id = server.get('_id')
alerts_model.delete(server_id=server_id, alert_id=alert_id)
else:
alerts_model.delete(alert_id=alert_id)
else:
server_id = alert.get('server', None)
alerts_model.delete(server_id=server_id, alert_id=alert_id)
return redirect(reverse('alerts'))
@login_required
def clear_triggers(request, alert_id):
alerts_history_model.clear(alert_id=alert_id)
messages.add_message(request, messages.INFO, 'Triggers deleted.')
return redirect(reverse('alerts'))
@login_required
def history(request, alert_id):
data = {}
alert = alerts_model.get_by_id(alert_id)
notifications = alerts_history_model.get_notifications_list(alert_id=alert['_id'], limit=100)
return render(request, 'alerts/history.html', {
'notifications': notifications,
'alert': alert,
'data': data
})
@login_required
def history_health_check(request, alert_id):
data = {}
alert = alerts_model.get_by_id(alert_id)
notifications = alerts_history_model.get_notifications_list(alert_id=alert['_id'], limit=100)
return render(request, 'alerts/history.html', {
'notifications': notifications,
'alert': alert,
'data': data
})
@login_required
def history_system(request, alert_id):
alert = alerts_model.get_by_id(alert_id)
server = server_model.get_by_id(alert['server'])
page = request.GET.get('page', 1)
page = int(page)
skip = 0
if page > 1:
skip = 100 * (page - 1)
total = alerts_history_model.count_notifications(alert_id=alert['_id'])
on_page = 100
    if total > on_page:
        # Ceiling division, so a partially filled last page still gets a page
        total_pages = -(-total // on_page)
    else:
        total_pages = 1
total_pages = range(total_pages)
notifications = alerts_history_model.get_notifications_list(alert_id=alert['_id'], limit=100, skip=skip)
return render(request, 'alerts/history.html', {
'notifications': notifications,
'alert': alert,
'server': server,
"total_pages": total_pages,
"page": page
})
@login_required
def ajax_alert_triggers(request, alert_id=None):
alert = alerts_model.get_by_id(alert_id)
notifications = alerts_history_model.get_notifications_list(alert_id=alert['_id'], limit=5)
return render(request, 'alerts/ajax_history.html', {
"notifications": notifications,
"rule": alert
})
| 7,778 | Python | .py | 183 | 34.098361 | 149 | 0.631846 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,091 | alerts_test.py | amonapp_amon/amon/apps/alerts/views/tests/alerts_test.py |
from django.test.client import Client
from django.test import TestCase
from django.urls import reverse
from nose.tools import *
from amon.apps.alerts.models import alerts_model
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.notifications.models import notifications_model
class TestAlertViews(TestCase):
def setUp(self):
User.objects.all().delete()
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.account_id = 1
self.c.login(username='foo@test.com', password='qwerty')
self.server_collection = alerts_model.mongo.get_collection('servers')
self.server_collection.insert({
"name" : "test",
"key": "test_me",
"account_id": self.account_id
})
server = self.server_collection.find_one({'account_id': self.account_id})
self.server_id = server['_id']
self.process_collection = alerts_model.mongo.get_collection('processes')
self.process_collection.insert({
"name" : "test",
"account_id": self.account_id
})
process = self.process_collection.find_one()
self.process_id = process['_id']
notifications_model.save(data={"email": "martin@amon.cx"}, provider_id="email")
notifications = notifications_model.get_all_formated()
self.notifications_list = [x['formated_id'] for x in notifications]
notifications_model.save(data={"email": "ajax@amon.cx"}, provider_id="email")
notifications = notifications_model.get_all_formated()
self.updated_notifications_list = [x['formated_id'] for x in notifications]
self.example_alert_dict = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "all",
"account_id": self.account_id,
"period": 300,
}
def tearDown(self):
self.c.logout()
self.user.delete()
User.objects.all().delete()
self.server_collection.remove()
self.process_collection.remove()
notifications_model.collection.remove()
def _cleanup(self):
alerts_model.collection.remove()
def edit_alert_test(self):
self._cleanup()
self.example_alert_dict = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "all",
'notifications': self.notifications_list,
"account_id": self.account_id,
"period": 300,
}
alerts_model.collection.insert(self.example_alert_dict)
alert = alerts_model.collection.find_one()
assert alert['notifications'] == self.notifications_list
url = reverse('edit_alert', kwargs={'alert_id': alert['_id']})
data = {
'server': 'all',
'metric': 'server:all.metric:CPU.rule_type:global',
'account_id': self.account_id,
'metric_value':15,
'period': 900,
'notifications': self.updated_notifications_list,
'metric_type': u'%',
'above_below': u'below',
}
response = self.c.post(url, data)
alert = alerts_model.collection.find_one()
for key, value in data.items():
if key not in ['metric']:
eq_(alert.get(key), value)
assert alert['notifications'] == self.updated_notifications_list
self._cleanup()
def all_alerts_test(self):
url = reverse('alerts')
response = self.c.get(url)
assert response.status_code == 200
def add_alert_test(self):
url = reverse('add_alert')
self._cleanup()
# Global alert
data = {
'server': 'all',
'metric': 'server:all.metric:CPU.rule_type:global',
'account_id': self.account_id,
'metric_value': 0,
'period': 300,
'metric_type': u'%',
'above_below': u'above',
'notifications': self.notifications_list
}
response = self.c.post(url, data)
self.assertRedirects(response, reverse('alerts'))
db_result = alerts_model.collection.find_one()
result_keys = {
u'rule_type': u'global',
u'account_id': self.account_id,
u'metric_value': 0,
u'metric': u'CPU',
u'period': 300,
u'server': u'all',
u'metric_type': u'%',
u'above_below': u'above',
u'notifications': self.notifications_list,
}
for key, value in result_keys.items():
eq_(db_result.get(key), value)
self._cleanup()
# Server alert
data = {
'server': self.server_id,
'metric': 'server:{0}.metric:CPU.rule_type:system'.format(self.server_id),
'account_id': self.account_id,
'metric_value': 12,
'period': 300,
'metric_type': u'%',
'above_below': u'above',
'notifications': self.notifications_list
}
response = self.c.post(url, data)
self.assertRedirects(response, reverse('alerts'))
db_result = alerts_model.collection.find_one()
result_keys = {
u'rule_type': u'system',
u'account_id': self.account_id,
u'metric_value': 12,
u'metric': u'CPU',
u'period': 300,
u'server': self.server_id,
u'metric_type': u'%',
u'above_below': u'above',
u'notifications': self.notifications_list,
}
for key, value in result_keys.items():
eq_(db_result.get(key), value)
self._cleanup()
# Process alert
data = {
'server': self.server_id,
'metric': 'server:{0}.process:{1}.metric:Memory.rule_type:process'.format(self.server_id, self.process_id),
'account_id': self.account_id,
'metric_value': 45,
'period': 300,
'metric_type': u'%',
'above_below': u'above',
u'notifications': self.notifications_list,
}
response = self.c.post(url, data)
self.assertRedirects(response, reverse('alerts'))
db_result = alerts_model.collection.find_one()
result_keys = {
u'rule_type': u'process',
u'account_id': self.account_id,
u'metric_value': 45,
u'metric': u'Memory',
u'period': 300,
u'server': self.server_id,
u'metric_type': u'%',
u'above_below': u'above',
u'notifications': self.notifications_list,
u'process': self.process_id,
}
for key, value in result_keys.items():
eq_(db_result.get(key), value)
self._cleanup()
# Process global alert
data = {
'server': 'all',
'metric': 'server:all.process:mongo.metric:Memory.rule_type:process_global',
'account_id': self.account_id,
'metric_value': 45,
'period': 300,
'metric_type': u'%',
'above_below': u'above',
u'notifications': self.notifications_list,
}
response = self.c.post(url, data)
self.assertRedirects(response, reverse('alerts'))
db_result = alerts_model.collection.find_one()
result_keys = {
u'rule_type': u'process_global',
u'account_id': self.account_id,
u'metric_value': 45,
u'metric': u'Memory',
u'period': 300,
u'server': 'all',
u'metric_type': u'%',
u'above_below': u'above',
u'notifications': self.notifications_list,
u'process': 'mongo',
}
for key, value in result_keys.items():
eq_(db_result.get(key), value)
self._cleanup()
# Down process alert
data = {
'server': self.server_id,
'metric': 'server:{0}.process:{1}.metric:Down.rule_type:uptime'.format(self.server_id, self.process_id),
'account_id': self.account_id,
'metric_value': 45,
'period': 300,
'metric_type': u'%',
'above_below': u'above',
u'notifications': self.notifications_list,
}
response = self.c.post(url, data)
self.assertRedirects(response, reverse('alerts'))
db_result = alerts_model.collection.find_one()
result_keys = {
u'rule_type': u'uptime',
u'account_id': self.account_id,
u'metric_value': 45,
u'metric': u'Down',
u'period': 300,
u'server': self.server_id,
u'metric_type': u'%',
u'above_below': u'above',
u'notifications': self.notifications_list,
u'process': self.process_id,
}
for key, value in result_keys.items():
eq_(db_result.get(key), value)
self._cleanup()
def mute_alert_test(self):
self._cleanup()
alerts_model.collection.insert(self.example_alert_dict)
alert = alerts_model.collection.find_one()
url = reverse('mute_alert', kwargs={'alert_id': alert['_id']})
assert_false(alert.get('mute'))
self.c.get(url)
alert = alerts_model.collection.find_one()
eq_(alert.get('mute'), True)
self._cleanup()
def delete_alert_test(self):
self._cleanup()
alerts_model.collection.insert(self.example_alert_dict)
alert = alerts_model.collection.find_one()
assert_true(alert)
url = reverse('delete_alert', kwargs={'alert_id': alert['_id']})
self.c.get(url)
alert = alerts_model.collection.find().count()
eq_(alert, 0)
self._cleanup()
| 10,209 | Python | .py | 255 | 28.545098 | 120 | 0.549729 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,092 | healthchecks_test.py | amonapp_amon/amon/apps/alerts/views/tests/healthchecks_test.py |
from django.test.client import Client
from django.test import TestCase
from django.urls import reverse
from nose.tools import *
from amon.apps.alerts.models import alerts_model
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.notifications.models import notifications_model
class TestAlertViews(TestCase):
def setUp(self):
User.objects.all().delete()
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.account_id = 1
self.c.login(username='foo@test.com', password='qwerty')
self.server_collection = alerts_model.mongo.get_collection('servers')
self.server_collection.insert({
"name" : "test",
"key": "test_me",
})
server = self.server_collection.find_one({'name': 'test'})
self.server_id = server['_id']
notifications_model.save(data={"email": "martin@amon.cx"}, provider_id="email")
notifications = notifications_model.get_all_formated()
self.notifications_list = [x['formated_id'] for x in notifications]
notifications_model.save(data={"email": "ajax@amon.cx"}, provider_id="email")
notifications = notifications_model.get_all_formated()
self.updated_notifications_list = [x['formated_id'] for x in notifications]
self.example_alert_dict = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "all",
"account_id": self.account_id,
"period": 300,
}
def tearDown(self):
self.c.logout()
self.user.delete()
User.objects.all().delete()
self.server_collection.remove()
notifications_model.collection.remove()
def _cleanup(self):
alerts_model.collection.remove()
# def edit_alert_test(self):
# self._cleanup()
# self.example_alert_dict = {
# "above_below": "above",
# "email_recepients": [],
# "rule_type": "global",
# "server": "all",
# 'notifications': self.notifications_list,
# "account_id": self.account_id,
# "period": 300,
# }
# alerts_model.collection.insert(self.example_alert_dict)
# alert = alerts_model.collection.find_one()
# assert alert['notifications'] == self.notifications_list
# url = reverse('edit_alert', kwargs={'alert_id': alert['_id']})
# data = {
# 'server': 'all',
# 'metric': 'server:all.metric:CPU.rule_type:global',
# 'account_id': self.account_id,
# 'metric_value':15,
# 'period': 900,
# 'notifications': self.updated_notifications_list,
# 'metric_type': u'%',
# 'above_below': u'below',
# }
# response = self.c.post(url, data)
# alert = alerts_model.collection.find_one()
# for key, value in data.items():
# if key not in ['metric']:
# eq_(alert.get(key), value)
# assert alert['notifications'] == self.updated_notifications_list
# self._cleanup()
def add_alert_test(self):
url = reverse('add_healthcheck_alert')
self._cleanup()
# Global alert
data = {
'server': 'all',
'status': 'critical',
'period': 300,
'command': u'check-http.rb',
'param': u'-u https://www.amon.cx',
'notifications': self.notifications_list
}
response = self.c.post(url, data)
self.assertRedirects(response, reverse('alerts'))
db_result = alerts_model.collection.find_one()
result_keys = {
u'rule_type': u'health_check',
u'period': 300,
u'server': u'all',
'status': 'critical',
u'command': u'check-http.rb',
u'param': u'-u https://www.amon.cx',
u'notifications': self.notifications_list,
}
for key, value in result_keys.items():
eq_(db_result.get(key), value)
self._cleanup()
| 4,295 | Python | .py | 103 | 32.854369 | 87 | 0.569685 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,093 | alerter_test.py | amonapp_amon/amon/apps/alerts/tests/alerter_test.py |
import unittest
from nose.tools import eq_
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.servers.models import server_model
from amon.apps.processes.models import process_model
from amon.apps.alerts.models import alerts_model, alerts_history_model
from amon.apps.alerts.alerter import (
server_alerter,
process_alerter,
uptime_alerter,
plugin_alerter,
health_check_alerter,
notsendingdata_alerter
)
from amon.apps.plugins.models import plugin_model
from amon.utils.dates import unix_utc_now
class ServerAlerterTest(unittest.TestCase):
def setUp(self):
User.objects.all().delete()
self.alerter = server_alerter
self.user = User.objects.create_user(password='qwerty' , email='foo@test.com')
self.account_id = 1
self.server_key = server_model.add('test', account_id=self.account_id)
self.server = server_model.get_server_by_key(self.server_key)
self.server_id = self.server['_id']
self.process = process_model.get_or_create(server_id=self.server_id, name='test')
self.process_id = self.process['_id']
self.plugin = plugin_model.get_or_create(server_id=self.server_id, name='testplugin')
self.plugin_id = self.plugin['_id']
self.gauge = plugin_model.get_or_create_gauge_by_name(plugin=self.plugin, name='gauge')
self.gauge_id = self.gauge['_id']
def tearDown(self):
alerts_model.collection.remove()
server_model.collection.remove()
process_model.collection.remove()
plugin_model.collection.remove()
plugin_model.gauge_collection.remove()
self.user.delete()
User.objects.all().delete()
def _cleanup(self):
alerts_history_model.collection.remove()
alerts_model.collection.remove()
def test_global_check(self):
self._cleanup()
# GLOBAL ALERT
global_alert = {
"above_below": "above",
"rule_type": "global",
"server": "all",
"account_id": self.account_id,
"period": 0,
}
# CPU alert
cpu_alert_dict = {**global_alert, 'metric': 'CPU', 'metric_value': 1, 'metric_type': "%"}
alert_id = alerts_model.collection.insert(cpu_alert_dict)
global_rules = alerts_model.get_global_alerts(account_id=self.account_id)
eq_(len(global_rules), 1)
data = {u'cpu': {u'system': u'1.30', u'idle': u'98.70', u'user': u'0.00', u'steal': u'0.00', u'nice': u'0.00'}}
server_alerter.check(data, self.server)
unsent_alerts = alerts_history_model.get_unsent(server_id=self.server_id)
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
eq_(unsent_alerts['data'].count(), 1)
def test_system_check(self):
self._cleanup()
# System alert
system_alert = {
"above_below": "above",
"rule_type": "system",
"server": self.server_id,
"account_id": self.account_id,
"period": 0,
}
# CPU alert
cpu_alert_dict = {**system_alert, 'metric': 'CPU', 'metric_value': 1, 'metric_type': "%"}
alert_id = alerts_model.collection.insert(cpu_alert_dict)
rules = alerts_model.get_alerts(type='system', server=self.server)
eq_(len(rules), 1)
data = {u'cpu': {u'system': u'1.30', u'idle': u'98.70', u'user': u'0.00', u'steal': u'0.00', u'nice': u'0.00'}}
server_alerter.check(data, self.server)
unsent_alerts = alerts_history_model.get_unsent(server_id=self.server_id)
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
eq_(unsent_alerts['data'].count(), 1)
self._cleanup()
def test_process_alert(self):
self._cleanup()
process_alert = {
"above_below": "above",
"rule_type": "process",
"server": self.server_id,
"process": self.process_id,
"account_id": self.account_id,
"period": 0,
}
cpu_alert_dict = {**process_alert, 'metric': 'CPU', 'metric_value': 1, 'metric_type': "%"}
alert_id = alerts_model.collection.insert(cpu_alert_dict)
cpu_value = float(2)
data = {'data': [{'p': self.process_id, 'c': cpu_value}]}
process_alerter.check(data, self.server)
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
assert trigger['average_value'] == cpu_value
eq_(unsent_alerts['data'].count(), 1)
self._cleanup()
process_alert = {
"above_below": "above",
"rule_type": "process_global",
"server": 'all',
"process": 'mongo',
"account_id": self.account_id,
"period": 0,
}
process = process_model.get_or_create(server_id=self.server_id, name='mongo')
global_process_id = process['_id']
cpu_alert_dict = {**process_alert, 'metric': 'CPU', 'metric_value': 1, 'metric_type': "%"}
alert_id = alerts_model.collection.insert(cpu_alert_dict)
cpu_value = float(2)
data = {'data': [{'p': global_process_id, 'c': cpu_value}]}
process_alerter.check(data, self.server)
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
assert trigger['average_value'] == cpu_value
eq_(unsent_alerts['data'].count(), 1)
def test_plugin_alert(self):
self._cleanup()
plugin_alert = {
"above_below": "above",
"rule_type": "plugin",
"server": self.server_id,
"gauge": self.gauge_id,
"plugin": self.plugin_id,
"account_id": self.account_id,
"key": "testkey",
"period": 0,
"metric_value": 5
}
alert_id = alerts_model.collection.insert(plugin_alert)
key_name = '{0}.testkey'.format(self.gauge['name'])
data = {'gauges': {'bla.test': 1, key_name: 6}}
plugin_alerter.check(data=data, plugin=self.plugin)
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
assert trigger['average_value'] == 6
eq_(unsent_alerts['data'].count(), 1)
self._cleanup()
plugin = plugin_model.get_or_create(server_id=self.server_id, name='mongo')
gauge = 'global_gauge.global_key'
plugin_alert = {
"above_below": "above",
"rule_type": "plugin_global",
"server": 'all',
"plugin": 'mongo',
"gauge": 'global_gauge',
"key": 'global_key',
"period": 0,
"metric_value": 5
}
alert_id = alerts_model.collection.insert(plugin_alert)
data = {'gauges': {'bla.test': 1, gauge: 6}}
plugin_alerter.check(data=data, plugin=plugin, server=self.server)
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
assert trigger['average_value'] == 6
eq_(unsent_alerts['data'].count(), 1)
def test_uptime_alert(self):
self._cleanup()
uptime_alert = {
"above_below": "above",
"rule_type": "uptime",
"server": self.server_id,
"process": self.process_id,
"account_id": self.account_id,
"period": 0,
}
cpu_alert_dict = {**uptime_alert, 'metric': 'Down', 'metric_value': 0}
alerts_model.collection.insert(cpu_alert_dict)
data = {'data': []}
uptime_alerter.check(data, self.server)
unsent_alerts = alerts_history_model.get_unsent(server_id=self.server_id)
eq_(unsent_alerts['data'].count(), 1)
self._cleanup()
def test_notsendingdata_alert(self):
self._cleanup()
now = unix_utc_now()
uptime_alert = {
"rule_type": "system",
"server": self.server_id,
"account_id": self.account_id,
"period": 0,
}
cpu_alert_dict = {**uptime_alert, 'metric': 'NotSendingData'}
alert_id = alerts_model.collection.insert(cpu_alert_dict)
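        # Pretend the server last reported 15 seconds ago, so the alerter
        # should flag it as not sending data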
server_model.update({'last_check': now - 15}, self.server_id)
notsendingdata_alerter.check()
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
eq_(unsent_alerts['data'].count(), 1)
self._cleanup()
now = unix_utc_now()
uptime_alert = {
"rule_type": "global",
"server": "all",
"account_id": self.account_id,
"period": 0,
}
cpu_alert_dict = {**uptime_alert, 'metric': 'NotSendingData'}
alert_id = alerts_model.collection.insert(cpu_alert_dict)
server_model.update({'last_check': now - 15}, self.server_id)
notsendingdata_alerter.check()
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
eq_(unsent_alerts['data'].count(), 1)
def test_health_check_alert(self):
self._cleanup()
# Alert for 1 server
health_check_alert = {
"rule_type": "health_check",
"server": self.server_id,
"status": "critical",
"command": "check-http.rb",
"period": 0,
}
alert_id = alerts_model.collection.insert(health_check_alert)
data = [{u'command': u'check-http.rb', u'name': u'', u'exit_code': 2}]
health_check_alerter.check(data=data, server=self.server)
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
eq_(unsent_alerts['data'].count(), 1)
self._cleanup()
global_health_check_alert = {
"rule_type": "health_check",
"status": "critical",
"command": "check-http.rb",
"period": 0,
}
alert_id = alerts_model.collection.insert(global_health_check_alert)
data = [{u'command': u'check-http.rb -u amon.cx', u'name': u'', u'exit_code': 2}]
health_check_alerter.check(data=data, server=self.server)
unsent_alerts = alerts_history_model.get_unsent()
for trigger in unsent_alerts['data']:
assert trigger['alert_id'] == alert_id
eq_(unsent_alerts['data'].count(), 1)
| 10,967 | Python | .py | 251 | 33.665339 | 119 | 0.579985 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,094 | forms_test.py | amonapp_amon/amon/apps/alerts/tests/forms_test.py |
from django.test.client import Client
from django.urls import reverse
from django.test import TestCase
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.servers.models import server_model
from amon.apps.alerts.models import alert_mute_servers_model
from amon.utils.dates import unix_utc_now
class TestMuteForm(TestCase):
def setUp(self):
self.c = Client()
User.objects.all().delete()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.c.login(username='foo@test.com', password='qwerty')
def tearDown(self):
self.c.logout()
User.objects.all().delete()
def _cleanup(self):
server_model.collection.remove()
alert_mute_servers_model.collection.remove()
def test_mute(self):
self._cleanup()
url = reverse('alerts_mute_servers')
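        # period=1 mutes for one hour (expires_at_utc = now + 3600);
        # period=0 mutes indefinitely (no expiry is stored)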
response = self.c.post(url,{
'server': 'all',
'period': 1,
})
result = alert_mute_servers_model.collection.find_one()
assert result['expires_at_utc'] == unix_utc_now()+3600
self._cleanup()
response = self.c.post(url,{
'server': 'all',
'period': 0,
})
result = alert_mute_servers_model.collection.find_one()
assert result.get('expires_at_utc') == None
| 1,432 | Python | .py | 37 | 30.540541 | 85 | 0.647626 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,095 | models.py | amonapp_amon/amon/apps/_files/models.py |
import gridfs
import tempfile
from amon.apps.core.basemodel import BaseModel
class FilesModel(BaseModel):
def __init__(self):
super(FilesModel, self).__init__()
self.fs = gridfs.GridFS(self.mongo.get_database(), collection='files')
def get_by_id(self, file_id=None):
file_id = self.object_id(file_id)
result = self.fs.get(file_id)
return result
def delete(self, file_id=None):
self.fs.delete(file_id)
def add(self, file=None, filename=None):
# Filename is used for temporary files
filename = filename if filename else file.name
file_id = self.fs.put(file, filename=filename)
return file_id
    # The file is a string - write it to a temporary file first
    def add_with_temporary(self, file=None, filename=None):
        file_id = None
        file = "\n".join(file.splitlines())  # Transform \r\n to \n
        with tempfile.NamedTemporaryFile() as temp:
            # NamedTemporaryFile defaults to binary mode, so encode the string
            temp.write(file.encode('utf-8'))
            temp.seek(0)
            file_id = self.add(temp, filename=filename)
        return file_id
files_model = FilesModel()
| 1,183 | Python | .py | 29 | 31.344828 | 78 | 0.640439 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,096 | models.py | amonapp_amon/amon/apps/map/models.py |
import collections
from amon.apps.core.basemodel import BaseModel
from amon.apps.servers.models import server_model
from amon.apps.system.models import system_model
from amon.apps.devices.models import volumes_model, interfaces_model
from amon.apps.processes.models import process_model
from amon.apps.tags.models import tags_model
from amon.apps.servers.utils import filter_tags
class MapModel(BaseModel):
def __init__(self):
super(MapModel, self).__init__()
def _get_device_stats(self, value=None, data=None):
all_data = [0, ]
metric_map = {'used_percent': 'percent', 'inbound': 'i', 'outbound': 'o'}
value_in_dict = metric_map.get(value, False)
if value_in_dict is False:
value_in_dict = value
for _, d in data.items():
d = {} if d is None else d
metric_value = d.get(value_in_dict, 0)
metric_value = float(metric_value)
all_data.append(metric_value)
v = max(all_data)
return v
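    # Bucket the sorted server data by the tags in the given group; servers
    # matching none of the group's tags end up under 'not_in_group'.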
def group_by(self, group_id=None, data=None):
tags_for_group = tags_model.get_for_group(group_id)
grouped_servers = collections.OrderedDict()
if tags_for_group.clone().count() > 0:
server_ids_in_groups = []
for tag in tags_for_group:
tag_name = tag.get('name')
grouped_servers[tag_name] = {
'sorted_data': [],
'max_value': data['max_value']
}
for s in data['sorted_data']:
server = s.get('server')
server_id = str(server.get('_id'))
filtered_tag_id = str(tag.get("_id"))
append_server = filter_tags(
server=server,
tags=filtered_tag_id
)
if append_server:
server_ids_in_groups.append(server_id)
grouped_servers[tag_name]['sorted_data'].append(s)
# Add servers with no groups
grouped_servers['not_in_group'] = {
'sorted_data': [],
'max_value': data['max_value']
}
for s in data['sorted_data']:
server = s.get('server')
server_id = str(server.get('_id'))
if server_id not in server_ids_in_groups:
grouped_servers['not_in_group']['sorted_data'].append(s)
return grouped_servers
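    # 'field' has the form '<metric_type>:<name>.<value>', e.g.
    # 'system:cpu.system', 'disk:disk.used_percent' or 'process:amonagent.memory'.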
def sort_by(self, field=None):
all_servers = server_model.get_all()
metric_type, metric_block = field.split(":")
name, value = metric_block.split('.')
all_data = []
calculate_max_list = []
unit_dict = {
'cpu': "%",
'memory': 'MB',
'used_percent': "%",
'network': 'kb/s',
'swap_used_percent': "%",
}
if not all_servers:
return
for s in all_servers:
v = 0
last_check = s.get('last_check', 0)
if metric_type == 'disk' or metric_type == 'network':
if metric_type == 'disk':
device_data = volumes_model.get_check_for_timestamp(s, last_check)
else:
device_data = interfaces_model.get_check_for_timestamp(s, last_check)
v = self._get_device_stats(value=value, data=device_data)
elif metric_type == 'process':
process = process_model.get_by_name_and_server_id(server_id=s['_id'], name=name)
value_in_dict = 'c' if value == 'cpu' else 'm'
if process:
process_data = process_model.get_check_for_timestamp(server=s, timestamp=process.get('last_check'))
if process_data:
for p in process_data.get('data', []):
if p.get('p') == process.get('_id'):
v = p.get(value_in_dict, 0)
else:
system_data = system_model.get_check_for_timestamp(s, last_check)
metric_dict = system_data.get(name, {})
v = metric_dict.get(value, 0)
unit = unit_dict.get(name, "")
            # Overwrite the generic unit when the metric value has a more specific one
            alternative_unit = unit_dict.get(value, False)
            if alternative_unit:
                unit = alternative_unit
server_data = {
'server': s,
'last_check': last_check,
'unit': unit,
'value': v,
'field': field
}
calculate_max_list.append(v)
all_data.append(server_data)
all_data = sorted(all_data, key=lambda k: k['value'], reverse=True)
result = {
'sorted_data': all_data,
'max_value': max(calculate_max_list)
}
return result
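    # Build the list of sortable fields: the fixed system/network/disk metrics
    # plus cpu and memory entries for every unique process.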
def get_fields(self):
system_fields = [
('system:cpu.system', 'cpu.system'),
('system:cpu.user', 'cpu.user'),
('system:cpu.steal', 'cpu.steal'),
('system:cpu.iowait', 'cpu.iowait'),
('system:loadavg.minute', 'loadavg.minute'),
('system:loadavg.five_minutes', 'loadavg.five_minutes'),
('system:loadavg.fifteen_minutes', 'loadavg.fifteen_minutes'),
('system:memory.used_mb', 'memory.used_mb'),
('system:memory.used_percent', 'memory.used_percent'),
('system:memory.free_mb', 'memory.free_mb'),
('system:memory.total_mb', 'memory.total_mb'),
('system:memory.swap_free_mb', 'memory.swap_free_mb'),
('system:memory.swap_used_percent', 'memory.swap_used_percent'),
('system:memory.swap_total_mb', 'memory.swap_total_mb'),
('network:network.inbound', 'network.inbound'),
('network:network.outbound', 'network.outbound'),
('disk:disk.total', 'disk.total'),
('disk:disk.free', 'disk.free'),
('disk:disk.used_percent', 'disk.used_percent'),
]
process_fields = []
for p in process_model.get_all_unique():
process_cpu = "{0}.cpu".format(p)
process_memory = "{0}.memory".format(p)
cpu_tuple = ("process:{0}".format(process_cpu), process_cpu)
memory_tuple = ("process:{0}".format(process_memory), process_memory)
process_fields.append(cpu_tuple)
process_fields.append(memory_tuple)
process_fields = sorted(process_fields, key=lambda k: k[0])
system_fields.extend(process_fields)
return system_fields
map_model = MapModel()
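# Minimal usage sketch (illustrative; the group id is hypothetical):
#   data = map_model.sort_by(field='system:cpu.system')
#   grouped = map_model.group_by(group_id=group_id, data=data)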
| 6,869 | Python | .py | 148 | 32.432432 | 119 | 0.530225 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,097 | urls.py | amonapp_amon/amon/apps/map/urls.py |
from django.conf.urls import url
from amon.apps.map import views
urlpatterns = (
url(r'^$', views.index, name='servers_map'),
)
| 134 | Python | .py | 5 | 24.6 | 48 | 0.724409 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,098 | views.py | amonapp_amon/amon/apps/map/views.py |
from amon.apps.core.views import *
from amon.apps.servers.models import server_model
from amon.apps.map.models import map_model
from amon.apps.tags.models import tag_groups_model
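# Render the server map: sort all servers by the requested metric and
# optionally bucket them by tag group.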
@login_required
def index(request):
all_sort_fields = map_model.get_fields()
all_servers = server_model.get_all()
tag_groups = tag_groups_model.get_all()
servers_data = []
GET_group_id = request.GET.get('group_id', False)
sort_by = request.GET.get('sort_by', 'system:cpu.system')
servers_data = map_model.sort_by(field=sort_by)
grouped_servers = []
if GET_group_id:
grouped_servers = map_model.group_by(group_id=GET_group_id, data=servers_data)
    active_tag_groups = set()
if all_servers:
for server in all_servers:
server_tags = server.get('tags', [])
for t in server_tags:
group_id = t.get('group_id', False)
if group_id is not False:
active_tag_groups.add(str(group_id))
return render(request, 'map/view.html', {
"all_sort_fields": all_sort_fields,
"grouped_servers": grouped_servers,
"servers_data": servers_data,
"group_id": GET_group_id,
"sort_by": sort_by,
"tag_groups": tag_groups,
"active_tag_groups": active_tag_groups,
})
| 1,330 | Python | .py | 33 | 32.69697 | 86 | 0.634871 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |
| 6,099 | models_test.py | amonapp_amon/amon/apps/map/tests/models_test.py |
import unittest
from time import time
from amon.apps.servers.models import server_model
from amon.apps.map.models import map_model
from amon.apps.devices.models import interfaces_model, volumes_model
from amon.apps.processes.models import process_model
from amon.apps.system.models import system_model
from amon.apps.tags.models import tags_model, tag_groups_model
now = int(time())
minute_ago = (now - 60)
two_minutes_ago = (now - 120)
five_minutes_ago = (now - 300)
class MapModelTest(unittest.TestCase):
def setUp(self):
pass
def _cleanup(self):
server_model.collection.remove()
system_model.data_collection.remove()
process_model.data_collection.remove()
process_model.collection.remove()
interfaces_model.collection.remove()
interfaces_model.get_data_collection().remove()
volumes_model.collection.remove()
volumes_model.get_data_collection().remove()
tags_model.collection.remove()
def get_fields_test(self):
self._cleanup()
result = map_model.get_fields()
assert len(result) != 0
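    # A server tagged provider:digitalocean should land only in that bucket
    # when grouping by the 'provider' tag group.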
def group_by_test(self):
self._cleanup()
tags = [
'provider:digitalocean',
'provider:amazon',
'region:lon1',
]
tags_object_ids = tags_model.create_and_return_ids(tags)
server = server_model.get_or_create_by_machine_id(
tags=tags_object_ids,
machine_id=1
)
provider_group = tag_groups_model.get_or_create_by_name('provider')
data = {
'max_value': 59,
'sorted_data': [{
'last_check': 100,
'value': 59,
'field': 'system:memory.used_percent',
'server': server,
'unit': '%',
}]
}
result = map_model.group_by(
group_id=provider_group,
data=data.copy()
)
assert set(result.keys()) == set(['not_in_group', 'digitalocean', 'amazon'])
assert len(result['digitalocean']['sorted_data']) == 1
assert len(result['not_in_group']['sorted_data']) == 0
new_tags = [
'size:1gb',
'distro:ubuntu',
'size:2gb'
]
tags_object_ids = tags_model.create_and_return_ids(new_tags)
provider_group = tag_groups_model.get_or_create_by_name('size')
result = map_model.group_by(
group_id=provider_group,
data=data.copy()
)
assert set(result.keys()) == set(['not_in_group', '1gb', '2gb'])
assert len(result['1gb']['sorted_data']) == 0
assert len(result['2gb']['sorted_data']) == 0
assert len(result['not_in_group']['sorted_data']) == 1
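    # Ten servers with cpu.system values 0..9; sort_by must return them in
    # descending order of the requested metric.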
def sort_by_test_system_data(self):
self._cleanup()
for i in range(10):
data = {
'name': 'system-server-{0}'.format(i),
'last_check': 100
}
server_id = server_model.collection.insert(data.copy())
cpu_dict = {"time": 100,
"server_id": server_id,
"cpu": {"system": i, "idle": "91.15"},
"memory": {
"used_percent": 50 + i,
"swap_used_mb": 9,
"total_mb": 497,
"free_mb": 12,
"swap_used_percent": 1,
"swap_free_mb": 1015,
"used_mb": 485,
"swap_total_mb": 1024
},
"loadavg": {
"cores": 1,
"fifteen_minutes": i + 1,
"five_minutes": 0.01,
"minute": 0
},
}
system_model.data_collection.insert(cpu_dict)
result = map_model.sort_by(field='system:cpu.system')
assert len(result['sorted_data']) == 10
for i, r in enumerate(result['sorted_data']):
assert r['value'] == (9 - i)
assert result['max_value'] == 9
result = map_model.sort_by(field='system:memory.used_percent')
assert len(result['sorted_data']) == 10
assert result['sorted_data'][0]['value'] == 59
assert result['max_value'] == 59
result = map_model.sort_by(field='system:loadavg.fifteen_minutes')
assert len(result['sorted_data']) == 10
assert result['sorted_data'][0]['value'] == 10
assert result['max_value'] == 10
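    # Each server runs one 'amonagent' process with cpu 1..10 and memory
    # 101..110; an unknown process should sort with value 0 and no unit.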
def sort_by_test_process_data(self):
self._cleanup()
for i in range(10):
data = {
'name': 'process-server-{0}'.format(i),
'last_check': 100
}
server_id = server_model.collection.insert(data.copy())
process = {
'name': 'amonagent',
'server': server_id,
'last_check': 100,
}
process_id = process_model.collection.insert(process.copy())
process_dict = {
"server_id": server_id,
"t": 100,
"data": [{
"c": i + 1,
"m": i + 101,
"n": "amonagent",
"p": process_id,
"r": 0.17,
"w": 0.0
}]
}
process_model.data_collection.insert(process_dict.copy())
result = map_model.sort_by(field='process:amonagent.cpu')
assert len(result['sorted_data']) == 10
assert result['sorted_data'][0]['value'] == 10
assert result['sorted_data'][0]['unit'] == '%'
assert result['max_value'] == 10
result = map_model.sort_by(field='process:amonagent.memory')
assert len(result['sorted_data']) == 10
assert result['sorted_data'][0]['value'] == 110
assert result['sorted_data'][0]['unit'] == 'MB'
assert result['max_value'] == 110
        result = map_model.sort_by(field='process:nonexisting.no')
assert len(result['sorted_data']) == 10
assert result['sorted_data'][0]['value'] == 0
assert result['sorted_data'][0]['unit'] == ''
assert result['max_value'] == 0
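    # Five volumes per server; the reported value is the busiest volume,
    # i.e. the maximum used percent across devices.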
def test_sort_by_volume_data(self):
self._cleanup()
for i in range(10):
data = {
'name': 'volume-server-{0}'.format(i),
'last_check': 100
}
server_id = server_model.collection.insert(data.copy())
for v in range(5):
volume = {
'name': 'sda-{0}'.format(v),
'server_id': server_id,
'last_update': 100,
}
volume_id = volumes_model.collection.insert(volume.copy())
volume_data_dict = {
"server_id": server_id,
"device_id": volume_id,
"percent": i + v + 10,
"t": 100
}
volumes_model.get_data_collection().insert(volume_data_dict.copy())
result = map_model.sort_by(field='disk:disk.used_percent')
assert len(result['sorted_data']) == 10
assert result['sorted_data'][0]['value'] == 23 # 10 + 9 + 4
assert result['sorted_data'][0]['unit'] == '%'
def test_sort_by_iface_data(self):
self._cleanup()
for i in range(10):
data = {
'name': 'iface-server-{0}'.format(i),
'last_check': 100
}
server_id = server_model.collection.insert(data.copy())
for v in range(5):
device_data = {
'name': 'eth{0}'.format(v),
'server_id': server_id,
'last_update': 100,
}
device_id = interfaces_model.collection.insert(device_data.copy())
data_dict = {
"server_id": server_id,
"device_id": device_id,
"i": i + v + 100,
"t": 100
}
interfaces_model.get_data_collection().insert(data_dict.copy())
result = map_model.sort_by(field='network:network.inbound')
assert len(result['sorted_data']) == 10
assert result['sorted_data'][0]['value'] == 113 # 100 + 9 + 4
assert result['sorted_data'][0]['unit'] == 'kb/s'
| 8,580 | Python | .py | 210 | 27.8 | 84 | 0.502978 | amonapp/amon | 1,334 | 108 | 37 | AGPL-3.0 | 9/5/2024, 5:09:37 PM (Europe/Amsterdam) |