Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | ceph-main/src/pybind/mgr/dashboard/tests/test_host.py | import unittest
from unittest import mock
from orchestrator import DaemonDescription, HostSpec
from .. import mgr
from ..controllers._version import APIVersion
from ..controllers.host import Host, HostUi, get_device_osd_map, get_hosts, get_inventories
from ..tests import ControllerTestCase, patch_orch
from ..tools import NotificationQueue, TaskManager
class HostControllerTest(ControllerTestCase):
URL_HOST = '/api/host'
@classmethod
def setup_server(cls):
NotificationQueue.start_queue()
TaskManager.init()
cls.setup_controllers([Host])
@classmethod
def tearDownClass(cls):
NotificationQueue.stop()
@mock.patch('dashboard.controllers.host.get_hosts')
def test_host_list_with_sources(self, mock_get_hosts):
hosts = [{
'hostname': 'host-0',
'sources': {
'ceph': True,
'orchestrator': False
}
}, {
'hostname': 'host-1',
'sources': {
'ceph': False,
'orchestrator': True
}
}, {
'hostname': 'host-2',
'sources': {
'ceph': True,
'orchestrator': True
}
}]
def _get_hosts(sources=None):
if sources == 'ceph':
return hosts[0]
if sources == 'orchestrator':
return hosts[1:]
if sources == 'ceph, orchestrator':
return hosts[2]
return hosts
mock_get_hosts.side_effect = _get_hosts
self._get(self.URL_HOST, version=APIVersion(1, 1))
self.assertStatus(200)
self.assertJsonBody(hosts)
self._get('{}?sources=ceph'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(200)
self.assertJsonBody(hosts[0])
self._get('{}?sources=orchestrator'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(200)
self.assertJsonBody(hosts[1:])
self._get('{}?sources=ceph,orchestrator'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(200)
self.assertJsonBody(hosts)
@mock.patch('dashboard.controllers.host.get_hosts')
def test_host_list_with_facts(self, mock_get_hosts):
hosts_without_facts = [{
'hostname': 'host-0',
'sources': {
'ceph': True,
'orchestrator': False
}
}, {
'hostname': 'host-1',
'sources': {
'ceph': False,
'orchestrator': True
}
}]
hosts_facts = [{
'hostname': 'host-0',
'cpu_count': 1,
'memory_total_kb': 1024
}, {
'hostname': 'host-1',
'cpu_count': 2,
'memory_total_kb': 1024
}]
hosts_with_facts = [{
'hostname': 'host-0',
'sources': {
'ceph': True,
'orchestrator': False
},
'cpu_count': 1,
'memory_total_kb': 1024
}, {
'hostname': 'host-1',
'sources': {
'ceph': False,
'orchestrator': True
},
'cpu_count': 2,
'memory_total_kb': 1024
}]
# test with orchestrator available
with patch_orch(True, hosts=hosts_without_facts) as fake_client:
mock_get_hosts.return_value = hosts_without_facts
fake_client.hosts.get_facts.return_value = hosts_facts
# test with ?facts=true
self._get('{}?facts=true'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(200)
self.assertHeader('Content-Type',
APIVersion(1, 2).to_mime_type())
self.assertJsonBody(hosts_with_facts)
# test with ?facts=false
self._get('{}?facts=false'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(200)
self.assertHeader('Content-Type',
APIVersion(1, 2).to_mime_type())
self.assertJsonBody(hosts_without_facts)
# test with orchestrator available but orch backend!=cephadm
with patch_orch(True, missing_features=['get_facts']) as fake_client:
mock_get_hosts.return_value = hosts_without_facts
# test with ?facts=true
self._get('{}?facts=true'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(400)
# test with no orchestrator available
with patch_orch(False):
mock_get_hosts.return_value = hosts_without_facts
# test with ?facts=true
self._get('{}?facts=true'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(400)
# test with ?facts=false
self._get('{}?facts=false'.format(self.URL_HOST), version=APIVersion(1, 1))
self.assertStatus(200)
self.assertHeader('Content-Type',
APIVersion(1, 2).to_mime_type())
self.assertJsonBody(hosts_without_facts)
def test_get_1(self):
mgr.list_servers.return_value = []
with patch_orch(False):
self._get('{}/node1'.format(self.URL_HOST))
self.assertStatus(404)
def test_get_2(self):
mgr.list_servers.return_value = [{
'hostname': 'node1',
'services': []
}]
with patch_orch(False):
self._get('{}/node1'.format(self.URL_HOST))
self.assertStatus(200)
self.assertIn('labels', self.json_body())
self.assertIn('status', self.json_body())
self.assertIn('addr', self.json_body())
def test_get_3(self):
mgr.list_servers.return_value = []
with patch_orch(True, hosts=[HostSpec('node1')]):
self._get('{}/node1'.format(self.URL_HOST))
self.assertStatus(200)
self.assertIn('labels', self.json_body())
self.assertIn('status', self.json_body())
self.assertIn('addr', self.json_body())
def test_populate_service_instances(self):
mgr.list_servers.return_value = []
node1_daemons = [
DaemonDescription(
hostname='node1',
daemon_type='mon',
daemon_id='a'
),
DaemonDescription(
hostname='node1',
daemon_type='mon',
daemon_id='b'
)
]
node2_daemons = [
DaemonDescription(
hostname='node2',
daemon_type='mgr',
daemon_id='x'
),
DaemonDescription(
hostname='node2',
daemon_type='mon',
daemon_id='c'
)
]
node1_instances = [{
'type': 'mon',
'count': 2
}]
node2_instances = [{
'type': 'mgr',
'count': 1
}, {
'type': 'mon',
'count': 1
}]
# test with orchestrator available
with patch_orch(True,
hosts=[HostSpec('node1'), HostSpec('node2')]) as fake_client:
fake_client.services.list_daemons.return_value = node1_daemons
self._get('{}/node1'.format(self.URL_HOST))
self.assertStatus(200)
self.assertIn('service_instances', self.json_body())
self.assertEqual(self.json_body()['service_instances'], node1_instances)
fake_client.services.list_daemons.return_value = node2_daemons
self._get('{}/node2'.format(self.URL_HOST))
self.assertStatus(200)
self.assertIn('service_instances', self.json_body())
self.assertEqual(self.json_body()['service_instances'], node2_instances)
# test with no orchestrator available
with patch_orch(False):
mgr.list_servers.return_value = [{
'hostname': 'node1',
'services': [{
'type': 'mon',
'id': 'a'
}, {
'type': 'mgr',
'id': 'b'
}]
}]
self._get('{}/node1'.format(self.URL_HOST))
self.assertStatus(200)
self.assertIn('service_instances', self.json_body())
self.assertEqual(self.json_body()['service_instances'],
[{
'type': 'mon',
'count': 1
}, {
'type': 'mgr',
'count': 1
}])
@mock.patch('dashboard.controllers.host.add_host')
def test_add_host(self, mock_add_host):
with patch_orch(True):
payload = {
'hostname': 'node0',
'addr': '192.0.2.0',
'labels': 'mon',
'status': 'maintenance'
}
self._post(self.URL_HOST, payload, version=APIVersion(0, 1))
self.assertStatus(201)
mock_add_host.assert_called()
def test_set_labels(self):
mgr.list_servers.return_value = []
orch_hosts = [
HostSpec('node0', labels=['aaa', 'bbb'])
]
with patch_orch(True, hosts=orch_hosts) as fake_client:
fake_client.hosts.remove_label = mock.Mock()
fake_client.hosts.add_label = mock.Mock()
payload = {'update_labels': True, 'labels': ['bbb', 'ccc']}
self._put('{}/node0'.format(self.URL_HOST), payload, version=APIVersion(0, 1))
self.assertStatus(200)
self.assertHeader('Content-Type',
'application/vnd.ceph.api.v0.1+json')
fake_client.hosts.remove_label.assert_called_once_with('node0', 'aaa')
fake_client.hosts.add_label.assert_called_once_with('node0', 'ccc')
# return 400 if type other than List[str]
self._put('{}/node0'.format(self.URL_HOST),
{'update_labels': True, 'labels': 'ddd'},
version=APIVersion(0, 1))
self.assertStatus(400)
def test_host_maintenance(self):
mgr.list_servers.return_value = []
orch_hosts = [
HostSpec('node0'),
HostSpec('node1')
]
with patch_orch(True, hosts=orch_hosts):
# enter maintenance mode
self._put('{}/node0'.format(self.URL_HOST), {'maintenance': True},
version=APIVersion(0, 1))
self.assertStatus(200)
self.assertHeader('Content-Type',
'application/vnd.ceph.api.v0.1+json')
# force enter maintenance mode
self._put('{}/node1'.format(self.URL_HOST), {'maintenance': True, 'force': True},
version=APIVersion(0, 1))
self.assertStatus(200)
# exit maintenance mode
self._put('{}/node0'.format(self.URL_HOST), {'maintenance': True},
version=APIVersion(0, 1))
self.assertStatus(200)
self._put('{}/node1'.format(self.URL_HOST), {'maintenance': True},
version=APIVersion(0, 1))
self.assertStatus(200)
# maintenance without orchestrator service
with patch_orch(False):
self._put('{}/node0'.format(self.URL_HOST), {'maintenance': True},
version=APIVersion(0, 1))
self.assertStatus(503)
@mock.patch('dashboard.controllers.host.time')
def test_identify_device(self, mock_time):
url = '{}/host-0/identify_device'.format(self.URL_HOST)
with patch_orch(True) as fake_client:
payload = {
'device': '/dev/sdz',
'duration': '1'
}
self._task_post(url, payload)
self.assertStatus(200)
mock_time.sleep.assert_called()
calls = [
mock.call('host-0', '/dev/sdz', 'ident', True),
mock.call('host-0', '/dev/sdz', 'ident', False),
]
fake_client.blink_device_light.assert_has_calls(calls)
@mock.patch('dashboard.controllers.host.get_inventories')
def test_inventory(self, mock_get_inventories):
inventory_url = '{}/host-0/inventory'.format(self.URL_HOST)
with patch_orch(True):
tests = [
{
'url': inventory_url,
'inventories': [{'a': 'b'}],
'refresh': None,
'resp': {'a': 'b'}
},
{
'url': '{}?refresh=true'.format(inventory_url),
'inventories': [{'a': 'b'}],
'refresh': "true",
'resp': {'a': 'b'}
},
{
'url': inventory_url,
'inventories': [],
'refresh': None,
'resp': {}
},
]
for test in tests:
mock_get_inventories.reset_mock()
mock_get_inventories.return_value = test['inventories']
self._get(test['url'])
mock_get_inventories.assert_called_once_with(['host-0'], test['refresh'])
self.assertEqual(self.json_body(), test['resp'])
self.assertStatus(200)
# list without orchestrator service
with patch_orch(False):
self._get(inventory_url)
self.assertStatus(503)
def test_host_drain(self):
mgr.list_servers.return_value = []
orch_hosts = [
HostSpec('node0')
]
with patch_orch(True, hosts=orch_hosts):
self._put('{}/node0'.format(self.URL_HOST), {'drain': True},
version=APIVersion(0, 1))
self.assertStatus(200)
self.assertHeader('Content-Type',
'application/vnd.ceph.api.v0.1+json')
# maintenance without orchestrator service
with patch_orch(False):
self._put('{}/node0'.format(self.URL_HOST), {'drain': True},
version=APIVersion(0, 1))
self.assertStatus(503)
class HostUiControllerTest(ControllerTestCase):
URL_HOST = '/ui-api/host'
@classmethod
def setup_server(cls):
cls.setup_controllers([HostUi])
def test_labels(self):
orch_hosts = [
HostSpec('node1', labels=['foo']),
HostSpec('node2', labels=['foo', 'bar'])
]
with patch_orch(True, hosts=orch_hosts):
self._get('{}/labels'.format(self.URL_HOST))
self.assertStatus(200)
labels = self.json_body()
labels.sort()
self.assertListEqual(labels, ['bar', 'foo'])
@mock.patch('dashboard.controllers.host.get_inventories')
def test_inventory(self, mock_get_inventories):
inventory_url = '{}/inventory'.format(self.URL_HOST)
with patch_orch(True):
tests = [
{
'url': inventory_url,
'refresh': None
},
{
'url': '{}?refresh=true'.format(inventory_url),
'refresh': "true"
},
]
for test in tests:
mock_get_inventories.reset_mock()
mock_get_inventories.return_value = [{'a': 'b'}]
self._get(test['url'])
mock_get_inventories.assert_called_once_with(None, test['refresh'])
self.assertEqual(self.json_body(), [{'a': 'b'}])
self.assertStatus(200)
# list without orchestrator service
with patch_orch(False):
self._get(inventory_url)
self.assertStatus(503)
class TestHosts(unittest.TestCase):
def test_get_hosts(self):
mgr.list_servers.return_value = [{
'hostname': 'node1',
'services': []
}, {
'hostname': 'localhost',
'services': []
}]
orch_hosts = [
HostSpec('node1', labels=['foo', 'bar']),
HostSpec('node2', labels=['bar'])
]
with patch_orch(True, hosts=orch_hosts):
hosts = get_hosts()
self.assertEqual(len(hosts), 3)
checks = {
'localhost': {
'sources': {
'ceph': True,
'orchestrator': False
},
'labels': []
},
'node1': {
'sources': {
'ceph': True,
'orchestrator': True
},
'labels': ['bar', 'foo']
},
'node2': {
'sources': {
'ceph': False,
'orchestrator': True
},
'labels': ['bar']
}
}
for host in hosts:
hostname = host['hostname']
self.assertDictEqual(host['sources'], checks[hostname]['sources'])
self.assertListEqual(host['labels'], checks[hostname]['labels'])
@mock.patch('dashboard.controllers.host.mgr.get')
def test_get_device_osd_map(self, mgr_get):
mgr_get.side_effect = lambda key: {
'osd_metadata': {
'0': {
'hostname': 'node0',
'devices': 'nvme0n1,sdb',
},
'1': {
'hostname': 'node0',
'devices': 'nvme0n1,sdc',
},
'2': {
'hostname': 'node1',
'devices': 'sda',
},
'3': {
'hostname': 'node2',
'devices': '',
}
}
}[key]
device_osd_map = get_device_osd_map()
mgr.get.assert_called_with('osd_metadata')
# sort OSD IDs to make assertDictEqual work
for devices in device_osd_map.values():
for host in devices.keys():
devices[host] = sorted(devices[host])
self.assertDictEqual(device_osd_map, {
'node0': {
'nvme0n1': [0, 1],
'sdb': [0],
'sdc': [1],
},
'node1': {
'sda': [2]
}
})
@mock.patch('dashboard.controllers.host.str_to_bool')
@mock.patch('dashboard.controllers.host.get_device_osd_map')
def test_get_inventories(self, mock_get_device_osd_map, mock_str_to_bool):
mock_get_device_osd_map.return_value = {
'host-0': {
'nvme0n1': [1, 2],
'sdb': [1],
'sdc': [2]
},
'host-1': {
'sdb': [3]
}
}
inventory = [
{
'name': 'host-0',
'addr': '1.2.3.4',
'devices': [
{'path': 'nvme0n1'},
{'path': '/dev/sdb'},
{'path': '/dev/sdc'},
]
},
{
'name': 'host-1',
'addr': '1.2.3.5',
'devices': [
{'path': '/dev/sda'},
{'path': 'sdb'},
]
}
]
with patch_orch(True, inventory=inventory) as orch_client:
mock_str_to_bool.return_value = True
hosts = ['host-0', 'host-1']
inventories = get_inventories(hosts, 'true')
mock_str_to_bool.assert_called_with('true')
orch_client.inventory.list.assert_called_once_with(hosts=hosts, refresh=True)
self.assertEqual(len(inventories), 2)
host0 = inventories[0]
self.assertEqual(host0['name'], 'host-0')
self.assertEqual(host0['addr'], '1.2.3.4')
# devices should be sorted by path name, so
# /dev/sdb, /dev/sdc, nvme0n1
self.assertEqual(host0['devices'][0]['osd_ids'], [1])
self.assertEqual(host0['devices'][1]['osd_ids'], [2])
self.assertEqual(host0['devices'][2]['osd_ids'], [1, 2])
host1 = inventories[1]
self.assertEqual(host1['name'], 'host-1')
self.assertEqual(host1['addr'], '1.2.3.5')
# devices should be sorted by path name, so
# /dev/sda, sdb
self.assertEqual(host1['devices'][0]['osd_ids'], [])
self.assertEqual(host1['devices'][1]['osd_ids'], [3])
| 21,103 | 34.173333 | 97 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_iscsi.py | # pylint: disable=too-many-public-methods, too-many-lines
import copy
import errno
import json
import unittest
try:
import mock
except ImportError:
import unittest.mock as mock
from mgr_module import ERROR_MSG_NO_INPUT_FILE
from .. import mgr
from ..controllers.iscsi import Iscsi, IscsiTarget, IscsiUi
from ..exceptions import DashboardException
from ..rest_client import RequestException
from ..services.exception import handle_request_error
from ..services.iscsi_client import IscsiClient
from ..services.orchestrator import OrchClient
from ..tests import CLICommandTestMixin, CmdException, ControllerTestCase, KVStoreMockMixin
from ..tools import NotificationQueue, TaskManager
class IscsiTestCli(unittest.TestCase, CLICommandTestMixin):
def setUp(self):
self.mock_kv_store()
# pylint: disable=protected-access
IscsiClientMock._instance = IscsiClientMock()
IscsiClient.instance = IscsiClientMock.instance
def test_cli_add_gateway_invalid_url(self):
with self.assertRaises(CmdException) as ctx:
self.exec_cmd('iscsi-gateway-add', name='node1',
inbuf='http:/hello.com')
self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
self.assertEqual(str(ctx.exception),
"Invalid service URL 'http:/hello.com'. Valid format: "
"'<scheme>://<username>:<password>@<host>[:port]'.")
def test_cli_add_gateway_empty_url(self):
with self.assertRaises(CmdException) as ctx:
self.exec_cmd('iscsi-gateway-add', name='node1',
inbuf='')
self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
self.assertIn(ERROR_MSG_NO_INPUT_FILE, str(ctx.exception))
def test_cli_add_gateway(self):
self.exec_cmd('iscsi-gateway-add', name='node1',
inbuf='https://admin:admin@10.17.5.1:5001')
self.exec_cmd('iscsi-gateway-add', name='node2',
inbuf='https://admin:admin@10.17.5.2:5001')
iscsi_config = json.loads(self.get_key("_iscsi_config"))
self.assertEqual(iscsi_config['gateways'], {
'node1': {
'service_url': 'https://admin:admin@10.17.5.1:5001'
},
'node2': {
'service_url': 'https://admin:admin@10.17.5.2:5001'
}
})
def test_cli_remove_gateway(self):
self.test_cli_add_gateway()
self.exec_cmd('iscsi-gateway-rm', name='node1')
iscsi_config = json.loads(self.get_key("_iscsi_config"))
self.assertEqual(iscsi_config['gateways'], {
'node2': {
'service_url': 'https://admin:admin@10.17.5.2:5001'
}
})
class IscsiTestController(ControllerTestCase, KVStoreMockMixin):
@classmethod
def setup_server(cls):
NotificationQueue.start_queue()
TaskManager.init()
OrchClient.instance().available = lambda: False
mgr.rados.side_effect = None
cls.setup_controllers([Iscsi, IscsiTarget])
@classmethod
def tearDownClass(cls):
NotificationQueue.stop()
def setUp(self):
self.mock_kv_store()
self.CONFIG_KEY_DICT['_iscsi_config'] = '''
{
"gateways": {
"node1": {
"service_url": "https://admin:admin@10.17.5.1:5001"
},
"node2": {
"service_url": "https://admin:admin@10.17.5.2:5001"
}
}
}
'''
# pylint: disable=protected-access
IscsiClientMock._instance = IscsiClientMock()
IscsiClient.instance = IscsiClientMock.instance
def test_enable_discoveryauth(self):
discoveryauth = {
'user': 'myiscsiusername',
'password': 'myiscsipassword',
'mutual_user': 'myiscsiusername2',
'mutual_password': 'myiscsipassword2'
}
self._put('/api/iscsi/discoveryauth', discoveryauth)
self.assertStatus(200)
self.assertJsonBody(discoveryauth)
self._get('/api/iscsi/discoveryauth')
self.assertStatus(200)
self.assertJsonBody(discoveryauth)
def test_bad_discoveryauth(self):
discoveryauth = {
'user': 'myiscsiusername',
'password': 'myiscsipasswordmyiscsipasswordmyiscsipassword',
'mutual_user': '',
'mutual_password': ''
}
put_response = {
'detail': 'Bad authentication',
'code': 'target_bad_auth',
'component': 'iscsi'
}
get_response = {
'user': '',
'password': '',
'mutual_user': '',
'mutual_password': ''
}
self._put('/api/iscsi/discoveryauth', discoveryauth)
self.assertStatus(400)
self.assertJsonBody(put_response)
self._get('/api/iscsi/discoveryauth')
self.assertStatus(200)
self.assertJsonBody(get_response)
def test_disable_discoveryauth(self):
discoveryauth = {
'user': '',
'password': '',
'mutual_user': '',
'mutual_password': ''
}
self._put('/api/iscsi/discoveryauth', discoveryauth)
self.assertStatus(200)
self.assertJsonBody(discoveryauth)
self._get('/api/iscsi/discoveryauth')
self.assertStatus(200)
self.assertJsonBody(discoveryauth)
def test_list_empty(self):
self._get('/api/iscsi/target')
self.assertStatus(200)
self.assertJsonBody([])
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_list(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw1"
request = copy.deepcopy(iscsi_target_request)
request['target_iqn'] = target_iqn
self._task_post('/api/iscsi/target', request)
self.assertStatus(201)
self._get('/api/iscsi/target')
self.assertStatus(200)
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
self.assertJsonBody([response])
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_create(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw2"
request = copy.deepcopy(iscsi_target_request)
request['target_iqn'] = target_iqn
self._task_post('/api/iscsi/target', request)
self.assertStatus(201)
self._get('/api/iscsi/target/{}'.format(request['target_iqn']))
self.assertStatus(200)
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
self.assertJsonBody(response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_create_acl_enabled(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw2"
request = copy.deepcopy(iscsi_target_request)
request['target_iqn'] = target_iqn
request['acl_enabled'] = False
self._task_post('/api/iscsi/target', request)
self.assertStatus(201)
self._get('/api/iscsi/target/{}'.format(request['target_iqn']))
self.assertStatus(200)
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['acl_enabled'] = False
self.assertJsonBody(response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._create')
def test_create_error(self, _create_mock):
# pylint: disable=protected-access
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw2"
request = copy.deepcopy(iscsi_target_request)
request['target_iqn'] = target_iqn
request['config'] = ""
request['settings'] = ""
request['task_progress_begin'] = 0
request['task_progress_end'] = 100
_create_mock.side_effect = RequestException("message error")
with self.assertRaises(DashboardException):
with handle_request_error('iscsi'):
IscsiTarget._create(**request)
def test_validate_error_iqn(self):
# pylint: disable=protected-access
with self.assertRaises(DashboardException) as ctx:
IscsiTarget._validate(None, None, None, None, None, None)
self.assertEquals(ctx.exception.__str__(),
"Target IQN is required")
def test_validate_error_portals(self):
# pylint: disable=protected-access
target_iqn = iscsi_target_request['target_iqn']
target_controls = iscsi_target_request['target_controls']
portals = {}
disks = iscsi_target_request['disks']
groups = iscsi_target_request['groups']
settings = {'config': {'minimum_gateways': 1}}
with self.assertRaises(DashboardException) as ctx:
IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
self.assertEquals(ctx.exception.__str__(),
"At least one portal is required")
settings = {'config': {'minimum_gateways': 2}}
with self.assertRaises(DashboardException) as ctx:
IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
self.assertEquals(ctx.exception.__str__(),
"At least 2 portals are required")
def test_validate_error_target_control(self):
# pylint: disable=protected-access
target_iqn = iscsi_target_request['target_iqn']
target_controls = {
'target_name': 0
}
portals = iscsi_target_request['portals']
disks = iscsi_target_request['disks']
groups = iscsi_target_request['groups']
settings = {
'config': {'minimum_gateways': 1},
'target_controls_limits': {
'target_name': {
'min': 1,
'max': 2,
}
}
}
with self.assertRaises(DashboardException) as ctx:
IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
self.assertEquals(ctx.exception.__str__(),
"Target control target_name must be >= 1")
target_controls = {
'target_name': 3
}
with self.assertRaises(DashboardException) as ctx:
IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
self.assertEquals(ctx.exception.__str__(),
"Target control target_name must be <= 2")
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_validate_error_disk_control(self, _validate_image_mock):
# pylint: disable=protected-access
target_iqn = iscsi_target_request['target_iqn']
target_controls = {}
portals = iscsi_target_request['portals']
disks = iscsi_target_request['disks']
groups = iscsi_target_request['groups']
settings = {
'config': {'minimum_gateways': 1},
'required_rbd_features': {
'user:rbd': 0
},
'unsupported_rbd_features': {
'user:rbd': 0
},
'disk_controls_limits': {
'user:rbd': {'max_data_area_mb': {
'min': 129,
'max': 127,
}}
}
}
with self.assertRaises(DashboardException) as ctx:
IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
self.assertEquals(ctx.exception.__str__(),
"Disk control max_data_area_mb must be >= 129")
settings['disk_controls_limits']['user:rbd']['max_data_area_mb']['min'] = 1
with self.assertRaises(DashboardException) as ctx:
IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
self.assertEquals(ctx.exception.__str__(),
"Disk control max_data_area_mb must be <= 127")
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_delete(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw3"
request = copy.deepcopy(iscsi_target_request)
request['target_iqn'] = target_iqn
self._task_post('/api/iscsi/target', request)
self.assertStatus(201)
self._task_delete('/api/iscsi/target/{}'.format(request['target_iqn']))
self.assertStatus(204)
self._get('/api/iscsi/target')
self.assertStatus(200)
self.assertJsonBody([])
@mock.patch('dashboard.tools.TaskManager.current_task')
def test_delete_raises_exception(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw3"
request = copy.deepcopy(iscsi_target_request)
request['target_iqn'] = target_iqn
configs = {'targets': {target_iqn: {'portals': {}}}}
with self.assertRaises(DashboardException):
# pylint: disable=protected-access
IscsiTarget._delete(target_iqn, configs, 0, 100)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_add_client(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw4"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['clients'].append(
{
"luns": [{"image": "lun1", "pool": "rbd"}],
"client_iqn": "iqn.1994-05.com.redhat:rh7-client3",
"auth": {
"password": "myiscsipassword5",
"user": "myiscsiusername5",
"mutual_password": "myiscsipassword6",
"mutual_user": "myiscsiusername6"}
})
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['clients'].append(
{
"luns": [{"image": "lun1", "pool": "rbd"}],
"client_iqn": "iqn.1994-05.com.redhat:rh7-client3",
"auth": {
"password": "myiscsipassword5",
"user": "myiscsiusername5",
"mutual_password": "myiscsipassword6",
"mutual_user": "myiscsiusername6"},
"info": {
"alias": "",
"ip_address": [],
"state": {}
}
})
self._update_iscsi_target(create_request, update_request, 200, None, response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_add_bad_client(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw4"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['clients'].append(
{
"luns": [{"image": "lun1", "pool": "rbd"}],
"client_iqn": "iqn.1994-05.com.redhat:rh7-client4",
"auth": {
"password": "myiscsipassword7myiscsipassword7myiscsipasswo",
"user": "myiscsiusername7",
"mutual_password": "myiscsipassword8",
"mutual_user": "myiscsiusername8"}
})
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
self._task_post('/api/iscsi/target', create_request)
self.assertStatus(201)
self._task_put('/api/iscsi/target/{}'.format(create_request['target_iqn']), update_request)
self.assertStatus(400)
self._get('/api/iscsi/target/{}'.format(update_request['new_target_iqn']))
self.assertStatus(200)
self.assertJsonBody(response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_change_client_password(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw5"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['clients'][0]['auth']['password'] = 'MyNewPassword'
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['clients'][0]['auth']['password'] = 'MyNewPassword'
self._update_iscsi_target(create_request, update_request, 200, None, response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_rename_client(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw6"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['clients'][0]['client_iqn'] = 'iqn.1994-05.com.redhat:rh7-client0'
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['clients'][0]['client_iqn'] = 'iqn.1994-05.com.redhat:rh7-client0'
self._update_iscsi_target(create_request, update_request, 200, None, response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_add_disk(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw7"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['disks'].append(
{
"image": "lun3",
"pool": "rbd",
"controls": {},
"backstore": "user:rbd"
})
update_request['clients'][0]['luns'].append({"image": "lun3", "pool": "rbd"})
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['disks'].append(
{
"image": "lun3",
"pool": "rbd",
"controls": {},
"backstore": "user:rbd",
"wwn": "64af6678-9694-4367-bacc-f8eb0baa2",
"lun": 2
})
response['clients'][0]['luns'].append({"image": "lun3", "pool": "rbd"})
self._update_iscsi_target(create_request, update_request, 200, None, response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_change_disk_image(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw8"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['disks'][0]['image'] = 'lun0'
update_request['clients'][0]['luns'][0]['image'] = 'lun0'
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['disks'][0]['image'] = 'lun0'
response['clients'][0]['luns'][0]['image'] = 'lun0'
self._update_iscsi_target(create_request, update_request, 200, None, response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_change_disk_controls(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw9"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['disks'][0]['controls'] = {"qfull_timeout": 15}
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['disks'][0]['controls'] = {"qfull_timeout": 15}
self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_rename_target(self, _validate_image_mock):
        """Renaming a target: the update carries a different new_target_iqn and
        the target must afterwards be retrievable under the new IQN."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw10"
        new_target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw11"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = new_target_iqn
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = new_target_iqn
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_rename_group(self, _validate_image_mock):
        """Renaming an initiator group keeps its disks/members intact."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw12"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['groups'][0]['group_id'] = 'mygroup0'
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        response['groups'][0]['group_id'] = 'mygroup0'
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_add_client_to_group(self, _validate_image_mock):
        """Adding a new initiator and making it a member of an existing group."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw13"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['clients'].append(
            {
                "luns": [],
                "client_iqn": "iqn.1994-05.com.redhat:rh7-client3",
                "auth": {
                    "password": None,
                    "user": None,
                    "mutual_password": None,
                    "mutual_user": None}
            })
        update_request['groups'][0]['members'].append('iqn.1994-05.com.redhat:rh7-client3')
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        # The response additionally carries the 'info' section the controller
        # adds per client (alias/IPs/login state).
        response['clients'].append(
            {
                "luns": [],
                "client_iqn": "iqn.1994-05.com.redhat:rh7-client3",
                "auth": {
                    "password": None,
                    "user": None,
                    "mutual_password": None,
                    "mutual_user": None},
                "info": {
                    "alias": "",
                    "ip_address": [],
                    "state": {}
                }
            })
        response['groups'][0]['members'].append('iqn.1994-05.com.redhat:rh7-client3')
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_remove_client_from_group(self, _validate_image_mock):
        """Removing a member from a group leaves the client itself in place."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw14"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['groups'][0]['members'].remove('iqn.1994-05.com.redhat:rh7-client2')
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        response['groups'][0]['members'].remove('iqn.1994-05.com.redhat:rh7-client2')
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_remove_groups(self, _validate_image_mock):
        """Deleting every group from the target."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw15"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['groups'] = []
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        response['groups'] = []
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_add_client_to_multiple_groups(self, _validate_image_mock):
        """Creating a target where one initiator belongs to two groups must be
        rejected with a dedicated error code."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw16"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        # Duplicate the existing group (same member list) under another group_id.
        create_request['groups'].append(copy.deepcopy(create_request['groups'][0]))
        create_request['groups'][1]['group_id'] = 'mygroup2'
        self._task_post('/api/iscsi/target', create_request)
        self.assertStatus(400)
        self.assertJsonBody({
            'detail': 'Each initiator can only be part of 1 group at a time',
            'code': 'initiator_in_multiple_groups',
            'component': 'iscsi'
        })
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_remove_client_lun(self, _validate_image_mock):
        """Removing one LUN ('lun2') from a client that maps three."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw17"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        create_request['clients'][0]['luns'] = [
            {"image": "lun1", "pool": "rbd"},
            {"image": "lun2", "pool": "rbd"},
            {"image": "lun3", "pool": "rbd"}
        ]
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['clients'][0]['luns'] = [
            {"image": "lun1", "pool": "rbd"},
            {"image": "lun3", "pool": "rbd"}
        ]
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        response['clients'][0]['luns'] = [
            {"image": "lun1", "pool": "rbd"},
            {"image": "lun3", "pool": "rbd"}
        ]
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_change_client_auth(self, _validate_image_mock):
        """Changing a client's CHAP password is applied and echoed back."""
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw18"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['clients'][0]['auth']['password'] = 'myiscsipasswordX'
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        response['clients'][0]['auth']['password'] = 'myiscsipasswordX'
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_remove_client_logged_in(self, _validate_image_mock):
        """Deleting an initiator that is currently logged in must be refused
        (HTTP 400, code 'client_logged_in')."""
        client_info = {
            'alias': '',
            'ip_address': [],
            'state': {'LOGGED_IN': ['node1']}
        }
        # Make the shared mock report every client as logged in on node1.
        # pylint: disable=protected-access
        IscsiClientMock._instance.clientinfo = client_info
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw19"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['clients'].pop(0)
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        # The final GET still contains the client, annotated with the
        # logged-in info.
        for client in response['clients']:
            client['info'] = client_info
        update_response = {
            'detail': "Client 'iqn.1994-05.com.redhat:rh7-client' cannot be deleted until it's "
                      "logged out",
            'code': 'client_logged_in',
            'component': 'iscsi'
        }
        self._update_iscsi_target(create_request, update_request, 400, update_response, response)
@mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
def test_remove_client(self, _validate_image_mock):
target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw20"
create_request = copy.deepcopy(iscsi_target_request)
create_request['target_iqn'] = target_iqn
update_request = copy.deepcopy(create_request)
update_request['new_target_iqn'] = target_iqn
update_request['clients'].pop(0)
response = copy.deepcopy(iscsi_target_response)
response['target_iqn'] = target_iqn
response['clients'].pop(0)
self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_add_image_to_group_with_client_logged_in(self, _validate_image_mock):
        """Adding a disk to a group is allowed even while members are logged in."""
        client_info = {
            'alias': '',
            'ip_address': [],
            'state': {'LOGGED_IN': ['node1']}
        }
        new_disk = {"pool": "rbd", "image": "lun1"}
        # pylint: disable=protected-access
        IscsiClientMock._instance.clientinfo = client_info
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw21"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['groups'][0]['disks'].append(new_disk)
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        # insert(0, ...): the response lists 'lun1' before the pre-existing
        # 'lun2' — presumably the controller orders group disks; confirm
        # against IscsiTarget's response building.
        response['groups'][0]['disks'].insert(0, new_disk)
        for client in response['clients']:
            client['info'] = client_info
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_add_image_to_initiator_with_client_logged_in(self, _validate_image_mock):
        """Adding a LUN directly to a logged-in initiator is allowed."""
        client_info = {
            'alias': '',
            'ip_address': [],
            'state': {'LOGGED_IN': ['node1']}
        }
        new_disk = {"pool": "rbd", "image": "lun2"}
        # pylint: disable=protected-access
        IscsiClientMock._instance.clientinfo = client_info
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw22"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['clients'][0]['luns'].append(new_disk)
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        response['clients'][0]['luns'].append(new_disk)
        for client in response['clients']:
            client['info'] = client_info
        self._update_iscsi_target(create_request, update_request, 200, None, response)
    @mock.patch('dashboard.controllers.iscsi.IscsiTarget._validate_image')
    def test_remove_image_from_group_with_client_logged_in(self, _validate_image_mock):
        """Removing every disk from a group is allowed while members are logged in."""
        client_info = {
            'alias': '',
            'ip_address': [],
            'state': {'LOGGED_IN': ['node1']}
        }
        # pylint: disable=protected-access
        IscsiClientMock._instance.clientinfo = client_info
        target_iqn = "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw23"
        create_request = copy.deepcopy(iscsi_target_request)
        create_request['target_iqn'] = target_iqn
        update_request = copy.deepcopy(create_request)
        update_request['new_target_iqn'] = target_iqn
        update_request['groups'][0]['disks'] = []
        response = copy.deepcopy(iscsi_target_response)
        response['target_iqn'] = target_iqn
        response['groups'][0]['disks'] = []
        for client in response['clients']:
            client['info'] = client_info
        self._update_iscsi_target(create_request, update_request, 200, None, response)
def _update_iscsi_target(self, create_request, update_request, update_response_code,
update_response, response):
self._task_post('/api/iscsi/target', create_request)
self.assertStatus(201)
self._task_put(
'/api/iscsi/target/{}'.format(create_request['target_iqn']), update_request)
self.assertStatus(update_response_code)
self.assertJsonBody(update_response)
self._get(
'/api/iscsi/target/{}'.format(update_request['new_target_iqn']))
self.assertStatus(200)
self.assertJsonBody(response)
class TestIscsiUi(ControllerTestCase):
    """Tests for the /ui-api/iscsi/overview endpoint."""

    @classmethod
    def setup_server(cls):
        cls.setup_controllers([IscsiUi])

    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_image_info')
    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_iscsi_info')
    def test_overview(self, get_iscsi_info_mock, get_image_info_mock):
        """Overview without any gateway configuration: no images are reported."""
        get_iscsi_info_mock.return_value = None
        get_image_info_mock.return_value = None
        response = copy.deepcopy(iscsiui_response)
        response['images'] = []
        self._get('/ui-api/iscsi/overview')
        self.assertStatus(200)
        self.assertJsonBody(response)

    @mock.patch('dashboard.services.iscsi_config.IscsiGatewaysConfig.get_gateways_config')
    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_image_info')
    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_iscsi_info')
    def test_overview_config(self, get_iscsi_info_mock, get_image_info_mock,
                             get_gateways_config_mock):
        """Gateway counters fall back to 'n/a' when fetching the gateway
        config raises a RequestException."""
        get_iscsi_info_mock.return_value = None
        get_image_info_mock.return_value = None
        response = copy.deepcopy(iscsiui_response)
        response['images'] = []
        get_gateways_config_mock.return_value = iscsiui_gateways_config_mock
        self._get('/ui-api/iscsi/overview')
        self.assertStatus(200)
        self.assertJsonBody(response)
        def raise_ex(self):
            raise RequestException('error')
        # Monkey-patch the shared mock class so every gateway's get_config
        # call fails; restore the original method at the end.
        config_method = IscsiClientMock.get_config
        IscsiClientMock.get_config = raise_ex
        response['gateways'][0]['num_sessions'] = 'n/a'
        response['gateways'][1]['num_sessions'] = 'n/a'
        response['gateways'][0]['num_targets'] = 'n/a'
        response['gateways'][1]['num_targets'] = 'n/a'
        self._get('/ui-api/iscsi/overview')
        self.assertStatus(200)
        self.assertJsonBody(response)
        IscsiClientMock.get_config = config_method

    @mock.patch('dashboard.services.iscsi_config.IscsiGatewaysConfig.get_gateways_config')
    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_image_info')
    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_iscsi_info')
    def test_overview_ping(self, get_iscsi_info_mock, get_image_info_mock,
                           get_gateways_config_mock):
        """Gateways are reported 'down' with 'n/a' sessions when ping fails."""
        get_iscsi_info_mock.return_value = None
        get_image_info_mock.return_value = None
        get_gateways_config_mock.return_value = iscsiui_gateways_config_mock
        response = copy.deepcopy(iscsiui_response)
        response['gateways'][0]['num_sessions'] = 0
        response['gateways'][1]['num_sessions'] = 0
        response['gateways'][0]['num_targets'] = 0
        response['gateways'][1]['num_targets'] = 0
        self._get('/ui-api/iscsi/overview')
        self.assertStatus(200)
        self.assertJsonBody(response)
        def raise_ex(self):
            raise RequestException('error')
        # Same monkey-patch pattern as above, this time failing ping().
        ping_method = IscsiClientMock.ping
        IscsiClientMock.ping = raise_ex
        response['gateways'][0]['num_sessions'] = 'n/a'
        response['gateways'][1]['num_sessions'] = 'n/a'
        response['gateways'][0]['state'] = 'down'
        response['gateways'][1]['state'] = 'down'
        self._get('/ui-api/iscsi/overview')
        self.assertStatus(200)
        self.assertJsonBody(response)
        IscsiClientMock.ping = ping_method

    @mock.patch(
        'dashboard.services.iscsi_config.IscsiGatewaysConfig.get_gateways_config')
    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_image_info')
    @mock.patch('dashboard.services.tcmu_service.TcmuService.get_iscsi_info')
    def test_overview_images_info(self, get_iscsi_info_mock, get_image_info_mock,
                                  get_gateways_config_mock):
        """Per-image tcmu-runner stats from TcmuService are merged into the
        overview's image entries."""
        get_iscsi_info_mock.return_value = None
        image_info = {"optimized_since": "1616735075", "stats": {}, "stats_history": {}}
        # Seed the shared mock's config with two disks carrying stats.
        # pylint: disable=protected-access
        IscsiClientMock._instance.config['disks'] = {
            1: {"image": "lun1", "pool": "rbd", "backstore": "user:rbd",
                "optimized_since": "1616735075", "stats": {}, "stats_history": {}},
            2: {"image": "lun2", "pool": "rbd", "backstore": "user:rbd",
                "optimized_since": "1616735075", "stats": {}, "stats_history": {}},
        }
        response = copy.deepcopy(iscsiui_response)
        response['images'][0]['optimized_since'] = '1616735075'
        response['images'][1]['optimized_since'] = '1616735075'
        response['images'][0]['stats'] = {}
        response['images'][1]['stats'] = {}
        response['images'][0]['stats_history'] = {}
        response['images'][1]['stats_history'] = {}
        get_gateways_config_mock.return_value = iscsiui_gateways_config_mock
        get_image_info_mock.return_value = image_info
        self._get('/ui-api/iscsi/overview')
        self.assertStatus(200)
        self.assertJsonBody(response)
# Gateways/disks configuration returned by the mocked
# IscsiGatewaysConfig.get_gateways_config() in the IscsiUi tests.
iscsiui_gateways_config_mock = {
    'gateways': {
        'node1': None,
        'node2': None,
    },
    'disks': {
        1: {"image": "lun1", "pool": "rbd", "backstore": "user:rbd",
            "controls": {"max_data_area_mb": 128}},
        2: {"image": "lun2", "pool": "rbd", "backstore": "user:rbd",
            "controls": {"max_data_area_mb": 128}}
    }
}

# Baseline body expected from GET /ui-api/iscsi/overview; tests deepcopy it
# and tweak the fields relevant to each scenario.
iscsiui_response = {
    "gateways": [
        {"name": "node1", "state": "up", "num_targets": 0, "num_sessions": 0},
        {"name": "node2", "state": "up", "num_targets": 0, "num_sessions": 0}
    ],
    "images": [
        {
            'pool': 'rbd',
            'image': 'lun1',
            'backstore': 'user:rbd',
            'optimized_since': None,
            'stats': None,
            'stats_history': None
        },
        {
            'pool': 'rbd',
            'image': 'lun2',
            'backstore': 'user:rbd',
            'optimized_since': None,
            'stats': None,
            'stats_history': None
        }
    ]
}

# Canonical request payload for POST /api/iscsi/target; every test deepcopies
# it and substitutes a unique target IQN so the shared mock config stays
# independent across tests.
iscsi_target_request = {
    "target_iqn": "iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw",
    "portals": [
        {"ip": "192.168.100.202", "host": "node2"},
        {"ip": "10.0.2.15", "host": "node2"},
        {"ip": "192.168.100.203", "host": "node3"}
    ],
    "disks": [
        {"image": "lun1", "pool": "rbd", "backstore": "user:rbd",
         "controls": {"max_data_area_mb": 128}},
        {"image": "lun2", "pool": "rbd", "backstore": "user:rbd",
         "controls": {"max_data_area_mb": 128}}
    ],
    "clients": [
        {
            "luns": [{"image": "lun1", "pool": "rbd"}],
            "client_iqn": "iqn.1994-05.com.redhat:rh7-client",
            "auth": {
                "password": "myiscsipassword1",
                "user": "myiscsiusername1",
                "mutual_password": "myiscsipassword2",
                "mutual_user": "myiscsiusername2"}
        },
        {
            "luns": [],
            "client_iqn": "iqn.1994-05.com.redhat:rh7-client2",
            "auth": {
                "password": "myiscsipassword3",
                "user": "myiscsiusername3",
                "mutual_password": "myiscsipassword4",
                "mutual_user": "myiscsiusername4"
            }
        }
    ],
    "acl_enabled": True,
    "auth": {
        "password": "",
        "user": "",
        "mutual_password": "",
        "mutual_user": ""},
    "target_controls": {},
    "groups": [
        {
            "group_id": "mygroup",
            "disks": [{"pool": "rbd", "image": "lun2"}],
            "members": ["iqn.1994-05.com.redhat:rh7-client2"]
        }
    ]
}

# Expected GET body for a target created from iscsi_target_request. Note the
# generated 'wwn'/'lun' fields on disks and the per-client 'info' section the
# controller adds.
iscsi_target_response = {
    'target_iqn': 'iqn.2003-01.com.redhat.iscsi-gw:iscsi-igw',
    'portals': [
        {'host': 'node2', 'ip': '10.0.2.15'},
        {'host': 'node2', 'ip': '192.168.100.202'},
        {'host': 'node3', 'ip': '192.168.100.203'}
    ],
    'disks': [
        {'pool': 'rbd', 'image': 'lun1', 'backstore': 'user:rbd',
         'wwn': '64af6678-9694-4367-bacc-f8eb0baa0', 'lun': 0,
         'controls': {'max_data_area_mb': 128}},
        {'pool': 'rbd', 'image': 'lun2', 'backstore': 'user:rbd',
         'wwn': '64af6678-9694-4367-bacc-f8eb0baa1', 'lun': 1,
         'controls': {'max_data_area_mb': 128}}
    ],
    'clients': [
        {
            'client_iqn': 'iqn.1994-05.com.redhat:rh7-client',
            'luns': [{'pool': 'rbd', 'image': 'lun1'}],
            'auth': {
                'user': 'myiscsiusername1',
                'password': 'myiscsipassword1',
                'mutual_password': 'myiscsipassword2',
                'mutual_user': 'myiscsiusername2'
            },
            'info': {
                'alias': '',
                'ip_address': [],
                'state': {}
            }
        },
        {
            'client_iqn': 'iqn.1994-05.com.redhat:rh7-client2',
            'luns': [],
            'auth': {
                'user': 'myiscsiusername3',
                'password': 'myiscsipassword3',
                'mutual_password': 'myiscsipassword4',
                'mutual_user': 'myiscsiusername4'
            },
            'info': {
                'alias': '',
                'ip_address': [],
                'state': {}
            }
        }
    ],
    "acl_enabled": True,
    "auth": {
        "password": "",
        "user": "",
        "mutual_password": "",
        "mutual_user": ""},
    'groups': [
        {
            'group_id': 'mygroup',
            'disks': [{'pool': 'rbd', 'image': 'lun2'}],
            'members': ['iqn.1994-05.com.redhat:rh7-client2']
        }
    ],
    'target_controls': {},
    'info': {
        'num_sessions': 0
    }
}
class IscsiClientMock(object):
    """In-memory stand-in for the iSCSI gateway REST client.

    A single shared instance keeps the whole gateway configuration in
    ``self.config`` (mirroring the rbd-target-api schema) and mutates it
    the way the corresponding REST calls would.
    """
    # Shared singleton returned by instance().
    _instance = None

    def __init__(self):
        self.gateway_name = None
        self.service_url = None
        # Empty gateway configuration, following the rbd-target-api layout.
        self.config = {
            "created": "2019/01/17 08:57:16",
            "discovery_auth": {
                "username": "",
                "password": "",
                "password_encryption_enabled": False,
                "mutual_username": "",
                "mutual_password": "",
                "mutual_password_encryption_enabled": False
            },
            "disks": {},
            "epoch": 0,
            "gateways": {},
            "targets": {},
            "updated": "",
            "version": 11
        }
        # Returned by get_clientinfo(); tests override it to simulate
        # logged-in initiators.
        self.clientinfo = {
            'alias': '',
            'ip_address': [],
            'state': {}
        }

    @classmethod
    def instance(cls, gateway_name=None, service_url=None):
        """Return the shared instance, recording which gateway/URL was asked for."""
        cls._instance.gateway_name = gateway_name
        cls._instance.service_url = service_url
        # pylint: disable=unused-argument
        return cls._instance

    def ping(self):
        return {
            "message": "pong"
        }

    def get_settings(self):
        """Static gateway settings/defaults as rbd-target-api would report them."""
        return {
            "api_version": 2,
            "backstores": [
                "user:rbd"
            ],
            "config": {
                "minimum_gateways": 2
            },
            "default_backstore": "user:rbd",
            "required_rbd_features": {
                "rbd": 0,
                "user:rbd": 4,
            },
            "unsupported_rbd_features": {
                "rbd": 88,
                "user:rbd": 0,
            },
            "disk_default_controls": {
                "user:rbd": {
                    "hw_max_sectors": 1024,
                    "max_data_area_mb": 8,
                    "osd_op_timeout": 30,
                    "qfull_timeout": 5
                }
            },
            "target_default_controls": {
                "cmdsn_depth": 128,
                "dataout_timeout": 20,
                "first_burst_length": 262144,
                "immediate_data": "Yes",
                "initial_r2t": "Yes",
                "max_burst_length": 524288,
                "max_outstanding_r2t": 1,
                "max_recv_data_segment_length": 262144,
                "max_xmit_data_segment_length": 262144,
                "nopin_response_timeout": 5,
                "nopin_timeout": 5
            }
        }

    def get_config(self):
        # Deep copy so callers cannot mutate the mock's state behind its back.
        return copy.deepcopy(self.config)

    def create_target(self, target_iqn, target_controls):
        """Register an empty target entry under *target_iqn*."""
        self.config['targets'][target_iqn] = {
            "clients": {},
            "acl_enabled": True,
            "auth": {
                "username": "",
                "password": "",
                "password_encryption_enabled": False,
                "mutual_username": "",
                "mutual_password": "",
                "mutual_password_encryption_enabled": False
            },
            "controls": target_controls,
            "created": "2019/01/17 09:22:34",
            "disks": {},
            "groups": {},
            "portals": {}
        }

    def create_gateway(self, target_iqn, gateway_name, ip_addresses):
        """Add a portal (gateway + its IPs) to the target."""
        target_config = self.config['targets'][target_iqn]
        if 'ip_list' not in target_config:
            target_config['ip_list'] = []
        target_config['ip_list'] += ip_addresses
        target_config['portals'][gateway_name] = {
            "portal_ip_addresses": ip_addresses
        }

    def delete_gateway(self, target_iqn, gateway_name):
        """Remove a portal and its IPs from the target."""
        target_config = self.config['targets'][target_iqn]
        portal_config = target_config['portals'][gateway_name]
        for ip in portal_config['portal_ip_addresses']:
            target_config['ip_list'].remove(ip)
        target_config['portals'].pop(gateway_name)

    def create_disk(self, pool, image, backstore, wwn):
        """Register a disk; generates a deterministic WWN when none is given."""
        if wwn is None:
            wwn = '64af6678-9694-4367-bacc-f8eb0baa' + str(len(self.config['disks']))
        image_id = '{}/{}'.format(pool, image)
        self.config['disks'][image_id] = {
            "pool": pool,
            "image": image,
            "backstore": backstore,
            "controls": {},
            "wwn": wwn
        }

    def create_target_lun(self, target_iqn, image_id, lun):
        """Map a disk into the target; LUN id defaults to the next free slot.

        The disk's owner becomes the target's first portal.
        """
        target_config = self.config['targets'][target_iqn]
        if lun is None:
            lun = len(target_config['disks'])
        target_config['disks'][image_id] = {
            "lun_id": lun
        }
        self.config['disks'][image_id]['owner'] = list(target_config['portals'].keys())[0]

    def reconfigure_disk(self, pool, image, controls):
        """Store only the controls that differ from the backstore defaults."""
        image_id = '{}/{}'.format(pool, image)
        settings = self.get_settings()
        backstore = self.config['disks'][image_id]['backstore']
        disk_default_controls = settings['disk_default_controls'][backstore]
        new_controls = {}
        for control_k, control_v in controls.items():
            if control_v != disk_default_controls[control_k]:
                new_controls[control_k] = control_v
        self.config['disks'][image_id]['controls'] = new_controls

    def create_client(self, target_iqn, client_iqn):
        """Add an initiator with empty auth and no LUNs."""
        target_config = self.config['targets'][target_iqn]
        target_config['clients'][client_iqn] = {
            "auth": {
                "username": "",
                "password": "",
                "password_encryption_enabled": False,
                "mutual_username": "",
                "mutual_password": "",
                "mutual_password_encryption_enabled": False
            },
            "group_name": "",
            "luns": {}
        }

    def create_client_lun(self, target_iqn, client_iqn, image_id):
        target_config = self.config['targets'][target_iqn]
        target_config['clients'][client_iqn]['luns'][image_id] = {}

    def delete_client_lun(self, target_iqn, client_iqn, image_id):
        target_config = self.config['targets'][target_iqn]
        del target_config['clients'][client_iqn]['luns'][image_id]

    def create_client_auth(self, target_iqn, client_iqn, user, password, m_user, m_password):
        """Set the CHAP (and mutual CHAP) credentials of an initiator."""
        target_config = self.config['targets'][target_iqn]
        target_config['clients'][client_iqn]['auth']['username'] = user
        target_config['clients'][client_iqn]['auth']['password'] = password
        target_config['clients'][client_iqn]['auth']['mutual_username'] = m_user
        target_config['clients'][client_iqn]['auth']['mutual_password'] = m_password

    def create_group(self, target_iqn, group_name, members, image_ids):
        """Create a group with the given members and disks."""
        target_config = self.config['targets'][target_iqn]
        target_config['groups'][group_name] = {
            "disks": {},
            "members": []
        }
        for image_id in image_ids:
            target_config['groups'][group_name]['disks'][image_id] = {}
        target_config['groups'][group_name]['members'] = members

    def update_group(self, target_iqn, group_name, members, image_ids):
        """Remove entries from a group.

        NOTE(review): unlike create_group, *members*/*image_ids* here are the
        entries to REMOVE — what survives is everything NOT listed. This
        mirrors how the dashboard calls update_group for removals; confirm
        against IscsiClient.update_group.
        """
        target_config = self.config['targets'][target_iqn]
        group = target_config['groups'][group_name]
        old_members = group['members']
        disks = group['disks']
        target_config['groups'][group_name] = {
            "disks": {},
            "members": []
        }
        for image_id in disks.keys():
            if image_id not in image_ids:
                target_config['groups'][group_name]['disks'][image_id] = {}
        new_members = []
        for member_iqn in old_members:
            if member_iqn not in members:
                new_members.append(member_iqn)
        target_config['groups'][group_name]['members'] = new_members

    def delete_group(self, target_iqn, group_name):
        target_config = self.config['targets'][target_iqn]
        del target_config['groups'][group_name]

    def delete_client(self, target_iqn, client_iqn):
        target_config = self.config['targets'][target_iqn]
        del target_config['clients'][client_iqn]

    def delete_target_lun(self, target_iqn, image_id):
        """Unmap a disk from the target and clear its owner."""
        target_config = self.config['targets'][target_iqn]
        target_config['disks'].pop(image_id)
        del self.config['disks'][image_id]['owner']

    def delete_disk(self, pool, image):
        image_id = '{}/{}'.format(pool, image)
        del self.config['disks'][image_id]

    def delete_target(self, target_iqn):
        del self.config['targets'][target_iqn]

    def get_ip_addresses(self):
        """IPs of the gateway this instance was last asked for (see instance())."""
        ips = {
            'node1': ['192.168.100.201'],
            'node2': ['192.168.100.202', '10.0.2.15'],
            'node3': ['192.168.100.203']
        }
        return {'data': ips[self.gateway_name]}

    def get_hostname(self):
        """Resolve the service URL to a hostname; unknown URLs are unreachable."""
        hostnames = {
            'https://admin:admin@10.17.5.1:5001': 'node1',
            'https://admin:admin@10.17.5.2:5001': 'node2',
            'https://admin:admin@10.17.5.3:5001': 'node3'
        }
        if self.service_url not in hostnames:
            raise RequestException('No route to host')
        return {'data': hostnames[self.service_url]}

    def update_discoveryauth(self, user, password, mutual_user, mutual_password):
        self.config['discovery_auth']['username'] = user
        self.config['discovery_auth']['password'] = password
        self.config['discovery_auth']['mutual_username'] = mutual_user
        self.config['discovery_auth']['mutual_password'] = mutual_password

    def update_targetacl(self, target_iqn, action):
        self.config['targets'][target_iqn]['acl_enabled'] = (action == 'enable_acl')

    def update_targetauth(self, target_iqn, user, password, mutual_user, mutual_password):
        target_config = self.config['targets'][target_iqn]
        target_config['auth']['username'] = user
        target_config['auth']['password'] = password
        target_config['auth']['mutual_username'] = mutual_user
        target_config['auth']['mutual_password'] = mutual_password

    def get_targetinfo(self, target_iqn):
        # pylint: disable=unused-argument
        return {
            'num_sessions': 0
        }

    def get_clientinfo(self, target_iqn, client_iqn):
        # pylint: disable=unused-argument
        return self.clientinfo
# --- end of test_iscsi.py; the following lines come from test_nfs.py ---
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
from copy import deepcopy
from unittest.mock import Mock, patch
from urllib.parse import urlencode
from nfs.export import AppliedExportResults
from .. import mgr
from ..controllers._version import APIVersion
from ..controllers.nfs import NFSGaneshaExports, NFSGaneshaUi
from ..tests import ControllerTestCase
from ..tools import NotificationQueue, TaskManager
class NFSGaneshaExportsTest(ControllerTestCase):
    """Tests for the /api/nfs-ganesha/export REST endpoints.

    The nfs mgr module is not loaded; its export manager is simulated by
    replacing ``mgr.remote`` with mocks.
    """
    # Export as stored by the nfs mgr module (includes the RGW credentials,
    # which the REST layer must strip from responses).
    _nfs_module_export = {
        "export_id": 1,
        "path": "bk1",
        "cluster_id": "myc",
        "pseudo": "/bk-ps",
        "access_type": "RO",
        "squash": "root_id_squash",
        "security_label": False,
        "protocols": [
            4
        ],
        "transports": [
            "TCP",
            "UDP"
        ],
        "fsal": {
            "name": "RGW",
            "user_id": "dashboard",
            "access_key_id": "UUU5YVVOQ2P5QTOPYNAN",
            "secret_access_key": "7z87tMUUsHr67ZWx12pCbWkp9UyOldxhDuPY8tVN"
        },
        "clients": []
    }
    # Empty apply_export result fixture; tests deepcopy it before appending.
    _applied_export = AppliedExportResults()

    @classmethod
    def setUpClass(cls):
        """Derive the REST-visible export: the module export minus the RGW secrets."""
        super().setUpClass()
        cls._expected_export = deepcopy(cls._nfs_module_export)
        del cls._expected_export['fsal']['access_key_id']
        del cls._expected_export['fsal']['secret_access_key']

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        NotificationQueue.stop()

    @classmethod
    def setup_server(cls):
        NotificationQueue.start_queue()
        TaskManager.init()
        cls.setup_controllers([NFSGaneshaExports])

    def test_list_exports(self):
        mgr.remote = Mock(return_value=[self._nfs_module_export])
        self._get('/api/nfs-ganesha/export')
        self.assertStatus(200)
        self.assertJsonBody([self._expected_export])

    def test_get_export(self):
        mgr.remote = Mock(return_value=self._nfs_module_export)
        self._get('/api/nfs-ganesha/export/myc/1')
        self.assertStatus(200)
        self.assertJsonBody(self._expected_export)

    def test_create_export(self):
        """Successful creation: pseudo not yet in use; the reply carries the
        applied-export change with the export_id assigned by the module."""
        export_mgr = Mock()
        created_nfs_export = deepcopy(self._nfs_module_export)
        applied_nfs_export = deepcopy(self._applied_export)
        created_nfs_export['pseudo'] = 'new-pseudo'
        created_nfs_export['export_id'] = 2
        # First lookup (pre-create check) finds nothing; second returns the
        # newly created export.
        export_mgr.get_export_by_pseudo.side_effect = [None, created_nfs_export]
        export_mgr.apply_export.return_value = applied_nfs_export
        mgr.remote.return_value = export_mgr
        export_create_body = deepcopy(self._expected_export)
        del export_create_body['export_id']
        export_create_body['pseudo'] = created_nfs_export['pseudo']
        applied_nfs_export.append(export_create_body)
        self._post('/api/nfs-ganesha/export',
                   export_create_body,
                   version=APIVersion(2, 0))
        self.assertStatus(201)
        applied_nfs_export.changes[0]['export_id'] = created_nfs_export['export_id']
        self.assertJsonBody(applied_nfs_export.changes[0])

    def test_create_export_with_existing_pseudo_fails(self):
        """Creating an export whose pseudo path is already taken must 400."""
        export_mgr = Mock()
        export_mgr.get_export_by_pseudo.return_value = self._nfs_module_export
        mgr.remote.return_value = export_mgr
        export_create_body = deepcopy(self._expected_export)
        del export_create_body['export_id']
        self._post('/api/nfs-ganesha/export',
                   export_create_body,
                   version=APIVersion(2, 0))
        self.assertStatus(400)
        response = self.json_body()
        self.assertIn(f'Pseudo {export_create_body["pseudo"]} is already in use',
                      response['detail'])

    def test_set_export(self):
        export_mgr = Mock()
        updated_nfs_export = deepcopy(self._nfs_module_export)
        applied_nfs_export = deepcopy(self._applied_export)
        updated_nfs_export['pseudo'] = 'updated-pseudo'
        export_mgr.get_export_by_pseudo.return_value = updated_nfs_export
        export_mgr.apply_export.return_value = applied_nfs_export
        mgr.remote.return_value = export_mgr
        updated_export_body = deepcopy(self._expected_export)
        updated_export_body['pseudo'] = updated_nfs_export['pseudo']
        applied_nfs_export.append(updated_export_body)
        self._put('/api/nfs-ganesha/export/myc/2',
                  updated_export_body,
                  version=APIVersion(2, 0))
        self.assertStatus(200)
        self.assertJsonBody(applied_nfs_export.changes[0])

    def test_delete_export(self):
        # First remote call returns the export to delete, second performs it.
        mgr.remote = Mock(side_effect=[self._nfs_module_export, None])
        self._delete('/api/nfs-ganesha/export/myc/2',
                     version=APIVersion(2, 0))
        self.assertStatus(204)

    def test_delete_export_not_found(self):
        mgr.remote = Mock(return_value=None)
        self._delete('/api/nfs-ganesha/export/myc/3',
                     version=APIVersion(2, 0))
        self.assertStatus(404)
class NFSGaneshaUiControllerTest(ControllerTestCase):
@classmethod
def setup_server(cls):
cls.setup_controllers([NFSGaneshaUi])
@classmethod
def _create_ls_dir_url(cls, fs_name, query_params):
api_url = '/ui-api/nfs-ganesha/lsdir/{}'.format(fs_name)
if query_params is not None:
return '{}?{}'.format(api_url, urlencode(query_params))
return api_url
@patch('dashboard.controllers.nfs.CephFS')
def test_lsdir(self, cephfs_class):
cephfs_class.return_value.ls_dir.return_value = [
{'path': '/foo'},
{'path': '/foo/bar'}
]
mocked_ls_dir = cephfs_class.return_value.ls_dir
reqs = [
{
'params': None,
'cephfs_ls_dir_args': ['/', 1],
'path0': '/',
'status': 200
},
{
'params': {'root_dir': '/', 'depth': '1'},
'cephfs_ls_dir_args': ['/', 1],
'path0': '/',
'status': 200
},
{
'params': {'root_dir': '', 'depth': '1'},
'cephfs_ls_dir_args': ['/', 1],
'path0': '/',
'status': 200
},
{
'params': {'root_dir': '/foo', 'depth': '3'},
'cephfs_ls_dir_args': ['/foo', 3],
'path0': '/foo',
'status': 200
},
{
'params': {'root_dir': 'foo', 'depth': '6'},
'cephfs_ls_dir_args': ['/foo', 5],
'path0': '/foo',
'status': 200
},
{
'params': {'root_dir': '/', 'depth': '-1'},
'status': 400
},
{
'params': {'root_dir': '/', 'depth': 'abc'},
'status': 400
}
]
for req in reqs:
self._get(self._create_ls_dir_url('a', req['params']))
self.assertStatus(req['status'])
# Returned paths should contain root_dir as first element
if req['status'] == 200:
paths = self.json_body()['paths']
self.assertEqual(paths[0], req['path0'])
cephfs_class.assert_called_once_with('a')
# Check the arguments passed to `CephFS.ls_dir`.
if req.get('cephfs_ls_dir_args'):
mocked_ls_dir.assert_called_once_with(*req['cephfs_ls_dir_args'])
else:
mocked_ls_dir.assert_not_called()
mocked_ls_dir.reset_mock()
cephfs_class.reset_mock()
@patch('dashboard.controllers.nfs.cephfs')
@patch('dashboard.controllers.nfs.CephFS')
def test_lsdir_non_existed_dir(self, cephfs_class, cephfs):
cephfs.ObjectNotFound = Exception
cephfs.PermissionError = Exception
cephfs_class.return_value.ls_dir.side_effect = cephfs.ObjectNotFound()
self._get(self._create_ls_dir_url('a', {'root_dir': '/foo', 'depth': '3'}))
cephfs_class.assert_called_once_with('a')
cephfs_class.return_value.ls_dir.assert_called_once_with('/foo', 3)
self.assertStatus(200)
self.assertJsonBody({'paths': []})
def test_status_available(self):
self._get('/ui-api/nfs-ganesha/status')
self.assertStatus(200)
self.assertJsonBody({'available': True, 'message': None})
    def test_status_not_available(self):
        """A failing mgr.remote call surfaces as 'not available', not as a 500."""
        # NOTE(review): this rebinds mgr.remote to a Mock for the remainder of
        # the test run — presumably harmless for this suite; verify if tests
        # later in the run depend on mgr.remote.
        mgr.remote = Mock(side_effect=RuntimeError('Test'))
        self._get('/ui-api/nfs-ganesha/status')
        self.assertStatus(200)
        self.assertJsonBody({'available': False, 'message': 'Test'})
| 8,762 | 34.334677 | 84 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_notification.py | # -*- coding: utf-8 -*-
import random
import time
import unittest
from ..tools import NotificationQueue
class Listener(object):
    """Records notifications delivered by the NotificationQueue.

    Every ``log_*`` callback stores the received value and the wall-clock
    time of the call, so tests can check both delivery and dispatch order
    (priorities).
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self):
        self._reset_buffers()

    def _reset_buffers(self):
        # One value list plus one timestamp list per subscription.
        self.type1 = []
        self.type1_ts = []
        self.type2 = []
        self.type2_ts = []
        self.type1_3 = []
        self.type1_3_ts = []
        self.all = []
        self.all_ts = []

    def register(self):
        """Subscribe every callback once; duplicates must be ignored."""
        NotificationQueue.register(self.log_type1, 'type1', priority=90)
        NotificationQueue.register(self.log_type2, 'type2')
        NotificationQueue.register(self.log_type1_3, ['type1', 'type3'])
        NotificationQueue.register(self.log_all, priority=50)
        # these should be ignored by the queue
        NotificationQueue.register(self.log_type1, 'type1')
        NotificationQueue.register(self.log_type1_3, ['type1', 'type3'])
        NotificationQueue.register(self.log_all)

    def log_type1(self, val):
        now = time.time()
        self.type1_ts.append(now)
        self.type1.append(val)

    def log_type2(self, val):
        now = time.time()
        self.type2_ts.append(now)
        self.type2.append(val)

    def log_type1_3(self, val):
        now = time.time()
        self.type1_3_ts.append(now)
        self.type1_3.append(val)

    def log_all(self, val):
        now = time.time()
        self.all_ts.append(now)
        self.all.append(val)

    def clear(self):
        """Drop all recorded data and unsubscribe every callback."""
        self._reset_buffers()
        NotificationQueue.deregister(self.log_type1, 'type1')
        NotificationQueue.deregister(self.log_type2, 'type2')
        NotificationQueue.deregister(self.log_type1_3, ['type1', 'type3'])
        NotificationQueue.deregister(self.log_all)
class NotificationQueueTest(unittest.TestCase):
    """Exercises NotificationQueue delivery, priorities and deregistration."""
    @classmethod
    def setUpClass(cls):
        # A single shared listener; each test registers/clears it in setUp/tearDown.
        cls.listener = Listener()
    def setUp(self):
        self.listener.register()
    def tearDown(self):
        self.listener.clear()
    def test_invalid_register(self):
        """Registering with a non-string/non-list type spec must raise."""
        with self.assertRaises(Exception) as ctx:
            NotificationQueue.register(None, 1)
        self.assertEqual(str(ctx.exception),
                         "n_types param is neither a string nor a list")
    def test_notifications(self):
        """Each notification reaches exactly the listeners of its type."""
        NotificationQueue.start_queue()
        NotificationQueue.new_notification('type1', 1)
        NotificationQueue.new_notification('type2', 2)
        NotificationQueue.new_notification('type3', 3)
        NotificationQueue.stop()
        self.assertEqual(self.listener.type1, [1])
        self.assertEqual(self.listener.type2, [2])
        self.assertEqual(self.listener.type1_3, [1, 3])
        self.assertEqual(self.listener.all, [1, 2, 3])
        # validate priorities
        self.assertLessEqual(self.listener.type1_3_ts[0], self.listener.all_ts[0])
        self.assertLessEqual(self.listener.all_ts[0], self.listener.type1_ts[0])
        self.assertLessEqual(self.listener.type2_ts[0], self.listener.all_ts[1])
        self.assertLessEqual(self.listener.type1_3_ts[1], self.listener.all_ts[2])
    def test_notifications2(self):
        """Stress test: 600 notifications with random pacing are all delivered."""
        NotificationQueue.start_queue()
        for i in range(0, 600):
            typ = "type{}".format(i % 3 + 1)
            if random.random() < 0.5:
                time.sleep(0.002)
            NotificationQueue.new_notification(typ, i)
        NotificationQueue.stop()
        for i in range(0, 600):
            typ = i % 3 + 1
            if typ == 1:
                self.assertIn(i, self.listener.type1)
                self.assertIn(i, self.listener.type1_3)
            elif typ == 2:
                self.assertIn(i, self.listener.type2)
            elif typ == 3:
                self.assertIn(i, self.listener.type1_3)
            self.assertIn(i, self.listener.all)
        self.assertEqual(len(self.listener.type1), 200)
        self.assertEqual(len(self.listener.type2), 200)
        self.assertEqual(len(self.listener.type1_3), 400)
        self.assertEqual(len(self.listener.all), 600)
    def test_deregister(self):
        """After deregistering one type, only the remaining types are delivered."""
        NotificationQueue.start_queue()
        NotificationQueue.new_notification('type1', 1)
        NotificationQueue.new_notification('type3', 3)
        NotificationQueue.stop()
        self.assertEqual(self.listener.type1, [1])
        self.assertEqual(self.listener.type1_3, [1, 3])
        NotificationQueue.start_queue()
        NotificationQueue.deregister(self.listener.log_type1_3, ['type1'])
        NotificationQueue.new_notification('type1', 4)
        NotificationQueue.new_notification('type3', 5)
        NotificationQueue.stop()
        self.assertEqual(self.listener.type1, [1, 4])
        self.assertEqual(self.listener.type1_3, [1, 3, 5])
| 4,800 | 34.043796 | 82 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_orchestrator.py | import inspect
import unittest
from unittest import mock
from orchestrator import Orchestrator as OrchestratorBase
from ..controllers.orchestrator import Orchestrator
from ..services.orchestrator import OrchFeature
from ..tests import ControllerTestCase
class OrchestratorControllerTest(ControllerTestCase):
    """REST tests for the orchestrator status endpoint."""
    URL_STATUS = '/ui-api/orchestrator/status'
    URL_INVENTORY = '/api/orchestrator/inventory'
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Orchestrator])
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_status_get(self, instance):
        """GET status must relay the OrchClient status dict verbatim."""
        status = {'available': False, 'description': ''}
        fake_client = mock.Mock()
        fake_client.status.return_value = status
        instance.return_value = fake_client
        self._get(self.URL_STATUS)
        self.assertStatus(200)
        self.assertJsonBody(status)
class TestOrchestrator(unittest.TestCase):
    def test_features_has_corresponding_methods(self):
        """Every OrchFeature constant must name a real Orchestrator method."""
        # Collect the values of all public, non-callable OrchFeature attributes.
        feature_values = []
        for attr_name, attr_value in inspect.getmembers(
                OrchFeature, lambda m: not inspect.isroutine(m)):
            if not attr_name.startswith('_'):
                feature_values.append(attr_value)
        # Names of every routine defined on the Orchestrator base class.
        method_names = {attr_name for attr_name, _ in inspect.getmembers(
            OrchestratorBase, inspect.isroutine)}
        for feature in feature_values:
            self.assertIn(feature, method_names)
| 1,363 | 32.268293 | 86 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_osd.py | # -*- coding: utf-8 -*-
import uuid
from contextlib import contextmanager
from typing import Any, Dict, List, Optional
from unittest import mock
from ceph.deployment.drive_group import DeviceSelection, DriveGroupSpec # type: ignore
from ceph.deployment.service_spec import PlacementSpec
from .. import mgr
from ..controllers.osd import Osd, OsdUi
from ..services.osd import OsdDeploymentOptions
from ..tests import ControllerTestCase
from ..tools import NotificationQueue, TaskManager
from .helper import update_dict # pylint: disable=import-error
class OsdHelper(object):
    """Factories for the fake mgr/OSD data structures used by the OSD tests.

    All generators return plain dict/list structures shaped like the real
    mgr responses (`osd_stats`, `osd_map_tree`, the osdmap, perf counters).
    """
    # OSD ids used when a caller does not specify its own.
    DEFAULT_OSD_IDS = [0, 1, 2]

    @staticmethod
    def _gen_osdmap_tree_node(node_id: int, node_type: str, children: Optional[List[int]] = None,
                              update_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Return a single CRUSH-tree node of the given type.

        :param node_id: numeric node id (buckets use negative ids in the tests).
        :param node_type: one of 'root', 'host' or 'osd'.
        :param children: child node ids; required for 'root' and 'host'.
        :param update_data: optional overrides merged into the generated node.
        """
        assert node_type in ['root', 'host', 'osd']
        if node_type in ['root', 'host']:
            assert children is not None
        node_types = {
            'root': {
                'id': node_id,
                'name': 'default',
                'type': 'root',
                'type_id': 10,
                'children': children,
            },
            'host': {
                'id': node_id,
                'name': 'ceph-1',
                'type': 'host',
                'type_id': 1,
                'pool_weights': {},
                'children': children,
            },
            'osd': {
                'id': node_id,
                'device_class': 'hdd',
                'type': 'osd',
                'type_id': 0,
                'crush_weight': 0.009796142578125,
                'depth': 2,
                'pool_weights': {},
                'exists': 1,
                'status': 'up',
                'reweight': 1.0,
                'primary_affinity': 1.0,
                'name': 'osd.{}'.format(node_id),
            }
        }
        node = node_types[node_type]
        return update_dict(node, update_data) if update_data else node

    @staticmethod
    def _gen_osd_stats(osd_id: int, update_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Return one entry of the mgr 'osd_stats' structure for ``osd_id``."""
        stats = {
            'osd': osd_id,
            'up_from': 11,
            'seq': 47244640581,
            'num_pgs': 50,
            'kb': 10551288,
            'kb_used': 1119736,
            'kb_used_data': 5504,
            'kb_used_omap': 0,
            'kb_used_meta': 1048576,
            'kb_avail': 9431552,
            'statfs': {
                'total': 10804518912,
                'available': 9657909248,
                'internally_reserved': 1073741824,
                'allocated': 5636096,
                'data_stored': 102508,
                'data_compressed': 0,
                'data_compressed_allocated': 0,
                'data_compressed_original': 0,
                'omap_allocated': 0,
                'internal_metadata': 1073741824
            },
            'hb_peers': [0, 1],
            'snap_trim_queue_len': 0,
            'num_snap_trimming': 0,
            'op_queue_age_hist': {
                'histogram': [],
                'upper_bound': 1
            },
            'perf_stat': {
                'commit_latency_ms': 0.0,
                'apply_latency_ms': 0.0,
                'commit_latency_ns': 0,
                'apply_latency_ns': 0
            },
            'alerts': [],
        }
        return stats if not update_data else update_dict(stats, update_data)

    @staticmethod
    def _gen_osd_map_osd(osd_id: int) -> Dict[str, Any]:
        """Return one osdmap entry for an 'up'+'in' OSD with a random uuid."""
        return {
            'osd': osd_id,
            'up': 1,
            'in': 1,
            'weight': 1.0,
            'primary_affinity': 1.0,
            'last_clean_begin': 0,
            'last_clean_end': 0,
            'up_from': 5,
            'up_thru': 21,
            'down_at': 0,
            'lost_at': 0,
            'public_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6802'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6803'
                }]
            },
            'cluster_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6804'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6805'
                }]
            },
            'heartbeat_back_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6808'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6809'
                }]
            },
            'heartbeat_front_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6806'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6807'
                }]
            },
            'state': ['exists', 'up'],
            'uuid': str(uuid.uuid4()),
            'public_addr': '172.23.0.2:6803/1302',
            'cluster_addr': '172.23.0.2:6805/1302',
            'heartbeat_back_addr': '172.23.0.2:6809/1302',
            'heartbeat_front_addr': '172.23.0.2:6807/1302',
            'id': osd_id,
        }

    @classmethod
    def gen_osdmap(cls, ids: Optional[List[int]] = None) -> Dict[str, Any]:
        """Return an osdmap dict keyed by stringified OSD id."""
        return {str(i): cls._gen_osd_map_osd(i) for i in ids or cls.DEFAULT_OSD_IDS}

    @classmethod
    def gen_osd_stats(cls, ids: Optional[List[int]] = None) -> List[Dict[str, Any]]:
        """Return the 'osd_stats' list for the given (or default) OSD ids."""
        return [cls._gen_osd_stats(i) for i in ids or cls.DEFAULT_OSD_IDS]

    @classmethod
    def gen_osdmap_tree_nodes(cls, ids: Optional[List[int]] = None) -> List[Dict[str, Any]]:
        """Return a flat CRUSH tree: one root, one host, and the OSD leaves."""
        return [
            cls._gen_osdmap_tree_node(-1, 'root', [-3]),
            cls._gen_osdmap_tree_node(-3, 'host', ids or cls.DEFAULT_OSD_IDS),
        ] + [cls._gen_osdmap_tree_node(node_id, 'osd') for node_id in ids or cls.DEFAULT_OSD_IDS]

    @classmethod
    def gen_mgr_get_counter(cls) -> List[List[int]]:
        """Return a fake perf-counter time series: [timestamp, value] pairs."""
        return [[1551973855, 35], [1551973860, 35], [1551973865, 35], [1551973870, 35]]

    @staticmethod
    def mock_inventory_host(orch_client_mock, devices_data: List[Dict[str, str]]) -> None:
        """Make ``orch_client_mock.inventory.list()`` return the given devices.

        :param devices_data: dicts with 'host', 'type' and 'path' keys; devices
            are grouped into one inventory entry per distinct host.
        """
        class MockDevice:
            """Bare device stub exposing the attributes the controller reads."""
            def __init__(self, human_readable_type, path, available=True):
                self.human_readable_type = human_readable_type
                self.available = available
                self.path = path

        # Renamed from the original misspelled 'create_invetory_host'.
        def create_inventory_host(host, devices_data):
            inventory_host = mock.Mock()
            inventory_host.devices.devices = []
            for data in devices_data:
                if data['host'] != host:
                    continue
                inventory_host.devices.devices.append(MockDevice(data['type'], data['path']))
            return inventory_host

        hosts = {device['host'] for device in devices_data}
        inventory = [create_inventory_host(host, devices_data) for host in hosts]
        orch_client_mock.inventory.list.return_value = inventory
class OsdTest(ControllerTestCase):
    """REST tests for the /api/osd and /ui-api/osd endpoints."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Osd, OsdUi])
        NotificationQueue.start_queue()
        TaskManager.init()
    @classmethod
    def tearDownClass(cls):
        NotificationQueue.stop()
    @contextmanager
    def _mock_osd_list(self, osd_stat_ids, osdmap_tree_node_ids, osdmap_ids):
        """Patch every mgr entry point the OSD listing reads with fake data."""
        def mgr_get_replacement(*args, **kwargs):
            # mgr.get() dispatches on the requested structure name.
            method = args[0] or kwargs['method']
            if method == 'osd_stats':
                return {'osd_stats': OsdHelper.gen_osd_stats(osd_stat_ids)}
            if method == 'osd_map_tree':
                return {'nodes': OsdHelper.gen_osdmap_tree_nodes(osdmap_tree_node_ids)}
            raise NotImplementedError()
        def mgr_get_counter_replacement(svc_type, _, path):
            if svc_type == 'osd':
                return {path: OsdHelper.gen_mgr_get_counter()}
            raise NotImplementedError()
        with mock.patch.object(Osd, 'get_osd_map', return_value=OsdHelper.gen_osdmap(osdmap_ids)):
            with mock.patch.object(mgr, 'get', side_effect=mgr_get_replacement):
                with mock.patch.object(mgr, 'get_counter', side_effect=mgr_get_counter_replacement):
                    with mock.patch.object(mgr, 'get_latest', return_value=1146609664):
                        with mock.patch.object(Osd, 'get_removing_osds', return_value=[]):
                            yield
    def _get_drive_group_data(self, service_id='all_hdd', host_pattern_k='host_pattern',
                              host_pattern_v='*'):
        """Build a 'drive_groups' OSD creation request body for /api/osd."""
        return {
            'method': 'drive_groups',
            'data': [
                {
                    'service_type': 'osd',
                    'service_id': service_id,
                    'data_devices': {
                        'rotational': True
                    },
                    host_pattern_k: host_pattern_v
                }
            ],
            'tracking_id': 'all_hdd, b_ssd'
        }
    def test_osd_list_aggregation(self):
        """
        This test emulates the state of a cluster where an OSD has only been
        removed (with e.g. `ceph osd rm`), but it hasn't been removed from the
        CRUSH map. Ceph reports a health warning alongside a `1 osds exist in
        the crush map but not in the osdmap` warning in such a case.
        """
        osds_actual = [0, 1]
        osds_leftover = [0, 1, 2]
        with self._mock_osd_list(osd_stat_ids=osds_actual, osdmap_tree_node_ids=osds_leftover,
                                 osdmap_ids=osds_actual):
            self._get('/api/osd')
            self.assertEqual(len(self.json_body()), 2, 'It should display two OSDs without failure')
            self.assertStatus(200)
    @mock.patch('dashboard.controllers.osd.CephService')
    def test_osd_scrub(self, ceph_service):
        """A deep-scrub request is forwarded as an 'osd deep-scrub' mon command."""
        self._task_post('/api/osd/1/scrub', {'deep': True})
        ceph_service.send_command.assert_called_once_with('mon', 'osd deep-scrub', who='1')
        self.assertStatus(200)
        self.assertJsonBody(None)
    @mock.patch('dashboard.controllers.osd.CephService')
    def test_osd_create_bare(self, ceph_service):
        """Bare OSD creation: success, unknown method and bad svc_id cases."""
        ceph_service.send_command.return_value = '5'
        sample_data = {
            'uuid': 'f860ca2e-757d-48ce-b74a-87052cad563f',
            'svc_id': 5
        }
        data = {
            'method': 'bare',
            'data': sample_data,
            'tracking_id': 'bare-5'
        }
        self._task_post('/api/osd', data)
        self.assertStatus(201)
        ceph_service.send_command.assert_called()
        # unknown method
        data['method'] = 'other'
        self._task_post('/api/osd', data)
        self.assertStatus(400)
        res = self.json_body()
        self.assertIn('Unknown method', res['detail'])
        # svc_id is not int
        data['data']['svc_id'] = "five"
        data['method'] = 'bare'
        self._task_post('/api/osd', data)
        self.assertStatus(400)
        res = self.json_body()
        self.assertIn(data['data']['svc_id'], res['detail'])
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_osd_create_with_drive_groups(self, instance):
        """Drive-group creation is rejected (503) without an orchestrator and
        forwarded as DriveGroupSpec objects when one is available."""
        # without orchestrator service
        fake_client = mock.Mock()
        instance.return_value = fake_client
        # Valid DriveGroup
        data = self._get_drive_group_data()
        # Without orchestrator service
        fake_client.available.return_value = False
        self._task_post('/api/osd', data)
        self.assertStatus(503)
        # With orchestrator service
        fake_client.available.return_value = True
        fake_client.get_missing_features.return_value = []
        self._task_post('/api/osd', data)
        self.assertStatus(201)
        dg_specs = [DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                                   service_id='all_hdd',
                                   service_type='osd',
                                   data_devices=DeviceSelection(rotational=True))]
        fake_client.osds.create.assert_called_with(dg_specs)
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_osd_create_with_invalid_drive_groups(self, instance):
        """A malformed drive-group spec must be rejected with 400."""
        # without orchestrator service
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []
        # Invalid DriveGroup
        data = self._get_drive_group_data('invalid_dg', 'host_pattern_wrong', 'unknown')
        self._task_post('/api/osd', data)
        self.assertStatus(400)
    @mock.patch('dashboard.controllers.osd.CephService')
    def test_osd_mark_all_actions(self, instance):
        """Valid mark actions succeed; an invalid one is logged and ignored."""
        fake_client = mock.Mock()
        instance.return_value = fake_client
        action_list = ['OUT', 'IN', 'DOWN']
        for action in action_list:
            data = {'action': action}
            self._task_put('/api/osd/1/mark', data)
            self.assertStatus(200)
        # invalid mark
        instance.reset_mock()
        with self.assertLogs(level='ERROR') as cm:
            self._task_put('/api/osd/1/mark', {'action': 'OTHER'})
            instance.send_command.assert_not_called()
            self.assertIn('Invalid OSD mark action', cm.output[0])
            self.assertStatus(200)
            self.assertJsonBody(None)
        self._task_post('/api/osd/1/purge', {'svc_id': 1})
        instance.send_command.assert_called_once_with('mon', 'osd purge-actual', id=1,
                                                      yes_i_really_mean_it=True)
        self.assertStatus(200)
        self.assertJsonBody(None)
    @mock.patch('dashboard.controllers.osd.CephService')
    def test_reweight_osd(self, instance):
        """Reweighting sends 'osd reweight' with the weight coerced to float."""
        instance.send_command.return_value = '5'
        uuid1 = str(uuid.uuid1())
        sample_data = {
            'uuid': uuid1,
            'svc_id': 1
        }
        data = {
            'method': 'bare',
            'data': sample_data,
            'tracking_id': 'bare-1'
        }
        self._task_post('/api/osd', data)
        self._task_put('/api/osd/1/mark', {'action': 'DOWN'})
        self.assertStatus(200)
        self._task_post('/api/osd/1/reweight', {'weight': '1'})
        instance.send_command.assert_called_with('mon', 'osd reweight', id=1, weight=1.0)
        self.assertStatus(200)
    def _get_deployment_options(self, fake_client,
                                devices_data: List[Dict[str, str]]) -> Dict[str, Any]:
        """Mock the inventory and fetch /ui-api/osd/deployment_options."""
        OsdHelper.mock_inventory_host(fake_client, devices_data)
        self._get('/ui-api/osd/deployment_options')
        self.assertStatus(200)
        res = self.json_body()
        return res
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options(self, instance):
        """All-HDD inventories recommend cost/capacity; all-SSD disables it."""
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []
        devices_data = [
            {'type': 'hdd', 'path': '/dev/sda', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdc', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdb', 'host': 'host2'},
            {'type': 'hdd', 'path': '/dev/sde', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdd', 'host': 'host2'},
        ]
        res = self._get_deployment_options(fake_client, devices_data)
        self.assertTrue(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        assert res['recommended_option'] == OsdDeploymentOptions.COST_CAPACITY
        # we don't want cost_capacity enabled without hdds
        for data in devices_data:
            data['type'] = 'ssd'
        res = self._get_deployment_options(fake_client, devices_data)
        self.assertFalse(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertEqual(res['recommended_option'], None)
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options_throughput(self, instance):
        """Mixed SSD+HDD inventories recommend the throughput option."""
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []
        devices_data = [
            {'type': 'ssd', 'path': '/dev/sda', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/sdc', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/sdb', 'host': 'host2'},
            {'type': 'hdd', 'path': '/dev/sde', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdd', 'host': 'host2'},
        ]
        res = self._get_deployment_options(fake_client, devices_data)
        self.assertTrue(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertTrue(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.IOPS]['available'])
        assert res['recommended_option'] == OsdDeploymentOptions.THROUGHPUT
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options_with_hdds_and_nvmes(self, instance):
        """NVMe+HDD inventories enable IOPS but still recommend cost/capacity."""
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []
        devices_data = [
            {'type': 'ssd', 'path': '/dev/nvme01', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme02', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme03', 'host': 'host2'},
            {'type': 'hdd', 'path': '/dev/sde', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdd', 'host': 'host2'},
        ]
        res = self._get_deployment_options(fake_client, devices_data)
        self.assertTrue(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertTrue(res['options'][OsdDeploymentOptions.IOPS]['available'])
        assert res['recommended_option'] == OsdDeploymentOptions.COST_CAPACITY
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options_iops(self, instance):
        """NVMe-only inventories enable only the IOPS option."""
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []
        devices_data = [
            {'type': 'ssd', 'path': '/dev/nvme01', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme02', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme03', 'host': 'host2'}
        ]
        res = self._get_deployment_options(fake_client, devices_data)
        self.assertFalse(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertTrue(res['options'][OsdDeploymentOptions.IOPS]['available'])
| 19,388 | 38.3286 | 100 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_plugin_debug.py | # -*- coding: utf-8 -*-
from ..tests import CLICommandTestMixin, ControllerTestCase
class TestPluginDebug(ControllerTestCase, CLICommandTestMixin):
    """Tests for the dashboard 'debug' CLI toggle controlling error verbosity."""

    @classmethod
    def setup_server(cls):
        # pylint: disable=protected-access
        cls.setup_controllers([])

    def setUp(self):
        self.mock_kv_store()

    def _assert_error_response(self, debug_enabled):
        """Hit an unknown endpoint and verify the shape of the 404 payload.

        'request_id' is always present; 'traceback' and 'version' must appear
        only while debug mode is enabled.
        """
        self._get('/api/unexisting_controller')
        self.assertStatus(404)
        data = self.json_body()
        self.assertGreater(len(data), 0)
        self.assertIn('request_id', data)
        if debug_enabled:
            self.assertIn('traceback', data)
            self.assertIn('version', data)
        else:
            self.assertNotIn('traceback', data)
            self.assertNotIn('version', data)

    def test_debug_disabled(self):
        self.exec_cmd('debug', action='disable')
        self._assert_error_response(False)

    def test_debug_enabled(self):
        self.exec_cmd('debug', action='enable')
        self._assert_error_response(True)
| 1,049 | 26.631579 | 63 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_pool.py | # -*- coding: utf-8 -*-
# pylint: disable=protected-access
import time
try:
import mock
except ImportError:
import unittest.mock as mock
from .. import mgr
from ..controllers.pool import Pool
from ..controllers.task import Task
from ..tests import ControllerTestCase
from ..tools import NotificationQueue, TaskManager
class MockTask(object):
    """Minimal stand-in for a TaskManager task that records progress updates.

    ``percentages`` is initialized per instance in ``__init__``: the original
    class-level list was shared by every instance (mutable-class-attribute
    pitfall), so progress values recorded by one test leaked into the next.
    """
    # Kept for backward compatibility with code reading the class attribute;
    # instances shadow it with their own list.
    percentages = []

    def __init__(self):
        self.percentages = []

    def set_progress(self, percentage):
        self.percentages.append(percentage)
class PoolControllerTest(ControllerTestCase):
    """REST tests for pool creation/update and the PG-adjustment progress logic."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Pool, Task])
    @mock.patch('dashboard.services.progress.get_progress_tasks')
    @mock.patch('dashboard.controllers.pool.Pool._get')
    @mock.patch('dashboard.services.ceph_service.CephService.send_command')
    def test_creation(self, send_command, _get, get_progress_tasks):
        """Pool creation waits until pg_num matches pg_num_target (2 polls here)."""
        _get.side_effect = [{
            'pool_name': 'test-pool',
            'pg_num': 64,
            'pg_num_target': 63,
            'pg_placement_num': 64,
            'pg_placement_num_target': 63
        }, {
            'pool_name': 'test-pool',
            'pg_num': 64,
            'pg_num_target': 64,
            'pg_placement_num': 64,
            'pg_placement_num_target': 64
        }]
        NotificationQueue.start_queue()
        TaskManager.init()
        # Slow command so the request is handled as an async task.
        def _send_cmd(*args, **kwargs):  # pylint: disable=unused-argument
            time.sleep(3)
        send_command.side_effect = _send_cmd
        get_progress_tasks.return_value = [], []
        self._task_post('/api/pool', {
            'pool': 'test-pool',
            'pool_type': 1,
            'pg_num': 64
        }, 10)
        self.assertStatus(201)
        self.assertEqual(_get.call_count, 2)
        NotificationQueue.stop()
    @mock.patch('dashboard.controllers.pool.Pool._get')
    def test_wait_for_pgs_without_waiting(self, _get):
        """Already-converged PG numbers return after a single poll."""
        _get.side_effect = [{
            'pool_name': 'test-pool',
            'pg_num': 32,
            'pg_num_target': 32,
            'pg_placement_num': 32,
            'pg_placement_num_target': 32
        }]
        Pool._wait_for_pgs('test-pool')
        self.assertEqual(_get.call_count, 1)
    @mock.patch('dashboard.controllers.pool.Pool._get')
    def test_wait_for_pgs_with_waiting(self, _get):
        """Progress percentages are reported while pg/pgp converge to target."""
        task = MockTask()
        # Temporarily swap in our progress recorder; restored at the end.
        orig_method = TaskManager.current_task
        TaskManager.current_task = mock.MagicMock()
        TaskManager.current_task.return_value = task
        _get.side_effect = [{
            'pool_name': 'test-pool',
            'pg_num': 64,
            'pg_num_target': 32,
            'pg_placement_num': 64,
            'pg_placement_num_target': 64
        }, {
            'pool_name': 'test-pool',
            'pg_num': 63,
            'pg_num_target': 32,
            'pg_placement_num': 62,
            'pg_placement_num_target': 32
        }, {
            'pool_name': 'test-pool',
            'pg_num': 48,
            'pg_num_target': 32,
            'pg_placement_num': 48,
            'pg_placement_num_target': 32
        }, {
            'pool_name': 'test-pool',
            'pg_num': 48,
            'pg_num_target': 32,
            'pg_placement_num': 33,
            'pg_placement_num_target': 32
        }, {
            'pool_name': 'test-pool',
            'pg_num': 33,
            'pg_num_target': 32,
            'pg_placement_num': 32,
            'pg_placement_num_target': 32
        }, {
            'pool_name': 'test-pool',
            'pg_num': 32,
            'pg_num_target': 32,
            'pg_placement_num': 32,
            'pg_placement_num_target': 32
        }]
        Pool._wait_for_pgs('test-pool')
        self.assertEqual(_get.call_count, 6)
        self.assertEqual(task.percentages, [0, 5, 50, 73, 98])
        TaskManager.current_task = orig_method
    @mock.patch('dashboard.controllers.osd.CephService.get_pool_list_with_stats')
    @mock.patch('dashboard.controllers.osd.CephService.get_pool_list')
    def test_pool_list(self, get_pool_list, get_pool_list_with_stats):
        """Listing pools reads the CRUSH map and honors the stats switch."""
        get_pool_list.return_value = [{
            'type': 3,
            'crush_rule': 1,
            'application_metadata': {
                'test_key': 'test_metadata'
            },
            'pool_name': 'test_name'
        }]
        mgr.get.side_effect = lambda key: {
            'osd_map_crush': {
                'rules': [{
                    'rule_id': 1,
                    'rule_name': 'test-rule'
                }]
            }
        }[key]
        Pool._pool_list()
        mgr.get.assert_called_with('osd_map_crush')
        self.assertEqual(get_pool_list.call_count, 1)
        # with stats
        get_pool_list_with_stats.return_value = get_pool_list.return_value
        Pool._pool_list(attrs='type', stats='True')
        self.assertEqual(get_pool_list_with_stats.call_count, 1)
    @mock.patch('dashboard.controllers.pool.Pool._get')
    @mock.patch('dashboard.services.ceph_service.CephService.send_command')
    def test_set_pool_name(self, send_command, _get):
        """Updating a pool issues one mon command per changed setting (10 here)."""
        _get.return_value = {
            'options': {
                'compression_min_blob_size': '1'
            },
            'application_metadata': ['data1', 'data2']
        }
        def _send_cmd(*args, **kwargs):  # pylint: disable=unused-argument
            pass
        send_command.side_effect = _send_cmd
        NotificationQueue.start_queue()
        TaskManager.init()
        self._task_put('/api/pool/test-pool', {
            "flags": "ec_overwrites",
            "application_metadata": ['data3', 'data2'],
            "configuration": "test-conf",
            "compression_mode": 'unset',
            'compression_min_blob_size': '1',
            'compression_max_blob_size': '1',
            'compression_required_ratio': '1',
            'pool': 'test-pool',
            'pg_num': 64
        })
        NotificationQueue.stop()
        self.assertEqual(_get.call_count, 1)
        self.assertEqual(send_command.call_count, 10)
| 6,012 | 32.220994 | 81 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_prometheus.py | # -*- coding: utf-8 -*-
# pylint: disable=protected-access
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from .. import mgr
from ..controllers.prometheus import Prometheus, PrometheusNotifications, PrometheusReceiver
from ..tests import ControllerTestCase
class PrometheusControllerTest(ControllerTestCase):
    """Verifies the Prometheus/Alertmanager proxy endpoints and the
    notification receiver/replay endpoints."""
    # Mock upstream hosts configured through module options in setup_server.
    alert_host = 'http://alertmanager:9093/mock'
    alert_host_api = alert_host + '/api/v1'
    prometheus_host = 'http://prometheus:9090/mock'
    prometheus_host_api = prometheus_host + '/api/v1'
    @classmethod
    def setup_server(cls):
        settings = {
            'ALERTMANAGER_API_HOST': cls.alert_host,
            'PROMETHEUS_API_HOST': cls.prometheus_host
        }
        mgr.get_module_option.side_effect = settings.get
        cls.setup_controllers([Prometheus, PrometheusNotifications, PrometheusReceiver])
    @patch("dashboard.controllers.prometheus.mgr.get_module_option_ex", lambda a, b, c: False)
    def test_rules(self):
        """/rules is proxied as a GET to the Prometheus API."""
        with patch('requests.request') as mock_request:
            self._get('/api/prometheus/rules')
            mock_request.assert_called_with('GET', self.prometheus_host_api + '/rules',
                                            json=None, params={}, verify=True, auth=None)
    @patch("dashboard.controllers.prometheus.mgr.get_module_option_ex", lambda a, b, c: False)
    def test_list(self):
        """The alert list is proxied as a GET to the Alertmanager API."""
        with patch('requests.request') as mock_request:
            self._get('/api/prometheus')
            mock_request.assert_called_with('GET', self.alert_host_api + '/alerts',
                                            json=None, params={}, verify=True, auth=None)
    @patch("dashboard.controllers.prometheus.mgr.get_module_option_ex", lambda a, b, c: False)
    def test_get_silences(self):
        with patch('requests.request') as mock_request:
            self._get('/api/prometheus/silences')
            mock_request.assert_called_with('GET', self.alert_host_api + '/silences',
                                            json=None, params={}, verify=True, auth=None)
    @patch("dashboard.controllers.prometheus.mgr.get_module_option_ex", lambda a, b, c: False)
    def test_add_silence(self):
        with patch('requests.request') as mock_request:
            self._post('/api/prometheus/silence', {'id': 'new-silence'})
            mock_request.assert_called_with('POST', self.alert_host_api + '/silences',
                                            params=None, json={'id': 'new-silence'},
                                            verify=True, auth=None)
    @patch("dashboard.controllers.prometheus.mgr.get_module_option_ex", lambda a, b, c: False)
    def test_update_silence(self):
        # Updating reuses the same POST /silences call as creation.
        with patch('requests.request') as mock_request:
            self._post('/api/prometheus/silence', {'id': 'update-silence'})
            mock_request.assert_called_with('POST', self.alert_host_api + '/silences',
                                            params=None, json={'id': 'update-silence'},
                                            verify=True, auth=None)
    @patch("dashboard.controllers.prometheus.mgr.get_module_option_ex", lambda a, b, c: False)
    def test_expire_silence(self):
        with patch('requests.request') as mock_request:
            self._delete('/api/prometheus/silence/0')
            mock_request.assert_called_with('DELETE', self.alert_host_api + '/silence/0',
                                            json=None, params=None, verify=True, auth=None)
    def test_silences_empty_delete(self):
        """DELETE without a silence id must not hit the Alertmanager at all."""
        with patch('requests.request') as mock_request:
            self._delete('/api/prometheus/silence')
            mock_request.assert_not_called()
    def test_post_on_receiver(self):
        """The receiver stores each posted notification with a timestamp."""
        PrometheusReceiver.notifications = []
        self._post('/api/prometheus_receiver', {'name': 'foo'})
        self.assertEqual(len(PrometheusReceiver.notifications), 1)
        notification = PrometheusReceiver.notifications[0]
        self.assertEqual(notification['name'], 'foo')
        self.assertTrue(len(notification['notified']) > 20)
    def test_get_empty_list_with_no_notifications(self):
        PrometheusReceiver.notifications = []
        self._get('/api/prometheus/notifications')
        self.assertStatus(200)
        self.assertJsonBody([])
        self._get('/api/prometheus/notifications?from=last')
        self.assertStatus(200)
        self.assertJsonBody([])
    def test_get_all_notification(self):
        PrometheusReceiver.notifications = []
        self._post('/api/prometheus_receiver', {'name': 'foo'})
        self._post('/api/prometheus_receiver', {'name': 'bar'})
        self._get('/api/prometheus/notifications')
        self.assertStatus(200)
        self.assertJsonBody(PrometheusReceiver.notifications)
    def test_get_last_notification_with_use_of_last_keyword(self):
        PrometheusReceiver.notifications = []
        self._post('/api/prometheus_receiver', {'name': 'foo'})
        self._post('/api/prometheus_receiver', {'name': 'bar'})
        self._get('/api/prometheus/notifications?from=last')
        self.assertStatus(200)
        last = PrometheusReceiver.notifications[1]
        self.assertJsonBody([last])
    def test_get_no_notification_with_unknown_id(self):
        PrometheusReceiver.notifications = []
        self._post('/api/prometheus_receiver', {'name': 'foo'})
        self._post('/api/prometheus_receiver', {'name': 'bar'})
        self._get('/api/prometheus/notifications?from=42')
        self.assertStatus(200)
        self.assertJsonBody([])
    def test_get_no_notification_since_with_last_notification(self):
        PrometheusReceiver.notifications = []
        self._post('/api/prometheus_receiver', {'name': 'foo'})
        notification = PrometheusReceiver.notifications[0]
        self._get('/api/prometheus/notifications?from=' + notification['id'])
        self.assertStatus(200)
        self.assertJsonBody([])
    def test_get_notifications_since_last_notification(self):
        """Replaying from an older id returns only the newer notifications."""
        PrometheusReceiver.notifications = []
        self._post('/api/prometheus_receiver', {'name': 'foobar'})
        next_to_last = PrometheusReceiver.notifications[0]
        self._post('/api/prometheus_receiver', {'name': 'foo'})
        self._post('/api/prometheus_receiver', {'name': 'bar'})
        self._get('/api/prometheus/notifications?from=' + next_to_last['id'])
        forelast = PrometheusReceiver.notifications[1]
        last = PrometheusReceiver.notifications[2]
        self.assertEqual(self.json_body(), [forelast, last])
| 6,563 | 46.565217 | 94 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_rbd_mirroring.py |
import json
import unittest
import rbd
try:
import mock
except ImportError:
import unittest.mock as mock
from .. import mgr
from ..controllers.orchestrator import Orchestrator
from ..controllers.rbd_mirroring import RbdMirroring, \
RbdMirroringPoolBootstrap, RbdMirroringStatus, RbdMirroringSummary, \
get_daemons, get_pools
from ..controllers.summary import Summary
from ..services import progress
from ..tests import ControllerTestCase
# Fake ``mgr.list_servers()`` payload: one host running a single
# rbd-mirror daemon with service id 3.
mock_list_servers = [{
    'hostname': 'ceph-host',
    'services': [{'id': 3, 'type': 'rbd-mirror'}]
}]
# Fake ``mgr.get_metadata()`` payload for that daemon.
mock_get_metadata = {
    'id': 1,
    'instance_id': 3,
    'ceph_version': 'ceph version 13.0.0-5719 mimic (dev)'
}
# Mirror daemon status keyed by service id. NOTE: tests mutate this dict
# in place (see GetDaemonAndPoolsTest.test_get_pools_health) and then
# re-serialize it.
_status = {
    1: {
        'callouts': {
            'image': {
                'level': 'warning',
            }
        },
        'image_local_count': 5,
        'image_remote_count': 6,
        'image_error_count': 7,
        'image_warning_count': 8,
        'name': 'rbd'
    }
}
# ``mgr.get_daemon_status()`` returns the status JSON-encoded under 'json'.
mock_get_daemon_status = {
    'json': json.dumps(_status)
}
# Minimal osd_map with a single pool named 'rbd'.
# NOTE(review): ``application_metadata`` is a set literal here, whereas the
# real osd_map uses a mapping like {'rbd': {}} -- apparently sufficient for
# these tests; confirm before extending.
mock_osd_map = {
    'pools': [{
        'pool_name': 'rbd',
        'application_metadata': {'rbd'}
    }]
}
class GetDaemonAndPoolsTest(unittest.TestCase):
    """Unit tests for rbd_mirroring's ``get_daemons()``/``get_pools()``."""
    @classmethod
    def setUpClass(cls):
        # Wire the mgr mock so the helpers see one host with a single
        # rbd-mirror daemon (mock_list_servers) and one 'rbd' pool.
        mgr.list_servers.return_value = mock_list_servers
        mgr.get_metadata = mock.Mock(return_value=mock_get_metadata)
        mgr.get_daemon_status.return_value = mock_get_daemon_status
        mgr.get.side_effect = lambda key: {
            'osd_map': mock_osd_map,
            'health': {'json': '{"status": 1}'},
            'fs_map': {'filesystems': []},
            'mgr_map': {
                'services': {
                    'dashboard': 'https://ceph.dev:11000/'
                },
            }
        }[key]
        mgr.url_prefix = ''
        mgr.get_mgr_id.return_value = 0
        mgr.have_mon_connection.return_value = True
        mgr.version = 'ceph version 13.1.0-534-g23d3751b89 ' \
                      '(23d3751b897b31d2bda57aeaf01acb5ff3c4a9cd) ' \
                      'nautilus (dev)'
        progress.get_progress_tasks = mock.MagicMock()
        progress.get_progress_tasks.return_value = ([], [])
    @mock.patch('rbd.RBD')
    def test_get_pools_unknown(self, mock_rbd):
        """If ``mirror_mode_get`` raises, the pool mode becomes 'unknown'."""
        mock_rbd_instance = mock_rbd.return_value
        mock_rbd_instance.mirror_mode_get.side_effect = Exception
        daemons = get_daemons()
        res = get_pools(daemons)
        self.assertTrue(res['rbd']['mirror_mode'] == "unknown")
    @mock.patch('rbd.RBD')
    def test_get_pools_mode(self, mock_rbd):
        """Each librbd mirror-mode constant maps to its string name."""
        daemons = get_daemons()
        mock_rbd_instance = mock_rbd.return_value
        testcases = [
            (rbd.RBD_MIRROR_MODE_DISABLED, "disabled"),
            (rbd.RBD_MIRROR_MODE_IMAGE, "image"),
            (rbd.RBD_MIRROR_MODE_POOL, "pool"),
        ]
        mock_rbd_instance.mirror_peer_list.return_value = []
        for mirror_mode, expected in testcases:
            mock_rbd_instance.mirror_mode_get.return_value = mirror_mode
            res = get_pools(daemons)
            self.assertTrue(res['rbd']['mirror_mode'] == expected)
    @mock.patch('rbd.RBD')
    def test_get_pools_health(self, mock_rbd):
        """Pool health derives from daemon status plus image status summary."""
        mock_rbd_instance = mock_rbd.return_value
        mock_rbd_instance.mirror_peer_list.return_value = []
        test_cases = self._get_pool_test_cases()
        for new_status, pool_mirror_mode, images_summary, expected_output in test_cases:
            # _status is module-level: patch it in place and re-encode it the
            # same way mock_get_daemon_status was built.
            _status[1].update(new_status)
            daemon_status = {
                'json': json.dumps(_status)
            }
            mgr.get_daemon_status.return_value = daemon_status
            daemons = get_daemons()
            mock_rbd_instance.mirror_mode_get.return_value = pool_mirror_mode
            mock_rbd_instance.mirror_image_status_summary.return_value = images_summary
            res = get_pools(daemons)
            for k, v in expected_output.items():
                self.assertTrue(v == res['rbd'][k])
        mgr.get_daemon_status.return_value = mock_get_daemon_status  # reset return value
    def _get_pool_test_cases(self):
        # Each tuple:
        # 1. daemon status delta merged into _status[1]
        # 2. Pool mirror mode
        # 3. Image health summary
        # 4. Expected pool health output
        test_cases = [
            (
                {
                    'image_error_count': 7,
                },
                rbd.RBD_MIRROR_MODE_IMAGE,
                [(rbd.MIRROR_IMAGE_STATUS_STATE_UNKNOWN, None)],
                {
                    'health_color': 'warning',
                    'health': 'Warning'
                }
            ),
            (
                {
                    'image_error_count': 7,
                },
                rbd.RBD_MIRROR_MODE_POOL,
                [(rbd.MIRROR_IMAGE_STATUS_STATE_ERROR, None)],
                {
                    'health_color': 'error',
                    'health': 'Error'
                }
            ),
            (
                {
                    'image_error_count': 0,
                    'image_warning_count': 0,
                    'leader_id': 1
                },
                rbd.RBD_MIRROR_MODE_DISABLED,
                [],
                {
                    'health_color': 'info',
                    'health': 'Disabled'
                }
            ),
        ]
        return test_cases
class RbdMirroringControllerTest(ControllerTestCase):
    """REST tests for the /api/block/mirroring/site_name endpoint."""

    @classmethod
    def setup_server(cls):
        cls.setup_controllers([RbdMirroring])

    @mock.patch('dashboard.controllers.rbd_mirroring.rbd.RBD')
    def test_site_name(self, mock_rbd):
        """GET returns the current site name; PUT updates it through librbd."""
        rbd_inst = mock_rbd.return_value

        # Read the initial site name.
        expected = {'site_name': 'fsid'}
        rbd_inst.mirror_site_name_get.return_value = expected['site_name']
        self._get('/api/block/mirroring/site_name')
        self.assertStatus(200)
        self.assertJsonBody(expected)

        # Change it and verify the set call reached librbd.
        expected['site_name'] = 'site-a'
        rbd_inst.mirror_site_name_get.return_value = expected['site_name']
        self._put('/api/block/mirroring/site_name', expected)
        self.assertStatus(200)
        self.assertJsonBody(expected)
        rbd_inst.mirror_site_name_set.assert_called_with(
            mock.ANY, expected['site_name'])
class RbdMirroringPoolBootstrapControllerTest(ControllerTestCase):
    """REST tests for /api/block/mirroring/pool/<pool>/bootstrap endpoints."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([RbdMirroringPoolBootstrap])
    @mock.patch('dashboard.controllers.rbd_mirroring.rbd.RBD')
    def test_token(self, mock_rbd):
        """Creating a token proxies to ``mirror_peer_bootstrap_create``."""
        mock_rbd_instance = mock_rbd.return_value
        mock_rbd_instance.mirror_peer_bootstrap_create.return_value = "1234"
        self._post('/api/block/mirroring/pool/abc/bootstrap/token')
        self.assertStatus(200)
        self.assertJsonBody({"token": "1234"})
        mgr.rados.open_ioctx.assert_called_with("abc")
        mock_rbd_instance.mirror_peer_bootstrap_create.assert_called()
    @mock.patch('dashboard.controllers.rbd_mirroring.rbd')
    def test_peer(self, mock_rbd_module):
        """Importing a peer validates the direction before forwarding."""
        mock_rbd_instance = mock_rbd_module.RBD.return_value
        values = {
            "direction": "invalid",
            "token": "1234"
        }
        # An unknown direction is rejected with an internal error (500).
        self._post('/api/block/mirroring/pool/abc/bootstrap/peer', values)
        self.assertStatus(500)
        mgr.rados.open_ioctx.assert_called_with("abc")
        # A valid direction ('rx') succeeds and forwards the token.
        values["direction"] = "rx"
        self._post('/api/block/mirroring/pool/abc/bootstrap/peer', values)
        self.assertStatus(200)
        self.assertJsonBody({})
        mgr.rados.open_ioctx.assert_called_with("abc")
        mock_rbd_instance.mirror_peer_bootstrap_import.assert_called_with(
            mock.ANY, mock_rbd_module.RBD_MIRROR_PEER_DIRECTION_RX, '1234')
class RbdMirroringSummaryControllerTest(ControllerTestCase):
    """REST tests for /api/block/mirroring/summary and the global summary."""
    @classmethod
    def setup_server(cls):
        # Same mgr wiring as GetDaemonAndPoolsTest: one host with a single
        # rbd-mirror daemon and one 'rbd' pool.
        mgr.list_servers.return_value = mock_list_servers
        mgr.get_metadata = mock.Mock(return_value=mock_get_metadata)
        mgr.get_daemon_status.return_value = mock_get_daemon_status
        mgr.get.side_effect = lambda key: {
            'osd_map': mock_osd_map,
            'health': {'json': '{"status": 1}'},
            'fs_map': {'filesystems': []},
            'mgr_map': {
                'services': {
                    'dashboard': 'https://ceph.dev:11000/'
                },
            }
        }[key]
        mgr.url_prefix = ''
        mgr.get_mgr_id.return_value = 0
        mgr.have_mon_connection.return_value = True
        mgr.version = 'ceph version 13.1.0-534-g23d3751b89 ' \
                      '(23d3751b897b31d2bda57aeaf01acb5ff3c4a9cd) ' \
                      'nautilus (dev)'
        progress.get_progress_tasks = mock.MagicMock()
        progress.get_progress_tasks.return_value = ([], [])
        cls.setup_controllers([RbdMirroringSummary, Summary], '/test')
    @mock.patch('dashboard.controllers.rbd_mirroring.rbd.RBD')
    def test_default(self, mock_rbd):
        """The summary reports site name, overall status and content data."""
        mock_rbd_instance = mock_rbd.return_value
        mock_rbd_instance.mirror_site_name_get.return_value = 'site-a'
        self._get('/test/api/block/mirroring/summary')
        result = self.json_body()
        self.assertStatus(200)
        self.assertEqual(result['site_name'], 'site-a')
        self.assertEqual(result['status'], 0)
        for k in ['daemons', 'pools', 'image_error', 'image_syncing', 'image_ready']:
            self.assertIn(k, result['content_data'])
    @mock.patch('dashboard.controllers.BaseController._has_permissions')
    @mock.patch('dashboard.controllers.rbd_mirroring.rbd.RBD')
    def test_summary(self, mock_rbd, has_perms_mock):
        """We're also testing `summary`, as it also uses code from `rbd_mirroring.py`"""
        mock_rbd_instance = mock_rbd.return_value
        mock_rbd_instance.mirror_site_name_get.return_value = 'site-a'
        has_perms_mock.return_value = True
        self._get('/test/api/summary')
        self.assertStatus(200)
        summary = self.json_body()['rbd_mirroring']
        # 2 warnings: 1 for the daemon, 1 for the pool
        self.assertEqual(summary, {'errors': 0, 'warnings': 2})
class RbdMirroringStatusControllerTest(ControllerTestCase):
    """REST tests for the /ui-api/block/mirroring status/configure endpoints."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([RbdMirroringStatus, Orchestrator])
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_status(self, instance):
        # NOTE(review): the orchestrator stub reports available=False, yet the
        # endpoint answers available=True -- presumably mirroring status does
        # not require an available orchestrator; confirm against the
        # controller before relying on this.
        status = {'available': False, 'description': ''}
        fake_client = mock.Mock()
        fake_client.status.return_value = status
        instance.return_value = fake_client
        self._get('/ui-api/block/mirroring/status')
        self.assertStatus(200)
        self.assertJsonBody({'available': True, 'message': None})
    def test_configure(self):
        """POST /configure succeeds; the response body is not inspected."""
        self._post('/ui-api/block/mirroring/configure')
        self.assertStatus(200)
| 10,907 | 33.194357 | 89 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_rbd_service.py | # -*- coding: utf-8 -*-
# pylint: disable=dangerous-default-value,too-many-public-methods
import unittest
from datetime import datetime
from unittest.mock import MagicMock
try:
import mock
except ImportError:
import unittest.mock as mock
from .. import mgr
from ..services.rbd import RbdConfiguration, RBDSchedulerInterval, RbdService, \
get_image_spec, parse_image_spec
class ImageNotFoundStub(Exception):
    """Test stand-in for ``rbd.ImageNotFound``.

    Mirrors the real exception's signature: a human-readable message plus
    an optional errno, both stored in ``args``.
    """

    def __init__(self, message, errno=None):
        text = 'RBD image not found (%s)' % message
        super().__init__(text, errno)
class RbdServiceTest(unittest.TestCase):
    """Unit tests for the dashboard's RbdService helpers."""
    def setUp(self):
        # Replace the class-level librbd handle with a mock so each test can
        # script its return values.
        # pylint: disable=protected-access
        RbdService._rbd_inst = mock.Mock()
        self.rbd_inst_mock = RbdService._rbd_inst
    def test_compose_image_spec(self):
        """get_image_spec() joins pool[/namespace]/image with slashes."""
        self.assertEqual(get_image_spec('mypool', 'myns', 'myimage'), 'mypool/myns/myimage')
        self.assertEqual(get_image_spec('mypool', None, 'myimage'), 'mypool/myimage')
    def test_parse_image_spec(self):
        """parse_image_spec() is the inverse; namespace is None if absent."""
        self.assertEqual(parse_image_spec('mypool/myns/myimage'), ('mypool', 'myns', 'myimage'))
        self.assertEqual(parse_image_spec('mypool/myimage'), ('mypool', None, 'myimage'))
    @mock.patch('dashboard.services.rbd.RbdConfiguration._rbd.config_list')
    @mock.patch('dashboard.mgr.get')
    @mock.patch('dashboard.services.ceph_service.CephService.get_pool_list')
    def test_pool_rbd_configuration_with_different_pg_states(self, get_pool_list, get, config_list):
        """Pools whose PGs are not active+clean yield an empty config list."""
        get_pool_list.return_value = [{
            'pool_name': 'good-pool',
            'pool': 1,
        }, {
            'pool_name': 'bad-pool',
            'pool': 2,
        }]
        get.return_value = {
            'by_pool': {
                '1': {'active+clean': 32},
                '2': {'creating+incomplete': 32},
            }
        }
        config_list.return_value = [1, 2, 3]
        config = RbdConfiguration('bad-pool')
        self.assertEqual(config.list(), [])
        config = RbdConfiguration('good-pool')
        self.assertEqual(config.list(), [1, 2, 3])
    def test_rbd_image_stat_removing(self):
        """A trashed image in REMOVING state is reported with its timestamps."""
        time = datetime.utcnow()
        self.rbd_inst_mock.trash_get.return_value = {
            'id': '3c1a5ee60a88',
            'name': 'test_rbd',
            'source': 'REMOVING',
            'deletion_time': time,
            'deferment_end_time': time
        }
        ioctx_mock = MagicMock()
        # pylint: disable=protected-access
        rbd = RbdService._rbd_image_stat_removing(ioctx_mock, 'test_pool', '', '3c1a5ee60a88')
        self.assertEqual(rbd, {
            'id': '3c1a5ee60a88',
            'unique_id': 'test_pool/3c1a5ee60a88',
            'name': 'test_rbd',
            'source': 'REMOVING',
            'deletion_time': '{}Z'.format(time.isoformat()),
            'deferment_end_time': '{}Z'.format(time.isoformat()),
            'pool_name': 'test_pool',
            'namespace': ''
        })
    @mock.patch('dashboard.services.rbd.rbd.ImageNotFound', new_callable=lambda: ImageNotFoundStub)
    def test_rbd_image_stat_filter_source_user(self, _):
        """Trash entries whose source is USER (not REMOVING) raise NotFound."""
        self.rbd_inst_mock.trash_get.return_value = {
            'id': '3c1a5ee60a88',
            'name': 'test_rbd',
            'source': 'USER'
        }
        ioctx_mock = MagicMock()
        with self.assertRaises(ImageNotFoundStub) as ctx:
            # pylint: disable=protected-access
            RbdService._rbd_image_stat_removing(ioctx_mock, 'test_pool', '', '3c1a5ee60a88')
        self.assertIn('No image test_pool/3c1a5ee60a88 in status `REMOVING` found.',
                      str(ctx.exception))
    @mock.patch('dashboard.services.rbd.rbd.ImageNotFound', new_callable=lambda: ImageNotFoundStub)
    @mock.patch('dashboard.services.rbd.RbdService._pool_namespaces')
    @mock.patch('dashboard.services.rbd.RbdService._rbd_image_stat_removing')
    @mock.patch('dashboard.services.rbd.RbdService._rbd_image_stat')
    @mock.patch('dashboard.services.rbd.RbdService._rbd_image_refs')
    def test_rbd_pool_list(self, rbd_image_ref_mock, rbd_image_stat_mock,
                           rbd_image_stat_removing_mock, pool_namespaces, _):
        """rbd_pool_list() falls back to the trash stat when the regular stat
        raises ImageNotFound, and honors offset/limit."""
        time = datetime.utcnow()
        ioctx_mock = MagicMock()
        mgr.rados = MagicMock()
        mgr.rados.open_ioctx.return_value = ioctx_mock
        self.rbd_inst_mock.namespace_list.return_value = []
        rbd_image_ref_mock.return_value = [{'name': 'test_rbd', 'id': '3c1a5ee60a88'}]
        pool_namespaces.return_value = ['']
        rbd_image_stat_mock.side_effect = mock.Mock(side_effect=ImageNotFoundStub(
            'RBD image not found test_pool/3c1a5ee60a88'))
        rbd_image_stat_removing_mock.return_value = {
            'id': '3c1a5ee60a88',
            'unique_id': 'test_pool/3c1a5ee60a88',
            'name': 'test_rbd',
            'source': 'REMOVING',
            'deletion_time': '{}Z'.format(time.isoformat()),
            'deferment_end_time': '{}Z'.format(time.isoformat()),
            'pool_name': 'test_pool',
            'namespace': ''
        }
        # With limit 0 no images are returned, but the total count is still 1.
        rbd_pool_list = RbdService.rbd_pool_list(['test_pool'], offset=0, limit=0)
        self.assertEqual(rbd_pool_list, ([], 1))
        self.rbd_inst_mock.namespace_list.return_value = []
        rbd_pool_list = RbdService.rbd_pool_list(['test_pool'], offset=0, limit=5)
        self.assertEqual(rbd_pool_list, ([{
            'id': '3c1a5ee60a88',
            'unique_id': 'test_pool/3c1a5ee60a88',
            'name': 'test_rbd',
            'source': 'REMOVING',
            'deletion_time': '{}Z'.format(time.isoformat()),
            'deferment_end_time': '{}Z'.format(time.isoformat()),
            'pool_name': 'test_pool',
            'namespace': ''
        }], 1))
    def test_valid_interval(self):
        """RBDSchedulerInterval accepts <n>(m|h|d) and rejects anything else."""
        test_cases = [
            ('15m', False),
            ('1h', False),
            ('5d', False),
            ('m', True),
            ('d', True),
            ('1s', True),
            ('11', True),
            ('1m1', True),
        ]
        for interval, error in test_cases:
            if error:
                with self.assertRaises(ValueError):
                    RBDSchedulerInterval(interval)
            else:
                self.assertEqual(str(RBDSchedulerInterval(interval)), interval)
    def test_rbd_image_refs_cache(self):
        """_rbd_image_refs() returns the per-pool image refs from list2."""
        ioctx_mock = MagicMock()
        mgr.rados = MagicMock()
        mgr.rados.open_ioctx.return_value = ioctx_mock
        images = [{'image': str(i), 'id': str(i)} for i in range(10)]
        for i in range(5):
            self.rbd_inst_mock.list2.return_value = images[i*2:(i*2)+2]
            ioctx_mock = MagicMock()
            # pylint: disable=protected-access
            res = RbdService._rbd_image_refs(ioctx_mock, str(i))
            self.assertEqual(res, images[i*2:(i*2)+2])
| 6,939 | 37.555556 | 100 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_rest_client.py | # -*- coding: utf-8 -*-
import unittest
import requests.exceptions
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from urllib3.exceptions import MaxRetryError, ProtocolError
from .. import mgr
from ..rest_client import RequestException, RestClient
class RestClientTestClass(RestClient):
    """RestClient subclass exposing one decorated endpoint for testing."""

    @RestClient.api_get('/')
    def fake_endpoint_method_with_annotation(self, request=None) -> bool:
        """Dummy endpoint; the ``api_get`` decorator supplies the behavior."""
class RestClientTest(unittest.TestCase):
    """Verify RestClient injects the configured default request timeout."""
    def setUp(self):
        # The default timeout is read via mgr.get_module_option.
        settings = {'REST_REQUESTS_TIMEOUT': 45}
        mgr.get_module_option.side_effect = settings.get
    def test_timeout_auto_set(self):
        """No timeout given -> the configured default (45s) is injected."""
        with patch('requests.Session.request') as mock_request:
            rest_client = RestClient('localhost', 8000)
            rest_client.session.request('GET', '/test')
            mock_request.assert_called_with('GET', '/test', timeout=45)
    def test_timeout_auto_set_arg(self):
        """Default timeout is also injected when positional args are used."""
        with patch('requests.Session.request') as mock_request:
            rest_client = RestClient('localhost', 8000)
            rest_client.session.request(
                'GET', '/test', None, None, None, None,
                None, None, None)
            mock_request.assert_called_with(
                'GET', '/test', None, None, None, None,
                None, None, None, timeout=45)
    def test_timeout_no_auto_set_kwarg(self):
        """An explicit timeout kwarg is passed through untouched."""
        with patch('requests.Session.request') as mock_request:
            rest_client = RestClient('localhost', 8000)
            rest_client.session.request('GET', '/test', timeout=20)
            mock_request.assert_called_with('GET', '/test', timeout=20)
    def test_timeout_no_auto_set_arg(self):
        """A positional timeout argument is passed through untouched."""
        with patch('requests.Session.request') as mock_request:
            rest_client = RestClient('localhost', 8000)
            rest_client.session.request(
                'GET', '/test', None, None, None, None,
                None, None, 40)
            mock_request.assert_called_with(
                'GET', '/test', None, None, None, None,
                None, None, 40)
class RestClientDoRequestTest(unittest.TestCase):
    """Verify do_request() wraps connection failures in RequestException."""
    @classmethod
    def setUpClass(cls):
        # Patch the whole requests.Session so no network I/O can happen.
        cls.mock_requests = patch('requests.Session').start()
        cls.rest_client = RestClientTestClass('localhost', 8000, 'UnitTest')
    def test_endpoint_method_with_annotation(self):
        # The decorated stub endpoint returns None.
        self.assertEqual(self.rest_client.fake_endpoint_method_with_annotation(), None)
    def test_do_request_exception_no_args(self):
        """A bare ConnectionError yields the generic 'cannot be reached' text."""
        self.mock_requests().get.side_effect = requests.exceptions.ConnectionError()
        with self.assertRaises(RequestException) as context:
            self.rest_client.do_request('GET', '/test')
        self.assertEqual('UnitTest REST API cannot be reached. Please '
                         'check your configuration and that the API '
                         'endpoint is accessible',
                         context.exception.message)
    def test_do_request_exception_args_1(self):
        """MaxRetryError with a plain string reason -> generic message."""
        self.mock_requests().post.side_effect = requests.exceptions.ConnectionError(
            MaxRetryError('Abc', 'http://xxx.yyy', 'too many redirects'))
        with self.assertRaises(RequestException) as context:
            self.rest_client.do_request('POST', '/test')
        self.assertEqual('UnitTest REST API cannot be reached. Please '
                         'check your configuration and that the API '
                         'endpoint is accessible',
                         context.exception.message)
    def test_do_request_exception_args_2(self):
        """ProtocolError cause -> generic message."""
        self.mock_requests().put.side_effect = requests.exceptions.ConnectionError(
            ProtocolError('Connection broken: xyz'))
        with self.assertRaises(RequestException) as context:
            self.rest_client.do_request('PUT', '/test')
        self.assertEqual('UnitTest REST API cannot be reached. Please '
                         'check your configuration and that the API '
                         'endpoint is accessible',
                         context.exception.message)
    def test_do_request_exception_nested_args(self):
        """A nested errno cause is extracted into the error message."""
        self.mock_requests().delete.side_effect = requests.exceptions.ConnectionError(
            MaxRetryError('Xyz', 'https://foo.bar',
                          Exception('Foo: [Errno -42] bla bla bla')))
        with self.assertRaises(RequestException) as context:
            self.rest_client.do_request('DELETE', '/test')
        self.assertEqual('UnitTest REST API cannot be reached: bla '
                         'bla bla [errno -42]. Please check your '
                         'configuration and that the API endpoint '
                         'is accessible',
                         context.exception.message)
| 4,871 | 42.891892 | 87 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_rest_tasks.py | # -*- coding: utf-8 -*-
import time
try:
import mock
except ImportError:
import unittest.mock as mock
from ..controllers import RESTController, Router, Task
from ..controllers.task import Task as TaskController
from ..services import progress
from ..tests import ControllerTestCase
from ..tools import NotificationQueue, TaskManager
@Router('/test/task', secure=False)
class TaskTest(RESTController):
    """Dummy REST controller whose handlers execute as asynchronous tasks."""
    # Artificial delay (seconds) applied by create/set/delete; tests raise it
    # to exercise the still-running-task code path.
    sleep_time = 0.0
    @Task('task/create', {'param': '{param}'}, wait_for=1.0)
    def create(self, param):
        time.sleep(TaskTest.sleep_time)
        return {'my_param': param}
    @Task('task/set', {'param': '{2}'}, wait_for=1.0)
    def set(self, key, param=None):
        time.sleep(TaskTest.sleep_time)
        return {'key': key, 'my_param': param}
    @Task('task/delete', ['{key}'], wait_for=1.0)
    def delete(self, key):
        # pylint: disable=unused-argument
        time.sleep(TaskTest.sleep_time)
    @Task('task/foo', ['{param}'])
    @RESTController.Collection('POST', path='/foo')
    def foo_post(self, param):
        return {'my_param': param}
    @Task('task/bar', ['{key}', '{param}'])
    @RESTController.Resource('PUT', path='/bar')
    def bar_put(self, key, param=None):
        return {'my_param': param, 'key': key}
    @Task('task/query', ['{param}'])
    @RESTController.Collection('POST', query_params=['param'])
    def query(self, param=None):
        return {'my_param': param}
class TaskControllerTest(ControllerTestCase):
    """End-to-end tests driving the TaskTest endpoints via the task queue."""
    @classmethod
    def setup_server(cls):
        # pylint: disable=protected-access
        progress.get_progress_tasks = mock.MagicMock()
        progress.get_progress_tasks.return_value = ([], [])
        NotificationQueue.start_queue()
        TaskManager.init()
        cls.setup_controllers([TaskTest, TaskController])
    @classmethod
    def tearDownClass(cls):
        NotificationQueue.stop()
    def setUp(self):
        # Reset the artificial delay so each test starts with fast handlers.
        TaskTest.sleep_time = 0.0
    def test_create_task(self):
        self._task_post('/test/task', {'param': 'hello'})
        self.assertJsonBody({'my_param': 'hello'})
    def test_long_set_task(self):
        # A 2s handler exceeds the 1s wait_for, exercising the async path.
        TaskTest.sleep_time = 2.0
        self._task_put('/test/task/2', {'param': 'hello'})
        self.assertJsonBody({'key': '2', 'my_param': 'hello'})
    def test_delete_task(self):
        self._task_delete('/test/task/hello')
    def test_foo_task(self):
        self._task_post('/test/task/foo', {'param': 'hello'})
        self.assertJsonBody({'my_param': 'hello'})
    def test_bar_task(self):
        self._task_put('/test/task/3/bar', {'param': 'hello'})
        self.assertJsonBody({'my_param': 'hello', 'key': '3'})
    def test_query_param(self):
        self._task_post('/test/task/query')
        self.assertJsonBody({'my_param': None})
| 2,790 | 29.010753 | 62 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_rgw.py | from unittest.mock import Mock, call, patch
from .. import mgr
from ..controllers.rgw import Rgw, RgwDaemon, RgwUser
from ..rest_client import RequestException
from ..services.rgw_client import RgwClient
from ..tests import ControllerTestCase, RgwStub
class RgwControllerTestCase(ControllerTestCase):
    """Tests for /test/ui-api/rgw/status availability reporting."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Rgw], '/test')
    def setUp(self) -> None:
        RgwStub.get_daemons()
        RgwStub.get_settings()
    @patch.object(RgwClient, '_get_user_id', Mock(return_value='fake-user'))
    @patch.object(RgwClient, 'is_service_online', Mock(return_value=True))
    @patch.object(RgwClient, '_is_system_user', Mock(return_value=True))
    def test_status_available(self):
        """Online service + system user -> available."""
        self._get('/test/ui-api/rgw/status')
        self.assertStatus(200)
        self.assertJsonBody({'available': True, 'message': None})
    @patch.object(RgwClient, '_get_user_id', Mock(return_value='fake-user'))
    @patch.object(RgwClient, 'is_service_online', Mock(
        side_effect=RequestException('My test error')))
    def test_status_online_check_error(self):
        """A RequestException during the online check becomes the message."""
        self._get('/test/ui-api/rgw/status')
        self.assertStatus(200)
        self.assertJsonBody({'available': False,
                             'message': 'My test error'})
    @patch.object(RgwClient, '_get_user_id', Mock(return_value='fake-user'))
    @patch.object(RgwClient, 'is_service_online', Mock(return_value=False))
    def test_status_not_online(self):
        """Service not online -> unavailable with an explanatory message."""
        self._get('/test/ui-api/rgw/status')
        self.assertStatus(200)
        self.assertJsonBody({'available': False,
                             'message': "Failed to connect to the Object Gateway's Admin Ops API."})
    @patch.object(RgwClient, '_get_user_id', Mock(return_value='fake-user'))
    @patch.object(RgwClient, 'is_service_online', Mock(return_value=True))
    @patch.object(RgwClient, '_is_system_user', Mock(return_value=False))
    def test_status_not_system_user(self):
        """Non-system dashboard user -> unavailable with a hint."""
        self._get('/test/ui-api/rgw/status')
        self.assertStatus(200)
        self.assertJsonBody({'available': False,
                             'message': 'The system flag is not set for user "fake-user".'})
    def test_status_no_service(self):
        """No RGW services registered at all -> unavailable."""
        RgwStub.get_mgr_no_services()
        self._get('/test/ui-api/rgw/status')
        self.assertStatus(200)
        self.assertJsonBody({'available': False, 'message': 'No RGW service is running.'})
class RgwDaemonControllerTestCase(ControllerTestCase):
    """Tests for /test/api/rgw/daemon listing."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([RgwDaemon], '/test')
    @patch('dashboard.services.rgw_client.RgwClient._get_user_id', Mock(
        return_value='dummy_admin'))
    def test_list(self):
        """Daemons are built from service map + metadata; the first one found
        is flagged as default."""
        RgwStub.get_daemons()
        RgwStub.get_settings()
        mgr.list_servers.return_value = [{
            'hostname': 'host1',
            'services': [{'id': '4832', 'type': 'rgw'}, {'id': '5356', 'type': 'rgw'}]
        }]
        # One metadata blob per service id, consumed in order.
        mgr.get_metadata.side_effect = [
            {
                'ceph_version': 'ceph version master (dev)',
                'id': 'daemon1',
                'realm_name': 'realm1',
                'zonegroup_name': 'zg1',
                'zone_name': 'zone1',
                'frontend_config#0': 'beast port=80'
            },
            {
                'ceph_version': 'ceph version master (dev)',
                'id': 'daemon2',
                'realm_name': 'realm2',
                'zonegroup_name': 'zg2',
                'zone_name': 'zone2',
                'frontend_config#0': 'beast port=80'
            }]
        self._get('/test/api/rgw/daemon')
        self.assertStatus(200)
        self.assertJsonBody([{
            'id': 'daemon1',
            'service_map_id': '4832',
            'version': 'ceph version master (dev)',
            'server_hostname': 'host1',
            'realm_name': 'realm1',
            'zonegroup_name': 'zg1',
            'zone_name': 'zone1', 'default': True,
            'port': 80
        },
            {
            'id': 'daemon2',
            'service_map_id': '5356',
            'version': 'ceph version master (dev)',
            'server_hostname': 'host1',
            'realm_name': 'realm2',
            'zonegroup_name': 'zg2',
            'zone_name': 'zone2',
            'default': False,
            'port': 80
        }])
    def test_list_empty(self):
        """No RGW services -> empty list."""
        RgwStub.get_mgr_no_services()
        self._get('/test/api/rgw/daemon')
        self.assertStatus(200)
        self.assertJsonBody([])
class RgwUserControllerTestCase(ControllerTestCase):
    """Tests for /test/api/rgw/user listing (marker pagination) and get."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([RgwUser], '/test')
    @patch('dashboard.controllers.rgw.RgwRESTController.proxy')
    def test_user_list(self, mock_proxy):
        """A single non-truncated page is returned as-is."""
        mock_proxy.side_effect = [{
            'count': 3,
            'keys': ['test1', 'test2', 'test3'],
            'truncated': False
        }]
        self._get('/test/api/rgw/user?daemon_name=dummy-daemon')
        self.assertStatus(200)
        mock_proxy.assert_has_calls([
            call('dummy-daemon', 'GET', 'user?list', {})
        ])
        self.assertJsonBody(['test1', 'test2', 'test3'])
    @patch('dashboard.controllers.rgw.RgwRESTController.proxy')
    def test_user_list_marker(self, mock_proxy):
        """A truncated page is followed up using its marker."""
        mock_proxy.side_effect = [{
            'count': 3,
            'keys': ['test1', 'test2', 'test3'],
            'marker': 'foo:bar',
            'truncated': True
        }, {
            'count': 1,
            'keys': ['admin'],
            'truncated': False
        }]
        self._get('/test/api/rgw/user')
        self.assertStatus(200)
        mock_proxy.assert_has_calls([
            call(None, 'GET', 'user?list', {}),
            call(None, 'GET', 'user?list', {'marker': 'foo:bar'})
        ])
        self.assertJsonBody(['test1', 'test2', 'test3', 'admin'])
    @patch('dashboard.controllers.rgw.RgwRESTController.proxy')
    def test_user_list_duplicate_marker(self, mock_proxy):
        """Two consecutive pages with the same marker abort with a 500."""
        mock_proxy.side_effect = [{
            'count': 3,
            'keys': ['test1', 'test2', 'test3'],
            'marker': 'foo:bar',
            'truncated': True
        }, {
            'count': 3,
            'keys': ['test4', 'test5', 'test6'],
            'marker': 'foo:bar',
            'truncated': True
        }, {
            'count': 1,
            'keys': ['admin'],
            'truncated': False
        }]
        self._get('/test/api/rgw/user')
        self.assertStatus(500)
    @patch('dashboard.controllers.rgw.RgwRESTController.proxy')
    def test_user_list_invalid_marker(self, mock_proxy):
        """A truncated page with an empty marker aborts with a 500."""
        mock_proxy.side_effect = [{
            'count': 3,
            'keys': ['test1', 'test2', 'test3'],
            'marker': 'foo:bar',
            'truncated': True
        }, {
            'count': 3,
            'keys': ['test4', 'test5', 'test6'],
            'marker': '',
            'truncated': True
        }, {
            'count': 1,
            'keys': ['admin'],
            'truncated': False
        }]
        self._get('/test/api/rgw/user')
        self.assertStatus(500)
    @patch('dashboard.controllers.rgw.RgwRESTController.proxy')
    @patch.object(RgwUser, '_keys_allowed')
    def test_user_get_with_keys(self, keys_allowed, mock_proxy):
        """Keys stay in the response when the caller may see them."""
        keys_allowed.return_value = True
        mock_proxy.return_value = {
            'tenant': '',
            'user_id': 'my_user_id',
            'keys': [],
            'swift_keys': []
        }
        self._get('/test/api/rgw/user/testuser')
        self.assertStatus(200)
        self.assertInJsonBody('keys')
        self.assertInJsonBody('swift_keys')
    @patch('dashboard.controllers.rgw.RgwRESTController.proxy')
    @patch.object(RgwUser, '_keys_allowed')
    def test_user_get_without_keys(self, keys_allowed, mock_proxy):
        """Keys are stripped from the response when not allowed."""
        keys_allowed.return_value = False
        mock_proxy.return_value = {
            'tenant': '',
            'user_id': 'my_user_id',
            'keys': [],
            'swift_keys': []
        }
        self._get('/test/api/rgw/user/testuser')
        self.assertStatus(200)
        self.assertNotIn('keys', self.json_body())
        self.assertNotIn('swift_keys', self.json_body())
| 8,268 | 34.952174 | 100 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_rgw_client.py | # -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
import errno
from unittest import TestCase
from unittest.mock import Mock, patch
from .. import mgr
from ..exceptions import DashboardException
from ..services.rgw_client import NoCredentialsException, \
NoRgwDaemonsException, RgwClient, _parse_frontend_config
from ..settings import Settings
from ..tests import CLICommandTestMixin, RgwStub
@patch('dashboard.services.rgw_client.RgwClient._get_user_id', Mock(
return_value='dummy_admin'))
class RgwClientTest(TestCase, CLICommandTestMixin):
_dashboard_user_realm1_access_key = 'VUOFXZFK24H81ISTVBTR'
_dashboard_user_realm1_secret_key = '0PGsCvXPGWS3AGgibUZEcd9efLrbbshlUkY3jruR'
_dashboard_user_realm2_access_key = 'OMDR282VYLBC1ZYMYDL0'
_dashboard_user_realm2_secret_key = 'N3thf7jAiwQ90PsPrhC2DIcvCFOsBXtBvPJJMdC3'
_radosgw_admin_result_error = (-errno.EINVAL, '', 'fake error')
_radosgw_admin_result_no_realms = (0, {}, '')
_radosgw_admin_result_realms = (0, {"realms": ["realm1", "realm2"]}, '')
_radosgw_admin_result_user_realm1 = (
0,
{
"keys": [
{
"user": "dashboard",
"access_key": _dashboard_user_realm1_access_key,
"secret_key": _dashboard_user_realm1_secret_key
}
],
"system": "true"
},
'')
_radosgw_admin_result_user_realm2 = (
0,
{
"keys": [
{
"user": "dashboard",
"access_key": _dashboard_user_realm2_access_key,
"secret_key": _dashboard_user_realm2_secret_key
}
],
"system": "true"
},
'')
def setUp(self):
RgwStub.get_daemons()
self.mock_kv_store()
self.CONFIG_KEY_DICT.update({
'RGW_API_ACCESS_KEY': 'klausmustermann',
'RGW_API_SECRET_KEY': 'supergeheim',
})
def test_configure_credentials_error(self):
self.CONFIG_KEY_DICT.update({
'RGW_API_ACCESS_KEY': '',
'RGW_API_SECRET_KEY': '',
})
# Get no realms, get no user, user creation fails.
mgr.send_rgwadmin_command.side_effect = [
self._radosgw_admin_result_error,
self._radosgw_admin_result_error,
self._radosgw_admin_result_error,
]
with self.assertRaises(NoCredentialsException) as cm:
RgwClient.admin_instance()
self.assertIn('No RGW credentials found', str(cm.exception))
def test_configure_credentials_error_with_realms(self):
self.CONFIG_KEY_DICT.update({
'RGW_API_ACCESS_KEY': '',
'RGW_API_SECRET_KEY': '',
})
# Get realms, get no user, user creation fails.
mgr.send_rgwadmin_command.side_effect = [
self._radosgw_admin_result_realms,
self._radosgw_admin_result_error,
self._radosgw_admin_result_error,
self._radosgw_admin_result_error,
self._radosgw_admin_result_error,
]
with self.assertRaises(NoCredentialsException) as cm:
RgwClient.admin_instance()
self.assertIn('No RGW credentials found', str(cm.exception))
def test_set_rgw_credentials_command(self):
# Get no realms, get user.
mgr.send_rgwadmin_command.side_effect = [
self._radosgw_admin_result_error,
self._radosgw_admin_result_user_realm1
]
result = self.exec_cmd('set-rgw-credentials')
self.assertEqual(result, 'RGW credentials configured')
self.assertEqual(Settings.RGW_API_ACCESS_KEY, self._dashboard_user_realm1_access_key)
self.assertEqual(Settings.RGW_API_SECRET_KEY, self._dashboard_user_realm1_secret_key)
# Get no realms, get no user, user creation.
mgr.send_rgwadmin_command.side_effect = [
self._radosgw_admin_result_error,
self._radosgw_admin_result_error,
self._radosgw_admin_result_user_realm1
]
result = self.exec_cmd('set-rgw-credentials')
self.assertEqual(result, 'RGW credentials configured')
self.assertEqual(Settings.RGW_API_ACCESS_KEY, self._dashboard_user_realm1_access_key)
self.assertEqual(Settings.RGW_API_SECRET_KEY, self._dashboard_user_realm1_secret_key)
# Get realms, get users.
mgr.send_rgwadmin_command.side_effect = [
self._radosgw_admin_result_realms,
self._radosgw_admin_result_user_realm1,
self._radosgw_admin_result_user_realm2
]
result = self.exec_cmd('set-rgw-credentials')
self.assertEqual(result, 'RGW credentials configured')
self.assertEqual(Settings.RGW_API_ACCESS_KEY, {
'realm1': self._dashboard_user_realm1_access_key,
'realm2': self._dashboard_user_realm2_access_key
})
self.assertEqual(Settings.RGW_API_SECRET_KEY, {
'realm1': self._dashboard_user_realm1_secret_key,
'realm2': self._dashboard_user_realm2_secret_key
})
# Get realms, get no users, users' creation.
mgr.send_rgwadmin_command.side_effect = [
self._radosgw_admin_result_realms,
self._radosgw_admin_result_error,
self._radosgw_admin_result_user_realm1,
self._radosgw_admin_result_error,
self._radosgw_admin_result_user_realm2
]
result = self.exec_cmd('set-rgw-credentials')
self.assertEqual(result, 'RGW credentials configured')
self.assertEqual(Settings.RGW_API_ACCESS_KEY, {
'realm1': self._dashboard_user_realm1_access_key,
'realm2': self._dashboard_user_realm2_access_key
})
self.assertEqual(Settings.RGW_API_SECRET_KEY, {
'realm1': self._dashboard_user_realm1_secret_key,
'realm2': self._dashboard_user_realm2_secret_key
})
# Get realms, get no users, realm 2 user creation fails.
mgr.send_rgwadmin_command.side_effect = [
self._radosgw_admin_result_realms,
self._radosgw_admin_result_error,
self._radosgw_admin_result_user_realm1,
self._radosgw_admin_result_error,
self._radosgw_admin_result_error,
]
result = self.exec_cmd('set-rgw-credentials')
self.assertEqual(result, 'RGW credentials configured')
self.assertEqual(Settings.RGW_API_ACCESS_KEY, {
'realm1': self._dashboard_user_realm1_access_key,
})
self.assertEqual(Settings.RGW_API_SECRET_KEY, {
'realm1': self._dashboard_user_realm1_secret_key,
})
def test_ssl_verify(self):
Settings.RGW_API_SSL_VERIFY = True
instance = RgwClient.admin_instance()
self.assertTrue(instance.session.verify)
def test_no_ssl_verify(self):
Settings.RGW_API_SSL_VERIFY = False
instance = RgwClient.admin_instance()
self.assertFalse(instance.session.verify)
def test_no_daemons(self):
RgwStub.get_mgr_no_services()
with self.assertRaises(NoRgwDaemonsException) as cm:
RgwClient.admin_instance()
self.assertIn('No RGW service is running.', str(cm.exception))
@patch.object(RgwClient, '_get_daemon_zone_info')
def test_get_placement_targets_from_zone(self, zone_info):
zone_info.return_value = {
'id': 'a0df30ea-4b5b-4830-b143-2bedf684663d',
'placement_pools': [
{
'key': 'default-placement',
'val': {
'index_pool': 'default.rgw.buckets.index',
'storage_classes': {
'STANDARD': {
'data_pool': 'default.rgw.buckets.data'
}
}
}
}
]
}
instance = RgwClient.admin_instance()
expected_result = {
'zonegroup': 'zonegroup1',
'placement_targets': [
{
'name': 'default-placement',
'data_pool': 'default.rgw.buckets.data'
}
]
}
self.assertEqual(expected_result, instance.get_placement_targets())
@patch.object(RgwClient, '_get_realms_info')
def test_get_realms(self, realms_info):
realms_info.side_effect = [
{
'default_info': '51de8373-bc24-4f74-a9b7-8e9ef4cb71f7',
'realms': [
'realm1',
'realm2'
]
},
{}
]
instance = RgwClient.admin_instance()
self.assertEqual(['realm1', 'realm2'], instance.get_realms())
self.assertEqual([], instance.get_realms())
def test_set_bucket_locking_error(self):
instance = RgwClient.admin_instance()
test_params = [
('COMPLIANCE', 'null', None, 'must be a positive integer'),
('COMPLIANCE', None, 'null', 'must be a positive integer'),
('COMPLIANCE', -1, None, 'must be a positive integer'),
('COMPLIANCE', None, -1, 'must be a positive integer'),
('COMPLIANCE', 1, 1, 'You can\'t specify both at the same time'),
('COMPLIANCE', None, None, 'You must specify at least one'),
('COMPLIANCE', 0, 0, 'You must specify at least one'),
(None, 1, 0, 'must be either COMPLIANCE or GOVERNANCE'),
('', 1, 0, 'must be either COMPLIANCE or GOVERNANCE'),
('FAKE_MODE', 1, 0, 'must be either COMPLIANCE or GOVERNANCE')
]
for params in test_params:
mode, days, years, error_msg = params
with self.assertRaises(DashboardException) as cm:
instance.set_bucket_locking(
bucket_name='test',
mode=mode,
retention_period_days=days,
retention_period_years=years
)
self.assertIn(error_msg, str(cm.exception))
@patch('dashboard.rest_client._Request', Mock())
def test_set_bucket_locking_success(self):
instance = RgwClient.admin_instance()
test_params = [
('Compliance', '1', None),
('Governance', 1, None),
('COMPLIANCE', None, '1'),
('GOVERNANCE', None, 1),
]
for params in test_params:
mode, days, years = params
self.assertIsNone(instance.set_bucket_locking(
bucket_name='test',
mode=mode,
retention_period_days=days,
retention_period_years=years
))
class RgwClientHelperTest(TestCase):
    """Unit tests for the RGW frontend-config parser `_parse_frontend_config`."""

    def _assert_config(self, config, expected):
        # Shared helper: parse *config* and compare the (port, ssl) tuple.
        self.assertEqual(_parse_frontend_config(config), expected)

    def _assert_lookup_error(self, config):
        # Shared helper: parsing *config* must fail with the standard message.
        with self.assertRaises(LookupError) as ctx:
            _parse_frontend_config(config)
        self.assertEqual(
            str(ctx.exception),
            'Failed to determine RGW port from "{}"'.format(config))

    def test_parse_frontend_config_1(self):
        self._assert_config('beast port=8000', (8000, False))

    def test_parse_frontend_config_2(self):
        # Only the first port is taken.
        self._assert_config('beast port=80 port=8000', (80, False))

    def test_parse_frontend_config_3(self):
        # An SSL port wins over a plain port.
        self._assert_config('beast ssl_port=443 port=8000', (443, True))

    def test_parse_frontend_config_4(self):
        self._assert_config('beast endpoint=192.168.0.100:8000', (8000, False))

    def test_parse_frontend_config_5(self):
        # No port given: default HTTP port.
        self._assert_config('beast endpoint=[::1]', (80, False))

    def test_parse_frontend_config_6(self):
        self._assert_config('beast ssl_endpoint=192.168.0.100:8443', (8443, True))

    def test_parse_frontend_config_7(self):
        # No port given: default HTTPS port.
        self._assert_config('beast ssl_endpoint=192.168.0.100', (443, True))

    def test_parse_frontend_config_8(self):
        self._assert_config('beast ssl_endpoint=[::1]:8443 endpoint=192.0.2.3:80',
                            (8443, True))

    def test_parse_frontend_config_9(self):
        self._assert_config('beast port=8080 endpoint=192.0.2.3:80', (8080, False))

    def test_parse_frontend_config_10(self):
        self._assert_config('beast ssl_endpoint=192.0.2.3:8443 port=8080',
                            (8443, True))

    def test_parse_frontend_config_11(self):
        # civetweb marks SSL ports with a trailing 's'.
        self._assert_config('civetweb port=8000s', (8000, True))

    def test_parse_frontend_config_12(self):
        self._assert_config('civetweb port=443s port=8000', (443, True))

    def test_parse_frontend_config_13(self):
        self._assert_config('civetweb port=192.0.2.3:80', (80, False))

    def test_parse_frontend_config_14(self):
        self._assert_config('civetweb port=172.5.2.51:8080s', (8080, True))

    def test_parse_frontend_config_15(self):
        self._assert_config('civetweb port=[::]:8080', (8080, False))

    def test_parse_frontend_config_16(self):
        self._assert_config('civetweb port=ip6-localhost:80s', (80, True))

    def test_parse_frontend_config_17(self):
        self._assert_config('civetweb port=[2001:0db8::1234]:80', (80, False))

    def test_parse_frontend_config_18(self):
        self._assert_config('civetweb port=[::1]:8443s', (8443, True))

    def test_parse_frontend_config_19(self):
        # Multiple '+'-joined ports: the first one wins.
        self._assert_config('civetweb port=127.0.0.1:8443s+8000', (8443, True))

    def test_parse_frontend_config_20(self):
        self._assert_config('civetweb port=127.0.0.1:8080+443s', (8080, False))

    def test_parse_frontend_config_21(self):
        self._assert_lookup_error('civetweb port=xyz')

    def test_parse_frontend_config_22(self):
        self._assert_lookup_error('civetweb')

    def test_parse_frontend_config_23(self):
        self._assert_lookup_error('mongoose port=8080')
| 14,602 | 40.019663 | 100 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_settings.py | # -*- coding: utf-8 -*-
import errno
import unittest
from mgr_module import ERROR_MSG_EMPTY_INPUT_FILE
from .. import settings
from ..controllers.settings import Settings as SettingsController
from ..settings import Settings, handle_option_command
from ..tests import ControllerTestCase, KVStoreMockMixin
class SettingsTest(unittest.TestCase, KVStoreMockMixin):
    """Tests attribute access and CLI command handling for dashboard options."""

    @classmethod
    def setUpClass(cls):
        # Inject a fixed set of options so the tests do not depend on the
        # real option catalogue.
        setattr(settings.Options, 'GRAFANA_API_HOST', settings.Setting('localhost', [str]))
        setattr(settings.Options, 'GRAFANA_API_PORT', settings.Setting(3000, [int]))
        setattr(settings.Options, 'GRAFANA_ENABLED', settings.Setting(False, [bool]))
        # pylint: disable=protected-access
        settings._OPTIONS_COMMAND_MAP = settings._options_command_map()

    def setUp(self):
        self.mock_kv_store()
        # Restore defaults that earlier tests may have overwritten.
        if Settings.GRAFANA_API_HOST != 'localhost':
            Settings.GRAFANA_API_HOST = 'localhost'
        if Settings.GRAFANA_API_PORT != 3000:
            Settings.GRAFANA_API_PORT = 3000

    def test_get_setting(self):
        self.assertEqual('localhost', Settings.GRAFANA_API_HOST)
        self.assertEqual(3000, Settings.GRAFANA_API_PORT)
        self.assertEqual(False, Settings.GRAFANA_ENABLED)

    def test_set_setting(self):
        Settings.GRAFANA_API_HOST = 'grafanahost'
        self.assertEqual('grafanahost', Settings.GRAFANA_API_HOST)

        Settings.GRAFANA_API_PORT = 50
        self.assertEqual(50, Settings.GRAFANA_API_PORT)

        Settings.GRAFANA_ENABLED = True
        self.assertEqual(True, Settings.GRAFANA_ENABLED)

    def test_get_cmd(self):
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard get-grafana-api-port'}, None)
        self.assertEqual((rc, out, err), (0, '3000', ''))

    def test_set_cmd(self):
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard set-grafana-api-port', 'value': '4000'},
            None)
        self.assertEqual((rc, out, err),
                         (0, 'Option GRAFANA_API_PORT updated', ''))

    def test_set_secret_empty(self):
        # Secrets are passed through the input buffer; an empty buffer fails.
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard set-grafana-api-password'}, None)
        self.assertEqual(rc, -errno.EINVAL)
        self.assertEqual(out, '')
        self.assertIn(ERROR_MSG_EMPTY_INPUT_FILE, err)

    def test_set_secret(self):
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard set-grafana-api-password'}, 'my-secret')
        self.assertEqual((rc, out, err),
                         (0, 'Option GRAFANA_API_PASSWORD updated', ''))

    def test_reset_cmd(self):
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard reset-grafana-enabled'}, None)
        self.assertEqual(rc, 0)
        self.assertEqual(out, 'Option {} reset to default value "{}"'.format(
            'GRAFANA_ENABLED', Settings.GRAFANA_ENABLED))
        self.assertEqual(err, '')

    def test_inv_cmd(self):
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard get-non-existent-option'}, None)
        self.assertEqual(rc, -errno.ENOSYS)
        self.assertEqual(out, '')
        self.assertEqual(err, "Command not found "
                              "'dashboard get-non-existent-option'")

    def test_sync(self):
        # A value set through the attribute API is visible to the CLI ...
        Settings.GRAFANA_API_PORT = 5000
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard get-grafana-api-port'}, None)
        self.assertEqual((rc, out, err), (0, '5000', ''))
        # ... and a value set through the CLI is visible to the attribute API.
        rc, out, err = handle_option_command(
            {'prefix': 'dashboard set-grafana-api-host', 'value': 'new-local-host'},
            None)
        self.assertEqual((rc, out, err),
                         (0, 'Option GRAFANA_API_HOST updated', ''))
        self.assertEqual(Settings.GRAFANA_API_HOST, 'new-local-host')

    def test_attribute_error(self):
        with self.assertRaises(AttributeError) as ctx:
            _ = Settings.NON_EXISTENT_OPTION
        self.assertEqual(str(ctx.exception),
                         "type object 'Options' has no attribute 'NON_EXISTENT_OPTION'")
class SettingsControllerTest(ControllerTestCase, KVStoreMockMixin):
    """REST API tests for the /api/settings endpoints."""

    @classmethod
    def setup_server(cls):
        cls.setup_controllers([SettingsController])

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Register the options exercised below.
        setattr(settings.Options, 'GRAFANA_API_HOST', settings.Setting('localhost', [str]))
        setattr(settings.Options, 'GRAFANA_ENABLED', settings.Setting(False, [bool]))

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        super().setUp()
        self.mock_kv_store()

    def test_settings_list(self):
        self._get('/api/settings')
        data = self.json_body()
        self.assertTrue(len(data) > 0)
        self.assertStatus(200)
        # Every entry carries the full option description.
        for field in ('default', 'type', 'name', 'value'):
            self.assertIn(field, data[0].keys())

    def test_settings_list_filtered(self):
        self._get('/api/settings?names=GRAFANA_ENABLED,PWD_POLICY_ENABLED')
        self.assertStatus(200)
        names = [option['name'] for option in self.json_body()]
        self.assertTrue(len(names) == 2)
        self.assertIn('GRAFANA_ENABLED', names)
        self.assertIn('PWD_POLICY_ENABLED', names)

    def test_rgw_daemon_get(self):
        self._get('/api/settings/grafana-api-username')
        self.assertStatus(200)
        self.assertJsonBody({
            u'default': u'admin',
            u'type': u'str',
            u'name': u'GRAFANA_API_USERNAME',
            u'value': u'admin',
        })

    def test_set(self):
        self._put('/api/settings/GRAFANA_API_USERNAME', {'value': 'foo'})
        self.assertStatus(200)

        self._get('/api/settings/GRAFANA_API_USERNAME')
        self.assertStatus(200)
        for field in ('default', 'type', 'name', 'value'):
            self.assertInJsonBody(field)
        self.assertEqual(self.json_body()['value'], 'foo')

    def test_bulk_set(self):
        self._put('/api/settings', {
            'GRAFANA_API_USERNAME': 'foo',
            'GRAFANA_API_HOST': 'somehost',
        })
        self.assertStatus(200)

        self._get('/api/settings/grafana-api-username')
        self.assertStatus(200)
        self.assertEqual(self.json_body()['value'], 'foo')

        self._get('/api/settings/grafana-api-username')
        self.assertStatus(200)
        self.assertEqual(self.json_body()['value'], 'foo')

        self._get('/api/settings/grafana-api-host')
        self.assertStatus(200)
        self.assertEqual(self.json_body()['value'], 'somehost')
| 7,147 | 33.365385 | 91 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_ssl.py | import errno
import unittest
from ..tests import CLICommandTestMixin, CmdException
class SslTest(unittest.TestCase, CLICommandTestMixin):
    """CLI tests for managing the dashboard's SSL certificate and key."""

    def test_ssl_certificate_and_key(self):
        # An empty input buffer must be rejected for the certificate ...
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('set-ssl-certificate', inbuf='', mgr_id='x')
        self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
        self.assertEqual(str(ctx.exception), 'Please specify the certificate with "-i" option')

        self.assertEqual(
            self.exec_cmd('set-ssl-certificate', inbuf='content', mgr_id='x'),
            'SSL certificate updated')

        # ... and likewise for the certificate key.
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('set-ssl-certificate-key', inbuf='', mgr_id='x')
        self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
        self.assertEqual(str(ctx.exception), 'Please specify the certificate key with "-i" option')

        self.assertEqual(
            self.exec_cmd('set-ssl-certificate-key', inbuf='content', mgr_id='x'),
            'SSL certificate key updated')

    def test_set_mgr_created_self_signed_cert(self):
        self.assertEqual(self.exec_cmd('create-self-signed-cert'),
                         'Self-signed certificate created')
| 1,237 | 41.689655 | 99 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_sso.py | # -*- coding: utf-8 -*-
# pylint: disable=dangerous-default-value,too-many-public-methods
import errno
import tempfile
import unittest
from ..services.sso import load_sso_db
from ..tests import CLICommandTestMixin, CmdException
class AccessControlTest(unittest.TestCase, CLICommandTestMixin):
    """CLI tests for the `dashboard sso ...` SAML2 commands."""

    # Minimal SAML2 IdP metadata document (SimpleSAMLphp style) fed to
    # `dashboard sso setup saml2`. Certificate/signature payloads are
    # placeholders; only the document structure matters to the parser.
    IDP_METADATA = '''<?xml version="1.0"?>
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"
xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
entityID="https://testidp.ceph.com/simplesamlphp/saml2/idp/metadata.php"
ID="pfx8ca6fbd7-6062-d4a9-7995-0730aeb8114f">
<ds:Signature>
<ds:SignedInfo>
<ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/>
<ds:Reference URI="#pfx8ca6fbd7-6062-d4a9-7995-0730aeb8114f">
<ds:Transforms>
  <ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
  <ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
</ds:Transforms>
<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/>
<ds:DigestValue>v6V8fooEUeq/LO/59JCfJF69Tw3ohN52OGAY6X3jX8w=</ds:DigestValue>
</ds:Reference>
</ds:SignedInfo>
<ds:SignatureValue>IDP_SIGNATURE_VALUE</ds:SignatureValue>
<ds:KeyInfo>
<ds:X509Data>
<ds:X509Certificate>IDP_X509_CERTIFICATE</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</ds:Signature>
<md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
<md:KeyDescriptor use="signing">
<ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:X509Data>
<ds:X509Certificate>IDP_X509_CERTIFICATE</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</md:KeyDescriptor>
<md:KeyDescriptor use="encryption">
<ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:X509Data>
<ds:X509Certificate>IDP_X509_CERTIFICATE</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</md:KeyDescriptor>
<md:SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"
Location="https://testidp.ceph.com/simplesamlphp/saml2/idp/SingleLogoutService.php"/>
<md:NameIDFormat>urn:oasis:names:tc:SAML:2.0:nameid-format:transient</md:NameIDFormat>
<md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"
Location="https://testidp.ceph.com/simplesamlphp/saml2/idp/SSOService.php"/>
</md:IDPSSODescriptor>
</md:EntityDescriptor>'''

    def setUp(self):
        self.mock_kv_store()
        load_sso_db()

    def validate_onelogin_settings(self, onelogin_settings, ceph_dashboard_base_url, uid,
                                   sp_x509cert, sp_private_key, signature_enabled):
        """Assert the OneLogin settings dict generated by `sso setup saml2`.

        Checks SP entity id, ACS/SLS URLs derived from the dashboard base URL,
        the single requested attribute (*uid*), SP cert/key, and that all
        signature-related security flags equal *signature_enabled*.
        """
        self.assertIn('sp', onelogin_settings)
        self.assertIn('entityId', onelogin_settings['sp'])
        self.assertEqual(onelogin_settings['sp']['entityId'],
                         '{}/auth/saml2/metadata'.format(ceph_dashboard_base_url))

        self.assertIn('assertionConsumerService', onelogin_settings['sp'])
        self.assertIn('url', onelogin_settings['sp']['assertionConsumerService'])
        self.assertEqual(onelogin_settings['sp']['assertionConsumerService']['url'],
                         '{}/auth/saml2'.format(ceph_dashboard_base_url))

        self.assertIn('attributeConsumingService', onelogin_settings['sp'])
        attribute_consuming_service = onelogin_settings['sp']['attributeConsumingService']
        self.assertIn('requestedAttributes', attribute_consuming_service)
        requested_attributes = attribute_consuming_service['requestedAttributes']
        self.assertEqual(len(requested_attributes), 1)
        self.assertIn('name', requested_attributes[0])
        self.assertEqual(requested_attributes[0]['name'], uid)

        self.assertIn('singleLogoutService', onelogin_settings['sp'])
        self.assertIn('url', onelogin_settings['sp']['singleLogoutService'])
        self.assertEqual(onelogin_settings['sp']['singleLogoutService']['url'],
                         '{}/auth/saml2/logout'.format(ceph_dashboard_base_url))

        self.assertIn('x509cert', onelogin_settings['sp'])
        self.assertEqual(onelogin_settings['sp']['x509cert'], sp_x509cert)

        self.assertIn('privateKey', onelogin_settings['sp'])
        self.assertEqual(onelogin_settings['sp']['privateKey'], sp_private_key)

        self.assertIn('security', onelogin_settings)
        self.assertIn('authnRequestsSigned', onelogin_settings['security'])
        self.assertEqual(onelogin_settings['security']['authnRequestsSigned'], signature_enabled)

        self.assertIn('logoutRequestSigned', onelogin_settings['security'])
        self.assertEqual(onelogin_settings['security']['logoutRequestSigned'], signature_enabled)

        self.assertIn('logoutResponseSigned', onelogin_settings['security'])
        self.assertEqual(onelogin_settings['security']['logoutResponseSigned'], signature_enabled)

        self.assertIn('wantMessagesSigned', onelogin_settings['security'])
        self.assertEqual(onelogin_settings['security']['wantMessagesSigned'], signature_enabled)

        self.assertIn('wantAssertionsSigned', onelogin_settings['security'])
        self.assertEqual(onelogin_settings['security']['wantAssertionsSigned'], signature_enabled)

    def test_sso_saml2_setup(self):
        # Without SP cert/key, signing is disabled.
        result = self.exec_cmd('sso setup saml2',
                               ceph_dashboard_base_url='https://cephdashboard.local',
                               idp_metadata=self.IDP_METADATA)
        self.validate_onelogin_settings(result, 'https://cephdashboard.local', 'uid', '', '',
                                        False)

    def test_sso_saml2_setup_error(self):
        # Cert and key must be given together, and the files must exist.
        default_kwargs = {
            "ceph_dashboard_base_url": 'https://cephdashboard.local',
            "idp_metadata": self.IDP_METADATA
        }
        params = [
            ({"sp_x_509_cert": "some/path"},
             "Missing parameter `sp_private_key`."),
            ({"sp_private_key": "some/path"},
             "Missing parameter `sp_x_509_cert`."),
            ({"sp_private_key": "some/path", "sp_x_509_cert": "invalid/path"},
             "`some/path` not found."),
        ]
        for param in params:
            kwargs = param[0]
            msg = param[1]
            kwargs.update(default_kwargs)
            with self.assertRaises(CmdException) as ctx:
                self.exec_cmd('sso setup saml2', **kwargs)

            self.assertEqual(str(ctx.exception), msg)
            self.assertEqual(ctx.exception.retcode, -errno.EINVAL)

    def test_sso_saml2_setup_with_files(self):
        # With a (possibly empty) cert and key file, signing is enabled.
        tmpfile = tempfile.NamedTemporaryFile()
        tmpfile2 = tempfile.NamedTemporaryFile()
        kwargs = {
            "ceph_dashboard_base_url": 'https://cephdashboard.local',
            "idp_metadata": self.IDP_METADATA,
            "sp_private_key": tmpfile.name,
            "sp_x_509_cert": tmpfile2.name,
        }
        result = self.exec_cmd('sso setup saml2', **kwargs)
        self.validate_onelogin_settings(result, 'https://cephdashboard.local', 'uid', '', '',
                                        True)
        tmpfile.close()
        tmpfile2.close()

    def test_sso_enable_saml2(self):
        # Enabling before setup must be refused.
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('sso enable saml2')

        self.assertEqual(ctx.exception.retcode, -errno.EPERM)
        self.assertEqual(str(ctx.exception), 'Single Sign-On is not configured: '
                                             'use `ceph dashboard sso setup saml2`')

        self.exec_cmd('sso setup saml2',
                      ceph_dashboard_base_url='https://cephdashboard.local',
                      idp_metadata=self.IDP_METADATA)

        result = self.exec_cmd('sso enable saml2')
        self.assertEqual(result, 'SSO is "enabled" with "SAML2" protocol.')

    def test_sso_disable(self):
        result = self.exec_cmd('sso disable')
        self.assertEqual(result, 'SSO is "disabled".')

    def test_sso_status(self):
        result = self.exec_cmd('sso status')
        self.assertEqual(result, 'SSO is "disabled".')

        # NOTE(review): `sso setup saml2` appears to implicitly enable SSO —
        # confirm against the sso command handler.
        self.exec_cmd('sso setup saml2',
                      ceph_dashboard_base_url='https://cephdashboard.local',
                      idp_metadata=self.IDP_METADATA)

        result = self.exec_cmd('sso status')
        self.assertEqual(result, 'SSO is "enabled" with "SAML2" protocol.')

    def test_sso_show_saml2(self):
        result = self.exec_cmd('sso show saml2')
        self.assertEqual(result, {
            'onelogin_settings': {}
        })
| 8,867 | 45.429319 | 113 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_task.py | # -*- coding: utf-8 -*-
import json
import threading
import time
import unittest
from collections import defaultdict
from functools import partial
from ..services.exception import serialize_dashboard_exception
from ..tools import NotificationQueue, TaskExecutor, TaskManager
class MyTask:
    """Test double that drives a (a)synchronous operation through TaskManager.

    Behavior knobs:
      op_seconds -- how long the operation sleeps before completing.
      wait       -- if True, the operation blocks until resume() is called.
      fail       -- if truthy, the task fails with "Task Unexpected Exception";
                    the special value "premature" makes the async op raise
                    before scheduling its background work.
      progress   -- progress value reported before finishing/failing.
      is_async   -- run via task_async_op + CallbackExecutor instead of task_op.
      handle_ex  -- attach serialize_dashboard_exception as exception handler.
    """

    class CallbackExecutor(TaskExecutor):
        """Executor that injects a completion callback as the first fn arg."""

        def __init__(self, fail, progress):
            super().__init__()
            self.fail = fail
            self.progress = progress

        def init(self, task):
            super().init(task)
            # Prepend our callback so task_async_op receives it first.
            args = [self.callback]
            args.extend(self.task.fn_args)
            self.task.fn_args = args

        def callback(self, result):
            # Invoked by task_async_op's background thread on completion.
            self.task.set_progress(self.progress)
            if self.fail:
                self.finish(None, Exception("Task Unexpected Exception"))
            else:
                self.finish(result, None)

    # pylint: disable=too-many-arguments
    def __init__(self, op_seconds, wait=False, fail=False, progress=50,
                 is_async=False, handle_ex=False):
        self.op_seconds = op_seconds
        self.wait = wait
        self.fail = fail
        self.progress = progress
        self.is_async = is_async
        self.handle_ex = handle_ex
        self._event = threading.Event()

    def run(self, ns, timeout=None):
        """Schedule the task under namespace *ns* and wait up to *timeout*.

        Returns TaskManager's (state, result) tuple from Task.wait().
        """
        args = ['dummy arg']
        kwargs = {'dummy': 'arg'}
        h_ex = partial(serialize_dashboard_exception,
                       include_http_status=True) if self.handle_ex else None
        if not self.is_async:
            task = TaskManager.run(
                ns, self.metadata(), self.task_op, args, kwargs,
                exception_handler=h_ex)
        else:
            task = TaskManager.run(
                ns, self.metadata(), self.task_async_op, args, kwargs,
                executor=MyTask.CallbackExecutor(self.fail, self.progress),
                exception_handler=h_ex)
        return task.wait(timeout)

    def task_op(self, *args, **kwargs):
        """Synchronous operation body; returns the received args/kwargs."""
        time.sleep(self.op_seconds)
        TaskManager.current_task().set_progress(self.progress)
        if self.fail:
            raise Exception("Task Unexpected Exception")
        if self.wait:
            self._event.wait()
        return {'args': list(args), 'kwargs': kwargs}

    def task_async_op(self, callback, *args, **kwargs):
        """Asynchronous operation body; completes via *callback* from a thread."""
        if self.fail == "premature":
            raise Exception("Task Unexpected Exception")

        def _run_bg():
            time.sleep(self.op_seconds)
            if self.wait:
                self._event.wait()
            callback({'args': list(args), 'kwargs': kwargs})

        worker = threading.Thread(target=_run_bg)
        worker.start()

    def resume(self):
        """Unblock a task created with wait=True."""
        self._event.set()

    def metadata(self):
        """Return the task metadata dict (mirrors the constructor knobs)."""
        return {
            'op_seconds': self.op_seconds,
            'wait': self.wait,
            'fail': self.fail,
            'progress': self.progress,
            'is_async': self.is_async,
            'handle_ex': self.handle_ex
        }
class TaskTest(unittest.TestCase):
TASK_FINISHED_MAP = defaultdict(threading.Event)
@classmethod
def _handle_task(cls, task):
cls.TASK_FINISHED_MAP[task.name].set()
@classmethod
def wait_for_task(cls, name):
cls.TASK_FINISHED_MAP[name].wait()
@classmethod
def setUpClass(cls):
NotificationQueue.start_queue()
TaskManager.init()
NotificationQueue.register(cls._handle_task, 'cd_task_finished',
priority=100)
@classmethod
def tearDownClass(cls):
NotificationQueue.deregister(cls._handle_task, 'cd_task_finished')
NotificationQueue.stop()
def setUp(self):
TaskManager.FINISHED_TASK_SIZE = 10
TaskManager.FINISHED_TASK_TTL = 60.0
def assertTaskResult(self, result): # noqa: N802
self.assertEqual(result,
{'args': ['dummy arg'], 'kwargs': {'dummy': 'arg'}})
def test_fast_task(self):
task1 = MyTask(1)
state, result = task1.run('test1/task1')
self.assertEqual(state, TaskManager.VALUE_DONE)
self.assertTaskResult(result)
self.wait_for_task('test1/task1')
_, fn_t = TaskManager.list('test1/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].exception)
self.assertTaskResult(fn_t[0].ret_value)
self.assertEqual(fn_t[0].progress, 100)
def test_slow_task(self):
task1 = MyTask(1)
state, result = task1.run('test2/task1', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
self.wait_for_task('test2/task1')
_, fn_t = TaskManager.list('test2/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].exception)
self.assertTaskResult(fn_t[0].ret_value)
self.assertEqual(fn_t[0].progress, 100)
def test_fast_task_with_failure(self):
task1 = MyTask(1, fail=True, progress=40)
with self.assertRaises(Exception) as ctx:
task1.run('test3/task1')
self.assertEqual(str(ctx.exception), "Task Unexpected Exception")
self.wait_for_task('test3/task1')
_, fn_t = TaskManager.list('test3/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].ret_value)
self.assertEqual(str(fn_t[0].exception), "Task Unexpected Exception")
self.assertEqual(fn_t[0].progress, 40)
def test_slow_task_with_failure(self):
task1 = MyTask(1, fail=True, progress=70)
state, result = task1.run('test4/task1', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
self.wait_for_task('test4/task1')
_, fn_t = TaskManager.list('test4/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].ret_value)
self.assertEqual(str(fn_t[0].exception), "Task Unexpected Exception")
self.assertEqual(fn_t[0].progress, 70)
def test_executing_tasks_list(self):
task1 = MyTask(0, wait=True, progress=30)
task2 = MyTask(0, wait=True, progress=60)
state, result = task1.run('test5/task1', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
ex_t, _ = TaskManager.list('test5/*')
self.assertEqual(len(ex_t), 1)
self.assertEqual(ex_t[0].name, 'test5/task1')
self.assertEqual(ex_t[0].progress, 30)
state, result = task2.run('test5/task2', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
ex_t, _ = TaskManager.list('test5/*')
self.assertEqual(len(ex_t), 2)
for task in ex_t:
if task.name == 'test5/task1':
self.assertEqual(task.progress, 30)
elif task.name == 'test5/task2':
self.assertEqual(task.progress, 60)
task2.resume()
self.wait_for_task('test5/task2')
ex_t, _ = TaskManager.list('test5/*')
self.assertEqual(len(ex_t), 1)
self.assertEqual(ex_t[0].name, 'test5/task1')
task1.resume()
self.wait_for_task('test5/task1')
ex_t, _ = TaskManager.list('test5/*')
self.assertEqual(len(ex_t), 0)
def test_task_idempotent(self):
task1 = MyTask(0, wait=True)
task1_clone = MyTask(0, wait=True)
state, result = task1.run('test6/task1', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
ex_t, _ = TaskManager.list('test6/*')
self.assertEqual(len(ex_t), 1)
self.assertEqual(ex_t[0].name, 'test6/task1')
state, result = task1_clone.run('test6/task1', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
ex_t, _ = TaskManager.list('test6/*')
self.assertEqual(len(ex_t), 1)
self.assertEqual(ex_t[0].name, 'test6/task1')
task1.resume()
self.wait_for_task('test6/task1')
ex_t, fn_t = TaskManager.list('test6/*')
self.assertEqual(len(ex_t), 0)
self.assertEqual(len(fn_t), 1)
def test_finished_cleanup(self):
TaskManager.FINISHED_TASK_SIZE = 2
TaskManager.FINISHED_TASK_TTL = 0.5
task1 = MyTask(0)
task2 = MyTask(0)
state, result = task1.run('test7/task1')
self.assertEqual(state, TaskManager.VALUE_DONE)
self.assertTaskResult(result)
self.wait_for_task('test7/task1')
state, result = task2.run('test7/task2')
self.assertEqual(state, TaskManager.VALUE_DONE)
self.assertTaskResult(result)
self.wait_for_task('test7/task2')
time.sleep(1)
_, fn_t = TaskManager.list('test7/*')
self.assertEqual(len(fn_t), 2)
for idx, task in enumerate(fn_t):
self.assertEqual(task.name,
"test7/task{}".format(len(fn_t)-idx))
task3 = MyTask(0)
state, result = task3.run('test7/task3')
self.assertEqual(state, TaskManager.VALUE_DONE)
self.assertTaskResult(result)
self.wait_for_task('test7/task3')
time.sleep(1)
_, fn_t = TaskManager.list('test7/*')
self.assertEqual(len(fn_t), 3)
for idx, task in enumerate(fn_t):
self.assertEqual(task.name,
"test7/task{}".format(len(fn_t)-idx))
_, fn_t = TaskManager.list('test7/*')
self.assertEqual(len(fn_t), 2)
for idx, task in enumerate(fn_t):
self.assertEqual(task.name,
"test7/task{}".format(len(fn_t)-idx+1))
def test_task_serialization_format(self):
task1 = MyTask(0, wait=True, progress=20)
task2 = MyTask(1)
task1.run('test8/task1', 0.5)
task2.run('test8/task2', 0.5)
self.wait_for_task('test8/task2')
ex_t, fn_t = TaskManager.list_serializable('test8/*')
self.assertEqual(len(ex_t), 1)
self.assertEqual(len(fn_t), 1)
try:
json.dumps(ex_t)
except ValueError as ex:
self.fail("Failed to serialize executing tasks: {}".format(str(ex)))
try:
json.dumps(fn_t)
except ValueError as ex:
self.fail("Failed to serialize finished tasks: {}".format(str(ex)))
# validate executing tasks attributes
self.assertEqual(len(ex_t[0].keys()), 4)
self.assertEqual(ex_t[0]['name'], 'test8/task1')
self.assertEqual(ex_t[0]['metadata'], task1.metadata())
self.assertIsNotNone(ex_t[0]['begin_time'])
self.assertEqual(ex_t[0]['progress'], 20)
# validate finished tasks attributes
self.assertEqual(len(fn_t[0].keys()), 9)
self.assertEqual(fn_t[0]['name'], 'test8/task2')
self.assertEqual(fn_t[0]['metadata'], task2.metadata())
self.assertIsNotNone(fn_t[0]['begin_time'])
self.assertIsNotNone(fn_t[0]['end_time'])
self.assertGreaterEqual(fn_t[0]['duration'], 1.0)
self.assertEqual(fn_t[0]['progress'], 100)
self.assertTrue(fn_t[0]['success'])
self.assertTaskResult(fn_t[0]['ret_value'])
self.assertIsNone(fn_t[0]['exception'])
task1.resume()
self.wait_for_task('test8/task1')
def test_fast_async_task(self):
task1 = MyTask(1, is_async=True)
state, result = task1.run('test9/task1')
self.assertEqual(state, TaskManager.VALUE_DONE)
self.assertTaskResult(result)
self.wait_for_task('test9/task1')
_, fn_t = TaskManager.list('test9/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].exception)
self.assertTaskResult(fn_t[0].ret_value)
self.assertEqual(fn_t[0].progress, 100)
def test_slow_async_task(self):
task1 = MyTask(1, is_async=True)
state, result = task1.run('test10/task1', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
self.wait_for_task('test10/task1')
_, fn_t = TaskManager.list('test10/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].exception)
self.assertTaskResult(fn_t[0].ret_value)
self.assertEqual(fn_t[0].progress, 100)
def test_fast_async_task_with_failure(self):
task1 = MyTask(1, fail=True, progress=40, is_async=True)
with self.assertRaises(Exception) as ctx:
task1.run('test11/task1')
self.assertEqual(str(ctx.exception), "Task Unexpected Exception")
self.wait_for_task('test11/task1')
_, fn_t = TaskManager.list('test11/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].ret_value)
self.assertEqual(str(fn_t[0].exception), "Task Unexpected Exception")
self.assertEqual(fn_t[0].progress, 40)
def test_slow_async_task_with_failure(self):
task1 = MyTask(1, fail=True, progress=70, is_async=True)
state, result = task1.run('test12/task1', 0.5)
self.assertEqual(state, TaskManager.VALUE_EXECUTING)
self.assertIsNone(result)
self.wait_for_task('test12/task1')
_, fn_t = TaskManager.list('test12/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].ret_value)
self.assertEqual(str(fn_t[0].exception), "Task Unexpected Exception")
self.assertEqual(fn_t[0].progress, 70)
def test_fast_async_task_with_premature_failure(self):
task1 = MyTask(1, fail="premature", progress=40, is_async=True)
with self.assertRaises(Exception) as ctx:
task1.run('test13/task1')
self.assertEqual(str(ctx.exception), "Task Unexpected Exception")
self.wait_for_task('test13/task1')
_, fn_t = TaskManager.list('test13/*')
self.assertEqual(len(fn_t), 1)
self.assertIsNone(fn_t[0].ret_value)
self.assertEqual(str(fn_t[0].exception), "Task Unexpected Exception")
    def test_task_serialization_format_on_failure(self):
        """Verify the JSON-serializable shape of a finished, failed task."""
        task1 = MyTask(1, fail=True)
        task1.run('test14/task1', 0.5)
        self.wait_for_task('test14/task1')
        # list_serializable returns (executing, finished) as plain dicts
        ex_t, fn_t = TaskManager.list_serializable('test14/*')
        self.assertEqual(len(ex_t), 0)
        self.assertEqual(len(fn_t), 1)
        # validate finished tasks attributes
        try:
            json.dumps(fn_t)
        except TypeError as ex:
            self.fail("Failed to serialize finished tasks: {}".format(str(ex)))
        # a finished-task dict carries exactly these nine keys
        self.assertEqual(len(fn_t[0].keys()), 9)
        self.assertEqual(fn_t[0]['name'], 'test14/task1')
        self.assertEqual(fn_t[0]['metadata'], task1.metadata())
        self.assertIsNotNone(fn_t[0]['begin_time'])
        self.assertIsNotNone(fn_t[0]['end_time'])
        self.assertGreaterEqual(fn_t[0]['duration'], 1.0)
        self.assertEqual(fn_t[0]['progress'], 50)
        self.assertFalse(fn_t[0]['success'])
        self.assertIsNotNone(fn_t[0]['exception'])
        # without an exception handler the payload is just the message
        self.assertEqual(fn_t[0]['exception'],
                         {"detail": "Task Unexpected Exception"})
    def test_task_serialization_format_on_failure_with_handler(self):
        """Same as above, but with an exception handler installed: the
        serialized exception becomes a structured error payload."""
        task1 = MyTask(1, fail=True, handle_ex=True)
        task1.run('test15/task1', 0.5)
        self.wait_for_task('test15/task1')
        ex_t, fn_t = TaskManager.list_serializable('test15/*')
        self.assertEqual(len(ex_t), 0)
        self.assertEqual(len(fn_t), 1)
        # validate finished tasks attributes
        try:
            json.dumps(fn_t)
        except TypeError as ex:
            self.fail("Failed to serialize finished tasks: {}".format(str(ex)))
        self.assertEqual(len(fn_t[0].keys()), 9)
        self.assertEqual(fn_t[0]['name'], 'test15/task1')
        self.assertEqual(fn_t[0]['metadata'], task1.metadata())
        self.assertIsNotNone(fn_t[0]['begin_time'])
        self.assertIsNotNone(fn_t[0]['end_time'])
        self.assertGreaterEqual(fn_t[0]['duration'], 1.0)
        self.assertEqual(fn_t[0]['progress'], 50)
        self.assertFalse(fn_t[0]['success'])
        self.assertIsNotNone(fn_t[0]['exception'])
        # the handler wraps the failure into component/detail/status plus
        # the full task description (name and metadata)
        self.assertEqual(fn_t[0]['exception'], {
            'component': None,
            'detail': 'Task Unexpected Exception',
            'status': 500,
            'task': {
                'metadata': {
                    'fail': True,
                    'handle_ex': True,
                    'is_async': False,
                    'op_seconds': 1,
                    'progress': 50,
                    'wait': False},
                'name': 'test15/task1'
            }
        })
| 16,715 | 37.605081 | 80 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_tools.py | # -*- coding: utf-8 -*-
import unittest
import cherrypy
from cherrypy.lib.sessions import RamSession
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from ..controllers import APIRouter, BaseController, Proxy, RESTController, Router
from ..controllers._version import APIVersion
from ..services.exception import handle_rados_error
from ..tests import ControllerTestCase
from ..tools import dict_contains_path, dict_get, json_str_to_object, \
merge_list_of_dicts_by_key, partial_dict
# pylint: disable=W0613
@Router('/foo', secure=False)
class FooResource(RESTController):
    """In-memory REST fixture managing a list of ``{'a': ...}`` elements."""
    elems = []
    def list(self):
        # GET /foo -> the whole collection
        return FooResource.elems
    def create(self, a):
        # POST /foo -> append one element and echo it back
        FooResource.elems.append({'a': a})
        return {'a': a}
    def get(self, key):
        # GET /foo/<key> -> echo the key with an empty sub-path list
        return {'detail': (key, [])}
    def delete(self, key):
        # DELETE /foo/<key> -> drop the element at that index
        FooResource.elems.pop(int(key))
    def bulk_delete(self):
        # DELETE /foo -> reset the collection
        FooResource.elems = []
    def set(self, key, newdata):
        # PUT /foo/<key> -> replace the element and echo the update
        FooResource.elems[int(key)] = {'newdata': newdata}
        return {'key': key, 'newdata': newdata}
@Router('/foo/:key/:method', secure=False)
class FooResourceDetail(RESTController):
    """Fixture matching an extra path segment after the resource key."""
    def list(self, key, method):
        # GET /foo/<key>/<method> -> echo both path parameters
        return dict(detail=(key, [method]))
@APIRouter('/rgw/proxy', secure=False)
class GenerateControllerRoutesController(BaseController):
    # Fixture whose only purpose is to exercise route generation for a
    # @Proxy catch-all endpoint; the handler body is intentionally empty.
    @Proxy()
    def __call__(self, path, **params):
        pass
@APIRouter('/fooargs', secure=False)
class FooArgs(RESTController):
    """Fixture for exercising argument passing from JSON request bodies."""
    def set(self, code, name=None, opt1=None, opt2=None):
        # PUT /api/fooargs/<code> -> echo the required and optional args
        return dict(code=code, name=name, opt1=opt1, opt2=opt2)
    @handle_rados_error('foo')
    def create(self, my_arg_name):
        # POST /api/fooargs -> echo the single required argument
        return my_arg_name
    def list(self):
        # GET /api/fooargs is deliberately unsupported by this fixture
        raise cherrypy.NotFound()
class Root(object):
    # Aggregate of the fixture controllers; not referenced elsewhere in
    # this module (kept for manual mounting/experiments).
    foo_resource = FooResource()
    fooargs = FooArgs()
class RESTControllerTest(ControllerTestCase):
    """End-to-end tests of the RESTController CRUD mapping using the
    Foo* fixtures defined above."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers(
            [FooResource, FooResourceDetail, FooArgs, GenerateControllerRoutesController])
    def test_empty(self):
        # bulk delete then list: an empty collection serializes to '[]'
        self._delete("/foo")
        self.assertStatus(204)
        self._get("/foo")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', APIVersion.DEFAULT.to_mime_type())
        self.assertBody('[]')
    def test_fill(self):
        sess_mock = RamSession()
        with patch('cherrypy.session', sess_mock, create=True):
            data = {'a': 'b'}
            # each create echoes the element with 201 and a versioned MIME type
            for _ in range(5):
                self._post("/foo", data)
                self.assertJsonBody(data)
                self.assertStatus(201)
                self.assertHeader('Content-Type', APIVersion.DEFAULT.to_mime_type())
            self._get("/foo")
            self.assertStatus('200 OK')
            self.assertHeader('Content-Type', APIVersion.DEFAULT.to_mime_type())
            self.assertJsonBody([data] * 5)
            # PUT maps to FooResource.set and echoes key + new data
            self._put('/foo/0', {'newdata': 'newdata'})
            self.assertStatus('200 OK')
            self.assertHeader('Content-Type', APIVersion.DEFAULT.to_mime_type())
            self.assertJsonBody({'newdata': 'newdata', 'key': '0'})
    def test_not_implemented(self):
        # PUT on the collection (no key) has no route -> structured 404
        self._put("/foo")
        self.assertStatus(404)
        body = self.json_body()
        self.assertIsInstance(body, dict)
        assert body['detail'] == "The path '/foo' was not found."
        assert '404' in body['status']
    def test_args_from_json(self):
        # optional kwargs default to None unless present in the JSON body
        self._put("/api/fooargs/hello", {'name': 'world'})
        self.assertJsonBody({'code': 'hello', 'name': 'world', 'opt1': None, 'opt2': None})
        self._put("/api/fooargs/hello", {'name': 'world', 'opt1': 'opt1'})
        self.assertJsonBody({'code': 'hello', 'name': 'world', 'opt1': 'opt1', 'opt2': None})
        self._put("/api/fooargs/hello", {'name': 'world', 'opt2': 'opt2'})
        self.assertJsonBody({'code': 'hello', 'name': 'world', 'opt1': None, 'opt2': 'opt2'})
    def test_detail_route(self):
        # extra path segments are routed to FooResourceDetail
        self._get('/foo/default')
        self.assertJsonBody({'detail': ['default', []]})
        self._get('/foo/default/default')
        self.assertJsonBody({'detail': ['default', ['default']]})
        self._get('/foo/1/detail')
        self.assertJsonBody({'detail': ['1', ['detail']]})
        # detail routes only expose GET; POST is not routed
        self._post('/foo/1/detail', 'post-data')
        self.assertStatus(404)
    def test_generate_controller_routes(self):
        # We just need to add this controller in setup_server():
        # noinspection PyStatementEffect
        # pylint: disable=pointless-statement
        GenerateControllerRoutesController
class RequestLoggingToolTest(ControllerTestCase):
    # Enables request logging in the embedded server (flag presumably
    # consumed by the ControllerTestCase setup machinery — see ..tests).
    _request_logging = True
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([FooResource])
    def test_is_logged(self):
        """A handled request is logged with host, method, user and path."""
        with patch('logging.Logger.debug') as mock_logger_debug:
            self._put('/foo/0', {'newdata': 'xyz'})
            self.assertStatus(200)
            call_args_list = mock_logger_debug.call_args_list
            # first debug call carries (fmt, host, _, method, user, path)
            _, host, _, method, user, path = call_args_list[0][0]
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(method, 'PUT')
            self.assertIsNone(user)
            self.assertEqual(path, '/foo/0')
class TestFunctions(unittest.TestCase):
    """Unit tests for the dict/JSON helper functions in ..tools."""
    def test_dict_contains_path(self):
        x = {'a': {'b': {'c': 'foo'}}}
        self.assertTrue(dict_contains_path(x, ['a', 'b', 'c']))
        # (duplicate of the previous assertion, kept as-is)
        self.assertTrue(dict_contains_path(x, ['a', 'b', 'c']))
        self.assertTrue(dict_contains_path(x, ['a']))
        self.assertFalse(dict_contains_path(x, ['a', 'c']))
        # an empty path is trivially contained
        self.assertTrue(dict_contains_path(x, []))
    def test_json_str_to_object(self):
        expected_result = {'a': 1, 'b': 'bbb'}
        # accepts both str and bytes payloads
        self.assertEqual(expected_result, json_str_to_object('{"a": 1, "b": "bbb"}'))
        self.assertEqual(expected_result, json_str_to_object(b'{"a": 1, "b": "bbb"}'))
        # empty input is passed through, None is rejected
        self.assertEqual('', json_str_to_object(''))
        self.assertRaises(TypeError, json_str_to_object, None)
    def test_partial_dict(self):
        expected_result = {'a': 1, 'c': 3}
        self.assertEqual(expected_result, partial_dict({'a': 1, 'b': 2, 'c': 3}, ['a', 'c']))
        self.assertEqual({}, partial_dict({'a': 1, 'b': 2, 'c': 3}, []))
        self.assertEqual({}, partial_dict({}, []))
        # unknown keys and non-dict/non-list inputs raise
        self.assertRaises(KeyError, partial_dict, {'a': 1, 'b': 2, 'c': 3}, ['d'])
        self.assertRaises(TypeError, partial_dict, None, ['a'])
        self.assertRaises(TypeError, partial_dict, {'a': 1, 'b': 2, 'c': 3}, None)
    def test_dict_get(self):
        # dotted-path lookup; missing leaves yield None
        self.assertFalse(dict_get({'foo': {'bar': False}}, 'foo.bar'))
        self.assertIsNone(dict_get({'foo': {'bar': False}}, 'foo.bar.baz'))
        self.assertEqual(dict_get({'foo': {'bar': False}, 'baz': 'xyz'}, 'baz'), 'xyz')
    def test_merge_list_of_dicts_by_key(self):
        # entries sharing the key value 'a' are merged pairwise
        expected_result = [{'a': 1, 'b': 2, 'c': 3}, {'a': 4, 'b': 5, 'c': 6}]
        self.assertEqual(expected_result, merge_list_of_dicts_by_key(
            [{'a': 1, 'b': 2}, {'a': 4, 'b': 5}], [{'a': 1, 'c': 3}, {'a': 4, 'c': 6}], 'a'))
        # an empty dict on one side leaves the other side untouched
        expected_result = [{'a': 1, 'b': 2}, {'a': 4, 'b': 5, 'c': 6}]
        self.assertEqual(expected_result, merge_list_of_dicts_by_key(
            [{'a': 1, 'b': 2}, {'a': 4, 'b': 5}], [{}, {'a': 4, 'c': 6}], 'a'))
        self.assertRaises(TypeError, merge_list_of_dicts_by_key, None)
| 7,442 | 34.274882 | 93 | py |
null | ceph-main/src/pybind/mgr/dashboard/tests/test_versioning.py | # -*- coding: utf-8 -*-
import unittest
from ..controllers._api_router import APIRouter
from ..controllers._rest_controller import RESTController
from ..controllers._version import APIVersion
from ..tests import ControllerTestCase
@APIRouter("/vtest", secure=False)
class VTest(RESTController):
RESOURCE_ID = "vid"
@RESTController.MethodMap(version=APIVersion(0, 1))
def list(self):
return {'version': ""}
def get(self):
return {'version': ""}
@RESTController.Collection('GET', version=APIVersion(1, 0))
def vmethod(self):
return {'version': '1.0'}
@RESTController.Collection('GET', version=APIVersion(1, 1))
def vmethodv1_1(self):
return {'version': '1.1'}
@RESTController.Collection('GET', version=APIVersion(2, 0))
def vmethodv2(self):
return {'version': '2.0'}
class RESTVersioningTest(ControllerTestCase, unittest.TestCase):
    """Checks that requests carrying a mismatched API version are rejected
    with 415 while matching (or backward-compatible) versions succeed."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([VTest], "/test")
    def test_list(self):
        # list() is pinned to 0.1; any other major version must be refused
        for (version, expected_status) in [
                ((0, 1), 200),
                ((2, 0), 415)
        ]:
            with self.subTest(version=version):
                self._get('/test/api/vtest', version=APIVersion._make(version))
                self.assertStatus(expected_status)
    def test_v1(self):
        for (version, expected_status) in [
                ((1, 0), 200),
                ((2, 0), 415)
        ]:
            with self.subTest(version=version):
                self._get('/test/api/vtest/vmethod',
                          version=APIVersion._make(version))
                self.assertStatus(expected_status)
    def test_v2(self):
        for (version, expected_status) in [
                ((2, 0), 200),
                ((1, 0), 415)
        ]:
            with self.subTest(version=version):
                self._get('/test/api/vtest/vmethodv2',
                          version=APIVersion._make(version))
                self.assertStatus(expected_status)
    def test_backward_compatibility(self):
        # a 1.1 endpoint must keep serving 1.0 clients, but not 2.0
        for (version, expected_status) in [
                ((1, 1), 200),
                ((1, 0), 200),
                ((2, 0), 415)
        ]:
            with self.subTest(version=version):
                self._get('/test/api/vtest/vmethodv1_1',
                          version=APIVersion._make(version))
                self.assertStatus(expected_status)
| 2,450 | 30.025316 | 79 | py |
null | ceph-main/src/pybind/mgr/devicehealth/__init__.py | # flake8: noqa
from .module import Module
| 42 | 13.333333 | 26 | py |
null | ceph-main/src/pybind/mgr/devicehealth/module.py | """
Device health monitoring
"""
import errno
import json
from mgr_module import MgrModule, CommandResult, MgrModuleRecoverDB, CLIRequiresDB, CLICommand, CLIReadCommand, Option, MgrDBNotReady
import operator
import rados
import re
from threading import Event
from datetime import datetime, timedelta, timezone
from typing import cast, Any, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
# Timestamp format used for 'last_scrape' bookkeeping and for the keys of
# returned metric samples.
TIME_FORMAT = '%Y%m%d-%H%M%S'
# Identifiers of the health checks this module can raise.
DEVICE_HEALTH = 'DEVICE_HEALTH'
DEVICE_HEALTH_IN_USE = 'DEVICE_HEALTH_IN_USE'
DEVICE_HEALTH_TOOMANY = 'DEVICE_HEALTH_TOOMANY'
# Summary templates; the first two are %-formatted with an item count.
HEALTH_MESSAGES = {
    DEVICE_HEALTH: '%d device(s) expected to fail soon',
    DEVICE_HEALTH_IN_USE: '%d daemon(s) expected to fail soon and still contain data',
    DEVICE_HEALTH_TOOMANY: 'Too many daemons are expected to fail soon',
}
def get_ata_wear_level(data: Dict[Any, Any]) -> Optional[float]:
    """
    Extract wear level (as float in [0, 1]) from smartctl -x --json output
    for a SATA SSD.

    Scans the ATA device statistics pages for page 7, offset 8 (the
    "Percentage Used Endurance Indicator" per the ACS spec).  Returns None
    when the statistic is absent.  smartctl output is external data, so
    malformed pages/table entries are skipped rather than raising.
    """
    for page in data.get("ata_device_statistics", {}).get("pages", []):
        if page is None or page.get("number") != 7:
            continue
        for item in page.get("table", []):
            # tolerate entries missing "offset"/"value" keys
            if item.get("offset") == 8:
                value = item.get("value")
                if value is not None:
                    return value / 100.0
    return None
def get_nvme_wear_level(data: Dict[Any, Any]) -> Optional[float]:
    """
    Extract wear level (as float) from smartctl -x --json output for NVME SSD
    """
    health_log = data.get("nvme_smart_health_information_log", {})
    percentage_used = health_log.get("percentage_used")
    return None if percentage_used is None else percentage_used / 100.0
class Module(MgrModule):
    """Mgr module that scrapes SMART data from daemons, stores it in the
    mgr's SQLite database and raises health warnings / self-heals when
    devices are predicted to fail soon."""
    # Schema used when creating a brand-new database.
    # latest (if db does not exist)
    SCHEMA = """
CREATE TABLE Device (
  devid TEXT PRIMARY KEY
) WITHOUT ROWID;
CREATE TABLE DeviceHealthMetrics (
  time DATETIME DEFAULT (strftime('%s', 'now')),
  devid TEXT NOT NULL REFERENCES Device (devid),
  raw_smart TEXT NOT NULL,
  PRIMARY KEY (time, devid)
);
"""
    # Schema revisions, oldest first; presumably consumed by the MgrModule
    # DB upgrade machinery — see mgr_module.
    SCHEMA_VERSIONED = [
        # v1
        """
CREATE TABLE Device (
  devid TEXT PRIMARY KEY
) WITHOUT ROWID;
CREATE TABLE DeviceHealthMetrics (
  time DATETIME DEFAULT (strftime('%s', 'now')),
  devid TEXT NOT NULL REFERENCES Device (devid),
  raw_smart TEXT NOT NULL,
  PRIMARY KEY (time, devid)
);
"""
    ]
    # User-tunable options; each becomes an instance attribute of the same
    # name (see __init__ / config_notify).
    MODULE_OPTIONS = [
        Option(
            name='enable_monitoring',
            default=True,
            type='bool',
            desc='monitor device health metrics',
            runtime=True,
        ),
        Option(
            name='scrape_frequency',
            default=86400,
            type='secs',
            desc='how frequently to scrape device health metrics',
            runtime=True,
        ),
        Option(
            name='pool_name',
            default='device_health_metrics',
            type='str',
            desc='name of pool in which to store device health metrics',
            runtime=True,
        ),
        Option(
            name='retention_period',
            default=(86400 * 180),
            type='secs',
            desc='how long to retain device health metrics',
            runtime=True,
        ),
        Option(
            name='mark_out_threshold',
            default=(86400 * 14 * 2),
            type='secs',
            desc='automatically mark OSD if it may fail before this long',
            runtime=True,
        ),
        Option(
            name='warn_threshold',
            default=(86400 * 14 * 6),
            type='secs',
            desc='raise health warning if OSD may fail before this long',
            runtime=True,
        ),
        Option(
            name='self_heal',
            default=True,
            type='bool',
            desc='preemptively heal cluster around devices that may fail',
            runtime=True,
        ),
        Option(
            name='sleep_interval',
            default=600,
            type='secs',
            desc='how frequently to wake up and check device health',
            runtime=True,
        ),
    ]
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(Module, self).__init__(*args, **kwargs)
        # populate options (just until serve() runs)
        for opt in self.MODULE_OPTIONS:
            setattr(self, opt['name'], opt['default'])
        # other
        # run flag controls the _do_serve loop; event wakes the sleeper
        self.run = True
        self.event = Event()
        # for mypy which does not run the code
        if TYPE_CHECKING:
            self.enable_monitoring = True
            self.scrape_frequency = 0.0
            self.pool_name = ''
            self.device_health_metrics = ''
            self.retention_period = 0.0
            self.mark_out_threshold = 0.0
            self.warn_threshold = 0.0
            self.self_heal = True
            self.sleep_interval = 0.0
def is_valid_daemon_name(self, who: str) -> bool:
parts = who.split('.', 1)
if len(parts) != 2:
return False
return parts[0] in ('osd', 'mon')
@CLIReadCommand('device query-daemon-health-metrics')
def do_query_daemon_health_metrics(self, who: str) -> Tuple[int, str, str]:
'''
Get device health metrics for a given daemon
'''
if not self.is_valid_daemon_name(who):
return -errno.EINVAL, '', 'not a valid mon or osd daemon name'
(daemon_type, daemon_id) = who.split('.')
result = CommandResult('')
self.send_command(result, daemon_type, daemon_id, json.dumps({
'prefix': 'smart',
'format': 'json',
}), '')
return result.wait()
@CLIRequiresDB
@CLIReadCommand('device scrape-daemon-health-metrics')
@MgrModuleRecoverDB
def do_scrape_daemon_health_metrics(self, who: str) -> Tuple[int, str, str]:
'''
Scrape and store device health metrics for a given daemon
'''
if not self.is_valid_daemon_name(who):
return -errno.EINVAL, '', 'not a valid mon or osd daemon name'
(daemon_type, daemon_id) = who.split('.')
return self.scrape_daemon(daemon_type, daemon_id)
@CLIRequiresDB
@CLIReadCommand('device scrape-health-metrics')
@MgrModuleRecoverDB
def do_scrape_health_metrics(self, devid: Optional[str] = None) -> Tuple[int, str, str]:
'''
Scrape and store device health metrics
'''
if devid is None:
return self.scrape_all()
else:
return self.scrape_device(devid)
    @CLIRequiresDB
    @CLIReadCommand('device get-health-metrics')
    @MgrModuleRecoverDB
    def do_get_health_metrics(self, devid: str, sample: Optional[str] = None) -> Tuple[int, str, str]:
        '''
        Show stored device metrics for the device
        '''
        # `sample` optionally restricts output to one TIME_FORMAT timestamp
        return self.show_device_metrics(devid, sample)
    @CLIRequiresDB
    @CLICommand('device check-health')
    @MgrModuleRecoverDB
    def do_check_health(self) -> Tuple[int, str, str]:
        '''
        Check life expectancy of devices
        '''
        # runs the same evaluation the serve loop performs periodically
        return self.check_health()
    @CLICommand('device monitoring on')
    def do_monitoring_on(self) -> Tuple[int, str, str]:
        '''
        Enable device health monitoring
        '''
        self.set_module_option('enable_monitoring', True)
        # wake the serve loop so monitoring resumes immediately
        self.event.set()
        return 0, '', ''
    @CLICommand('device monitoring off')
    def do_monitoring_off(self) -> Tuple[int, str, str]:
        '''
        Disable device health monitoring
        '''
        self.set_module_option('enable_monitoring', False)
        self.set_health_checks({})  # avoid stuck health alerts
        return 0, '', ''
    @CLIRequiresDB
    @CLIReadCommand('device predict-life-expectancy')
    @MgrModuleRecoverDB
    def do_predict_life_expectancy(self, devid: str) -> Tuple[int, str, str]:
        '''
        Predict life expectancy with local predictor
        '''
        # delegates to the diskprediction_local module via remote()
        return self.predict_lift_expectancy(devid)
    def self_test(self) -> None:
        # Sanity check: scrape one device claimed by the first OSD and
        # verify its stored metrics actually change.
        assert self.db_ready()
        self.config_notify()
        osdmap = self.get('osd_map')
        osd_id = osdmap['osds'][0]['osd']
        osdmeta = self.get('osd_metadata')
        # device_ids is a whitespace-separated list of 'name=devid' tokens
        devs = osdmeta.get(str(osd_id), {}).get('device_ids')
        if devs:
            devid = devs.split()[0].split('=')[1]
            self.log.debug(f"getting devid {devid}")
            (r, before, err) = self.show_device_metrics(devid, None)
            assert r == 0
            self.log.debug(f"before: {before}")
            (r, out, err) = self.scrape_device(devid)
            assert r == 0
            (r, after, err) = self.show_device_metrics(devid, None)
            assert r == 0
            self.log.debug(f"after: {after}")
            assert before != after
def config_notify(self) -> None:
for opt in self.MODULE_OPTIONS:
setattr(self,
opt['name'],
self.get_module_option(opt['name']))
self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
    def _legacy_put_device_metrics(self, t: str, devid: str, data: str) -> None:
        # Insert one (time, devid) sample migrated from the legacy omap
        # store; the caller holds the DB lock/transaction.
        SQL = """
        INSERT OR IGNORE INTO DeviceHealthMetrics (time, devid, raw_smart)
        VALUES (?, ?, ?);
        """
        self._create_device(devid)
        epoch = self._t2epoch(t)
        # valid JSON?  raises JSONDecodeError, handled by check_legacy_pool
        json.loads(data)
        self.db.execute(SQL, (epoch, devid, data))
devre = r"[a-zA-Z0-9-]+[_-][a-zA-Z0-9-]+[_-][a-zA-Z0-9-]+"
def _load_legacy_object(self, ioctx: rados.Ioctx, oid: str) -> bool:
MAX_OMAP = 10000
self.log.debug(f"loading object {oid}")
if re.search(self.devre, oid) is None:
return False
with rados.ReadOpCtx() as op:
it, rc = ioctx.get_omap_vals(op, None, None, MAX_OMAP)
if rc == 0:
ioctx.operate_read_op(op, oid)
count = 0
for t, raw_smart in it:
self.log.debug(f"putting {oid} {t}")
self._legacy_put_device_metrics(t, oid, raw_smart)
count += 1
assert count < MAX_OMAP
self.log.debug(f"removing object {oid}")
ioctx.remove_object(oid)
return True
    def check_legacy_pool(self) -> bool:
        """Migrate up to 10 legacy metric objects per call; returns True
        once the legacy pool is fully drained (or absent)."""
        try:
            # 'device_health_metrics' is automatically renamed '.mgr' in
            # create_mgr_pool
            ioctx = self.rados.open_ioctx(self.MGR_POOL_NAME)
        except rados.ObjectNotFound:
            return True
        if not ioctx:
            return True
        done = False
        with ioctx, self._db_lock, self.db:
            count = 0
            for obj in ioctx.list_objects():
                try:
                    if self._load_legacy_object(ioctx, obj.key):
                        count += 1
                except json.decoder.JSONDecodeError:
                    # corrupt sample: skip the object, do not abort migration
                    pass
                if count >= 10:
                    break
            # fewer than 10 migrated means nothing is left to process
            done = count < 10
        self.log.debug(f"finished reading legacy pool, complete = {done}")
        return done
    @MgrModuleRecoverDB
    def _do_serve(self) -> None:
        # Main loop: migrate legacy data, evaluate device health every
        # wake-up, and scrape all daemons on the configured schedule.
        last_scrape = None
        finished_loading_legacy = False
        while self.run:
            # sleep first, in case of exceptions causing retry:
            sleep_interval = self.sleep_interval or 60
            if not finished_loading_legacy:
                # poll quickly until the legacy pool migration completes
                sleep_interval = 2
            self.log.debug('Sleeping for %d seconds', sleep_interval)
            self.event.wait(sleep_interval)
            self.event.clear()
            if self.db_ready() and self.enable_monitoring:
                self.log.debug('Running')
                if not finished_loading_legacy:
                    finished_loading_legacy = self.check_legacy_pool()
                if last_scrape is None:
                    # recover the last scrape time persisted in the KV store
                    ls = self.get_kv('last_scrape')
                    if ls:
                        try:
                            last_scrape = datetime.strptime(ls, TIME_FORMAT)
                        except ValueError:
                            pass
                self.log.debug('Last scrape %s', last_scrape)
                self.check_health()
                now = datetime.utcnow()
                if not last_scrape:
                    next_scrape = now
                else:
                    # align to scrape interval
                    scrape_frequency = self.scrape_frequency or 86400
                    seconds = (last_scrape - datetime.utcfromtimestamp(0)).total_seconds()
                    seconds -= seconds % scrape_frequency
                    seconds += scrape_frequency
                    next_scrape = datetime.utcfromtimestamp(seconds)
                if last_scrape:
                    self.log.debug('Last scrape %s, next scrape due %s',
                                   last_scrape.strftime(TIME_FORMAT),
                                   next_scrape.strftime(TIME_FORMAT))
                else:
                    self.log.debug('Last scrape never, next scrape due %s',
                                   next_scrape.strftime(TIME_FORMAT))
                if now >= next_scrape:
                    self.scrape_all()
                    self.predict_all_devices()
                    last_scrape = now
                    self.set_kv('last_scrape', last_scrape.strftime(TIME_FORMAT))
    def serve(self) -> None:
        """Module entry point: runs the scrape/health loop until shutdown()."""
        self.log.info("Starting")
        self.config_notify()
        self._do_serve()
    def shutdown(self) -> None:
        """Stop the serve loop: clear the run flag and wake the sleeper."""
        self.log.info('Stopping')
        self.run = False
        self.event.set()
    def scrape_daemon(self, daemon_type: str, daemon_id: str) -> Tuple[int, str, str]:
        """Scrape SMART data from one daemon and store a record per device."""
        if not self.db_ready():
            return -errno.EAGAIN, "", "mgr db not yet available"
        raw_smart_data = self.do_scrape_daemon(daemon_type, daemon_id)
        if raw_smart_data:
            # the daemon may report several devices; store each one
            for device, raw_data in raw_smart_data.items():
                data = self.extract_smart_features(raw_data)
                if device and data:
                    self.put_device_metrics(device, data)
        return 0, "", ""
def scrape_all(self) -> Tuple[int, str, str]:
if not self.db_ready():
return -errno.EAGAIN, "", "mgr db not yet available"
osdmap = self.get("osd_map")
assert osdmap is not None
did_device = {}
ids = []
for osd in osdmap['osds']:
ids.append(('osd', str(osd['osd'])))
monmap = self.get("mon_map")
for mon in monmap['mons']:
ids.append(('mon', mon['name']))
for daemon_type, daemon_id in ids:
raw_smart_data = self.do_scrape_daemon(daemon_type, daemon_id)
if not raw_smart_data:
continue
for device, raw_data in raw_smart_data.items():
if device in did_device:
self.log.debug('skipping duplicate %s' % device)
continue
did_device[device] = 1
data = self.extract_smart_features(raw_data)
if device and data:
self.put_device_metrics(device, data)
return 0, "", ""
def scrape_device(self, devid: str) -> Tuple[int, str, str]:
if not self.db_ready():
return -errno.EAGAIN, "", "mgr db not yet available"
r = self.get("device " + devid)
if not r or 'device' not in r.keys():
return -errno.ENOENT, '', 'device ' + devid + ' not found'
daemons = r['device'].get('daemons', [])
if not daemons:
return (-errno.EAGAIN, '',
'device ' + devid + ' not claimed by any active daemons')
(daemon_type, daemon_id) = daemons[0].split('.')
raw_smart_data = self.do_scrape_daemon(daemon_type, daemon_id,
devid=devid)
if raw_smart_data:
for device, raw_data in raw_smart_data.items():
data = self.extract_smart_features(raw_data)
if device and data:
self.put_device_metrics(device, data)
return 0, "", ""
    def do_scrape_daemon(self,
                         daemon_type: str,
                         daemon_id: str,
                         devid: str = '') -> Optional[Dict[str, Any]]:
        """
        Issue the 'smart' admin command to a daemon and parse its JSON reply.

        An empty ``devid`` asks the daemon for all of its devices.

        :return: a dict, or None if the scrape failed.
        """
        self.log.debug('do_scrape_daemon %s.%s' % (daemon_type, daemon_id))
        result = CommandResult('')
        self.send_command(result, daemon_type, daemon_id, json.dumps({
            'prefix': 'smart',
            'format': 'json',
            'devid': devid,
        }), '')
        r, outb, outs = result.wait()
        try:
            return json.loads(outb)
        except (IndexError, ValueError):
            # daemon returned malformed or empty output
            self.log.error(
                "Fail to parse JSON result from daemon {0}.{1} ({2})".format(
                    daemon_type, daemon_id, outb))
        return None
    def _prune_device_metrics(self) -> None:
        # Delete samples older than the retention period; called from
        # put_device_metrics inside its lock/transaction.
        SQL = """
        DELETE FROM DeviceHealthMetrics
        WHERE time < (strftime('%s', 'now') - ?);
        """
        cursor = self.db.execute(SQL, (self.retention_period,))
        if cursor.rowcount >= 1:
            self.log.info(f"pruned {cursor.rowcount} metrics")
    def _create_device(self, devid: str) -> None:
        # Ensure a Device row exists (idempotent via INSERT OR IGNORE);
        # called from within an open lock/transaction.
        SQL = """
        INSERT OR IGNORE INTO Device VALUES (?);
        """
        cursor = self.db.execute(SQL, (devid,))
        if cursor.rowcount >= 1:
            self.log.info(f"created device {devid}")
        else:
            self.log.debug(f"device {devid} already exists")
def put_device_metrics(self, devid: str, data: Any) -> None:
SQL = """
INSERT INTO DeviceHealthMetrics (devid, raw_smart)
VALUES (?, ?);
"""
with self._db_lock, self.db:
self._create_device(devid)
self.db.execute(SQL, (devid, json.dumps(data)))
self._prune_device_metrics()
# extract wear level?
wear_level = get_ata_wear_level(data)
if wear_level is None:
wear_level = get_nvme_wear_level(data)
dev_data = self.get(f"device {devid}") or {}
if wear_level is not None:
if dev_data.get(wear_level) != str(wear_level):
dev_data["wear_level"] = str(wear_level)
self.log.debug(f"updating {devid} wear level to {wear_level}")
self.set_device_wear_level(devid, wear_level)
else:
if "wear_level" in dev_data:
del dev_data["wear_level"]
self.log.debug(f"removing {devid} wear level")
self.set_device_wear_level(devid, -1.0)
def _t2epoch(self, t: Optional[str]) -> int:
if not t:
return 0
else:
return int(datetime.strptime(t, TIME_FORMAT).strftime("%s"))
    def _get_device_metrics(self, devid: str,
                            sample: Optional[str] = None,
                            min_sample: Optional[str] = None) -> Dict[str, Dict[str, Any]]:
        """Fetch stored samples for ``devid`` keyed by TIME_FORMAT string.

        ``sample`` selects exactly one timestamp; otherwise every sample at
        or after ``min_sample`` (epoch 0 when unset, i.e. everything).
        """
        res: Dict[str, Dict[str, Any]] = {}
        SQL_EXACT = """
        SELECT time, raw_smart
        FROM DeviceHealthMetrics
        WHERE devid = ? AND time = ?
        ORDER BY time DESC;
        """
        SQL_MIN = """
        SELECT time, raw_smart
        FROM DeviceHealthMetrics
        WHERE devid = ? AND ? <= time
        ORDER BY time DESC;
        """
        isample = None
        imin_sample = None
        if sample:
            isample = self._t2epoch(sample)
        else:
            imin_sample = self._t2epoch(min_sample)
        self.log.debug(f"_get_device_metrics: {devid} {sample} {min_sample}")
        with self._db_lock, self.db:
            if isample:
                cursor = self.db.execute(SQL_EXACT, (devid, isample))
            else:
                cursor = self.db.execute(SQL_MIN, (devid, imin_sample))
            for row in cursor:
                t = row['time']
                dt = datetime.utcfromtimestamp(t).strftime(TIME_FORMAT)
                try:
                    res[dt] = json.loads(row['raw_smart'])
                except (ValueError, IndexError):
                    # skip unparseable stored samples rather than failing
                    self.log.debug(f"unable to parse value for {devid}:{t}")
                    pass
        return res
    def show_device_metrics(self, devid: str, sample: Optional[str]) -> Tuple[int, str, str]:
        """Return stored metrics for the device as pretty-printed JSON."""
        # verify device exists
        r = self.get("device " + devid)
        if not r or 'device' not in r.keys():
            return -errno.ENOENT, '', 'device ' + devid + ' not found'
        # fetch metrics
        res = self._get_device_metrics(devid, sample=sample)
        return 0, json.dumps(res, indent=4, sort_keys=True), ''
def check_health(self) -> Tuple[int, str, str]:
self.log.info('Check health')
config = self.get('config')
min_in_ratio = float(config.get('mon_osd_min_in_ratio'))
mark_out_threshold_td = timedelta(seconds=self.mark_out_threshold)
warn_threshold_td = timedelta(seconds=self.warn_threshold)
checks: Dict[str, Dict[str, Union[int, str, Sequence[str]]]] = {}
health_warnings: Dict[str, List[str]] = {
DEVICE_HEALTH: [],
DEVICE_HEALTH_IN_USE: [],
}
devs = self.get("devices")
osds_in = {}
osds_out = {}
now = datetime.now(timezone.utc) # e.g. '2021-09-22 13:18:45.021712+00:00'
osdmap = self.get("osd_map")
assert osdmap is not None
for dev in devs['devices']:
if 'life_expectancy_max' not in dev:
continue
# ignore devices that are not consumed by any daemons
if not dev['daemons']:
continue
if not dev['life_expectancy_max'] or \
dev['life_expectancy_max'] == '0.000000':
continue
# life_expectancy_(min/max) is in the format of:
# '%Y-%m-%dT%H:%M:%S.%f%z', e.g.:
# '2019-01-20 21:12:12.000000+00:00'
life_expectancy_max = datetime.strptime(
dev['life_expectancy_max'],
'%Y-%m-%dT%H:%M:%S.%f%z')
self.log.debug('device %s expectancy max %s', dev,
life_expectancy_max)
if life_expectancy_max - now <= mark_out_threshold_td:
if self.self_heal:
# dev['daemons'] == ["osd.0","osd.1","osd.2"]
if dev['daemons']:
osds = [x for x in dev['daemons']
if x.startswith('osd.')]
osd_ids = map(lambda x: x[4:], osds)
for _id in osd_ids:
if self.is_osd_in(osdmap, _id):
osds_in[_id] = life_expectancy_max
else:
osds_out[_id] = 1
if life_expectancy_max - now <= warn_threshold_td:
# device can appear in more than one location in case
# of SCSI multipath
device_locations = map(lambda x: x['host'] + ':' + x['dev'],
dev['location'])
health_warnings[DEVICE_HEALTH].append(
'%s (%s); daemons %s; life expectancy between %s and %s'
% (dev['devid'],
','.join(device_locations),
','.join(dev.get('daemons', ['none'])),
dev['life_expectancy_max'],
dev.get('life_expectancy_max', 'unknown')))
# OSD might be marked 'out' (which means it has no
# data), however PGs are still attached to it.
for _id in osds_out:
num_pgs = self.get_osd_num_pgs(_id)
if num_pgs > 0:
health_warnings[DEVICE_HEALTH_IN_USE].append(
'osd.%s is marked out '
'but still has %s PG(s)' %
(_id, num_pgs))
if osds_in:
self.log.debug('osds_in %s' % osds_in)
# calculate target in ratio
num_osds = len(osdmap['osds'])
num_in = len([x for x in osdmap['osds'] if x['in']])
num_bad = len(osds_in)
# sort with next-to-fail first
bad_osds = sorted(osds_in.items(), key=operator.itemgetter(1))
did = 0
to_mark_out = []
for osd_id, when in bad_osds:
ratio = float(num_in - did - 1) / float(num_osds)
if ratio < min_in_ratio:
final_ratio = float(num_in - num_bad) / float(num_osds)
checks[DEVICE_HEALTH_TOOMANY] = {
'severity': 'warning',
'summary': HEALTH_MESSAGES[DEVICE_HEALTH_TOOMANY],
'detail': [
'%d OSDs with failing device(s) would bring "in" ratio to %f < mon_osd_min_in_ratio %f' % (
num_bad - did, final_ratio, min_in_ratio)
]
}
break
to_mark_out.append(osd_id)
did += 1
if to_mark_out:
self.mark_out_etc(to_mark_out)
for warning, ls in health_warnings.items():
n = len(ls)
if n:
checks[warning] = {
'severity': 'warning',
'summary': HEALTH_MESSAGES[warning] % n,
'count': len(ls),
'detail': ls,
}
self.set_health_checks(checks)
return 0, "", ""
def is_osd_in(self, osdmap: Dict[str, Any], osd_id: str) -> bool:
for osd in osdmap['osds']:
if osd_id == str(osd['osd']):
return bool(osd['in'])
return False
def get_osd_num_pgs(self, osd_id: str) -> int:
stats = self.get('osd_stats')
assert stats is not None
for stat in stats['osd_stats']:
if osd_id == str(stat['osd']):
return stat['num_pgs']
return -1
    def mark_out_etc(self, osd_ids: List[str]) -> None:
        """Mark the given OSDs out and zero their primary affinity.

        Best-effort: failures are logged as warnings, not raised.
        """
        self.log.info('Marking out OSDs: %s' % osd_ids)
        result = CommandResult('')
        # one batched 'osd out' for all ids
        self.send_command(result, 'mon', '', json.dumps({
            'prefix': 'osd out',
            'format': 'json',
            'ids': osd_ids,
        }), '')
        r, outb, outs = result.wait()
        if r != 0:
            self.log.warning('Could not mark OSD %s out. r: [%s], outb: [%s], outs: [%s]',
                             osd_ids, r, outb, outs)
        # primary-affinity takes a single id, so issue one command per OSD
        for osd_id in osd_ids:
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd primary-affinity',
                'format': 'json',
                'id': int(osd_id),
                'weight': 0.0,
            }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.warning('Could not set osd.%s primary-affinity, '
                                 'r: [%s], outb: [%s], outs: [%s]',
                                 osd_id, r, outb, outs)
    def extract_smart_features(self, raw: Any) -> Any:
        # FIXME: extract and normalize raw smartctl --json output and
        # generate a dict of the fields we care about.
        # Currently a pass-through: the full raw record is stored as-is.
        return raw
    def predict_lift_expectancy(self, devid: str) -> Tuple[int, str, str]:
        """Ask the local prediction module for one device's life expectancy.

        NOTE: 'lift' is a historical typo for 'life'; the name is kept as
        the public interface (called from do_predict_life_expectancy).
        """
        plugin_name = ''
        model = self.get_ceph_option('device_failure_prediction_mode')
        if cast(str, model).lower() == 'local':
            plugin_name = 'diskprediction_local'
        else:
            return -1, '', 'unable to enable any disk prediction model[local/cloud]'
        try:
            # only call into the predictor module if it reports it can run
            can_run, _ = self.remote(plugin_name, 'can_run')
            if can_run:
                return self.remote(plugin_name, 'predict_life_expectancy', devid=devid)
            else:
                return -1, '', f'{plugin_name} is not available'
        except Exception:
            return -1, '', 'unable to invoke diskprediction local or remote plugin'
    def predict_all_devices(self) -> Tuple[int, str, str]:
        """Ask the local prediction module to predict all devices at once
        (same plugin-selection logic as predict_lift_expectancy)."""
        plugin_name = ''
        model = self.get_ceph_option('device_failure_prediction_mode')
        if cast(str, model).lower() == 'local':
            plugin_name = 'diskprediction_local'
        else:
            return -1, '', 'unable to enable any disk prediction model[local/cloud]'
        try:
            can_run, _ = self.remote(plugin_name, 'can_run')
            if can_run:
                return self.remote(plugin_name, 'predict_all_devices')
            else:
                return -1, '', f'{plugin_name} is not available'
        except Exception:
            return -1, '', 'unable to invoke diskprediction local or remote plugin'
def get_recent_device_metrics(self, devid: str, min_sample: str) -> Dict[str, Dict[str, Any]]:
try:
return self._get_device_metrics(devid, min_sample=min_sample)
except MgrDBNotReady:
return dict()
    def get_time_format(self) -> str:
        # Expose the module's canonical timestamp format string to remote
        # callers (used when formatting/parsing scrape timestamps).
        return TIME_FORMAT
| 29,158 | 36.335467 | 133 | py |
null | ceph-main/src/pybind/mgr/diskprediction_local/__init__.py | # flake8: noqa
from .module import Module
| 42 | 13.333333 | 26 | py |
null | ceph-main/src/pybind/mgr/diskprediction_local/module.py | """
diskprediction with local predictor
"""
import json
import datetime
from threading import Event
import time
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
from mgr_module import CommandResult, MgrModule, Option
# Importing scipy early appears to avoid a future deadlock when
# we try to do
#
# from .predictor import get_diskfailurepredictor_path
#
# in a command thread. See https://tracker.ceph.com/issues/42764
import scipy # noqa: ignore=F401
from .predictor import DevSmartT, Predictor, get_diskfailurepredictor_path
# Timestamp layout used when persisting 'last_predicted' in the mgr store.
TIME_FORMAT = '%Y%m%d-%H%M%S'
# Seconds per day / per week; used to express the life-expectancy windows
# written back via 'device set-life-expectancy'.
TIME_DAYS = 24 * 60 * 60
TIME_WEEK = TIME_DAYS * 7
class Module(MgrModule):
    """Local disk-failure prediction.

    Periodically feeds the SMART health metrics collected by the
    devicehealth module into a locally-trained predictor and stores the
    resulting life expectancy against each device.
    """

    MODULE_OPTIONS = [
        Option(name='sleep_interval',
               default=600),
        Option(name='predict_interval',
               default=86400),
        Option(name='predictor_model',
               default='prophetstor')
    ]

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(Module, self).__init__(*args, **kwargs)
        # options
        for opt in self.MODULE_OPTIONS:
            setattr(self, opt['name'], opt['default'])
        # other
        self._run = True
        self._event = Event()
        # for mypy which does not run the code
        if TYPE_CHECKING:
            self.sleep_interval = 0
            self.predict_interval = 0
            self.predictor_model = ''

    def config_notify(self) -> None:
        """Refresh module options; additionally wake the serve() loop when
        prediction mode is 'local' so a new pass can start promptly."""
        for opt in self.MODULE_OPTIONS:
            setattr(self,
                    opt['name'],
                    self.get_module_option(opt['name']))
            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
        if self.get_ceph_option('device_failure_prediction_mode') == 'local':
            self._event.set()

    def refresh_config(self) -> None:
        """Re-read module options without waking the serve() loop."""
        for opt in self.MODULE_OPTIONS:
            setattr(self,
                    opt['name'],
                    self.get_module_option(opt['name']))
            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))

    def self_test(self) -> None:
        self.log.debug('self_test enter')
        ret, out, err = self.predict_all_devices()
        assert ret == 0

    def serve(self) -> None:
        """Background loop: run a prediction pass every ``predict_interval``
        seconds while ``device_failure_prediction_mode`` is 'local'."""
        self.log.info('Starting diskprediction local module')
        self.config_notify()
        last_predicted = None
        ls = self.get_store('last_predicted')
        if ls:
            try:
                last_predicted = datetime.datetime.strptime(ls, TIME_FORMAT)
            except ValueError:
                pass
        self.log.debug('Last predicted %s', last_predicted)
        while self._run:
            self.refresh_config()
            mode = self.get_ceph_option('device_failure_prediction_mode')
            if mode == 'local':
                now = datetime.datetime.utcnow()
                if not last_predicted:
                    next_predicted = now
                else:
                    # snap the next run onto the predict_interval grid
                    predicted_frequency = self.predict_interval or 86400
                    seconds = (last_predicted - datetime.datetime.utcfromtimestamp(0)).total_seconds()
                    seconds -= seconds % predicted_frequency
                    seconds += predicted_frequency
                    next_predicted = datetime.datetime.utcfromtimestamp(seconds)
                # BUGFIX: last_predicted is None on the very first pass when
                # nothing was stored; calling strftime() on it unguarded
                # raised AttributeError and killed the serve() thread.
                self.log.debug('Last scrape %s, next scrape due %s',
                               last_predicted.strftime(TIME_FORMAT) if last_predicted else 'never',
                               next_predicted.strftime(TIME_FORMAT))
                if now >= next_predicted:
                    self.predict_all_devices()
                    last_predicted = now
                    self.set_store('last_predicted', last_predicted.strftime(TIME_FORMAT))
            sleep_interval = self.sleep_interval or 60
            self.log.debug('Sleeping for %d seconds', sleep_interval)
            self._event.wait(sleep_interval)
            self._event.clear()

    def shutdown(self) -> None:
        self.log.info('Stopping')
        self._run = False
        self._event.set()

    @staticmethod
    def _convert_timestamp(predicted_timestamp: int, life_expectancy_day: int) -> str:
        """
        :param predicted_timestamp: unit is nanoseconds
        :param life_expectancy_day: unit is seconds
        :return:
            date format '%Y-%m-%d' ex. 2018-01-01
        """
        return datetime.datetime.fromtimestamp(
            predicted_timestamp / (1000 ** 3) + life_expectancy_day).strftime('%Y-%m-%d')

    def _predict_life_expectancy(self, devid: str) -> str:
        """Fetch health data for *devid* and run the configured predictor.

        :return: predictor verdict ('Good'/'Warning'/'Bad'/'Unknown'), or
                 '' when prediction could not be attempted (no predictor,
                 bad model dir, or fewer than 6 days of data).
        """
        predicted_result = ''
        health_data: Dict[str, Dict[str, Any]] = {}
        predict_datas: List[DevSmartT] = []
        try:
            r, outb, outs = self.remote(
                'devicehealth', 'show_device_metrics', devid=devid, sample='')
            if r != 0:
                self.log.error('failed to get device %s health', devid)
                health_data = {}
            else:
                health_data = json.loads(outb)
        except Exception as e:
            self.log.error('failed to get device %s health data due to %s', devid, str(e))
        # initialize appropriate disk failure predictor model
        obj_predictor = Predictor.create(self.predictor_model)
        if obj_predictor is None:
            self.log.error('invalid value received for MODULE_OPTIONS.predictor_model')
            return predicted_result
        try:
            obj_predictor.initialize(
                "{}/models/{}".format(get_diskfailurepredictor_path(), self.predictor_model))
        except Exception as e:
            self.log.error('Error initializing predictor: %s', e)
            return predicted_result
        if len(health_data) >= 6:
            o_keys = sorted(health_data.keys(), reverse=True)
            for o_key in o_keys:
                # get values for current day (?)
                dev_smart = {}
                s_val = health_data[o_key]
                # add all smart attributes
                ata_smart = s_val.get('ata_smart_attributes', {})
                for attr in ata_smart.get('table', []):
                    # get raw smart values; prefer the decimal string, then
                    # its leading token, then the numeric 'value' field
                    if attr.get('raw', {}).get('string') is not None:
                        if str(attr.get('raw', {}).get('string', '0')).isdigit():
                            dev_smart['smart_%s_raw' % attr.get('id')] = \
                                int(attr.get('raw', {}).get('string', '0'))
                        else:
                            if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit():
                                dev_smart['smart_%s_raw' % attr.get('id')] = \
                                    int(attr.get('raw', {}).get('string',
                                                                '0').split(' ')[0])
                            else:
                                dev_smart['smart_%s_raw' % attr.get('id')] = \
                                    attr.get('raw', {}).get('value', 0)
                    # get normalized smart values
                    if attr.get('value') is not None:
                        dev_smart['smart_%s_normalized' % attr.get('id')] = \
                            attr.get('value')
                # add power on hours manually if not available in smart attributes
                power_on_time = s_val.get('power_on_time', {}).get('hours')
                if power_on_time is not None:
                    dev_smart['smart_9_raw'] = int(power_on_time)
                # add device capacity
                user_capacity = s_val.get('user_capacity', {}).get('bytes')
                if user_capacity is not None:
                    dev_smart['user_capacity'] = user_capacity
                else:
                    self.log.debug('user_capacity not found in smart attributes list')
                # add device model
                model_name = s_val.get('model_name')
                if model_name is not None:
                    dev_smart['model_name'] = model_name
                # add vendor
                vendor = s_val.get('vendor')
                if vendor is not None:
                    dev_smart['vendor'] = vendor
                # if smart data was found, then add that to list
                if dev_smart:
                    predict_datas.append(dev_smart)
                if len(predict_datas) >= 12:
                    break
        else:
            self.log.error('unable to predict device due to health data records less than 6 days')
        if len(predict_datas) >= 6:
            predicted_result = obj_predictor.predict(predict_datas)
        return predicted_result

    def predict_life_expectancy(self, devid: str) -> Tuple[int, str, str]:
        """Map a predictor verdict onto the coarse time windows reported
        to callers (invoked remotely by the devicehealth module)."""
        result = self._predict_life_expectancy(devid)
        if result.lower() == 'good':
            return 0, '>6w', ''
        elif result.lower() == 'warning':
            return 0, '>=2w and <=6w', ''
        elif result.lower() == 'bad':
            return 0, '<2w', ''
        else:
            return 0, 'unknown', ''

    def _reset_device_life_expectancy(self, device_id: str) -> int:
        """Clear any stored life expectancy for *device_id* via the mon."""
        result = CommandResult('')
        self.send_command(result, 'mon', '', json.dumps({
            'prefix': 'device rm-life-expectancy',
            'devid': device_id
        }), '')
        ret, _, outs = result.wait()
        if ret != 0:
            self.log.error(
                'failed to reset device life expectancy, %s' % outs)
        return ret

    def _set_device_life_expectancy(self,
                                    device_id: str,
                                    from_date: str,
                                    to_date: Optional[str] = None) -> int:
        """Store a life-expectancy window for *device_id* via the mon;
        *to_date* is omitted for open-ended ('good') predictions."""
        result = CommandResult('')
        if to_date is None:
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'device set-life-expectancy',
                'devid': device_id,
                'from': from_date
            }), '')
        else:
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'device set-life-expectancy',
                'devid': device_id,
                'from': from_date,
                'to': to_date
            }), '')
        ret, _, outs = result.wait()
        if ret != 0:
            self.log.error(
                'failed to set device life expectancy, %s' % outs)
        return ret

    def predict_all_devices(self) -> Tuple[int, str, str]:
        """Predict every known device with at least one daemon and record
        (or reset) its life expectancy."""
        self.log.debug('predict_all_devices')
        devices = self.get('devices').get('devices', [])
        for devInfo in devices:
            if not devInfo.get('daemons'):
                continue
            if not devInfo.get('devid'):
                continue
            self.log.debug('%s' % devInfo)
            result = self._predict_life_expectancy(devInfo['devid'])
            # BUGFIX: the predictors return 'Unknown' (capitalized) and
            # _predict_life_expectancy returns '' when no prediction was
            # possible; the original case-sensitive `result == 'unknown'`
            # comparison never matched, so compare case-insensitively and
            # treat the empty result explicitly.
            if not result or result.lower() == 'unknown':
                self._reset_device_life_expectancy(devInfo['devid'])
                continue
            predicted = int(time.time() * (1000 ** 3))
            if result.lower() == 'good':
                life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS
                life_expectancy_day_max = 0
            elif result.lower() == 'warning':
                life_expectancy_day_min = (TIME_WEEK * 2)
                life_expectancy_day_max = (TIME_WEEK * 6)
            elif result.lower() == 'bad':
                life_expectancy_day_min = 0
                life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS
            else:
                predicted = 0
                life_expectancy_day_min = 0
                life_expectancy_day_max = 0
            # NOTE(review): for a 'bad' verdict life_expectancy_day_min is 0,
            # so this condition is False and the expectancy is reset rather
            # than recorded as '<2w' — behavior kept as-is; confirm intent
            # upstream before changing it.
            if predicted and devInfo['devid'] and life_expectancy_day_min:
                from_date = None
                to_date = None
                try:
                    assert life_expectancy_day_min
                    from_date = self._convert_timestamp(predicted, life_expectancy_day_min)
                    if life_expectancy_day_max:
                        to_date = self._convert_timestamp(predicted, life_expectancy_day_max)
                    self._set_device_life_expectancy(devInfo['devid'], from_date, to_date)
                    # consistency: use self.log like the rest of this class
                    # (was self._logger, MgrModule's private attribute)
                    self.log.info(
                        'succeed to set device {} life expectancy from: {}, to: {}'.format(
                            devInfo['devid'], from_date, to_date))
                except Exception as e:
                    self.log.error(
                        'failed to set device {} life expectancy from: {}, to: {}, {}'.format(
                            devInfo['devid'], from_date, to_date, str(e)))
            else:
                self._reset_device_life_expectancy(devInfo['devid'])
        return 0, 'succeed to predicted all devices', ''
| 12,787 | 40.79085 | 102 | py |
null | ceph-main/src/pybind/mgr/diskprediction_local/predictor.py | """Machine learning model for disk failure prediction.
The classes defined here provide the disk failure prediction module.
RHDiskFailurePredictor uses the models developed at the AICoE in the
Office of the CTO at Red Hat. These models were built using the open
source Backblaze SMART metrics dataset.
PSDiskFailurePredictor uses the models developed by ProphetStor as an
example.
An instance of the predictor is initialized by providing the path to trained
models. Then, to predict hard drive health and deduce time to failure, the
predict function is called with 6 days worth of SMART data from the hard drive.
It will return a string to indicate disk failure status: "Good", "Warning",
"Bad", or "Unknown".
An example code is as follows:
>>> model = RHDiskFailurePredictor()
>>> model.initialize(get_diskfailurepredictor_path() + "/models/redhat")
>>> vendor = list(RHDiskFailurePredictor.MANUFACTURER_MODELNAME_PREFIXES.keys())[0]
>>> disk_days = [{'vendor': vendor}]
>>> model.predict(disk_days)
'Unknown'
"""
import os
import json
import pickle
import logging
from typing import Any, Dict, List, Optional, Sequence, Tuple
import numpy as np
def get_diskfailurepredictor_path() -> str:
    """Return the absolute path of the directory this module lives in
    (the trained model files are stored beneath it)."""
    return os.path.dirname(os.path.abspath(__file__))
# Type aliases shared by the predictor implementations below.
DevSmartT = Dict[str, Any]  # one day of SMART data: attribute name -> value
AttrNamesT = List[str]  # names of SMART attributes
AttrDiffsT = List[Dict[str, int]]  # per-day differentials of SMART attribute values
class Predictor:
    """Common interface for disk-failure predictors.

    Concrete implementations are selected by name via :meth:`create`.
    """

    @classmethod
    def create(cls, name: str) -> Optional['Predictor']:
        """Return a new predictor instance for *name*, or None when the
        name is not recognized."""
        if name == 'prophetstor':
            return PSDiskFailurePredictor()
        if name == 'redhat':
            return RHDiskFailurePredictor()
        return None

    def initialize(self, model_dir: str) -> None:
        """Load trained models from *model_dir*."""
        raise NotImplementedError()

    def predict(self, dataset: Sequence[DevSmartT]) -> str:
        """Predict disk health from consecutive days of SMART data."""
        raise NotImplementedError()
class RHDiskFailurePredictor(Predictor):
    """Disk failure prediction module developed at Red Hat
    This class implements a disk failure prediction module.
    """

    # json with manufacturer names as keys
    # and features used for prediction as values
    CONFIG_FILE = "config.json"
    # model output class id -> human-readable verdict
    PREDICTION_CLASSES = {-1: "Unknown", 0: "Good", 1: "Warning", 2: "Bad"}
    # model name prefixes to identify vendor
    MANUFACTURER_MODELNAME_PREFIXES = {
        "WDC": "WDC",
        "Toshiba": "Toshiba",  # for cases like "Toshiba xxx"
        "TOSHIBA": "Toshiba",  # for cases like "TOSHIBA xxx"
        "toshiba": "Toshiba",  # for cases like "toshiba xxx"
        "S": "Seagate",  # for cases like "STxxxx" and "Seagate BarraCuda ZAxxx"
        "ZA": "Seagate",  # for cases like "ZAxxxx"
        "Hitachi": "Hitachi",
        "HGST": "HGST",
    }
    LOGGER = logging.getLogger()

    def __init__(self) -> None:
        """
        This function may throw exception due to wrong file operation.
        """
        self.model_dirpath = ""
        # manufacturer -> list of SMART attribute names the model was trained on
        self.model_context: Dict[str, List[str]] = {}

    def initialize(self, model_dirpath: str) -> None:
        """Initialize all models. Save paths of all trained model files to list

        Arguments:
            model_dirpath {str} -- path to directory of trained models

        Raises:
            Exception -- if the config file or any referenced scaler/model
                         file is missing from model_dirpath
        """
        # read config file as json, if it exists
        config_path = os.path.join(model_dirpath, self.CONFIG_FILE)
        if not os.path.isfile(config_path):
            raise Exception("Missing config file: " + config_path)
        with open(config_path) as f_conf:
            self.model_context = json.load(f_conf)
        # ensure all manufacturers whose context is defined in config file
        # have models and scalers saved inside model_dirpath
        for manufacturer in self.model_context:
            scaler_path = os.path.join(model_dirpath, manufacturer + "_scaler.pkl")
            if not os.path.isfile(scaler_path):
                raise Exception(f"Missing scaler file: {scaler_path}")
            model_path = os.path.join(model_dirpath, manufacturer + "_predictor.pkl")
            if not os.path.isfile(model_path):
                raise Exception(f"Missing model file: {model_path}")
        self.model_dirpath = model_dirpath

    def __preprocess(self, disk_days: Sequence[DevSmartT], manufacturer: str) -> Optional[np.ndarray]:
        """Scales and transforms input dataframe to feed it to prediction model

        Arguments:
            disk_days {list} -- list in which each element is a dictionary with
                key,val as feature name,value respectively.
                e.g. [{'smart_1_raw': 0, 'user_capacity': 512 ...}, ...]
            manufacturer {str} -- manufacturer of the hard drive

        Returns:
            numpy.ndarray -- (n, d) shaped array of n days worth of data and d
            features, scaled; None when the manufacturer is unknown or the
            required attributes are missing from the input
        """
        # get the attributes that were used to train model for current manufacturer
        try:
            model_smart_attr = self.model_context[manufacturer]
        except KeyError:
            RHDiskFailurePredictor.LOGGER.debug(
                "No context (SMART attributes on which model has been trained) found for manufacturer: {}".format(
                    manufacturer
                )
            )
            return None
        # convert to structured array, keeping only the required features
        # assumes all data is in float64 dtype
        try:
            struc_dtypes = [(attr, np.float64) for attr in model_smart_attr]
            values = [tuple(day[attr] for attr in model_smart_attr) for day in disk_days]
            disk_days_sa = np.array(values, dtype=struc_dtypes)
        except KeyError:
            RHDiskFailurePredictor.LOGGER.debug(
                "Mismatch in SMART attributes used to train model and SMART attributes available"
            )
            return None
        # view structured array as 2d array for applying rolling window transforms
        # do not include capacity_bytes in this. only use smart_attrs
        disk_days_attrs = disk_days_sa[[attr for attr in model_smart_attr if 'smart_' in attr]]\
            .view(np.float64).reshape(disk_days_sa.shape + (-1,))
        # featurize n (6 to 12) days data - mean,std,coefficient of variation
        # current model is trained on 6 days of data because that is what will be
        # available at runtime
        # rolling time window interval size in days
        roll_window_size = 6
        # rolling means generator
        dataset_size = disk_days_attrs.shape[0] - roll_window_size + 1
        gen = (disk_days_attrs[i: i + roll_window_size, ...].mean(axis=0)
               for i in range(dataset_size))
        means = np.vstack(gen)  # type: ignore
        # rolling stds generator
        gen = (disk_days_attrs[i: i + roll_window_size, ...].std(axis=0, ddof=1)
               for i in range(dataset_size))
        stds = np.vstack(gen)  # type: ignore
        # coefficient of variation (std/mean); NaNs from zero means -> 0
        cvs = stds / means
        cvs[np.isnan(cvs)] = 0
        featurized = np.hstack((means,
                                stds,
                                cvs,
                                disk_days_sa['user_capacity'][: dataset_size].reshape(-1, 1)))
        # scale features with the pickled per-manufacturer scaler
        scaler_path = os.path.join(self.model_dirpath, manufacturer + "_scaler.pkl")
        with open(scaler_path, 'rb') as f:
            scaler = pickle.load(f)
        featurized = scaler.transform(featurized)
        return featurized

    @staticmethod
    def __get_manufacturer(model_name: str) -> Optional[str]:
        """Returns the manufacturer name for a given hard drive model name

        Arguments:
            model_name {str} -- hard drive model name

        Returns:
            str -- lower-cased manufacturer name, or None when no known
            prefix matches
        """
        for prefix, manufacturer in RHDiskFailurePredictor.MANUFACTURER_MODELNAME_PREFIXES.items():
            if model_name.startswith(prefix):
                return manufacturer.lower()
        # print error message
        RHDiskFailurePredictor.LOGGER.debug(
            f"Could not infer manufacturer from model name {model_name}")
        return None

    def predict(self, disk_days: Sequence[DevSmartT]) -> str:
        """Return 'Good'/'Warning'/'Bad' for the given consecutive days of
        SMART data, or 'Unknown' when the vendor cannot be determined or
        preprocessing fails."""
        # get manufacturer preferably as a smartctl attribute
        # if not available then infer using model name
        manufacturer = disk_days[0].get("vendor")
        if manufacturer is None:
            RHDiskFailurePredictor.LOGGER.debug(
                '"vendor" field not found in smartctl output. Will try to infer manufacturer from model name.'
            )
            manufacturer = RHDiskFailurePredictor.__get_manufacturer(
                disk_days[0].get("model_name", ""))
        # print error message, return Unknown, and continue execution
        if manufacturer is None:
            RHDiskFailurePredictor.LOGGER.debug(
                "Manufacturer could not be determiend. This may be because \
                DiskPredictor has never encountered this manufacturer before, \
                or the model name is not according to the manufacturer's \
                naming conventions known to DiskPredictor"
            )
            return RHDiskFailurePredictor.PREDICTION_CLASSES[-1]
        # preprocess for feeding to model
        preprocessed_data = self.__preprocess(disk_days, manufacturer)
        if preprocessed_data is None:
            return RHDiskFailurePredictor.PREDICTION_CLASSES[-1]
        # get model for current manufacturer
        model_path = os.path.join(
            self.model_dirpath, manufacturer + "_predictor.pkl"
        )
        with open(model_path, 'rb') as f:
            model = pickle.load(f)
        # use prediction for most recent day
        # TODO: ensure that most recent day is last element and most previous day
        # is first element in input disk_days
        pred_class_id = model.predict(preprocessed_data)[-1]
        return RHDiskFailurePredictor.PREDICTION_CLASSES[pred_class_id]
class PSDiskFailurePredictor(Predictor):
    """Disk failure prediction developed at ProphetStor
    This class implements a disk failure prediction module.
    """

    CONFIG_FILE = "config.json"
    # SMART attributes excluded from the differential features; smart_9_raw
    # is power-on hours, 241/242 presumably lifetime write/read counters —
    # TODO confirm the latter against the model training data.
    EXCLUDED_ATTRS = ["smart_9_raw", "smart_241_raw", "smart_242_raw"]

    def __init__(self) -> None:
        """
        This function may throw exception due to wrong file operation.
        """
        self.model_dirpath = ""
        # model file name -> ordered list of SMART attribute names it uses
        self.model_context: Dict[str, List[str]] = {}

    def initialize(self, model_dirpath: str) -> None:
        """
        Initialize all models.

        Args:
            model_dirpath: directory containing config.json and model files.
        Raises:
            Exception: if the config file or any referenced model file is
            missing.
        """
        config_path = os.path.join(model_dirpath, self.CONFIG_FILE)
        if not os.path.isfile(config_path):
            raise Exception(f"Missing config file: {config_path}")
        with open(config_path) as f_conf:
            self.model_context = json.load(f_conf)
        for model_name in self.model_context:
            model_path = os.path.join(model_dirpath, model_name)
            if not os.path.isfile(model_path):
                raise Exception(f"Missing model file: {model_path}")
        self.model_dirpath = model_dirpath

    def __preprocess(self, disk_days: Sequence[DevSmartT]) -> Sequence[DevSmartT]:
        """
        Preprocess disk attributes: keep only non-negative smart_*_raw
        attributes present on every day, minus EXCLUDED_ATTRS.

        Args:
            disk_days: Refer to function predict(...).
        Returns:
            new_disk_days: Processed disk days.
        """
        req_attrs = []
        new_disk_days = []
        # only attributes present on every day can be compared across days
        attr_list = set.intersection(*[set(disk_day.keys()) for disk_day in disk_days])
        for attr in attr_list:
            if (
                attr.startswith("smart_") and attr.endswith("_raw")
            ) and attr not in self.EXCLUDED_ATTRS:
                req_attrs.append(attr)
        for disk_day in disk_days:
            new_disk_day = {}
            for attr in req_attrs:
                if float(disk_day[attr]) >= 0.0:
                    new_disk_day[attr] = disk_day[attr]
            new_disk_days.append(new_disk_day)
        return new_disk_days

    @staticmethod
    def __get_diff_attrs(disk_days: Sequence[DevSmartT]) -> Tuple[AttrNamesT, AttrDiffsT]:
        """
        Get 5 days differential attributes.

        Args:
            disk_days: Refer to function predict(...).
        Returns:
            attr_list: All S.M.A.R.T. attributes used in given disk. Here we
                use intersection set of all disk days.
            diff_disk_days: A list struct comprises 5 dictionaries, each
                dictionary contains differential attributes.
        Raises:
            Exceptions of wrong list/dict operations.
        """
        all_attrs = [set(disk_day.keys()) for disk_day in disk_days]
        attr_list = list(set.intersection(*all_attrs))
        prev_days = disk_days[:-1]
        curr_days = disk_days[1:]
        diff_disk_days = []
        # TODO: ensure that this ordering is correct
        for prev, cur in zip(prev_days, curr_days):
            diff_disk_days.append(
                {attr: (int(cur[attr]) - int(prev[attr])) for attr in attr_list}
            )
        return attr_list, diff_disk_days

    def __get_best_models(self, attr_list: AttrNamesT) -> Optional[Dict[str, List[str]]]:
        """
        Find the best model from model list according to given attribute list.

        Args:
            attr_list: All S.M.A.R.T. attributes used in given disk.
        Returns:
            Mapping of model path -> that model's 'ordered' attribute list
            (SMART attribute order matters), for every model whose score is
            within 1 of the best; None when too few attributes match.
        """
        models = self.model_context.keys()
        scores = []
        for model_name in models:
            # score = number of this model's attributes present on the disk
            scores.append(
                sum(attr in attr_list for attr in self.model_context[model_name])
            )
        max_score = max(scores)
        # Skip if too few matched attributes.
        if max_score < 3:
            print("Too few matched attributes")
            return None
        best_models: Dict[str, List[str]] = {}
        best_model_indices = [
            idx for idx, score in enumerate(scores) if score > max_score - 2
        ]
        for model_idx in best_model_indices:
            model_name = list(models)[model_idx]
            model_path = os.path.join(self.model_dirpath, model_name)
            model_attrlist = self.model_context[model_name]
            best_models[model_path] = model_attrlist
        return best_models
        # return os.path.join(self.model_dirpath, model_name), model_attrlist

    @staticmethod
    def __get_ordered_attrs(disk_days: Sequence[DevSmartT], model_attrlist: List[str]) -> List[List[float]]:
        """
        Return ordered attributes of given disk days; attributes missing
        from a day are filled with 0.

        Args:
            disk_days: Unordered disk days.
            model_attrlist: Model's ordered attribute list.
        Returns:
            ordered_attrs: Ordered disk days.
        """
        ordered_attrs = []
        for one_day in disk_days:
            one_day_attrs = []
            for attr in model_attrlist:
                if attr in one_day:
                    one_day_attrs.append(one_day[attr])
                else:
                    one_day_attrs.append(0)
            ordered_attrs.append(one_day_attrs)
        return ordered_attrs

    def predict(self, disk_days: Sequence[DevSmartT]) -> str:
        """
        Predict using given 6-days disk S.M.A.R.T. attributes.

        Args:
            disk_days: A list struct comprises 6 dictionaries. These
                dictionaries store 'consecutive' days of disk SMART
                attributes.
        Returns:
            A string indicates prediction result. One of following four strings
            will be returned according to disk failure status:
            (1) Good : Disk is health
            (2) Warning : Disk has some symptoms but may not fail immediately
            (3) Bad : Disk is in danger and data backup is highly recommended
            (4) Unknown : Not enough data for prediction.
        Raises:
            Pickle exceptions
        """
        all_pred = []
        proc_disk_days = self.__preprocess(disk_days)
        attr_list, diff_data = PSDiskFailurePredictor.__get_diff_attrs(proc_disk_days)
        modellist = self.__get_best_models(attr_list)
        if modellist is None:
            return "Unknown"
        for modelpath in modellist:
            model_attrlist = modellist[modelpath]
            ordered_data = PSDiskFailurePredictor.__get_ordered_attrs(
                diff_data, model_attrlist
            )
            try:
                with open(modelpath, "rb") as f_model:
                    clf = pickle.load(f_model)
            except UnicodeDecodeError:
                # Compatibility for python3
                with open(modelpath, "rb") as f_model:
                    clf = pickle.load(f_model, encoding="latin1")
            # one binary vote per model: 1 if any day predicts failure
            pred = clf.predict(ordered_data)
            all_pred.append(1 if any(pred) else 0)
        # exponential vote score, thresholded into the three health classes
        score = 2 ** sum(all_pred) - len(modellist)
        if score > 10:
            return "Bad"
        if score > 4:
            return "Warning"
        return "Good"
| 17,526 | 35.138144 | 114 | py |
null | ceph-main/src/pybind/mgr/feedback/__init__.py | # flake8: noqa
from .module import FeedbackModule | 49 | 24 | 34 | py |
null | ceph-main/src/pybind/mgr/feedback/model.py | # # -*- coding: utf-8 -*-
from enum import Enum
class Feedback:
    """In-memory representation of an issue for tracker.ceph.com (Redmine)."""

    project_id: int
    tracker_id: int
    subject: str
    description: str
    status: int

    class Project(Enum):
        """Redmine project ids for the various Ceph components."""
        dashboard = 46
        block = 9  # rbd
        object = 10  # rgw
        file_system = 13  # cephfs
        ceph_manager = 46
        orchestrator = 42
        ceph_volume = 39
        core_ceph = 36  # rados

    class TrackerType(Enum):
        """Redmine tracker (issue type) ids."""
        bug = 1
        feature = 2

    class Status(Enum):
        """Redmine status ids; new issues always start as 'new'."""
        new = 1

    def __init__(self, project_id, tracker_id, subject, description):
        self.project_id = int(project_id)
        self.tracker_id = int(tracker_id)
        self.subject = subject
        self.description = description
        self.status = Feedback.Status.new.value

    def as_dict(self):
        """Serialize into the payload shape the Redmine REST API expects."""
        issue = {
            "project": {
                "id": self.project_id
            },
            "tracker_id": self.tracker_id,
            "Status": self.status,
            "subject": self.subject,
            "description": self.description
        }
        return {"issue": issue}
| 1,138 | 22.729167 | 69 | py |
null | ceph-main/src/pybind/mgr/feedback/module.py |
"""
Feedback module
See doc/mgr/feedback.rst for more info.
"""
from requests.exceptions import RequestException
from mgr_module import CLIReadCommand, HandleCommandResult, MgrModule
import errno
from .service import CephTrackerClient
from .model import Feedback
class FeedbackModule(MgrModule):
    """mgr module that files user feedback as issues on tracker.ceph.com.

    Exposes both 'ceph feedback ...' CLI commands and plain methods used
    by the dashboard frontend.
    """

    # there are CLI commands we implement
    @CLIReadCommand('feedback set api-key')
    def _cmd_feedback_set_api_key(self, key: str) -> HandleCommandResult:
        """
        Set Ceph Issue Tracker API key
        """
        try:
            self.set_store('api_key', key)
        except Exception as error:
            return HandleCommandResult(stderr=f'Exception in setting API key : {error}')
        return HandleCommandResult(stdout="Successfully updated API key")

    @CLIReadCommand('feedback delete api-key')
    def _cmd_feedback_delete_api_key(self) -> HandleCommandResult:
        """
        Delete Ceph Issue Tracker API key
        """
        try:
            # storing None removes the key from the mgr store
            self.set_store('api_key', None)
        except Exception as error:
            return HandleCommandResult(stderr=f'Exception in deleting API key : {error}')
        return HandleCommandResult(stdout="Successfully deleted key")

    @CLIReadCommand('feedback get api-key')
    def _cmd_feedback_get_api_key(self) -> HandleCommandResult:
        """
        Get Ceph Issue Tracker API key
        """
        try:
            key = self.get_store('api_key')
            if key is None:
                return HandleCommandResult(stderr='Issue tracker key is not set. Set key with `ceph feedback api-key set <your_key>`')
        except Exception as error:
            return HandleCommandResult(stderr=f'Error in retreiving issue tracker API key: {error}')
        return HandleCommandResult(stdout=f'Your key: {key}')

    @CLIReadCommand('feedback issue list')
    def _cmd_feedback_issue_list(self) -> HandleCommandResult:
        """
        Fetch issue list
        """
        tracker_client = CephTrackerClient()
        try:
            response = tracker_client.list_issues()
        except Exception:
            return HandleCommandResult(stderr="Error occurred. Try again later")
        return HandleCommandResult(stdout=str(response))

    @CLIReadCommand('feedback issue report')
    def _cmd_feedback_issue_report(self, project: str, tracker: str, subject: str, description: str) -> HandleCommandResult:
        """
        Create an issue
        """
        try:
            feedback = Feedback(Feedback.Project[project].value,
                                Feedback.TrackerType[tracker].value, subject, description)
        except KeyError:
            # BUGFIX: was `return -errno.EINVAL, '', 'Invalid arguments'`,
            # a bare tuple despite the HandleCommandResult annotation and
            # the style of every other return in this class.
            return HandleCommandResult(retval=-errno.EINVAL, stderr='Invalid arguments')
        try:
            current_api_key = self.get_store('api_key')
            if current_api_key is None:
                return HandleCommandResult(stderr='Issue tracker key is not set. Set key with `ceph set issue_key <your_key>`')
        except Exception as error:
            return HandleCommandResult(stderr=f'Error in retreiving issue tracker API key: {error}')
        tracker_client = CephTrackerClient()
        try:
            response = tracker_client.create_issue(feedback, current_api_key)
        except RequestException as error:
            return HandleCommandResult(stderr=f'Error in creating issue: {str(error)}. Please set valid API key.')
        return HandleCommandResult(stdout=f'{str(response)}')

    def set_api_key(self, key: str):
        """Store the tracker API key (dashboard entry point)."""
        try:
            self.set_store('api_key', key)
        except Exception as error:
            raise RequestException(f'Exception in setting API key : {error}')
        return 'Successfully updated API key'

    def get_api_key(self):
        """Return the stored tracker API key, or None when unset."""
        try:
            key = self.get_store('api_key')
        except Exception as error:
            raise RequestException(f'Error in retreiving issue tracker API key : {error}')
        return key

    def is_api_key_set(self):
        """Return True when a non-empty tracker API key is stored."""
        try:
            key = self.get_store('api_key')
        except Exception as error:
            raise RequestException(f'Error in retreiving issue tracker API key : {error}')
        if key is None:
            return False
        return key != ''

    def delete_api_key(self):
        """Remove the stored tracker API key."""
        try:
            self.set_store('api_key', None)
        except Exception as error:
            raise RequestException(f'Exception in deleting API key : {error}')
        return 'Successfully deleted API key'

    def get_issues(self):
        """Fetch the current issue list from the tracker."""
        tracker_client = CephTrackerClient()
        return tracker_client.list_issues()

    def validate_and_create_issue(self, project: str, tracker: str, subject: str, description: str, api_key=None):
        """Validate the arguments and create a tracker issue.

        Uses *api_key* when given, otherwise the stored key; on success
        with an explicit key and no stored one, the key is persisted.
        A failed request invalidates the stored key and re-raises.
        """
        feedback = Feedback(Feedback.Project[project].value,
                            Feedback.TrackerType[tracker].value, subject, description)
        tracker_client = CephTrackerClient()
        stored_api_key = self.get_store('api_key')
        try:
            if api_key:
                result = tracker_client.create_issue(feedback, api_key)
            else:
                result = tracker_client.create_issue(feedback, stored_api_key)
        except RequestException:
            self.set_store('api_key', None)
            raise
        if not stored_api_key:
            self.set_store('api_key', api_key)
        return result
| 5,337 | 37.128571 | 134 | py |
null | ceph-main/src/pybind/mgr/feedback/service.py | # -*- coding: utf-8 -*-
import json
import requests
from requests.exceptions import RequestException
from .model import Feedback
class config:
    # Connection settings for the Ceph issue tracker (Redmine).
    url = 'tracker.ceph.com'
    port = 443  # NOTE(review): unused in this file — requests below hard-code https
class CephTrackerClient():
    """Thin HTTP client for the tracker.ceph.com (Redmine) REST API."""

    def list_issues(self):
        '''
        Fetch an issue from the Ceph Issue tracker
        '''
        headers = {
            'Content-Type': 'application/json',
        }
        response = requests.get(
            f'https://{config.url}/issues.json', headers=headers)
        if not response.ok:
            if response.status_code == 404:
                raise FileNotFoundError
            raise RequestException(response.status_code)
        return {"message": response.json()}

    def create_issue(self, feedback: Feedback, api_key: str):
        '''
        Create an issue in the Ceph Issue tracker
        '''
        # BUGFIX: the dict literal below was wrapped in a try/except
        # KeyError, but a dict literal cannot raise KeyError — the dead
        # handler (and its misleading "API Key not set" message) is removed.
        headers = {
            'Content-Type': 'application/json',
            'X-Redmine-API-Key': api_key,
        }
        data = json.dumps(feedback.as_dict())
        response = requests.post(
            f'https://{config.url}/projects/{feedback.project_id}/issues.json',
            headers=headers, data=data)
        if not response.ok:
            if response.status_code == 401:
                raise RequestException("Unauthorized. Invalid issue tracker API key")
            raise RequestException(response.reason)
        return {"message": response.json()}
| 1,536 | 29.74 | 85 | py |
null | ceph-main/src/pybind/mgr/hello/__init__.py | # flake8: noqa
from .module import Hello
| 41 | 13 | 25 | py |
null | ceph-main/src/pybind/mgr/hello/module.py |
"""
A hello world module
See doc/mgr/hello.rst for more info.
"""
from mgr_module import CLIReadCommand, HandleCommandResult, MgrModule, Option
from threading import Event
from typing import cast, Any, Optional, TYPE_CHECKING
import errno
class Hello(MgrModule):
    # Module options are settable at runtime with
    #
    #   ceph config set global mgr/hello/<name> <value>
    #
    # e.g.,
    #
    #   ceph config set global mgr/hello/place Earth
    #
    MODULE_OPTIONS = [
        Option(name='place',
               default='world',
               desc='a place in the world',
               runtime=True),   # can be updated at runtime (no mgr restart)
        Option(name='emphatic',
               type='bool',
               desc='whether to say it loudly',
               default=True,
               runtime=True),
        Option(name='foo',
               type='str',
               enum_allowed=['a', 'b', 'c'],
               default='a',
               runtime=True)
    ]

    # "Native" Ceph options this module reads.
    NATIVE_OPTIONS = [
        'mgr_tick_period',
    ]

    def __init__(self, *args: Any, **kwargs: Any):
        super().__init__(*args, **kwargs)
        # members backing serve() / shutdown()
        self.run = True
        self.event = Event()
        # make sure the option-backed members exist; see config_notify()
        self.config_notify()
        # for mypy, which does not execute the code above
        if TYPE_CHECKING:
            self.mgr_tick_period = 0

    def config_notify(self) -> None:
        """
        Called whenever one of our config options changes; mirrors every
        MODULE_OPTIONS entry (and each NATIVE_OPTIONS name) into a same-named
        attribute, e.g. the 'emphatic' option is available as self.emphatic.
        """
        for opt in self.MODULE_OPTIONS:
            name = opt['name']
            setattr(self, name, self.get_module_option(name))
            self.log.debug(' mgr option %s = %s', name, getattr(self, name))
        for opt in self.NATIVE_OPTIONS:
            setattr(self, opt, self.get_ceph_option(opt))
            self.log.debug(' native option %s = %s', opt, getattr(self, opt))

    @CLIReadCommand('hello')
    def hello(self, person_name: Optional[str] = None) -> HandleCommandResult:
        """
        Say hello
        """
        # fall back to the configured 'place' when no name was given
        who = person_name if person_name is not None else cast(str, self.get_module_option('place'))
        fin = '!' if self.get_module_option('emphatic') else ''
        return HandleCommandResult(stdout=f'Hello, {who}{fin}')

    @CLIReadCommand('count')
    def count(self, num: int) -> HandleCommandResult:
        """
        Do some counting
        """
        # guard clauses: only 1..10 is accepted
        if num < 1:
            return HandleCommandResult(retval=-errno.EINVAL,
                                       stdout='',
                                       stderr='That\'s too small a number')
        if num > 10:
            return HandleCommandResult(retval=-errno.EINVAL,
                                       stdout='',
                                       stderr='That\'s too big a number')
        counted = ', '.join(str(x) for x in range(1, num + 1))
        return HandleCommandResult(retval=0,
                                   stdout='Hello, I am the count!\n' + counted + '!',
                                   stderr='')

    def serve(self) -> None:
        """
        Background thread entry point; a place for any ongoing activity.
        mgr_tick_period (default: 2) is used purely to illustrate reading a
        native ceph option — real work would pick a suitable frequency.
        """
        self.log.info("Starting")
        while self.run:
            sleep_interval = self.mgr_tick_period
            self.log.debug('Sleeping for %d seconds', sleep_interval)
            self.event.wait(sleep_interval)
            self.event.clear()

    def shutdown(self) -> None:
        """
        Called by the mgr when the module must stop, i.e. when serve()
        should return.
        """
        self.log.info('Stopping')
        self.run = False
        self.event.set()
| 4,515 | 31.724638 | 78 | py |
null | ceph-main/src/pybind/mgr/influx/__init__.py | from .module import Module
| 27 | 13 | 26 | py |
null | ceph-main/src/pybind/mgr/influx/module.py | from contextlib import contextmanager
from datetime import datetime
from threading import Event, Thread
from itertools import chain
import queue
import json
import errno
import time
from typing import cast, Any, Dict, Iterator, List, Optional, Tuple, Union
from mgr_module import CLICommand, CLIReadCommand, CLIWriteCommand, MgrModule, Option, OptionValue
try:
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
from requests.exceptions import RequestException
except ImportError:
InfluxDBClient = None
class Module(MgrModule):
    """Periodically ship Ceph cluster statistics to an InfluxDB server.

    Statistics are gathered every ``interval`` seconds, chunked into batches
    of ``batch_size`` points, and pushed onto a bounded queue that a pool of
    ``threads`` worker threads drains, writing points via the influxdb
    python client.  An *empty* batch on the queue is the shutdown sentinel
    for the workers (see queue_worker() / shutdown()).
    """

    MODULE_OPTIONS = [
        Option(name='hostname',
               default=None,
               desc='InfluxDB server hostname'),
        Option(name='port',
               type='int',
               default=8086,
               desc='InfluxDB server port'),
        Option(name='database',
               default='ceph',
               desc=('InfluxDB database name. You will need to create this '
                     'database and grant write privileges to the configured '
                     'username or the username must have admin privileges to '
                     'create it.')),
        Option(name='username',
               default=None,
               desc='username of InfluxDB server user'),
        Option(name='password',
               default=None,
               desc='password of InfluxDB server user'),
        Option(name='interval',
               type='secs',
               min=5,
               default=30,
               desc='Time between reports to InfluxDB.  Default 30 seconds.'),
        Option(name='ssl',
               default='false',
               desc='Use https connection for InfluxDB server. Use "true" or "false".'),
        Option(name='verify_ssl',
               default='true',
               desc='Verify https cert for InfluxDB server. Use "true" or "false".'),
        Option(name='threads',
               type='int',
               min=1,
               max=32,
               default=5,
               desc='How many worker threads should be spawned for sending data to InfluxDB.'),
        Option(name='batch_size',
               type='int',
               default=5000,
               desc='How big batches of data points should be when sending to InfluxDB.'),
    ]

    @property
    def config_keys(self) -> Dict[str, OptionValue]:
        """Map of option name -> declared default, derived from MODULE_OPTIONS."""
        return dict((o['name'], o.get('default', None))
                    for o in self.MODULE_OPTIONS)

    COMMANDS = [
        {
            "cmd": "influx config-set name=key,type=CephString "
                   "name=value,type=CephString",
            "desc": "Set a configuration value",
            "perm": "rw"
        },
        {
            "cmd": "influx config-show",
            "desc": "Show current configuration",
            "perm": "r"
        },
        {
            "cmd": "influx send",
            "desc": "Force sending data to Influx",
            "perm": "rw"
        }
    ]

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(Module, self).__init__(*args, **kwargs)
        self.event = Event()
        self.run = True
        self.config: Dict[str, OptionValue] = dict()
        self.workers: List[Thread] = list()
        # bounded so a slow/unreachable InfluxDB cannot grow memory without
        # limit; producers use put(block=False) and surface queue.Full
        self.queue: 'queue.Queue[Optional[List[Dict[str, str]]]]' = queue.Queue(maxsize=100)
        self.health_checks: Dict[str, Dict[str, Any]] = dict()

    def get_fsid(self) -> str:
        """Return the cluster fsid from the mon map."""
        return self.get('mon_map')['fsid']

    @staticmethod
    def can_run() -> Tuple[bool, str]:
        """Report whether the module can run (influxdb client importable)."""
        if InfluxDBClient is not None:
            return True, ""
        else:
            return False, "influxdb python module not found"

    @staticmethod
    def get_timestamp() -> str:
        """Return the current UTC time as an ISO-8601 string with 'Z' suffix."""
        return datetime.utcnow().isoformat() + 'Z'

    @staticmethod
    def chunk(l: Iterator[Dict[str, str]], n: int) -> Iterator[List[Dict[str, str]]]:
        """Yield successive lists of at most ``n`` points drawn from ``l``.

        The trailing partial chunk is only yielded when it is non-empty.
        BUGFIX: previously the final chunk was yielded unconditionally; when
        the number of points was an exact multiple of ``n`` (or the iterator
        was empty) this emitted ``[]``, which queue_worker() interprets as
        the shutdown sentinel and silently killed a worker thread.
        """
        try:
            while True:
                xs = []
                for _ in range(n):
                    xs.append(next(l))
                yield xs
        except StopIteration:
            if xs:
                yield xs

    def queue_worker(self) -> None:
        """Worker thread body: drain batches from the queue into InfluxDB.

        An empty batch is the shutdown sentinel (enqueued by shutdown()).
        Connection and client errors are converted into health checks and
        logged; the worker keeps running.
        """
        while True:
            try:
                points = self.queue.get()
                if not points:
                    self.log.debug('Worker shutting down')
                    break

                start = time.time()
                with self.get_influx_client() as client:
                    client.write_points(points, time_precision='ms')
                runtime = time.time() - start
                self.log.debug('Writing points %d to Influx took %.3f seconds',
                               len(points), runtime)
            except RequestException as e:
                hostname = self.config['hostname']
                port = self.config['port']
                self.log.exception(f"Failed to connect to Influx host {hostname}:{port}")
                self.health_checks.update({
                    'MGR_INFLUX_SEND_FAILED': {
                        'severity': 'warning',
                        'summary': 'Failed to send data to InfluxDB server '
                                   f'at {hostname}:{port} due to a connection error',
                        'detail': [str(e)]
                    }
                })
            except InfluxDBClientError as e:
                self.health_checks.update({
                    'MGR_INFLUX_SEND_FAILED': {
                        'severity': 'warning',
                        'summary': 'Failed to send data to InfluxDB',
                        'detail': [str(e)]
                    }
                })
                self.log.exception('Failed to send data to InfluxDB')
            except queue.Empty:
                continue
            except Exception:
                # BUGFIX: was a bare 'except:', which would also swallow
                # SystemExit/KeyboardInterrupt aimed at this thread.
                self.log.exception('Unhandled Exception while sending to Influx')
            finally:
                self.queue.task_done()

    def get_latest(self, daemon_type: str, daemon_name: str, stat: str) -> int:
        """Return the most recent sample of a perf counter, or 0 if absent."""
        data = self.get_counter(daemon_type, daemon_name, stat)[stat]
        if data:
            return data[-1][1]
        return 0

    def get_df_stats(self, now: str) -> Tuple[List[Dict[str, Any]], Dict[str, str]]:
        """Build per-pool 'ceph_pool_stats' points from the df report.

        Returns the points plus a {pool_id: pool_name} map used by the
        pg-summary collectors.
        """
        df = self.get("df")
        data = []
        pool_info = {}

        df_types = [
            'stored',
            'kb_used',
            'dirty',
            'rd',
            'rd_bytes',
            'stored_raw',
            'wr',
            'wr_bytes',
            'objects',
            'max_avail',
            'quota_objects',
            'quota_bytes'
        ]

        for df_type in df_types:
            for pool in df['pools']:
                point = {
                    "measurement": "ceph_pool_stats",
                    "tags": {
                        "pool_name": pool['name'],
                        "pool_id": pool['id'],
                        "type_instance": df_type,
                        "fsid": self.get_fsid()
                    },
                    "time": now,
                    "fields": {
                        "value": pool['stats'][df_type],
                    }
                }
                data.append(point)
                pool_info.update({str(pool['id']): pool['name']})
        return data, pool_info

    def get_pg_summary_osd(self, pool_info: Dict[str, str], now: str) -> Iterator[Dict[str, Any]]:
        """Yield per-OSD PG state counts as 'ceph_pg_summary_osd' points."""
        pg_sum = self.get('pg_summary')
        osd_sum = pg_sum['by_osd']
        for osd_id, stats in osd_sum.items():
            metadata = self.get_metadata('osd', "%s" % osd_id)
            if not metadata:
                # OSD metadata can be briefly unavailable (e.g. during boot)
                continue

            for stat in stats:
                yield {
                    "measurement": "ceph_pg_summary_osd",
                    "tags": {
                        "ceph_daemon": "osd." + str(osd_id),
                        "type_instance": stat,
                        "host": metadata['hostname']
                    },
                    "time": now,
                    "fields": {
                        "value": stats[stat]
                    }
                }

    def get_pg_summary_pool(self, pool_info: Dict[str, str], now: str) -> Iterator[Dict[str, Any]]:
        """Yield per-pool PG state counts as 'ceph_pg_summary_pool' points."""
        pool_sum = self.get('pg_summary')['by_pool']
        for pool_id, stats in pool_sum.items():
            try:
                pool_name = pool_info[pool_id]
            except KeyError:
                self.log.error('Unable to find pool name for pool {}'.format(pool_id))
                continue
            for stat in stats:
                yield {
                    "measurement": "ceph_pg_summary_pool",
                    "tags": {
                        "pool_name": pool_name,
                        "pool_id": pool_id,
                        "type_instance": stat,
                    },
                    "time": now,
                    "fields": {
                        "value": stats[stat],
                    }
                }

    def get_daemon_stats(self, now: str) -> Iterator[Dict[str, Any]]:
        """Yield one 'ceph_daemon_stats' point per non-histogram perf counter."""
        for daemon, counters in self.get_unlabeled_perf_counters().items():
            svc_type, svc_id = daemon.split(".", 1)
            metadata = self.get_metadata(svc_type, svc_id)
            if metadata is not None:
                hostname = metadata['hostname']
            else:
                hostname = 'N/A'

            for path, counter_info in counters.items():
                if counter_info['type'] & self.PERFCOUNTER_HISTOGRAM:
                    # histograms are not representable as a single value
                    continue

                value = counter_info['value']

                yield {
                    "measurement": "ceph_daemon_stats",
                    "tags": {
                        "ceph_daemon": daemon,
                        "type_instance": path,
                        "host": hostname,
                        "fsid": self.get_fsid()
                    },
                    "time": now,
                    "fields": {
                        "value": value
                    }
                }

    def init_module_config(self) -> None:
        """Load all module options into self.config, normalizing booleans."""
        self.config['hostname'] = \
            self.get_module_option("hostname", default=self.config_keys['hostname'])
        self.config['port'] = \
            cast(int, self.get_module_option("port", default=self.config_keys['port']))
        self.config['database'] = \
            self.get_module_option("database", default=self.config_keys['database'])
        self.config['username'] = \
            self.get_module_option("username", default=self.config_keys['username'])
        self.config['password'] = \
            self.get_module_option("password", default=self.config_keys['password'])
        self.config['interval'] = \
            cast(int, self.get_module_option("interval",
                                             default=self.config_keys['interval']))
        self.config['threads'] = \
            cast(int, self.get_module_option("threads",
                                             default=self.config_keys['threads']))
        self.config['batch_size'] = \
            cast(int, self.get_module_option("batch_size",
                                             default=self.config_keys['batch_size']))
        ssl = cast(str, self.get_module_option("ssl", default=self.config_keys['ssl']))
        self.config['ssl'] = ssl.lower() == 'true'
        verify_ssl = \
            cast(str, self.get_module_option("verify_ssl", default=self.config_keys['verify_ssl']))
        self.config['verify_ssl'] = verify_ssl.lower() == 'true'

    def gather_statistics(self) -> Iterator[Dict[str, str]]:
        """Return a lazy iterator over all data points for one report cycle."""
        now = self.get_timestamp()
        df_stats, pools = self.get_df_stats(now)
        return chain(df_stats, self.get_daemon_stats(now),
                     self.get_pg_summary_osd(pools, now),
                     self.get_pg_summary_pool(pools, now))

    @contextmanager
    def get_influx_client(self) -> Iterator['InfluxDBClient']:
        """Context manager yielding a configured InfluxDBClient, closed on exit."""
        client = InfluxDBClient(self.config['hostname'],
                                self.config['port'],
                                self.config['username'],
                                self.config['password'],
                                self.config['database'],
                                self.config['ssl'],
                                self.config['verify_ssl'])
        try:
            yield client
        finally:
            try:
                client.close()
            except AttributeError:
                # influxdb older than v5.0.0
                pass

    def send_to_influx(self) -> bool:
        """Gather one round of statistics and enqueue it for the workers.

        Creates the target database (with an 8-week retention policy) when
        it does not exist yet.  Returns True on success; on failure a
        health check is raised and False is returned.
        """
        if not self.config['hostname']:
            self.log.error("No Influx server configured, please set one using: "
                           "ceph influx config-set hostname <hostname>")
            self.set_health_checks({
                'MGR_INFLUX_NO_SERVER': {
                    'severity': 'warning',
                    'summary': 'No InfluxDB server configured',
                    'detail': ['Configuration option hostname not set']
                }
            })
            return False

        # start a fresh set of health checks for this cycle
        self.health_checks = dict()

        self.log.debug("Sending data to Influx host: %s",
                       self.config['hostname'])
        try:
            with self.get_influx_client() as client:
                databases = client.get_list_database()
                if {'name': self.config['database']} not in databases:
                    self.log.info("Database '%s' not found, trying to create "
                                  "(requires admin privs). You can also create "
                                  "manually and grant write privs to user "
                                  "'%s'", self.config['database'],
                                  self.config['database'])
                    client.create_database(self.config['database'])
                    client.create_retention_policy(name='8_weeks',
                                                   duration='8w',
                                                   replication='1',
                                                   default=True,
                                                   database=self.config['database'])

            self.log.debug('Gathering statistics')
            points = self.gather_statistics()
            for chunk in self.chunk(points, cast(int, self.config['batch_size'])):
                self.queue.put(chunk, block=False)

            self.log.debug('Queue currently contains %d items',
                           self.queue.qsize())
            return True
        except queue.Full:
            self.health_checks.update({
                'MGR_INFLUX_QUEUE_FULL': {
                    'severity': 'warning',
                    'summary': 'Failed to chunk to InfluxDB Queue',
                    'detail': ['Queue is full. InfluxDB might be slow with '
                               'processing data']
                }
            })
            self.log.error('Queue is full, failed to add chunk')
            return False
        except (RequestException, InfluxDBClientError) as e:
            self.health_checks.update({
                'MGR_INFLUX_DB_LIST_FAILED': {
                    'severity': 'warning',
                    'summary': 'Failed to list/create InfluxDB database',
                    'detail': [str(e)]
                }
            })
            self.log.exception('Failed to list/create InfluxDB database')
            return False
        finally:
            self.set_health_checks(self.health_checks)

    def shutdown(self) -> None:
        """Stop the serve() loop and drain/terminate the worker pool."""
        self.log.info('Stopping influx module')
        self.run = False
        self.event.set()
        self.log.debug('Shutting down queue workers')

        # one empty (sentinel) batch per worker; see queue_worker()
        for _ in self.workers:
            self.queue.put([])

        self.queue.join()

        for worker in self.workers:
            worker.join()

    def self_test(self) -> Optional[str]:
        """Exercise the stat collectors and return their output as JSON."""
        now = self.get_timestamp()
        daemon_stats = list(self.get_daemon_stats(now))
        assert len(daemon_stats)
        df_stats, pools = self.get_df_stats(now)

        result = {
            'daemon_stats': daemon_stats,
            'df_stats': df_stats
        }

        return json.dumps(result, indent=2, sort_keys=True)

    @CLIReadCommand('influx config-show')
    def config_show(self) -> Tuple[int, str, str]:
        """
        Show current configuration
        """
        return 0, json.dumps(self.config, sort_keys=True), ''

    @CLIWriteCommand('influx config-set')
    def config_set(self, key: str, value: str) -> Tuple[int, str, str]:
        """
        Set a configuration value
        """
        if not value:
            return -errno.EINVAL, '', 'Value should not be empty'

        self.log.debug('Setting configuration option %s to %s', key, value)
        try:
            self.set_module_option(key, value)
            self.config[key] = self.get_module_option(key)
            return 0, 'Configuration option {0} updated'.format(key), ''
        except ValueError as e:
            return -errno.EINVAL, '', str(e)

    @CLICommand('influx send')
    def send(self) -> Tuple[int, str, str]:
        """
        Force sending data to Influx
        """
        self.send_to_influx()
        return 0, 'Sending data to Influx', ''

    def serve(self) -> None:
        """Main loop: spawn the worker pool and report every `interval` secs."""
        if InfluxDBClient is None:
            self.log.error("Cannot transmit statistics: influxdb python "
                           "module not found. Did you install it?")
            return

        self.log.info('Starting influx module')
        self.init_module_config()
        self.run = True

        self.log.debug('Starting %d queue worker threads',
                       self.config['threads'])
        for i in range(cast(int, self.config['threads'])):
            worker = Thread(target=self.queue_worker, args=())
            # FIX: Thread.setDaemon() is deprecated (since Python 3.10);
            # assign the daemon attribute directly.
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

        while self.run:
            start = time.time()
            self.send_to_influx()
            runtime = time.time() - start
            self.log.debug('Finished sending data to Influx in %.3f seconds',
                           runtime)
            self.log.debug("Sleeping for %d seconds", self.config['interval'])
            self.event.wait(cast(float, self.config['interval']))
| 18,317 | 37.004149 | 99 | py |
null | ceph-main/src/pybind/mgr/insights/__init__.py | import os
if 'UNITTEST' in os.environ:
import tests
from .module import Module
| 85 | 11.285714 | 28 | py |
null | ceph-main/src/pybind/mgr/insights/health.py | import json
from collections import defaultdict
import datetime
# freq to write cached state to disk
PERSIST_PERIOD = datetime.timedelta(seconds=10)
# on disk key prefix
HEALTH_HISTORY_KEY_PREFIX = "health_history/"
# apply on offset to "now": used for testing
NOW_OFFSET = None
class HealthEncoder(json.JSONEncoder):
    """JSON encoder that serializes ``set`` objects as JSON arrays."""

    def default(self, obj):
        """Return a serializable form of *obj*; sets become lists."""
        if not isinstance(obj, set):
            return super().default(obj)
        return list(obj)
class HealthCheckAccumulator(object):
    """
    Deduplicated storage of health checks.

    Layout: check name -> severity -> {"summary": set(), "detail": set()}.
    """

    def __init__(self, init_checks=None):
        # Nested defaultdicts so new check/severity buckets appear on demand;
        # summary and detail messages are deduplicated via sets.
        def _new_bucket():
            return {"summary": set(), "detail": set()}

        self._checks = defaultdict(lambda: defaultdict(_new_bucket))
        if init_checks:
            self._update(init_checks)

    def __str__(self):
        return f"check count {len(self._checks)}"

    def add(self, checks):
        """
        Add health checks to the current state

        Returns:
            bool: True if the state changed, False otherwise.
        """
        changed = False
        for name, info in checks.items():
            severity = info["severity"]
            # only keep the icky stuff
            if severity == "HEALTH_OK":
                continue
            summaries = [info["summary"]["message"]]
            details = (entry["message"] for entry in info["detail"])
            changed = self._add_check(name, severity, summaries, details) or changed
        return changed

    def checks(self):
        """Return the underlying check structure (not a copy)."""
        return self._checks

    def merge(self, other):
        """Fold another accumulator's checks into this one."""
        assert isinstance(other, HealthCheckAccumulator)
        self._update(other._checks)

    def _update(self, checks):
        """Merge checks with same structure. Does not set dirty bit"""
        for name, by_severity in checks.items():
            for severity, bucket in by_severity.items():
                self._add_check(name, severity,
                                set(bucket["summary"]),
                                set(bucket["detail"]))

    def _add_check(self, check, severity, summaries, details):
        """Insert messages, returning True when anything new was stored."""
        bucket = self._checks[check][severity]
        changed = False

        for message in summaries:
            if message not in bucket["summary"]:
                bucket["summary"].add(message)
                changed = True

        for message in details:
            if message not in bucket["detail"]:
                bucket["detail"].add(message)
                changed = True

        return changed
class HealthHistorySlot(object):
    """
    Manage the life cycle of a health history time slot.

    A time slot is a fixed slice of wall clock time (e.g. every hours, from :00
    to :59), and all health updates that occur during this time are deduplicated
    together. A slot is initially in a clean state, and becomes dirty when a new
    health check is observed. The state of a slot should be persisted when
    need_flush returns true. Once the state has been flushed, reset the dirty
    bit by calling mark_flushed.
    """
    def __init__(self, init_health=None):
        # BUGFIX: the default used to be the mutable 'dict()', which is shared
        # across calls; use a None sentinel instead.
        if init_health is None:
            init_health = {}
        self._checks = HealthCheckAccumulator(init_health.get("checks"))
        self._slot = self._curr_slot()
        self._next_flush = None

    def __str__(self):
        return "key {} next flush {} checks {}".format(
            self.key(), self._next_flush, self._checks)

    def health(self):
        """Return the slot's accumulated checks in persistable form."""
        return dict(checks=self._checks.checks())

    def key(self):
        """Identifier in the persist store"""
        return self._key(self._slot)

    def expired(self):
        """True if this slot is no longer the current slot, False otherwise"""
        return self._slot != self._curr_slot()

    def need_flush(self):
        """True if this slot needs to be flushed, False otherwise"""
        now = HealthHistorySlot._now()
        if self._next_flush is not None:
            if self._next_flush <= now or self.expired():
                return True
        return False

    def mark_flushed(self):
        """Reset the dirty bit. Caller persists state"""
        assert self._next_flush
        self._next_flush = None

    def add(self, health):
        """
        Add health to the underlying health accumulator. When the slot
        transitions from clean to dirty a target flush time is computed.
        """
        changed = self._checks.add(health["checks"])
        if changed and not self._next_flush:
            self._next_flush = HealthHistorySlot._now() + PERSIST_PERIOD
        return changed

    def merge(self, other):
        """Fold another slot's checks into this one (does not set dirty bit)."""
        assert isinstance(other, HealthHistorySlot)
        self._checks.merge(other._checks)

    @staticmethod
    def key_range(hours):
        """Return the time slot keys for the past N hours"""
        def inner(curr, hours):
            slot = curr - datetime.timedelta(hours=hours)
            return HealthHistorySlot._key(slot)
        curr = HealthHistorySlot._curr_slot()
        return map(lambda i: inner(curr, i), range(hours))

    @staticmethod
    def curr_key():
        """Key for the current UTC time slot"""
        return HealthHistorySlot._key(HealthHistorySlot._curr_slot())

    @staticmethod
    def key_to_time(key):
        """Return key converted into datetime"""
        timestr = key[len(HEALTH_HISTORY_KEY_PREFIX):]
        return datetime.datetime.strptime(timestr, "%Y-%m-%d_%H")

    @staticmethod
    def _key(dt):
        """Key format. Example: health_history/2018-11-05_00"""
        return HEALTH_HISTORY_KEY_PREFIX + dt.strftime("%Y-%m-%d_%H")

    @staticmethod
    def _now():
        """Control now time for easier testing"""
        now = datetime.datetime.utcnow()
        if NOW_OFFSET is not None:
            now = now + NOW_OFFSET
        return now

    @staticmethod
    def _curr_slot():
        """Slot for the current UTC time, truncated to the hour"""
        dt = HealthHistorySlot._now()
        return datetime.datetime(
            year=dt.year,
            month=dt.month,
            day=dt.day,
            hour=dt.hour)
| 6,282 | 31.056122 | 80 | py |
null | ceph-main/src/pybind/mgr/insights/module.py | import datetime
import json
import re
import threading
from mgr_module import CLICommand, CLIReadCommand, HandleCommandResult
from mgr_module import MgrModule, CommandResult, NotifyType
from . import health as health_util
# hours of crash history to report
CRASH_HISTORY_HOURS = 24
# hours of health history to report
HEALTH_HISTORY_HOURS = 24
# how many hours of health history to keep
HEALTH_RETENTION_HOURS = 30
# health check name for insights health
INSIGHTS_HEALTH_CHECK = "MGR_INSIGHTS_WARNING"
# version tag for persistent data format
ON_DISK_VERSION = 1
class Module(MgrModule):
    """Insights mgr module: accumulates health history and produces a
    consolidated cluster report via the `insights` CLI command."""

    # only interested in health updates from the mgr
    NOTIFY_TYPES = [NotifyType.health]

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)

        self._shutdown = False
        self._evt = threading.Event()

        # health history tracking
        self._pending_health = []   # health snapshots not yet folded into a slot
        self._health_slot = None    # current HealthHistorySlot
        self._store = {}            # in-memory key/value store (see below)

    # The following three functions, get_store, set_store, and get_store_prefix
    # mask the functions defined in the parent to avoid storing large keys
    # persistently to disk as that was proving problematic. Long term we may
    # implement a different mechanism to make these persistent. When that day
    # comes it should just be a matter of deleting these three functions.
    def get_store(self, key):
        return self._store.get(key)

    def set_store(self, key, value):
        # a value of None deletes the key, mirroring the parent's semantics
        if value is None:
            if key in self._store:
                del self._store[key]
        else:
            self._store[key] = value

    def get_store_prefix(self, prefix):
        return { k: v for k, v in self._store.items() if k.startswith(prefix) }

    def notify(self, ttype: NotifyType, ident):
        """Queue updates for processing"""
        if ttype == NotifyType.health:
            self.log.info("Received health check update {} pending".format(
                len(self._pending_health)))
            health = json.loads(self.get("health")["json"])
            self._pending_health.append(health)
            # wake serve() so the snapshot is folded in promptly
            self._evt.set()

    def serve(self):
        """Background loop: fold pending health snapshots into the current
        time slot and periodically flush/rotate/prune slot state."""
        self._health_reset()
        while True:
            self._evt.wait(health_util.PERSIST_PERIOD.total_seconds())
            self._evt.clear()
            if self._shutdown:
                break

            # when the current health slot expires, finalize it by flushing it to
            # the store, and initializing a new empty slot.
            if self._health_slot.expired():
                self.log.info("Health history slot expired {}".format(
                    self._health_slot))
                self._health_maybe_flush()
                self._health_reset()
                self._health_prune_history(HEALTH_RETENTION_HOURS)

            # fold in pending health snapshots and flush
            self.log.info("Applying {} health updates to slot {}".format(
                len(self._pending_health), self._health_slot))
            for health in self._pending_health:
                self._health_slot.add(health)
            self._pending_health = []
            self._health_maybe_flush()

    def shutdown(self):
        """Signal serve() to exit."""
        self._shutdown = True
        self._evt.set()

    def _health_reset(self):
        """Initialize the current health slot

        The slot will be initialized with any state found to have already been
        persisted, otherwise the slot will start empty.
        """
        key = health_util.HealthHistorySlot.curr_key()
        data = self.get_store(key)
        if data:
            init_health = json.loads(data)
            self._health_slot = health_util.HealthHistorySlot(init_health)
        else:
            self._health_slot = health_util.HealthHistorySlot()
        self.log.info("Reset curr health slot {}".format(self._health_slot))

    def _health_maybe_flush(self):
        """Store the health for the current time slot if needed"""
        self.log.info("Maybe flushing slot {} needed {}".format(
            self._health_slot, self._health_slot.need_flush()))
        if self._health_slot.need_flush():
            key = self._health_slot.key()

            # build store data entry
            slot = self._health_slot.health()
            assert "version" not in slot
            slot.update(dict(version=ON_DISK_VERSION))

            data = json.dumps(slot, cls=health_util.HealthEncoder)
            self.log.debug("Storing health key {} data {}".format(
                key, json.dumps(slot, indent=2, cls=health_util.HealthEncoder)))
            self.set_store(key, data)
            self._health_slot.mark_flushed()

    def _health_filter(self, f):
        """Return the stored history keys whose timestamp satisfies f(ts)."""
        matches = filter(
            lambda t: f(health_util.HealthHistorySlot.key_to_time(t[0])),
            self.get_store_prefix(health_util.HEALTH_HISTORY_KEY_PREFIX).items())
        return map(lambda t: t[0], matches)

    def _health_prune_history(self, hours):
        """Prune old health entries"""
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(hours=hours)
        for key in self._health_filter(lambda ts: ts <= cutoff):
            self.log.info("Removing old health slot key {}".format(key))
            self.set_store(key, None)
        if not hours:
            # hours == 0 means "prune everything", including the live slot
            self._health_slot = health_util.HealthHistorySlot()

    def _health_report(self, hours):
        """
        Report a consolidated health report for the past N hours.
        """
        # roll up the past N hours of health info
        collector = health_util.HealthHistorySlot()
        keys = health_util.HealthHistorySlot.key_range(hours)
        for key in keys:
            data = self.get_store(key)
            self.log.info("Reporting health key {} found {}".format(
                key, bool(data)))
            health = json.loads(data) if data else {}
            slot = health_util.HealthHistorySlot(health)
            collector.merge(slot)

        # include history that hasn't yet been flushed
        collector.merge(self._health_slot)

        return dict(
            current=json.loads(self.get("health")["json"]),
            history=collector.health()
        )

    def _version_parse(self, version):
        """
        Return the components of a Ceph version string.

        This returns nothing when the version string cannot be parsed into its
        constituent components, such as when Ceph has been built with
        ENABLE_GIT_VERSION=OFF.
        """
        r = r"ceph version (?P<release>\d+)\.(?P<major>\d+)\.(?P<minor>\d+)"
        m = re.match(r, version)
        ver = {} if not m else {
            "release": m.group("release"),
            "major": m.group("major"),
            "minor": m.group("minor")
        }
        return {k: int(v) for k, v in ver.items()}

    def _crash_history(self, hours):
        """
        Load crash history for the past N hours from the crash module.

        Returns (report_dict, list_of_error_strings); errors are non-fatal
        and are surfaced in the final report's health check.
        """
        result = dict(
            summary={},
            hours=hours,
        )

        health_check_details = []

        try:
            _, _, crashes = self.remote("crash", "do_json_report", hours)
            result["summary"] = json.loads(crashes)
        except Exception as e:
            errmsg = "failed to invoke crash module"
            self.log.warning("{}: {}".format(errmsg, str(e)))
            health_check_details.append(errmsg)
        else:
            self.log.debug("Crash module invocation succeeded {}".format(
                json.dumps(result["summary"], indent=2)))

        return result, health_check_details

    def _apply_osd_stats(self, osd_map):
        """Annotate each OSD entry in osd_map with its stats in place."""
        # map from osd id to its index in the map structure
        osd_id_to_idx = {}
        for idx in range(len(osd_map["osds"])):
            osd_id_to_idx[osd_map["osds"][idx]["osd"]] = idx

        # include stats, including space utilization performance counters.
        # adapted from dashboard api controller
        for s in self.get('osd_stats')['osd_stats']:
            try:
                idx = osd_id_to_idx[s["osd"]]
                osd_map["osds"][idx].update({'osd_stats': s})
            except KeyError as e:
                self.log.warning("inconsistent api state: {}".format(str(e)))

        for osd in osd_map["osds"]:
            osd['stats'] = {}
            for s in ['osd.numpg', 'osd.stat_bytes', 'osd.stat_bytes_used']:
                osd['stats'][s.split('.')[1]] = self.get_latest('osd', str(osd["osd"]), s)

    def _config_dump(self):
        """Report cluster configuration

        This report is the standard `config dump` report. It does not include
        configuration defaults; these can be inferred from the version number.
        """
        result = CommandResult("")
        args = dict(prefix="config dump", format="json")
        self.send_command(result, "mon", "", json.dumps(args), "")
        ret, outb, outs = result.wait()
        if ret == 0:
            return json.loads(outb), []
        else:
            self.log.warning("send_command 'config dump' failed. \
                    ret={}, outs=\"{}\"".format(ret, outs))
            return [], ["Failed to read monitor config dump"]

    @CLIReadCommand('insights')
    def do_report(self):
        '''
        Retrieve insights report
        '''
        health_check_details = []
        report = {}

        report.update({
            "version": dict(full=self.version,
                            **self._version_parse(self.version))
        })

        # crash history
        crashes, health_details = self._crash_history(CRASH_HISTORY_HOURS)
        report["crashes"] = crashes
        health_check_details.extend(health_details)

        # health history
        report["health"] = self._health_report(HEALTH_HISTORY_HOURS)

        # cluster configuration
        config, health_details = self._config_dump()
        report["config"] = config
        health_check_details.extend(health_details)

        osd_map = self.get("osd_map")
        # NOTE(review): pg_temp is dropped from the report — presumably to
        # bound report size; confirm before relying on its absence.
        del osd_map['pg_temp']
        self._apply_osd_stats(osd_map)
        report["osd_dump"] = osd_map

        report["df"] = self.get("df")
        report["osd_tree"] = self.get("osd_map_tree")
        report["fs_map"] = self.get("fs_map")
        report["crush_map"] = self.get("osd_map_crush")
        report["mon_map"] = self.get("mon_map")
        report["service_map"] = self.get("service_map")
        report["manager_map"] = self.get("mgr_map")
        report["mon_status"] = json.loads(self.get("mon_status")["json"])
        report["pg_summary"] = self.get("pg_summary")
        report["osd_metadata"] = self.get("osd_metadata")

        report.update({
            "errors": health_check_details
        })

        # an incomplete report is still returned, but flagged via health
        if health_check_details:
            self.set_health_checks({
                INSIGHTS_HEALTH_CHECK: {
                    "severity": "warning",
                    "summary": "Generated incomplete Insights report",
                    "detail": health_check_details
                }
            })

        result = json.dumps(report, indent=2, cls=health_util.HealthEncoder)
        return HandleCommandResult(stdout=result)

    @CLICommand('insights prune-health')
    def do_prune_health(self, hours: int):
        '''
        Remove health history older than <hours> hours
        '''
        self._health_prune_history(hours)
        return HandleCommandResult()

    def testing_set_now_time_offset(self, hours):
        """
        Control what "now" time it is by applying an offset. This is called from
        the selftest module to manage testing scenarios related to tracking
        health history.
        """
        hours = int(hours)
        health_util.NOW_OFFSET = datetime.timedelta(hours=hours)
        self.log.warning("Setting now time offset {}".format(health_util.NOW_OFFSET))
| 11,800 | 35.649068 | 90 | py |
null | ceph-main/src/pybind/mgr/insights/tests/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/pybind/mgr/insights/tests/test_health.py | import unittest
from tests import mock
from ..health import *
class HealthChecksTest(unittest.TestCase):
    """Unit tests for HealthCheckAccumulator: construction, merging,
    deduplication and change detection."""

    def test_check_accum_empty(self):
        # health checks accum initially empty reports empty
        h = HealthCheckAccumulator()
        self.assertEqual(h.checks(), {})

        h = HealthCheckAccumulator({})
        self.assertEqual(h.checks(), {})

    def _get_init_checks(self):
        # fixture: one check/severity with list/tuple message containers
        return HealthCheckAccumulator({
            "C0": {
                "S0": {
                    "summary": ["s0", "s1"],
                    "detail": ("d0", "d1")
                }
            }
        })

    def test_check_init(self):
        # initialization with lists and tuples is OK
        h = self._get_init_checks()
        self.assertEqual(h.checks(), {
            "C0": {
                "S0": {
                    "summary": set(["s0", "s1"]),
                    "detail": set(["d0", "d1"])
                }
            }
        })

    def _get_merged_checks(self):
        # fixture: initial checks merged with overlapping and new entries
        h = self._get_init_checks()
        h.merge(HealthCheckAccumulator({
            "C0": {
                "S0": {
                    "summary": ["s0", "s1", "s2"],
                    "detail": ("d2",)
                },
                "S1": {
                    "summary": ["s0", "s1", "s2"],
                    "detail": ()
                }
            },
            "C1": {
                "S0": {
                    "summary": [],
                    "detail": ("d0", "d1", "d2")
                }
            }
        }))
        return h

    def test_check_merge(self):
        # merging combines and de-duplicates
        h = self._get_merged_checks()
        self.assertEqual(h.checks(), {
            "C0": {
                "S0": {
                    "summary": set(["s0", "s1", "s2"]),
                    "detail": set(["d0", "d1", "d2"])
                },
                "S1": {
                    "summary": set(["s0", "s1", "s2"]),
                    "detail": set([])
                }
            },
            "C1": {
                "S0": {
                    "summary": set([]),
                    "detail": set(["d0", "d1", "d2"])
                }
            }
        })

    def test_check_add_no_change(self):
        # returns false when nothing changes
        h = self._get_merged_checks()

        self.assertFalse(h.add({}))

        self.assertFalse(h.add({
            "C0": {
                "severity": "S0",
                "summary": {"message": "s0"},
                "detail": []
            }
        }))

        self.assertFalse(h.add({
            "C0": {
                "severity": "S0",
                "summary": {"message": "s1"},
                "detail": [{"message": "d1"}]
            }
        }))

        self.assertFalse(h.add({
            "C0": {
                "severity": "S0",
                "summary": {"message": "s0"},
                "detail": [{"message": "d1"}, {"message": "d2"}]
            }
        }))

    def test_check_add_changed(self):
        # new checks report change
        h = self._get_merged_checks()

        # new summary message under an existing severity
        self.assertTrue(h.add({
            "C0": {
                "severity": "S0",
                "summary": {"message": "s3"},
                "detail": []
            }
        }))

        # new detail message under an existing severity
        self.assertTrue(h.add({
            "C0": {
                "severity": "S0",
                "summary": {"message": "s1"},
                "detail": [{"message": "d4"}]
            }
        }))

        # existing messages but under a brand-new severity
        self.assertTrue(h.add({
            "C0": {
                "severity": "S2",
                "summary": {"message": "s0"},
                "detail": [{"message": "d0"}]
            }
        }))

        # brand-new check name
        self.assertTrue(h.add({
            "C2": {
                "severity": "S0",
                "summary": {"message": "s0"},
                "detail": [{"message": "d0"}, {"message": "d1"}]
            }
        }))

        self.assertEqual(h.checks(), {
            "C0": {
                "S0": {
                    "summary": set(["s0", "s1", "s2", "s3"]),
                    "detail": set(["d0", "d1", "d2", "d4"])
                },
                "S1": {
                    "summary": set(["s0", "s1", "s2"]),
                    "detail": set([])
                },
                "S2": {
                    "summary": set(["s0"]),
                    "detail": set(["d0"])
                }
            },
            "C1": {
                "S0": {
                    "summary": set([]),
                    "detail": set(["d0", "d1", "d2"])
                }
            },
            "C2": {
                "S0": {
                    "summary": set(["s0"]),
                    "detail": set(["d0", "d1"])
                }
            }
        })
class HealthHistoryTest(unittest.TestCase):
    """Exercise HealthHistorySlot expiry and flush bookkeeping."""

    def _now(self):
        # return some time truncated at 30 minutes past the hour. this lets us
        # fiddle with time offsets without worrying about accidentally landing
        # on exactly the top of the hour which is the edge of a time slot for
        # tracking health history.
        current = datetime.datetime.utcnow()
        return current.replace(minute=30, second=0, microsecond=0)

    def test_empty_slot(self):
        HealthHistorySlot._now = mock.Mock(return_value=self._now())
        slot = HealthHistorySlot()
        # reports no historical checks
        self.assertEqual(slot.health(), {"checks": {}})
        # an empty slot doesn't need to be flushed
        self.assertFalse(slot.need_flush())

    def test_expires(self):
        now = self._now()
        HealthHistorySlot._now = mock.Mock(return_value=now)
        slot = HealthHistorySlot()
        self.assertFalse(slot.expired())
        # an hour from now it would be expired
        HealthHistorySlot._now = mock.Mock(
            return_value=now + datetime.timedelta(hours=1))
        self.assertTrue(slot.expired())

    def test_need_flush(self):
        now = self._now()
        HealthHistorySlot._now = mock.Mock(return_value=now)
        slot = HealthHistorySlot()
        self.assertFalse(slot.need_flush())
        self.assertTrue(slot.add(dict(checks={
            "C0": {
                "severity": "S0",
                "summary": {"message": "s0"},
                "detail": [{"message": "d0"}]
            }
        })))
        # no flush needed, yet...
        self.assertFalse(slot.need_flush())
        # after persist period time elapses, a flush is needed
        HealthHistorySlot._now = mock.Mock(return_value=now + PERSIST_PERIOD)
        self.assertTrue(slot.need_flush())
        # mark flush resets
        slot.mark_flushed()
        self.assertFalse(slot.need_flush())

    def test_need_flush_edge(self):
        # test needs flush is true because it has expired, not because it has
        # been dirty for the persistence period
        current = datetime.datetime.utcnow()
        now = current.replace(minute=59, second=59, microsecond=0)
        HealthHistorySlot._now = mock.Mock(return_value=now)
        slot = HealthHistorySlot()
        self.assertFalse(slot.expired())
        self.assertFalse(slot.need_flush())
        # now it is dirty, but it doesn't need a flush
        self.assertTrue(slot.add(dict(checks={
            "C0": {
                "severity": "S0",
                "summary": {"message": "s0"},
                "detail": [{"message": "d0"}]
            }
        })))
        self.assertFalse(slot.expired())
        self.assertFalse(slot.need_flush())
        # advance time past the hour so it expires, but not past the persistence
        # period deadline for the last event that set the dirty bit
        self.assertTrue(PERSIST_PERIOD.total_seconds() > 5)
        HealthHistorySlot._now = mock.Mock(
            return_value=now + datetime.timedelta(seconds=5))
        self.assertTrue(slot.expired())
        self.assertTrue(slot.need_flush())
| 8,146 | 28.518116 | 80 | py |
null | ceph-main/src/pybind/mgr/iostat/__init__.py | # flake8: noqa
from .module import Module
| 42 | 13.333333 | 26 | py |
null | ceph-main/src/pybind/mgr/iostat/module.py | from typing import Any
from mgr_module import CLIReadCommand, HandleCommandResult, MgrModule
class Module(MgrModule):
    """Expose cluster-wide IO rates through the ``ceph iostat`` CLI command."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    def self_test(self) -> None:
        """Sanity-check that the mgr 'io_rate' report has the expected shape."""
        rate = self.get('io_rate')
        assert 'pg_stats_delta' in rate
        delta = rate['pg_stats_delta']
        assert 'stamp_delta' in delta
        assert 'stat_sum' in delta
        for field in ('num_read_kb', 'num_write_kb', 'num_write', 'num_read'):
            assert field in delta['stat_sum']

    @CLIReadCommand('iostat', poll=True)
    def iostat(self, width: int = 80, print_header: bool = False) -> HandleCommandResult:
        """
        Get IO rates
        """
        rd_bytes = wr_bytes = total_bytes = 0
        rd_ops = wr_ops = total_ops = 0
        out = ''
        rate = self.get('io_rate')
        # elapsed seconds covered by the pg stats delta; guard against zero
        elapsed = int(float(rate['pg_stats_delta']['stamp_delta']))
        if elapsed > 0:
            stat_sum = rate['pg_stats_delta']['stat_sum']
            # stats are reported in kB; shift to bytes for to_pretty_iec()
            rd_bytes = (stat_sum['num_read_kb'] // elapsed) << 10
            wr_bytes = (stat_sum['num_write_kb'] // elapsed) << 10
            total_bytes = rd_bytes + wr_bytes
            rd_ops = stat_sum['num_read'] // elapsed
            wr_ops = stat_sum['num_write'] // elapsed
            total_ops = rd_ops + wr_ops
        if print_header:
            out += self.get_pretty_header(
                ['Read', 'Write', 'Total', 'Read IOPS', 'Write IOPS', 'Total IOPS'],
                width)
        out += self.get_pretty_row(
            [
                self.to_pretty_iec(rd_bytes) + 'B/s',
                self.to_pretty_iec(wr_bytes) + 'B/s',
                self.to_pretty_iec(total_bytes) + 'B/s',
                str(rd_ops),
                str(wr_ops),
                str(total_ops),
            ],
            width)
        return HandleCommandResult(stdout=out)
| 2,145 | 33.063492 | 89 | py |
null | ceph-main/src/pybind/mgr/k8sevents/README.md | # Testing
## To test the k8sevents module
enable the module with `ceph mgr module enable k8sevents`
check that it's working `ceph k8sevents status`, you should see something like this;
```
[root@ceph-mgr ~]# ceph k8sevents status
Kubernetes
- Hostname : https://localhost:30443
- Namespace: ceph
Tracker Health
- EventProcessor : OK
- CephConfigWatcher : OK
- NamespaceWatcher : OK
Tracked Events
- namespace : 5
- ceph events: 0
```
Now run some commands to generate healthchecks and admin level events;
- ```ceph osd set noout```
- ```ceph osd unset noout```
- ```ceph osd pool create mypool 4 4 replicated```
- ```ceph osd pool delete mypool mypool --yes-i-really-really-mean-it```
In addition to tracking audit, healthchecks and configuration changes, if you have the environment up for >1 hr you should also see an event that shows the cluster's health and configuration overview.
As well as status, you can use k8sevents to see event activity in the target kubernetes namespace
```
[root@rhcs4-3 kube]# ceph k8sevents ls
Last Seen (UTC) Type Count Message Event Object Name
2019/09/20 04:33:00 Normal 1 Pool 'mypool' has been removed from the cluster mgr.ConfigurationChangeql2hj
2019/09/20 04:32:55 Normal 1 Client 'client.admin' issued: ceph osd pool delete mgr.audit.osd_pool_delete_
2019/09/20 04:13:23 Normal 2 Client 'mds.rhcs4-2' issued: ceph osd blacklist mgr.audit.osd_blacklist_
2019/09/20 04:08:28 Normal 1 Ceph log -> event tracking started mgr.k8sevents-moduleq74k7
Total : 4
```
or, focus on the ceph specific events(audit & healthcheck) that are being tracked by the k8sevents module.
```
[root@rhcs4-3 kube]# ceph k8sevents ceph
Last Seen (UTC) Type Count Message Event Object Name
2019/09/20 04:32:55 Normal 1 Client 'client.admin' issued: ceph osd pool delete mgr.audit.osd_pool_delete_
2019/09/20 04:13:23 Normal 2 Client 'mds.rhcs4-2' issued: ceph osd blacklist mgr.audit.osd_blacklist_
Total : 2
```
## Sending events from a standalone Ceph cluster to remote Kubernetes cluster
To test interaction from a standalone ceph cluster to a kubernetes environment, you need to make changes on the kubernetes cluster **and** on one of the mgr hosts.
### kubernetes (minikube)
We need some basic RBAC in place to define a serviceaccount(and token) that we can use to push events into kubernetes. The `rbac_sample.yaml` file provides a quick means to create the required resources. Create them with `kubectl create -f rbac_sample.yaml`
Once the resources are defined inside kubernetes, we need a couple of things copied over to the Ceph mgr's filesystem.
### ceph admin host
We need to run some commands against the cluster, so you'll need access to a ceph admin host. If you don't have a dedicated admin host, you can use a mon or mgr machine. We'll need the root ca.crt of the kubernetes API, and the token associated with the service account we're using to access the kubernetes API.
1. Download/fetch the root ca.crt for the kubernetes cluster (on minikube this can be found at ~/minikube/ca.crt)
2. Copy the ca.crt to your ceph admin host
3. Extract the token from the service account we're going to use
```
kubectl -n ceph get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='ceph-mgr')].data.token}"|base64 -d > mytoken
```
4. Copy the token to your ceph admin host
5. On the ceph admin host, enable the module with `ceph mgr module enable k8sevents`
6. Set up the configuration
```
ceph k8sevents set-access cacrt -i <path to ca.crt file>
ceph k8sevents set-access token -i <path to mytoken>
ceph k8sevents set-config server https://<kubernetes api host>:<api_port>
ceph k8sevents set-config namespace ceph
```
7. Restart the module with `ceph mgr module disable k8sevents && ceph mgr module enable k8sevents`
8. Check state with the `ceph k8sevents status` command
9. Remove the ca.crt and mytoken files from your admin host
To remove the configuration keys used for external kubernetes access, run the following command
```
ceph k8sevents clear-config
```
## Networking
You can use the above approach with a minikube based target from a standalone ceph cluster, but you'll need to have a tunnel/routing defined from the mgr host(s) to the minikube machine to make the kubernetes API accessible to the mgr/k8sevents module. This can just be a simple ssh tunnel.
| 4,591 | 55 | 314 | md |
null | ceph-main/src/pybind/mgr/k8sevents/__init__.py | from .module import Module
| 27 | 13 | 26 | py |
null | ceph-main/src/pybind/mgr/k8sevents/module.py | # Integrate with the kubernetes events API.
# This module sends events to Kubernetes, and also captures/tracks all events
# in the rook-ceph namespace so kubernetes activity like pod restarts,
# imagepulls etc can be seen from within the ceph cluster itself.
#
# To interact with the events API, the mgr service to access needs to be
# granted additional permissions
# e.g. kubectl -n rook-ceph edit clusterrole rook-ceph-mgr-cluster-rules
#
# These are the changes needed;
# - apiGroups:
# - ""
# resources:
# - events
# verbs:
# - create
# - patch
# - list
# - get
# - watch
import os
import re
import sys
import time
import json
import yaml
import errno
import socket
import base64
import logging
import tempfile
import threading
from urllib.parse import urlparse
from datetime import tzinfo, datetime, timedelta
from urllib3.exceptions import MaxRetryError,ProtocolError
from collections import OrderedDict
import rados
from mgr_module import MgrModule, NotifyType
from mgr_util import verify_cacrt, ServerConfigException
# 'queue' was renamed from 'Queue' between python2 and python3; fall back to
# the python2 name when needed, and create the shared event queue either way.
try:
    import queue
except ImportError:
    # python 2.7.5
    import Queue as queue
finally:
    # python 2.7.15 or python3
    event_queue = queue.Queue()
# The kubernetes client is optional at import time: if it is missing we stub
# the names to None and record the fact in kubernetes_imported so the module
# can report a sensible error instead of crashing on import.
try:
    from kubernetes import client, config, watch
    from kubernetes.client.rest import ApiException
except ImportError:
    kubernetes_imported = False
    client = None
    config = None
    watch = None
else:
    kubernetes_imported = True
    # The watch.Watch.stream method can provide event objects that have involved_object = None
    # which causes an exception in the generator. A workaround is discussed for a similar issue
    # in https://github.com/kubernetes-client/python/issues/376 which has been used here
    # pylint: disable=no-member
    from kubernetes.client.models.v1_event import V1Event
    # Replacement setter: substitute a minimal V1ObjectReference when the
    # apiserver hands us an event without an involved_object.
    def local_involved_object(self, involved_object):
        if involved_object is None:
            involved_object = client.V1ObjectReference(api_version="1")
        self._involved_object = involved_object
    V1Event.involved_object = V1Event.involved_object.setter(local_involved_object)
# module-level logger shared by all classes/threads below
log = logging.getLogger(__name__)
# use a simple local class to represent UTC
# datetime pkg modules vary between python2 and 3 and pytz is not available on older
# ceph container images, so taking a pragmatic approach!
class UTC(tzinfo):
    """Minimal concrete tzinfo representing UTC (avoids a pytz dependency)."""

    def utcoffset(self, dt):
        # UTC is, by definition, zero offset from UTC
        return timedelta(0)

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        # UTC never observes daylight saving time
        return timedelta(0)
def text_suffix(num):
    """Return a plural suffix: '' when num is exactly 1, otherwise 's'."""
    return 's' if num != 1 else ''
def create_temp_file(fname, content, suffix=".tmp"):
    """Create a temp file

    Attempt to create a temporary file containing the given content

    Args:
        fname: base name of the file (no directory, no suffix)
        content: text to write, or None to skip creating a file
        suffix: filename suffix appended to fname (default ".tmp")

    Returns:
        str .. full path to the temporary file, or None when content is None

    Raises:
        OSError: problems creating the file
    """
    # FIX: the original fell through to 'return file_name' with the variable
    # unbound when content was None, raising UnboundLocalError; return None
    # explicitly instead.
    if content is None:
        return None

    file_name = os.path.join(tempfile.gettempdir(), fname + suffix)
    try:
        with open(file_name, "w") as f:
            f.write(content)
    except OSError as e:
        raise OSError("Unable to create temporary file : {}".format(str(e)))

    return file_name
class HealthCheck(object):
    """Transform a healthcheck msg into it's component parts"""

    def __init__(self, msg, msg_level):
        # msg examples:
        #   Health check failed: Reduced data availability: 100 pgs inactive (PG_AVAILABILITY)
        #   Health check cleared: OSDMAP_FLAGS (was: nodown flag(s) set)
        #   Health check failed: nodown flag(s) set (OSDMAP_FLAGS)
        self.msg = None
        self.name = None
        self.text = None
        self.valid = msg.lower().startswith('health check')
        if not self.valid:
            # not a healthcheck message - leave msg/name/text as None
            return

        self.msg = msg
        tokens = msg.split()
        if msg_level == 'INF':
            # 'cleared' messages carry the check name right after the prefix
            self.text = ' '.join(tokens[3:])
            self.name = tokens[3]
        else:
            # WRN or ERR: the name is the last token, wrapped in parentheses
            self.text = ' '.join(tokens[3:-1])
            self.name = tokens[-1][1:-1]
class LogEntry(object):
    """Generic 'log' object"""

    # maps the internal msg_type to the 'reason' string used on the
    # corresponding kubernetes event
    reason_map = {
        "audit": "Audit",
        "cluster": "HealthCheck",
        "config": "ClusterChange",
        "heartbeat":"Heartbeat",
        "startup": "Started"
    }
    def __init__(self, source, msg, msg_type, level, tstamp=None):
        """Capture one log record.

        :param source: originator of the message (e.g. 'self', 'config')
        :param msg: raw log message text
        :param msg_type: one of the reason_map keys (audit/cluster/config/...)
        :param level: ceph log level string - 'INF', 'WRN' or 'ERR'
        :param tstamp: optional timestamp associated with the record
        """
        self.source = source
        self.msg = msg
        self.msg_type = msg_type
        self.level = level
        self.tstamp = tstamp
        # parsed HealthCheck object when the message is a healthcheck,
        # otherwise None
        self.healthcheck = None
        if 'health check ' in self.msg.lower():
            self.healthcheck = HealthCheck(self.msg, self.level)
    def __str__(self):
        return "source={}, msg_type={}, msg={}, level={}, tstamp={}".format(self.source,
                                                                            self.msg_type,
                                                                            self.msg,
                                                                            self.level,
                                                                            self.tstamp)
    @property
    def cmd(self):
        """Look at the msg string and extract the command content"""
        # msg looks like 'from=\'client.205306 \' entity=\'client.admin\' cmd=\'[{"prefix": "osd set", "key": "nodown"}]\': finished'
        if self.msg_type != 'audit':
            return None
        else:
            # [:-10] strips the trailing ": finished" suffix; the quote left
            # over (and all other single quotes) are removed so the remaining
            # "cmd=" payload is valid JSON
            _m=self.msg[:-10].replace("\'","").split("cmd=")
            _s='"cmd":{}'.format(_m[1])
            cmds_list = json.loads('{' + _s + '}')['cmd']
            # TODO. Assuming only one command was issued for now
            _c = cmds_list[0]
            # e.g. prefix='osd set', key='nodown' -> 'osd set nodown'
            return "{} {}".format(_c['prefix'], _c.get('key', ''))
    @property
    def event_type(self):
        # kubernetes events only support 'Normal' and 'Warning'
        return 'Normal' if self.level == 'INF' else 'Warning'
    @property
    def event_reason(self):
        return self.reason_map[self.msg_type]
    @property
    def event_name(self):
        # kubernetes object name for the event; audit events embed the issued
        # command so repeats of the same command update a single event
        if self.msg_type == 'heartbeat':
            return 'mgr.Heartbeat'
        elif self.healthcheck:
            return 'mgr.health.{}'.format(self.healthcheck.name)
        elif self.msg_type == 'audit':
            return 'mgr.audit.{}'.format(self.cmd).replace(' ', '_')
        elif self.msg_type == 'config':
            return 'mgr.ConfigurationChange'
        elif self.msg_type == 'startup':
            return "mgr.k8sevents-module"
        else:
            return None
    @property
    def event_entity(self):
        # the client that issued an audited command, e.g. 'client.admin'
        if self.msg_type == 'audit':
            return self.msg.replace("\'","").split('entity=')[1].split(' ')[0]
        else:
            return None
    @property
    def event_msg(self):
        # human readable message placed on the kubernetes event
        if self.msg_type == 'audit':
            return "Client '{}' issued: ceph {}".format(self.event_entity, self.cmd)
        elif self.healthcheck:
            return self.healthcheck.text
        else:
            return self.msg
class BaseThread(threading.Thread):
    # Common base for the module's worker threads.
    # health: human readable status string, updated by subclasses on failure
    health = 'OK'
    # reported: whether a degraded health state has already been surfaced
    reported = False
    # daemon threads so they don't block interpreter shutdown
    daemon = True
def clean_event(event):
    """Normalise a kubernetes event record in place.

    The kube-apiserver can deliver events with missing timestamps or a
    missing count; fill in defaults so downstream display code doesn't have
    to cope with None values.

    :param event: kubernetes V1Event object (modified in place)
    :return: the same event, with first_timestamp, last_timestamp and count set
    """
    if not event.first_timestamp:
        log.error("first_timestamp is empty")
        if event.metadata.creation_timestamp:
            log.error("setting first_timestamp to the creation timestamp")
            event.first_timestamp = event.metadata.creation_timestamp
        else:
            log.error("defaulting event first timestamp to current datetime")
            # FIX: this module imports the datetime *class*
            # ('from datetime import ... datetime ...'), so the original
            # 'datetime.datetime.now()' raised AttributeError here
            event.first_timestamp = datetime.now()

    if not event.last_timestamp:
        log.error("setting event last timestamp to {}".format(event.first_timestamp))
        event.last_timestamp = event.first_timestamp

    if not event.count:
        event.count = 1

    return event
class NamespaceWatcher(BaseThread):
    """Watch events in a given namespace

    Using the watch package we can listen to event traffic in the namespace to
    get an idea of what kubernetes related events surround the ceph cluster. The
    thing to bear in mind is that events have a TTL enforced by the kube-apiserver
    so this stream will only really show activity inside this retention window.
    """
    def __init__(self, api_client_config, namespace=None):
        """
        :param api_client_config: optional kubernetes ApiClient; when None the
               default in-cluster/kubeconfig client is used
        :param namespace: namespace whose events should be tracked
        """
        super(NamespaceWatcher, self).__init__()

        if api_client_config:
            self.api = client.CoreV1Api(api_client_config)
        else:
            self.api = client.CoreV1Api()

        self.namespace = namespace
        # event name -> cleaned V1Event, insertion ordered
        self.events = OrderedDict()
        # guards self.events against concurrent reader access
        self.lock = threading.Lock()
        self.active = None
        # resource_version from the last list call, used to resume the watch
        self.resource_version = None

    def fetch(self):
        """(Re)load the full event list for the namespace into the cache."""
        # clear the cache on every call to fetch
        self.events.clear()
        try:
            resp = self.api.list_namespaced_event(self.namespace)
        # TODO - Perhaps test for auth problem to be more specific in the except clause?
        # FIX: narrowed from a bare 'except:' which would also swallow
        # SystemExit/KeyboardInterrupt
        except Exception:
            self.active = False
            self.health = "Unable to access events API (list_namespaced_event call failed)"
            log.warning(self.health)
        else:
            self.active = True
            self.resource_version = resp.metadata.resource_version

            for item in resp.items:
                self.events[item.metadata.name] = clean_event(item)
            log.info('Added {} events'.format(len(resp.items)))

    def run(self):
        """Thread body: prime the cache, then stream event changes forever."""
        self.fetch()

        func = getattr(self.api, "list_namespaced_event")

        if self.active:
            log.info("Namespace event watcher started")
            while True:

                try:
                    w = watch.Watch()
                    # execute generator to continually watch resource for changes
                    for item in w.stream(func, namespace=self.namespace, resource_version=self.resource_version, watch=True):
                        obj = item['object']
                        with self.lock:
                            if item['type'] in ['ADDED', 'MODIFIED']:
                                self.events[obj.metadata.name] = clean_event(obj)

                            elif item['type'] == 'DELETED':
                                del self.events[obj.metadata.name]

                # TODO test the exception for auth problem (403?)

                # Attribute error is generated when urllib3 on the system is old and doesn't have a
                # read_chunked method
                except AttributeError as e:
                    self.health = ("Error: Unable to 'watch' events API in namespace '{}' - "
                                   "urllib3 too old? ({})".format(self.namespace, e))
                    self.active = False
                    log.warning(self.health)
                    break

                except ApiException as e:
                    # refresh the resource_version & watcher
                    log.warning("API exception caught in watcher ({})".format(e))
                    log.warning("Restarting namespace watcher")
                    self.fetch()

                except ProtocolError as e:
                    log.warning("Namespace watcher hit protocolerror ({}) - restarting".format(e))
                    self.fetch()

                except Exception:
                    self.health = "{} Exception at {}".format(
                        sys.exc_info()[0].__name__,
                        datetime.strftime(datetime.now(),"%Y/%m/%d %H:%M:%S")
                    )
                    log.exception(self.health)
                    self.active = False
                    break

        log.warning("Namespace event watcher stopped")
class KubernetesEvent(object):
    """Wrap a LogEntry as a kubernetes event and manage its create/patch
    lifecycle against the events API, tracking the last HTTP status in
    api_status."""

    def __init__(self, log_entry, unique_name=True, api_client_config=None, namespace=None):
        """
        :param log_entry: LogEntry providing name/message/type/reason
        :param unique_name: True -> fixed event name (create/patch in place);
               False -> kubernetes generates a unique suffix per event
        :param api_client_config: optional kubernetes ApiClient
        :param namespace: namespace the event is written to
        """
        if api_client_config:
            self.api = client.CoreV1Api(api_client_config)
        else:
            self.api = client.CoreV1Api()

        self.namespace = namespace

        self.event_name = log_entry.event_name
        self.message = log_entry.event_msg
        self.event_type = log_entry.event_type
        self.event_reason = log_entry.event_reason
        self.unique_name = unique_name

        # reported 'source' host for the event; falls back to UNKNOWN when
        # neither env var is set
        self.host = os.environ.get('NODE_NAME', os.environ.get('HOSTNAME', 'UNKNOWN'))

        # HTTP-style status of the last API interaction (200 = OK)
        self.api_status = 200
        self.count = 1
        self.first_timestamp = None
        self.last_timestamp = None

    @property
    def type(self):
        """provide a type property matching a V1Event object"""
        return self.event_type

    @property
    def event_body(self):
        """Build the V1Event payload from the current state of this object."""
        if self.unique_name:
            obj_meta = client.V1ObjectMeta(name="{}".format(self.event_name))
        else:
            obj_meta = client.V1ObjectMeta(generate_name="{}".format(self.event_name))

        # field_path is needed to prevent problems in the namespacewatcher when
        # deleted event are received
        obj_ref = client.V1ObjectReference(kind="CephCluster",
                                           field_path='spec.containers{mgr}',
                                           name=self.event_name,
                                           namespace=self.namespace)

        event_source = client.V1EventSource(component="ceph-mgr",
                                            host=self.host)
        return client.V1Event(
            involved_object=obj_ref,
            metadata=obj_meta,
            message=self.message,
            count=self.count,
            type=self.event_type,
            reason=self.event_reason,
            source=event_source,
            first_timestamp=self.first_timestamp,
            last_timestamp=self.last_timestamp
        )

    def write(self):
        """Create the event in kubernetes; on a 409 conflict, re-read the
        existing event and patch it instead. Sets api_status accordingly."""
        now=datetime.now(UTC())

        self.first_timestamp = now
        self.last_timestamp = now

        try:
            self.api.create_namespaced_event(self.namespace, self.event_body)
        except (OSError, ProtocolError):
            # unable to reach to the API server
            log.error("Unable to reach API server")
            self.api_status = 400
        except MaxRetryError:
            # k8s config has not be defined properly
            log.error("multiple attempts to connect to the API have failed")
            self.api_status = 403 # Forbidden
        except ApiException as e:
            log.debug("event.write status:{}".format(e.status))
            self.api_status = e.status
            if e.status == 409:
                log.debug("attempting event update for an existing event")
                # 409 means the event is there already, so read it back (v1Event object returned)
                # this could happen if the event has been created, and then the k8sevent module
                # disabled and reenabled - i.e. the internal event tracking no longer matches k8s
                response = self.api.read_namespaced_event(self.event_name, self.namespace)
                #
                # response looks like
                #
                # {'action': None,
                #  'api_version': 'v1',
                #  'count': 1,
                #  'event_time': None,
                #  'first_timestamp': datetime.datetime(2019, 7, 18, 5, 24, 59, tzinfo=tzlocal()),
                #  'involved_object': {'api_version': None,
                #                      'field_path': None,
                #                      'kind': 'CephCluster',
                #                      'name': 'ceph-mgr.k8sevent-module',
                #                      'namespace': 'rook-ceph',
                #                      'resource_version': None,
                #                      'uid': None},
                #  'kind': 'Event',
                #  'last_timestamp': datetime.datetime(2019, 7, 18, 5, 24, 59, tzinfo=tzlocal()),
                #  'message': 'Ceph log -> event tracking started',
                #  'metadata': {'annotations': None,
                #               'cluster_name': None,
                #               'creation_timestamp': datetime.datetime(2019, 7, 18, 5, 24, 59, tzinfo=tzlocal()),
                #               'deletion_grace_period_seconds': None,
                #               'deletion_timestamp': None,
                #               'finalizers': None,
                #               'generate_name': 'ceph-mgr.k8sevent-module',
                #               'generation': None,
                #               'initializers': None,
                #               'labels': None,
                #               'name': 'ceph-mgr.k8sevent-module5z7kq',
                #               'namespace': 'rook-ceph',
                #               'owner_references': None,
                #               'resource_version': '1195832',
                #               'self_link': '/api/v1/namespaces/rook-ceph/events/ceph-mgr.k8sevent-module5z7kq',
                #               'uid': '62fde5f1-a91c-11e9-9c80-6cde63a9debf'},
                #  'reason': 'Started',
                #  'related': None,
                #  'reporting_component': '',
                #  'reporting_instance': '',
                #  'series': None,
                #  'source': {'component': 'ceph-mgr', 'host': 'minikube'},
                #  'type': 'Normal'}

                # conflict event already exists
                # read it
                # update : count and last_timestamp and msg

                self.count = response.count + 1
                self.first_timestamp = response.first_timestamp
                try:
                    self.api.patch_namespaced_event(self.event_name, self.namespace, self.event_body)
                except ApiException as e:
                    log.error("event.patch failed for {} with status code:{}".format(self.event_name, e.status))
                    self.api_status = e.status
                else:
                    log.debug("event {} patched".format(self.event_name))
                    self.api_status = 200

        else:
            log.debug("event {} created successfully".format(self.event_name))
            self.api_status = 200

    @property
    def api_success(self):
        # True when the last API call completed with a 200
        return self.api_status == 200

    def update(self, log_entry):
        """Refresh message/count on an existing event via patch; if the event
        has been pruned by the apiserver (404), recreate it."""
        self.message = log_entry.event_msg
        self.event_type = log_entry.event_type
        self.last_timestamp = datetime.now(UTC())
        self.count += 1
        log.debug("performing event update for {}".format(self.event_name))

        try:
            self.api.patch_namespaced_event(self.event_name, self.namespace, self.event_body)
        except ApiException as e:
            log.error("event patch call failed: {}".format(e.status))
            if e.status == 404:
                # tried to patch, but hit a 404. The event's TTL must have been reached, and
                # pruned by the kube-apiserver
                log.debug("event not found, so attempting to create it")
                try:
                    self.api.create_namespaced_event(self.namespace, self.event_body)
                except ApiException as e:
                    log.error("unable to create the event: {}".format(e.status))
                    self.api_status = e.status
                else:
                    log.debug("event {} created successfully".format(self.event_name))
                    self.api_status = 200
        else:
            log.debug("event {} updated".format(self.event_name))
            self.api_status = 200
class EventProcessor(BaseThread):
    """Handle a global queue used to track events we want to send/update to kubernetes"""

    # flag checked by run(); set False externally to stop the thread
    can_run = True

    def __init__(self, config_watcher, event_retention_days, api_client_config, namespace):
        """
        :param config_watcher: CephConfigWatcher used to render heartbeat text
        :param event_retention_days: how long cached events are kept
        :param api_client_config: optional kubernetes ApiClient
        :param namespace: namespace events are written to
        """
        super(EventProcessor, self).__init__()

        # cache of unique events we have written: event_name -> KubernetesEvent
        self.events = dict()
        self.config_watcher = config_watcher
        self.event_retention_days = event_retention_days
        self.api_client_config = api_client_config
        self.namespace = namespace

    def startup(self):
        """Log an event to show we're active"""
        event = KubernetesEvent(
            LogEntry(
                source='self',
                msg='Ceph log -> event tracking started',
                msg_type='startup',
                level='INF',
                tstamp=None
            ),
            unique_name=False,
            api_client_config=self.api_client_config,
            namespace=self.namespace
        )

        event.write()
        return event.api_success

    @property
    def ok(self):
        # readiness check: True when the startup event could be written
        return self.startup()

    def prune_events(self):
        """Drop cached events older than the retention window."""
        log.debug("prune_events - looking for old events to remove from cache")
        oldest = datetime.now(UTC()) - timedelta(days=self.event_retention_days)
        local_events = dict(self.events)

        # iterate oldest-first; once we hit an event inside the window, all
        # later ones are newer too, so we can stop
        for event_name in sorted(local_events,
                                 key = lambda name: local_events[name].last_timestamp):
            event = local_events[event_name]
            if event.last_timestamp >= oldest:
                break
            else:
                # drop this event
                log.debug("prune_events - removing old event : {}".format(event_name))
                del self.events[event_name]

    def process(self, log_object):
        """Translate one LogEntry into a kubernetes event create/update."""
        log.debug("log entry being processed : {}".format(str(log_object)))

        event_out = False
        unique_name = True

        if log_object.msg_type == 'audit':
            # audit traffic : operator commands
            if log_object.msg.endswith('finished'):
                log.debug("K8sevents received command finished msg")
                event_out = True
            else:
                # NO OP - ignoring 'dispatch' log records
                return

        elif log_object.msg_type == 'cluster':
            # cluster messages : health checks
            if log_object.event_name:
                event_out = True

        elif log_object.msg_type == 'config':
            # configuration checker messages
            event_out = True
            unique_name = False

        elif log_object.msg_type == 'heartbeat':
            # hourly health message summary from Ceph
            event_out = True
            unique_name = False
            # replace the raw heartbeat msg with a cluster overview string
            log_object.msg = str(self.config_watcher)

        else:
            log.warning("K8sevents received unknown msg_type - {}".format(log_object.msg_type))

        if event_out:
            log.debug("k8sevents sending event to kubernetes")
            # we don't cache non-unique events like heartbeats or config changes
            if not unique_name or log_object.event_name not in self.events.keys():
                event = KubernetesEvent(log_entry=log_object,
                                        unique_name=unique_name,
                                        api_client_config=self.api_client_config,
                                        namespace=self.namespace)
                event.write()
                log.debug("event(unique={}) creation ended : {}".format(unique_name, event.api_status))
                if event.api_success and unique_name:
                    self.events[log_object.event_name] = event
            else:
                event = self.events[log_object.event_name]
                event.update(log_object)
                log.debug("event update ended : {}".format(event.api_status))
            self.prune_events()
        else:
            log.debug("K8sevents ignored message : {}".format(log_object.msg))

    def run(self):
        """Thread body: drain the global event_queue until stopped or an
        unexpected exception occurs."""
        log.info("Ceph event processing thread started, "
                 "event retention set to {} days".format(self.event_retention_days))

        while True:

            try:
                log_object = event_queue.get(block=False)
            except queue.Empty:
                pass
            else:
                try:
                    self.process(log_object)
                except Exception:
                    # record the failure in self.health so status reporting
                    # can surface it, then stop this thread
                    self.health = "{} Exception at {}".format(
                        sys.exc_info()[0].__name__,
                        datetime.strftime(datetime.now(),"%Y/%m/%d %H:%M:%S")
                    )
                    log.exception(self.health)
                    break

            if not self.can_run:
                break

            time.sleep(0.5)

        log.warning("Ceph event processing thread stopped")
class ListDiff(object):
    """Compare two iterables as sets to report membership changes."""

    def __init__(self, before, after):
        self.before = set(before)
        self.after = set(after)

    @property
    def removed(self):
        # members present before, gone now
        return list(self.before.difference(self.after))

    @property
    def added(self):
        # members present now that weren't before
        return list(self.after.difference(self.before))

    @property
    def is_equal(self):
        return self.before == self.after
class CephConfigWatcher(BaseThread):
"""Detect configuration changes within the cluster and generate human readable events"""
    def __init__(self, mgr):
        """
        :param mgr: the MgrModule instance; must expose config_check_secs
        """
        super(CephConfigWatcher, self).__init__()
        self.mgr = mgr
        # snapshots of the last observed cluster state, used for change
        # detection on the next check cycle
        self.server_map = dict()
        self.osd_map = dict()
        self.pool_map = dict()
        self.service_map = dict()

        # interval between configuration checks (module option)
        self.config_check_secs = mgr.config_check_secs
@property
def raw_capacity(self):
# Note. if the osd's are not online the capacity field will be 0
return sum([self.osd_map[osd]['capacity'] for osd in self.osd_map])
@property
def num_servers(self):
return len(self.server_map.keys())
@property
def num_osds(self):
return len(self.osd_map.keys())
@property
def num_pools(self):
return len(self.pool_map.keys())
def __str__(self):
s = ''
s += "{} : {:>3} host{}, {} pool{}, {} OSDs. Raw Capacity {}B".format(
json.loads(self.mgr.get('health')['json'])['status'],
self.num_servers,
text_suffix(self.num_servers),
self.num_pools,
text_suffix(self.num_pools),
self.num_osds,
MgrModule.to_pretty_iec(self.raw_capacity))
return s
def fetch_servers(self):
"""Return a server summary, and service summary"""
servers = self.mgr.list_servers()
server_map = dict() # host -> services
service_map = dict() # service -> host
for server_info in servers:
services = dict()
for svc in server_info['services']:
if svc.get('type') in services.keys():
services[svc.get('type')].append(svc.get('id'))
else:
services[svc.get('type')] = list([svc.get('id')])
# maintain the service xref map service -> host and version
service_map[(svc.get('type'), str(svc.get('id')))] = server_info.get('hostname', '')
server_map[server_info.get('hostname')] = services
return server_map, service_map
def fetch_pools(self):
interesting = ["type", "size", "min_size"]
# pools = [{'pool': 1, 'pool_name': 'replicapool', 'flags': 1, 'flags_names': 'hashpspool',
# 'type': 1, 'size': 3, 'min_size': 1, 'crush_rule': 1, 'object_hash': 2, 'pg_autoscale_mode': 'warn',
# 'pg_num': 100, 'pg_placement_num': 100, 'pg_placement_num_target': 100, 'pg_num_target': 100, 'pg_num_pending': 100,
# 'last_pg_merge_meta': {'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_pgid': '0.0',
# 'source_version': "0'0", 'target_version': "0'0"}, 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0,
# 'pool_snaps': [], 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1,
# 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0,
# 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000,
# 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0,
# 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0,
# 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0,
# 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0,
# 'expected_num_objects': 0, 'fast_read': False, 'options': {}, 'application_metadata': {'rbd': {}},
# 'create_time': '2019-08-02 02:23:01.618519', 'last_change': '19', 'last_force_op_resend': '0',
# 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'removed_snaps': '[]'}]
pools = self.mgr.get('osd_map')['pools']
pool_map = dict()
for pool in pools:
pool_map[pool.get('pool_name')] = {k:pool.get(k) for k in interesting}
return pool_map
def fetch_osd_map(self, service_map):
"""Create an osd map"""
stats = self.mgr.get('osd_stats')
osd_map = dict()
devices = self.mgr.get('osd_map_crush')['devices']
for dev in devices:
osd_id = str(dev['id'])
osd_map[osd_id] = dict(
deviceclass=dev.get('class'),
capacity=0,
hostname=service_map['osd', osd_id]
)
for osd_stat in stats['osd_stats']:
osd_id = str(osd_stat.get('osd'))
osd_map[osd_id]['capacity'] = osd_stat['statfs']['total']
return osd_map
def push_events(self, changes):
"""Add config change to the global queue to generate an event in kubernetes"""
log.debug("{} events will be generated")
for change in changes:
event_queue.put(change)
def _generate_config_logentry(self, msg):
return LogEntry(
source="config",
msg_type="config",
msg=msg,
level='INF',
tstamp=None
)
def _check_hosts(self, server_map):
log.debug("K8sevents checking host membership")
changes = list()
servers = ListDiff(self.server_map.keys(), server_map.keys())
if servers.is_equal:
# no hosts have been added or removed
pass
else:
# host changes detected, find out what
host_msg = "Host '{}' has been {} the cluster"
for new_server in servers.added:
changes.append(self._generate_config_logentry(
msg=host_msg.format(new_server, 'added to'))
)
for removed_server in servers.removed:
changes.append(self._generate_config_logentry(
msg=host_msg.format(removed_server, 'removed from'))
)
return changes
    def _check_osds(self,server_map, osd_map):
        """Return LogEntry objects for OSDs added to / removed from any host.

        :param server_map: newly observed hostname -> services map
        :param osd_map: osd_id -> {deviceclass, capacity, hostname} map
            (as built by fetch_osd_map)
        """
        log.debug("K8sevents checking OSD configuration")
        changes = list()
        # flatten the per-host osd id lists, before and after
        before_osds = list()
        for svr in self.server_map:
            before_osds.extend(self.server_map[svr].get('osd',[]))
        after_osds = list()
        for svr in server_map:
            after_osds.extend(server_map[svr].get('osd',[]))
        if set(before_osds) == set(after_osds):
            # no change in osd id's
            pass
        else:
            # osd changes detected
            osd_msg = "Ceph OSD '{}' ({} @ {}B) has been {} host {}"
            osds = ListDiff(before_osds, after_osds)
            for new_osd in osds.added:
                changes.append(self._generate_config_logentry(
                    msg=osd_msg.format(
                        new_osd,
                        osd_map[new_osd]['deviceclass'],
                        MgrModule.to_pretty_iec(osd_map[new_osd]['capacity']),
                        'added to',
                        osd_map[new_osd]['hostname']))
                )
            for removed_osd in osds.removed:
                changes.append(self._generate_config_logentry(
                    msg=osd_msg.format(
                        removed_osd,
                        osd_map[removed_osd]['deviceclass'],
                        MgrModule.to_pretty_iec(osd_map[removed_osd]['capacity']),
                        'removed from',
                        osd_map[removed_osd]['hostname']))
                )
        return changes
    def _check_pools(self, pool_map):
        """Return LogEntry objects for pools added/removed or reconfigured.

        Detects pool add/remove, 'size' changes (WRN when reduced, INF when
        increased) and 'min_size' changes (always WRN).

        :param pool_map: newly observed pool_name -> settings map to compare
            with self.pool_map
        """
        changes = list()
        log.debug("K8sevents checking pool configurations")
        if self.pool_map.keys() == pool_map.keys():
            # no pools added/removed
            pass
        else:
            # Pool changes
            pools = ListDiff(self.pool_map.keys(), pool_map.keys())
            pool_msg = "Pool '{}' has been {} the cluster"
            for new_pool in pools.added:
                changes.append(self._generate_config_logentry(
                    msg=pool_msg.format(new_pool, 'added to'))
                )
            for removed_pool in pools.removed:
                changes.append(self._generate_config_logentry(
                    msg=pool_msg.format(removed_pool, 'removed from'))
                )
        # check pool configuration changes
        for pool_name in pool_map:
            if not self.pool_map.get(pool_name, dict()):
                # pool didn't exist before so just skip the checks
                continue
            if pool_map[pool_name] == self.pool_map[pool_name]:
                # no changes - dicts match in key and value
                continue
            else:
                # determine the change and add it to the change list
                size_diff = pool_map[pool_name]['size'] - self.pool_map[pool_name]['size']
                if size_diff != 0:
                    if size_diff < 0:
                        msg = "Data protection level of pool '{}' reduced to {} copies".format(pool_name,
                                                                                               pool_map[pool_name]['size'])
                        level = 'WRN'
                    else:
                        msg = "Data protection level of pool '{}' increased to {} copies".format(pool_name,
                                                                                                 pool_map[pool_name]['size'])
                        level = 'INF'
                    changes.append(LogEntry(source="config",
                                            msg_type="config",
                                            msg=msg,
                                            level=level,
                                            tstamp=None)
                    )
                if pool_map[pool_name]['min_size'] != self.pool_map[pool_name]['min_size']:
                    changes.append(LogEntry(source="config",
                                            msg_type="config",
                                            msg="Minimum acceptable number of replicas in pool '{}' has changed".format(pool_name),
                                            level='WRN',
                                            tstamp=None)
                    )
        return changes
def get_changes(self, server_map, osd_map, pool_map):
"""Detect changes in maps between current observation and the last"""
changes = list()
changes.extend(self._check_hosts(server_map))
changes.extend(self._check_osds(server_map, osd_map))
changes.extend(self._check_pools(pool_map))
# FUTURE
# Could generate an event if a ceph daemon has moved hosts
# (assumes the ceph metadata host information is valid though!)
return changes
    def run(self):
        """Main watcher loop: poll cluster maps and emit config-change events.

        Takes an initial snapshot of servers/pools/osds, then every
        config_check_secs re-fetches the maps, diffs them against the last
        observation and queues a LogEntry per change.  If a pass takes longer
        than the interval, the interval is doubled.  Any unhandled exception
        records self.health and stops the thread.
        """
        log.info("Ceph configuration watcher started, interval set to {}s".format(self.config_check_secs))
        # initial snapshot used as the baseline for the first diff
        self.server_map, self.service_map = self.fetch_servers()
        self.pool_map = self.fetch_pools()
        self.osd_map = self.fetch_osd_map(self.service_map)
        while True:
            try:
                start_time = time.time()
                server_map, service_map = self.fetch_servers()
                pool_map = self.fetch_pools()
                osd_map = self.fetch_osd_map(service_map)
                changes = self.get_changes(server_map, osd_map, pool_map)
                if changes:
                    self.push_events(changes)
                # new observation becomes the baseline for the next pass
                self.osd_map = osd_map
                self.pool_map = pool_map
                self.server_map = server_map
                self.service_map = service_map
                checks_duration = int(time.time() - start_time)
                # check that the time it took to run the checks fits within the
                # interval, and if not extend the interval and emit a log message
                # to show that the runtime for the checks exceeded the desired
                # interval
                if checks_duration > self.config_check_secs:
                    new_interval = self.config_check_secs * 2
                    log.warning("K8sevents check interval warning. "
                                "Current checks took {}s, interval was {}s. "
                                "Increasing interval to {}s".format(int(checks_duration),
                                                                    self.config_check_secs,
                                                                    new_interval))
                    self.config_check_secs = new_interval
                time.sleep(self.config_check_secs)
            except Exception:
                # record the failure for the 'k8sevents status' health output
                self.health = "{} Exception at {}".format(
                    sys.exc_info()[0].__name__,
                    datetime.strftime(datetime.now(),"%Y/%m/%d %H:%M:%S")
                )
                log.exception(self.health)
                break
        log.warning("Ceph configuration watcher stopped")
class Module(MgrModule):
COMMANDS = [
{
"cmd": "k8sevents status",
"desc": "Show the status of the data gathering threads",
"perm": "r"
},
{
"cmd": "k8sevents ls",
"desc": "List all current Kuberenetes events from the Ceph namespace",
"perm": "r"
},
{
"cmd": "k8sevents ceph",
"desc": "List Ceph events tracked & sent to the kubernetes cluster",
"perm": "r"
},
{
"cmd": "k8sevents set-access name=key,type=CephString",
"desc": "Set kubernetes access credentials. <key> must be cacrt or token and use -i <filename> syntax (e.g., ceph k8sevents set-access cacrt -i /root/ca.crt).",
"perm": "rw"
},
{
"cmd": "k8sevents set-config name=key,type=CephString name=value,type=CephString",
"desc": "Set kubernetes config paramters. <key> must be server or namespace (e.g., ceph k8sevents set-config server https://localhost:30433).",
"perm": "rw"
},
{
"cmd": "k8sevents clear-config",
"desc": "Clear external kubernetes configuration settings",
"perm": "rw"
},
]
MODULE_OPTIONS = [
{'name': 'config_check_secs',
'type': 'int',
'default': 10,
'min': 10,
'desc': "interval (secs) to check for cluster configuration changes"},
{'name': 'ceph_event_retention_days',
'type': 'int',
'default': 7,
'desc': "Days to hold ceph event information within local cache"}
]
NOTIFY_TYPES = [NotifyType.clog]
    def __init__(self, *args, **kwargs):
        """Initialise module state; attributes are set before super().__init__
        (presumably so they exist if the base class triggers callbacks early -
        TODO confirm against MgrModule behaviour)."""
        self.run = True
        # POD_NAME is set when running inside a kubernetes pod (rook)
        self.kubernetes_control = 'POD_NAME' in os.environ
        # worker threads, started by serve()
        self.event_processor = None
        self.config_watcher = None
        self.ns_watcher = None
        self.trackers = list()
        # set when startup fails; reported by handle_command
        self.error_msg = None
        self._api_client_config = None
        self._namespace = None
        # Declare the module options we accept
        self.config_check_secs = None
        self.ceph_event_retention_days = None
        # external-cluster access settings, loaded from the KV store
        self.k8s_config = dict(
            cacrt = None,
            token = None,
            server = None,
            namespace = None
        )
        super(Module, self).__init__(*args, **kwargs)
def k8s_ready(self):
"""Validate the k8s_config dict
Returns:
- bool .... indicating whether the config is ready to use
- string .. variables that need to be defined before the module will function
"""
missing = list()
ready = True
for k in self.k8s_config:
if not self.k8s_config[k]:
missing.append(k)
ready = False
return ready, missing
def config_notify(self):
"""Apply runtime module options, and defaults from the modules KV store"""
self.log.debug("applying runtime module option settings")
for opt in self.MODULE_OPTIONS:
setattr(self,
opt['name'],
self.get_module_option(opt['name']))
if not self.kubernetes_control:
# Populate the config
self.log.debug("loading config from KV store")
for k in self.k8s_config:
self.k8s_config[k] = self.get_store(k, default=None)
def fetch_events(self, limit=None):
"""Interface to expose current events to another mgr module"""
# FUTURE: Implement this to provide k8s events to the dashboard?
raise NotImplementedError
    def process_clog(self, log_message):
        """Add log message to the event queue

        :param log_message: dict from the cluster log (audit/cluster channels)
        """
        required_fields = ['channel', 'message', 'priority', 'stamp']
        _message_attrs = log_message.keys()
        if all(_field in _message_attrs for _field in required_fields):
            self.log.debug("clog entry received - adding to the queue")
            # periodic "overall HEALTH..." summaries are treated as heartbeats,
            # everything else keeps its originating channel as the type
            if log_message.get('message').startswith('overall HEALTH'):
                m_type = 'heartbeat'
            else:
                m_type = log_message.get('channel')
            event_queue.put(
                LogEntry(
                    source='log',
                    msg_type=m_type,
                    msg=log_message.get('message'),
                    # strip the first/last char of the priority string
                    # (presumably '[INF]' -> 'INF' - TODO confirm clog format)
                    level=log_message.get('priority')[1:-1],
                    tstamp=log_message.get('stamp')
                )
            )
        else:
            self.log.warning("Unexpected clog message format received - skipped: {}".format(log_message))
def notify(self, notify_type: NotifyType, notify_id):
"""
Called by the ceph-mgr service to notify the Python plugin
that new state is available.
:param notify_type: string indicating what kind of notification,
such as osd_map, mon_map, fs_map, mon_status,
health, pg_summary, command, service_map
:param notify_id: string (may be empty) that optionally specifies
which entity is being notified about. With
"command" notifications this is set to the tag
``from send_command``.
"""
# only interested in cluster log (clog) messages for now
if notify_type == NotifyType.clog:
self.log.debug("received a clog entry from mgr.notify")
if isinstance(notify_id, dict):
# create a log object to process
self.process_clog(notify_id)
else:
self.log.warning("Expected a 'dict' log record format, received {}".format(type(notify_type)))
def _show_events(self, events):
max_msg_length = max([len(events[k].message) for k in events])
fmt = "{:<20} {:<8} {:>5} {:<" + str(max_msg_length) + "} {}\n"
s = fmt.format("Last Seen (UTC)", "Type", "Count", "Message", "Event Object Name")
for event_name in sorted(events,
key = lambda name: events[name].last_timestamp,
reverse=True):
event = events[event_name]
s += fmt.format(
datetime.strftime(event.last_timestamp,"%Y/%m/%d %H:%M:%S"),
str(event.type),
str(event.count),
str(event.message),
str(event_name)
)
s += "Total : {:>3}\n".format(len(events))
return s
def show_events(self, events):
"""Show events we're holding from the ceph namespace - most recent 1st"""
if len(events):
return 0, "", self._show_events(events)
else:
return 0, "", "No events emitted yet, local cache is empty"
def show_status(self):
s = "Kubernetes\n"
s += "- Hostname : {}\n".format(self.k8s_config['server'])
s += "- Namespace : {}\n".format(self._namespace)
s += "Tracker Health\n"
for t in self.trackers:
s += "- {:<20} : {}\n".format(t.__class__.__name__, t.health)
s += "Tracked Events\n"
s += "- namespace : {:>3}\n".format(len(self.ns_watcher.events))
s += "- ceph events : {:>3}\n".format(len(self.event_processor.events))
return 0, "", s
def _valid_server(self, server):
# must be a valid server url format
server = server.strip()
res = urlparse(server)
port = res.netloc.split(":")[-1]
if res.scheme != 'https':
return False, "Server URL must use https"
elif not res.hostname:
return False, "Invalid server URL format"
elif res.hostname:
try:
socket.gethostbyname(res.hostname)
except socket.gaierror:
return False, "Unresolvable server URL"
if not port.isdigit():
return False, "Server URL must end in a port number"
return True, ""
def _valid_cacrt(self, cacrt_data):
"""use mgr_util.verify_cacrt to validate the CA file"""
cacrt_fname = create_temp_file("ca_file", cacrt_data)
try:
verify_cacrt(cacrt_fname)
except ServerConfigException as e:
return False, "Invalid CA certificate: {}".format(str(e))
else:
return True, ""
def _valid_token(self, token_data):
"""basic checks on the token"""
if not token_data:
return False, "Token file is empty"
pattern = re.compile(r"[a-zA-Z0-9\-\.\_]+$")
if not pattern.match(token_data):
return False, "Token contains invalid characters"
return True, ""
def _valid_namespace(self, namespace):
# Simple check - name must be a string <= 253 in length, alphanumeric with '.' and '-' symbols
if len(namespace) > 253:
return False, "Name too long"
if namespace.isdigit():
return False, "Invalid name - must be alphanumeric"
pattern = re.compile(r"^[a-z][a-z0-9\-\.]+$")
if not pattern.match(namespace):
return False, "Invalid characters in the name"
return True, ""
def _config_set(self, key, val):
"""Attempt to validate the content, then save to KV store"""
val = val.rstrip() # remove any trailing whitespace/newline
try:
checker = getattr(self, "_valid_" + key)
except AttributeError:
# no checker available, just let it pass
self.log.warning("Unable to validate '{}' parameter - checker not implemented".format(key))
valid = True
else:
valid, reason = checker(val)
if valid:
self.set_store(key, val)
self.log.info("Updated config KV Store item: " + key)
return 0, "", "Config updated for parameter '{}'".format(key)
else:
return -22, "", "Invalid value for '{}' :{}".format(key, reason)
def clear_config_settings(self):
for k in self.k8s_config:
self.set_store(k, None)
return 0,"","{} configuration keys removed".format(len(self.k8s_config.keys()))
    def handle_command(self, inbuf, cmd):
        """Dispatch the 'k8sevents ...' CLI commands.

        set-access / set-config / clear-config are always allowed (they are
        how a broken config gets fixed); the query commands are rejected with
        ENODATA while the module failed to start (self.error_msg set).

        :param inbuf: file content passed with -i (used by set-access)
        :param cmd: parsed command dict (prefix, plus key/value fields)
        :return: (rc, stdout, stderr) tuple
        """
        access_options = ['cacrt', 'token']
        config_options = ['server', 'namespace']
        if cmd['prefix'] == 'k8sevents clear-config':
            return self.clear_config_settings()
        if cmd['prefix'] == 'k8sevents set-access':
            if cmd['key'] not in access_options:
                return -errno.EINVAL, "", "Unknown access option. Must be one of; {}".format(','.join(access_options))
            if inbuf:
                return self._config_set(cmd['key'], inbuf)
            else:
                return -errno.EINVAL, "", "Command must specify -i <filename>"
        if cmd['prefix'] == 'k8sevents set-config':
            if cmd['key'] not in config_options:
                return -errno.EINVAL, "", "Unknown config option. Must be one of; {}".format(','.join(config_options))
            return self._config_set(cmd['key'], cmd['value'])
        # At this point the command is trying to interact with k8sevents, so intercept if the configuration is
        # not ready
        if self.error_msg:
            _msg = "k8sevents unavailable: " + self.error_msg
            ready, _ = self.k8s_ready()
            if not self.kubernetes_control and not ready:
                _msg += "\nOnce all variables have been defined, you must restart the k8sevents module for the changes to take effect"
            return -errno.ENODATA, "", _msg
        if cmd["prefix"] == "k8sevents status":
            return self.show_status()
        elif cmd["prefix"] == "k8sevents ls":
            return self.show_events(self.ns_watcher.events)
        elif cmd["prefix"] == "k8sevents ceph":
            return self.show_events(self.event_processor.events)
        else:
            raise NotImplementedError(cmd["prefix"])
@staticmethod
def can_run():
"""Determine whether the pre-reqs for the module are in place"""
if not kubernetes_imported:
return False, "kubernetes python client is not available"
return True, ""
    def load_kubernetes_config(self):
        """Load configuration for remote kubernetes API using KV store values

        Attempt to create an API client configuration from settings stored in
        KV store.

        Returns:
            client.ApiClient: kubernetes API client object

        Raises:
            OSError: unable to create the cacrt file
        """
        # the kubernetes setting Configuration.ssl_ca_cert is a path, so we have to create a
        # temporary file containing the cert for the client to load from
        try:
            ca_crt_file = create_temp_file('cacrt', self.k8s_config['cacrt'])
        except OSError as e:
            self.log.error("Unable to create file to hold cacrt: {}".format(str(e)))
            raise OSError(str(e))
        else:
            self.log.debug("CA certificate from KV store, written to {}".format(ca_crt_file))
        # bearer-token auth against the configured server, verified by the CA
        configuration = client.Configuration()
        configuration.host = self.k8s_config['server']
        configuration.ssl_ca_cert = ca_crt_file
        configuration.api_key = { "authorization": "Bearer " + self.k8s_config['token'] }
        api_client = client.ApiClient(configuration)
        self.log.info("API client created for remote kubernetes access using cacrt and token from KV store")
        return api_client
    def serve(self):
        """Module entry point: configure kubernetes access, start the worker
        threads (log processor, config watcher, namespace watcher) and then
        supervise them, logging once per tracker thread that dies.

        When running inside rook (POD_NAME set) the in-cluster config is
        used; otherwise the settings must have been provided via the
        set-access/set-config commands.  On any configuration failure
        self.error_msg is set and the module exits early.
        """
        # apply options set by CLI to this module
        self.config_notify()
        if not kubernetes_imported:
            self.error_msg = "Unable to start : python kubernetes package is missing"
        else:
            if self.kubernetes_control:
                # running under rook-ceph
                config.load_incluster_config()
                self.k8s_config['server'] = "https://{}:{}".format(os.environ.get('KUBERNETES_SERVICE_HOST', 'UNKNOWN'),
                                                                   os.environ.get('KUBERNETES_SERVICE_PORT_HTTPS', 'UNKNOWN'))
                self._api_client_config = None
                self._namespace = os.environ.get("POD_NAMESPACE", "rook-ceph")
            else:
                # running outside of rook-ceph, so we need additional settings to tell us
                # how to connect to the kubernetes cluster
                ready, errors = self.k8s_ready()
                if not ready:
                    self.error_msg = "Required settings missing. Use ceph k8sevents set-access | set-config to define {}".format(",".join(errors))
                else:
                    try:
                        self._api_client_config = self.load_kubernetes_config()
                    except OSError as e:
                        self.error_msg = str(e)
                    else:
                        self._namespace = self.k8s_config['namespace']
                        self.log.info("k8sevents configuration loaded from KV store")
        if self.error_msg:
            self.log.error(self.error_msg)
            return
        # All checks have passed
        self.config_watcher = CephConfigWatcher(self)
        self.event_processor = EventProcessor(self.config_watcher,
                                              self.ceph_event_retention_days,
                                              self._api_client_config,
                                              self._namespace)
        self.ns_watcher = NamespaceWatcher(api_client_config=self._api_client_config,
                                           namespace=self._namespace)
        if self.event_processor.ok:
            log.info("Ceph Log processor thread starting")
            self.event_processor.start() # start log consumer thread
            log.info("Ceph config watcher thread starting")
            self.config_watcher.start()
            log.info("Rook-ceph namespace events watcher starting")
            self.ns_watcher.start()
            self.trackers.extend([self.event_processor, self.config_watcher, self.ns_watcher])
            # supervision loop: report each tracker thread death exactly once
            while True:
                # stay alive
                time.sleep(1)
                trackers = self.trackers
                for t in trackers:
                    if not t.is_alive() and not t.reported:
                        log.error("K8sevents tracker thread '{}' stopped: {}".format(t.__class__.__name__, t.health))
                        t.reported = True
        else:
            self.error_msg = "Unable to access kubernetes API. Is it accessible? Are RBAC rules for our token valid?"
            log.warning(self.error_msg)
        log.warning("k8sevents module exiting")
        self.run = False
def shutdown(self):
self.run = False
log.info("Shutting down k8sevents module")
self.event_processor.can_run = False
if self._rados:
self._rados.shutdown()
| 55,816 | 37.335852 | 172 | py |
null | ceph-main/src/pybind/mgr/k8sevents/rbac_sample.yaml | ---
# Create a namespace to receive our test events
apiVersion: v1
kind: Namespace
metadata:
name: ceph
---
# Define the access rules to open the events API to k8sevents
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: ceph-mgr-events-rules
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- list
- watch
- patch
- get
---
# Define a service account to associate with our event stream
apiVersion: v1
kind: ServiceAccount
metadata:
name: ceph-mgr
namespace: ceph
---
# Allow the ceph-mgr service account access to the events api
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: ceph-mgr
namespace: ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ceph-mgr-events-rules
subjects:
- kind: ServiceAccount
name: ceph-mgr
namespace: ceph
| 871 | 17.956522 | 61 | yaml |
null | ceph-main/src/pybind/mgr/localpool/__init__.py | # flake8: noqa
from .module import Module
| 42 | 13.333333 | 26 | py |
null | ceph-main/src/pybind/mgr/localpool/module.py | from mgr_module import MgrModule, CommandResult, Option, NotifyType
import json
import threading
from typing import cast, Any
class Module(MgrModule):
MODULE_OPTIONS = [
Option(
name='subtree',
type='str',
default='rack',
desc='CRUSH level for which to create a local pool',
long_desc='which CRUSH subtree type the module should create a pool for.',
runtime=True),
Option(
name='failure_domain',
type='str',
default='host',
desc='failure domain for any created local pool',
long_desc='what failure domain we should separate data replicas across.',
runtime=True),
Option(
name='min_size',
type='int',
desc='default min_size for any created local pool',
long_desc='value to set min_size to (unchanged from Ceph\'s default if this option is not set)',
runtime=True),
Option(
name='num_rep',
type='int',
default=3,
desc='default replica count for any created local pool',
runtime=True),
Option(
name='pg_num',
type='int',
default=128,
desc='default pg_num for any created local pool',
runtime=True),
Option(
name='prefix',
type='str',
default='',
desc='name prefix for any created local pool',
runtime=True),
]
NOTIFY_TYPES = [NotifyType.osd_map]
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(Module, self).__init__(*args, **kwargs)
        # event used by serve()/shutdown() to park and wake the module thread
        self.serve_event = threading.Event()
    def notify(self, notify_type: NotifyType, notify_id: str) -> None:
        # re-evaluate the localized pools whenever the osd map changes
        if notify_type == NotifyType.osd_map:
            self.handle_osd_map()
def handle_osd_map(self) -> None:
"""
Check pools on each OSDMap change
"""
subtree_type = cast(str, self.get_module_option('subtree'))
failure_domain = self.get_module_option('failure_domain')
pg_num = self.get_module_option('pg_num')
num_rep = self.get_module_option('num_rep')
min_size = self.get_module_option('min_size')
prefix = cast(str, self.get_module_option('prefix')) or 'by-' + subtree_type + '-'
osdmap = self.get("osd_map")
lpools = []
for pool in osdmap['pools']:
if pool['pool_name'].find(prefix) == 0:
lpools.append(pool['pool_name'])
self.log.debug('localized pools = %s', lpools)
subtrees = []
tree = self.get('osd_map_tree')
for node in tree['nodes']:
if node['type'] == subtree_type:
subtrees.append(node['name'])
pool_name = prefix + node['name']
if pool_name not in lpools:
self.log.info('Creating localized pool %s', pool_name)
#
result = CommandResult("")
self.send_command(result, "mon", "", json.dumps({
"prefix": "osd crush rule create-replicated",
"format": "json",
"name": pool_name,
"root": node['name'],
"type": failure_domain,
}), "")
r, outb, outs = result.wait()
result = CommandResult("")
self.send_command(result, "mon", "", json.dumps({
"prefix": "osd pool create",
"format": "json",
"pool": pool_name,
'rule': pool_name,
"pool_type": 'replicated',
'pg_num': pg_num,
}), "")
r, outb, outs = result.wait()
result = CommandResult("")
self.send_command(result, "mon", "", json.dumps({
"prefix": "osd pool set",
"format": "json",
"pool": pool_name,
'var': 'size',
"val": str(num_rep),
}), "")
r, outb, outs = result.wait()
if min_size:
result = CommandResult("")
self.send_command(result, "mon", "", json.dumps({
"prefix": "osd pool set",
"format": "json",
"pool": pool_name,
'var': 'min_size',
"val": str(min_size),
}), "")
r, outb, outs = result.wait()
# TODO remove pools for hosts that don't exist?
    def serve(self) -> None:
        # do one initial pass, then park until shutdown() sets the event
        self.handle_osd_map()
        self.serve_event.wait()
        self.serve_event.clear()
    def shutdown(self) -> None:
        # wake serve() so the module thread can exit
        self.serve_event.set()
| 5,082 | 36.10219 | 108 | py |
null | ceph-main/src/pybind/mgr/mds_autoscaler/__init__.py | import os
if 'UNITTEST' in os.environ:
import tests
from .module import MDSAutoscaler
| 92 | 12.285714 | 33 | py |
null | ceph-main/src/pybind/mgr/mds_autoscaler/module.py | """
Automatically scale MDSs based on status of the file-system using the FSMap
"""
import logging
from typing import Any, Optional
from mgr_module import MgrModule, NotifyType
from orchestrator._interface import MDSSpec, ServiceSpec
import orchestrator
import copy
log = logging.getLogger(__name__)
class MDSAutoscaler(orchestrator.OrchestratorClientMixin, MgrModule):
"""
MDS autoscaler.
"""
NOTIFY_TYPES = [NotifyType.fs_map]
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        MgrModule.__init__(self, *args, **kwargs)
        # register ourselves as the mgr handle for the orchestrator client mixin
        self.set_mgr(self)
def get_service(self, fs_name: str) -> Optional[orchestrator.ServiceDescription]:
service = f"mds.{fs_name}"
completion = self.describe_service(service_type='mds',
service_name=service,
refresh=True)
orchestrator.raise_if_exception(completion)
if completion.result:
return completion.result[0]
return None
def update_daemon_count(self, spec: ServiceSpec, fs_name: str, abscount: int) -> MDSSpec:
ps = copy.deepcopy(spec.placement)
ps.count = abscount
newspec = MDSSpec(service_type=spec.service_type,
service_id=spec.service_id,
placement=ps)
return newspec
def get_required_standby_count(self, fs_map: dict, fs_name: str) -> int:
assert fs_map is not None
for fs in fs_map['filesystems']:
if fs['mdsmap']['fs_name'] == fs_name:
return fs['mdsmap']['standby_count_wanted']
assert False
def get_required_max_mds(self, fs_map: dict, fs_name: str) -> int:
assert fs_map is not None
for fs in fs_map['filesystems']:
if fs['mdsmap']['fs_name'] == fs_name:
return fs['mdsmap']['max_mds']
assert False
    def verify_and_manage_mds_instance(self, fs_map: dict, fs_name: str) -> None:
        """Reconcile the MDS daemon count for one filesystem.

        Desired count is max_mds + standby_count_wanted; when the service's
        placement count differs, a new spec is applied.  Filesystems without
        an mds service, or whose service has no explicit count, are skipped.
        Orchestrator errors are logged and swallowed.
        """
        assert fs_map is not None
        try:
            svc = self.get_service(fs_name)
            if not svc:
                self.log.info(f"fs {fs_name}: no service defined; skipping")
                return
            if not svc.spec.placement.count:
                self.log.info(f"fs {fs_name}: service does not specify a count; skipping")
                return
            standbys_required = self.get_required_standby_count(fs_map, fs_name)
            max_mds = self.get_required_max_mds(fs_map, fs_name)
            want = max_mds + standbys_required
            self.log.info(f"fs {fs_name}: "
                          f"max_mds={max_mds} "
                          f"standbys_required={standbys_required}, "
                          f"count={svc.spec.placement.count}")
            if want == svc.spec.placement.count:
                # already at the desired count - nothing to do
                return
            self.log.info(f"fs {fs_name}: adjusting daemon count from {svc.spec.placement.count} to {want}")
            newspec = self.update_daemon_count(svc.spec, fs_name, want)
            completion = self.apply_mds(newspec)
            orchestrator.raise_if_exception(completion)
        except orchestrator.OrchestratorError as e:
            self.log.exception(f"fs {fs_name}: exception while updating service: {e}")
            pass
def notify(self, notify_type: NotifyType, notify_id: str) -> None:
if notify_type != NotifyType.fs_map:
return
fs_map = self.get('fs_map')
if not fs_map:
return
# we don't know for which fs config has been changed
for fs in fs_map['filesystems']:
fs_name = fs['mdsmap']['fs_name']
self.verify_and_manage_mds_instance(fs_map, fs_name)
| 3,748 | 36.49 | 108 | py |
null | ceph-main/src/pybind/mgr/mds_autoscaler/tests/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/pybind/mgr/mds_autoscaler/tests/test_autoscaler.py | import pytest
from unittest import mock
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec
from orchestrator import DaemonDescription, OrchResult, ServiceDescription
try:
from typing import Any, List
except ImportError:
pass
from mds_autoscaler.module import MDSAutoscaler
@pytest.fixture()
def mds_autoscaler_module():
yield MDSAutoscaler('mds_autoscaler', 0, 0)
class TestCephadm(object):
@mock.patch("mds_autoscaler.module.MDSAutoscaler.get")
@mock.patch("mds_autoscaler.module.MDSAutoscaler.list_daemons")
@mock.patch("mds_autoscaler.module.MDSAutoscaler.describe_service")
@mock.patch("mds_autoscaler.module.MDSAutoscaler.apply_mds")
def test_scale_up(self, _apply_mds, _describe_service, _list_daemons, _get, mds_autoscaler_module: MDSAutoscaler):
daemons = OrchResult(result=[
DaemonDescription(
hostname='myhost',
daemon_type='mds',
daemon_id='fs_name.myhost.a'
),
DaemonDescription(
hostname='myhost',
daemon_type='mds',
daemon_id='fs_name.myhost.b'
),
])
_list_daemons.return_value = daemons
services = OrchResult(result=[
ServiceDescription(
spec=ServiceSpec(
service_type='mds',
service_id='fs_name',
placement=PlacementSpec(
count=2
)
)
)
])
_describe_service.return_value = services
apply = OrchResult(result='')
_apply_mds.return_value = apply
_get.return_value = {
'filesystems': [
{
'mdsmap': {
'fs_name': 'fs_name',
'in': [
{
'name': 'mds.fs_name.myhost.a',
}
],
'standby_count_wanted': 2,
'max_mds': 1
}
}
],
'standbys': [
{
'name': 'mds.fs_name.myhost.b',
}
],
}
mds_autoscaler_module.notify('fs_map', None)
_apply_mds.assert_called_with(ServiceSpec(
service_type='mds',
service_id='fs_name',
placement=PlacementSpec(
count=3
)
))
| 2,566 | 27.842697 | 118 | py |
null | ceph-main/src/pybind/mgr/mirroring/__init__.py | from .module import Module
| 27 | 13 | 26 | py |
null | ceph-main/src/pybind/mgr/mirroring/module.py | from typing import List, Optional
from mgr_module import MgrModule, CLIReadCommand, CLIWriteCommand, Option, NotifyType
from .fs.snapshot_mirror import FSSnapshotMirror
class Module(MgrModule):
    """mgr module front-end for CephFS snapshot mirroring.

    Every CLI handler delegates to FSSnapshotMirror; fs_map notifications
    are forwarded so mirror state can track filesystem changes.
    """
    MODULE_OPTIONS: List[Option] = []
    NOTIFY_TYPES = [NotifyType.fs_map]
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fs_snapshot_mirror = FSSnapshotMirror(self)
    def notify(self, notify_type: NotifyType, notify_id):
        self.fs_snapshot_mirror.notify(notify_type)
    @CLIWriteCommand('fs snapshot mirror enable')
    def snapshot_mirror_enable(self,
                               fs_name: str):
        """Enable snapshot mirroring for a filesystem"""
        return self.fs_snapshot_mirror.enable_mirror(fs_name)
    @CLIWriteCommand('fs snapshot mirror disable')
    def snapshot_mirror_disable(self,
                                fs_name: str):
        """Disable snapshot mirroring for a filesystem"""
        return self.fs_snapshot_mirror.disable_mirror(fs_name)
    @CLIWriteCommand('fs snapshot mirror peer_add')
    # renamed from 'snapshot_mirorr_peer_add' (typo); the CLI command name is
    # carried by the decorator string, so the command itself is unchanged
    def snapshot_mirror_peer_add(self,
                                 fs_name: str,
                                 remote_cluster_spec: str,
                                 remote_fs_name: Optional[str] = None,
                                 remote_mon_host: Optional[str] = None,
                                 cephx_key: Optional[str] = None):
        """Add a remote filesystem peer"""
        conf = {}
        if remote_mon_host and cephx_key:
            conf['mon_host'] = remote_mon_host
            conf['key'] = cephx_key
        return self.fs_snapshot_mirror.peer_add(fs_name, remote_cluster_spec,
                                                remote_fs_name, remote_conf=conf)
    @CLIReadCommand('fs snapshot mirror peer_list')
    def snapshot_mirror_peer_list(self,
                                  fs_name: str):
        """List configured peers for a file system"""
        return self.fs_snapshot_mirror.peer_list(fs_name)
    @CLIWriteCommand('fs snapshot mirror peer_remove')
    def snapshot_mirror_peer_remove(self,
                                    fs_name: str,
                                    peer_uuid: str):
        """Remove a filesystem peer"""
        return self.fs_snapshot_mirror.peer_remove(fs_name, peer_uuid)
    @CLIWriteCommand('fs snapshot mirror peer_bootstrap create')
    def snapshot_mirror_peer_bootstrap_create(self,
                                              fs_name: str,
                                              client_name: str,
                                              site_name: str):
        """Bootstrap a filesystem peer"""
        return self.fs_snapshot_mirror.peer_bootstrap_create(fs_name, client_name, site_name)
    @CLIWriteCommand('fs snapshot mirror peer_bootstrap import')
    def snapshot_mirror_peer_bootstrap_import(self,
                                              fs_name: str,
                                              token: str):
        """Import a bootstrap token"""
        return self.fs_snapshot_mirror.peer_bootstrap_import(fs_name, token)
    @CLIWriteCommand('fs snapshot mirror add')
    def snapshot_mirror_add_dir(self,
                                fs_name: str,
                                path: str):
        """Add a directory for snapshot mirroring"""
        return self.fs_snapshot_mirror.add_dir(fs_name, path)
    @CLIWriteCommand('fs snapshot mirror remove')
    def snapshot_mirror_remove_dir(self,
                                   fs_name: str,
                                   path: str):
        """Remove a snapshot mirrored directory"""
        return self.fs_snapshot_mirror.remove_dir(fs_name, path)
    @CLIReadCommand('fs snapshot mirror dirmap')
    def snapshot_mirror_dirmap(self,
                               fs_name: str,
                               path: str):
        """Get current mirror instance map for a directory"""
        return self.fs_snapshot_mirror.status(fs_name, path)
    @CLIReadCommand('fs snapshot mirror show distribution')
    def snapshot_mirror_distribution(self,
                                     fs_name: str):
        """Get current instance to directory map for a filesystem"""
        return self.fs_snapshot_mirror.show_distribution(fs_name)
    @CLIReadCommand('fs snapshot mirror daemon status')
    def snapshot_mirror_daemon_status(self):
        """Get mirror daemon status"""
        return self.fs_snapshot_mirror.daemon_status()
| 4,530 | 42.567308 | 93 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/pybind/mgr/mirroring/fs/blocklist.py | import logging
log = logging.getLogger(__name__)


def blocklist(mgr, addr):
    """Add *addr* to the cluster's OSD blocklist via a mon command.

    :param mgr: object exposing mon_command()
    :param addr: entity address to blocklist (converted with str())
    :return: the mon command return code (0 on success); failures are
        logged but not raised
    """
    command = {'prefix': 'osd blocklist', 'blocklistop': 'add', 'addr': str(addr)}
    rc, _stdout, errs = mgr.mon_command(command)
    if rc != 0:
        log.error(f'blocklist error: {errs}')
    return rc
| 269 | 23.545455 | 78 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/exception.py | class MirrorException(Exception):
def __init__(self, error_code, error_msg=''):
super().__init__(error_code, error_msg)
| 132 | 32.25 | 49 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/notify.py | import errno
import json
import logging
import threading
import time
import rados
from .utils import MIRROR_OBJECT_PREFIX, AsyncOpTracker
log = logging.getLogger(__name__)
class Notifier:
    """Sends rados object notifications to a mirror-daemon instance object."""
    def __init__(self, ioctx):
        # rados IoCtx used for aio_notify calls
        self.ioctx = ioctx
    @staticmethod
    def instance_object(instance_id):
        # object name the target instance watches
        return f'{MIRROR_OBJECT_PREFIX}.{instance_id}'
    def notify_cbk(self, dir_path, callback):
        """Wrap *callback* into the (completion, rc, acks, timeouts) signature
        expected by aio_notify, forwarding only (dir_path, rc)."""
        def cbk(_, r, acks, timeouts):
            log.debug(f'Notifier.notify_cbk: ret {r} acks: {acks} timeouts: {timeouts}')
            callback(dir_path, r)
        return cbk
    def notify(self, dir_path, message, callback):
        """Asynchronously notify an instance about *dir_path*.

        :param message: (instance_id, payload) pair
        :param callback: invoked as callback(dir_path, rc) on completion
        :raises rados.Error: re-raised after logging
        """
        try:
            instance_id = message[0]
            message = message[1]
            log.debug(f'Notifier.notify: {instance_id} {message} for {dir_path}')
            self.ioctx.aio_notify(
                Notifier.instance_object(
                    instance_id), self.notify_cbk(dir_path, callback), msg=message)
        except rados.Error as e:
            log.error(f'Notifier exception: {e}')
            raise e
class InstanceWatcher:
    """Tracks live cephfs-mirror instances via periodic rados notifies.

    Instances that ack a notify are considered alive; an instance that has
    not acked for INSTANCE_TIMEOUT seconds (or fails to ack while we are
    draining in wait_and_stop()) is reported to the listener as removed.

    Fixes over the previous revision: deprecated ``Condition.notifyAll()``
    replaced by ``notify_all()``, deprecated ``log.warn`` by ``log.warning``,
    and ``== None`` comparisons by ``is None``.
    """

    INSTANCE_TIMEOUT = 30   # seconds before a silent instance is dropped
    NOTIFY_INTERVAL = 1     # seconds between notify rounds

    class Listener:
        """Interface for instance membership callbacks."""
        def handle_instances(self, added, removed):
            raise NotImplementedError()

    def __init__(self, ioctx, instances, listener):
        self.ioctx = ioctx
        self.listener = listener
        # instance_id -> {'addr': ..., 'seen': last-ack timestamp}
        self.instances = {}
        for instance_id, data in instances.items():
            self.instances[instance_id] = {'addr': data['addr'],
                                           'seen': time.time()}
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)
        self.done = threading.Event()
        self.waiting = threading.Event()
        self.notify_task = None
        self.schedule_notify_task()

    def schedule_notify_task(self):
        # only one outstanding timer at a time
        assert self.notify_task is None
        self.notify_task = threading.Timer(InstanceWatcher.NOTIFY_INTERVAL, self.notify)
        self.notify_task.start()

    def wait_and_stop(self):
        """Block until every instance has been drained and notifying stopped."""
        with self.lock:
            log.info('InstanceWatcher.wait_and_stop')
            self.waiting.set()
            self.cond.wait_for(lambda: self.done.is_set())
            log.info('waiting done')
            assert self.notify_task is None

    def handle_notify(self, _, r, acks, timeouts):
        """Completion callback for aio_notify: refresh ackers, cull the rest."""
        log.debug(f'InstanceWatcher.handle_notify r={r} acks={acks} timeouts={timeouts}')
        with self.lock:
            try:
                added = {}
                removed = {}
                if acks is None:
                    acks = []
                ackd_instances = []
                for ack in acks:
                    instance_id = str(ack[0])
                    ackd_instances.append(instance_id)
                    # sender data is quoted
                    notifier_data = json.loads(ack[2].decode('utf-8'))
                    log.debug(f'InstanceWatcher.handle_notify: {instance_id}: {notifier_data}')
                    if instance_id not in self.instances:
                        self.instances[instance_id] = {}
                        added[instance_id] = notifier_data['addr']
                    self.instances[instance_id]['addr'] = notifier_data['addr']
                    self.instances[instance_id]['seen'] = time.time()
                # gather non responders
                now = time.time()
                for instance_id in list(self.instances.keys()):
                    data = self.instances[instance_id]
                    if (now - data['seen'] > InstanceWatcher.INSTANCE_TIMEOUT) or \
                       (self.waiting.is_set() and instance_id not in ackd_instances):
                        removed[instance_id] = data['addr']
                        self.instances.pop(instance_id)
                if added or removed:
                    self.listener.handle_instances(added, removed)
            except Exception as e:
                log.warning(f'InstanceWatcher.handle_notify exception: {e}')
            finally:
                if not self.instances and self.waiting.is_set():
                    # drained: wake wait_and_stop() and stop rescheduling
                    self.done.set()
                    self.cond.notify_all()
                else:
                    self.schedule_notify_task()

    def notify(self):
        """Timer body: broadcast a notify on the shared mirror object."""
        with self.lock:
            self.notify_task = None
            try:
                log.debug('InstanceWatcher.notify')
                self.ioctx.aio_notify(MIRROR_OBJECT_PREFIX, self.handle_notify)
            except rados.Error as e:
                log.warning(f'InstanceWatcher exception: {e}')
                # retry on the next interval
                self.schedule_notify_task()
| 4,607 | 36.770492 | 95 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/snapshot_mirror.py | import base64
import errno
import json
import logging
import os
import pickle
import re
import stat
import threading
import uuid
from typing import Dict, Any
import cephfs
import rados
from mgr_util import RTimer, CephfsClient, open_filesystem,\
CephfsConnectionException
from mgr_module import NotifyType
from .blocklist import blocklist
from .notify import Notifier, InstanceWatcher
from .utils import INSTANCE_ID_PREFIX, MIRROR_OBJECT_NAME, Finisher, \
AsyncOpTracker, connect_to_filesystem, disconnect_from_filesystem
from .exception import MirrorException
from .dir_map.create import create_mirror_object
from .dir_map.load import load_dir_map, load_instances
from .dir_map.update import UpdateDirMapRequest, UpdateInstanceRequest
from .dir_map.policy import Policy
from .dir_map.state_transition import ActionType
log = logging.getLogger(__name__)
CEPHFS_IMAGE_POLICY_UPDATE_THROTTLE_INTERVAL = 1
class FSPolicy:
class InstanceListener(InstanceWatcher.Listener):
def __init__(self, fspolicy):
self.fspolicy = fspolicy
def handle_instances(self, added, removed):
self.fspolicy.update_instances(added, removed)
def __init__(self, mgr, ioctx):
self.mgr = mgr
self.ioctx = ioctx
self.pending = []
self.policy = Policy()
self.lock = threading.Lock()
self.cond = threading.Condition(self.lock)
self.dir_paths = []
self.async_requests = {}
self.finisher = Finisher()
self.op_tracker = AsyncOpTracker()
self.notifier = Notifier(ioctx)
self.instance_listener = FSPolicy.InstanceListener(self)
self.instance_watcher = None
self.stopping = threading.Event()
self.timer_task = RTimer(CEPHFS_IMAGE_POLICY_UPDATE_THROTTLE_INTERVAL,
self.process_updates)
self.timer_task.start()
def schedule_action(self, dir_paths):
self.dir_paths.extend(dir_paths)
def init(self, dir_mapping, instances):
with self.lock:
self.policy.init(dir_mapping)
# we'll schedule action for all directories, so don't bother capturing
# directory names here.
self.policy.add_instances(list(instances.keys()), initial_update=True)
self.instance_watcher = InstanceWatcher(self.ioctx, instances,
self.instance_listener)
self.schedule_action(list(dir_mapping.keys()))
def shutdown(self):
with self.lock:
log.debug('FSPolicy.shutdown')
self.stopping.set()
log.debug('canceling update timer task')
self.timer_task.cancel()
log.debug('update timer task canceled')
if self.instance_watcher:
log.debug('stopping instance watcher')
self.instance_watcher.wait_and_stop()
log.debug('stopping instance watcher')
self.op_tracker.wait_for_ops()
log.debug('FSPolicy.shutdown done')
def handle_update_mapping(self, updates, removals, request_id, callback, r):
log.info(f'handle_update_mapping: {updates} {removals} {request_id} {callback} {r}')
with self.lock:
try:
self.async_requests.pop(request_id)
if callback:
callback(updates, removals, r)
finally:
self.op_tracker.finish_async_op()
def handle_update_instances(self, instances_added, instances_removed, request_id, r):
log.info(f'handle_update_instances: {instances_added} {instances_removed} {request_id} {r}')
with self.lock:
try:
self.async_requests.pop(request_id)
if self.stopping.is_set():
log.debug(f'handle_update_instances: policy shutting down')
return
schedules = []
if instances_removed:
schedules.extend(self.policy.remove_instances(instances_removed))
if instances_added:
schedules.extend(self.policy.add_instances(instances_added))
self.schedule_action(schedules)
finally:
self.op_tracker.finish_async_op()
def update_mapping(self, update_map, removals, callback=None):
log.info(f'updating directory map: {len(update_map)}+{len(removals)} updates')
request_id = str(uuid.uuid4())
def async_callback(r):
self.finisher.queue(self.handle_update_mapping,
[list(update_map.keys()), removals, request_id, callback, r])
request = UpdateDirMapRequest(self.ioctx, update_map.copy(), removals.copy(), async_callback)
self.async_requests[request_id] = request
self.op_tracker.start_async_op()
log.debug(f'async request_id: {request_id}')
request.send()
def update_instances(self, added, removed):
logging.debug(f'update_instances: added={added}, removed={removed}')
for instance_id, addr in removed.items():
log.info(f'blocklisting instance_id: {instance_id} addr: {addr}')
blocklist(self.mgr, addr)
with self.lock:
instances_added = {}
instances_removed = []
for instance_id, addr in added.items():
instances_added[instance_id] = {'version': 1, 'addr': addr}
instances_removed = list(removed.keys())
request_id = str(uuid.uuid4())
def async_callback(r):
self.finisher.queue(self.handle_update_instances,
[list(instances_added.keys()), instances_removed, request_id, r])
# blacklisted instances can be removed at this point. remapping directories
# mapped to blacklisted instances on module startup is handled in policy
# add_instances().
request = UpdateInstanceRequest(self.ioctx, instances_added.copy(),
instances_removed.copy(), async_callback)
self.async_requests[request_id] = request
log.debug(f'async request_id: {request_id}')
self.op_tracker.start_async_op()
request.send()
def continue_action(self, updates, removals, r):
log.debug(f'continuing action: {updates}+{removals} r={r}')
if self.stopping.is_set():
log.debug('continue_action: policy shutting down')
return
schedules = []
for dir_path in updates:
schedule = self.policy.finish_action(dir_path, r)
if schedule:
schedules.append(dir_path)
for dir_path in removals:
schedule = self.policy.finish_action(dir_path, r)
if schedule:
schedules.append(dir_path)
self.schedule_action(schedules)
def handle_peer_ack(self, dir_path, r):
log.info(f'handle_peer_ack: {dir_path} r={r}')
with self.lock:
try:
if self.stopping.is_set():
log.debug(f'handle_peer_ack: policy shutting down')
return
self.continue_action([dir_path], [], r)
finally:
self.op_tracker.finish_async_op()
def process_updates(self):
def acquire_message(dir_path):
return json.dumps({'dir_path': dir_path,
'mode': 'acquire'
})
def release_message(dir_path):
return json.dumps({'dir_path': dir_path,
'mode': 'release'
})
with self.lock:
if not self.dir_paths or self.stopping.is_set():
return
update_map = {}
removals = []
notifies = {}
instance_purges = []
for dir_path in self.dir_paths:
action_type = self.policy.start_action(dir_path)
lookup_info = self.policy.lookup(dir_path)
log.debug(f'processing action: dir_path: {dir_path}, lookup_info: {lookup_info}, action_type: {action_type}')
if action_type == ActionType.NONE:
continue
elif action_type == ActionType.MAP_UPDATE:
# take care to not overwrite purge status
update_map[dir_path] = {'version': 1,
'instance_id': lookup_info['instance_id'],
'last_shuffled': lookup_info['mapped_time']
}
if lookup_info['purging']:
update_map[dir_path]['purging'] = 1
elif action_type == ActionType.MAP_REMOVE:
removals.append(dir_path)
elif action_type == ActionType.ACQUIRE:
notifies[dir_path] = (lookup_info['instance_id'], acquire_message(dir_path))
elif action_type == ActionType.RELEASE:
notifies[dir_path] = (lookup_info['instance_id'], release_message(dir_path))
if update_map or removals:
self.update_mapping(update_map, removals, callback=self.continue_action)
for dir_path, message in notifies.items():
self.op_tracker.start_async_op()
self.notifier.notify(dir_path, message, self.handle_peer_ack)
self.dir_paths.clear()
def add_dir(self, dir_path):
with self.lock:
lookup_info = self.policy.lookup(dir_path)
if lookup_info:
if lookup_info['purging']:
raise MirrorException(-errno.EAGAIN, f'remove in-progress for {dir_path}')
else:
raise MirrorException(-errno.EEXIST, f'directory {dir_path} is already tracked')
schedule = self.policy.add_dir(dir_path)
if not schedule:
return
update_map = {dir_path: {'version': 1, 'instance_id': '', 'last_shuffled': 0.0}}
updated = False
def update_safe(updates, removals, r):
nonlocal updated
updated = True
self.cond.notifyAll()
self.update_mapping(update_map, [], callback=update_safe)
self.cond.wait_for(lambda: updated)
self.schedule_action([dir_path])
def remove_dir(self, dir_path):
with self.lock:
lookup_info = self.policy.lookup(dir_path)
if not lookup_info:
raise MirrorException(-errno.ENOENT, f'directory {dir_path} id not tracked')
if lookup_info['purging']:
raise MirrorException(-errno.EINVAL, f'directory {dir_path} is under removal')
update_map = {dir_path: {'version': 1,
'instance_id': lookup_info['instance_id'],
'last_shuffled': lookup_info['mapped_time'],
'purging': 1}}
updated = False
sync_lock = threading.Lock()
sync_cond = threading.Condition(sync_lock)
def update_safe(r):
with sync_lock:
nonlocal updated
updated = True
sync_cond.notifyAll()
request = UpdateDirMapRequest(self.ioctx, update_map.copy(), [], update_safe)
request.send()
with sync_lock:
sync_cond.wait_for(lambda: updated)
schedule = self.policy.remove_dir(dir_path)
if schedule:
self.schedule_action([dir_path])
def status(self, dir_path):
with self.lock:
res = self.policy.dir_status(dir_path)
return 0, json.dumps(res, indent=4, sort_keys=True), ''
def summary(self):
with self.lock:
res = self.policy.instance_summary()
return 0, json.dumps(res, indent=4, sort_keys=True), ''
class FSSnapshotMirror:
PEER_CONFIG_KEY_PREFIX = "cephfs/mirror/peer"
def __init__(self, mgr):
self.mgr = mgr
self.rados = mgr.rados
self.pool_policy = {}
self.fs_map = self.mgr.get('fs_map')
self.lock = threading.Lock()
self.refresh_pool_policy()
self.local_fs = CephfsClient(mgr)
def notify(self, notify_type: NotifyType):
log.debug(f'got notify type {notify_type}')
if notify_type == NotifyType.fs_map:
with self.lock:
self.fs_map = self.mgr.get('fs_map')
self.refresh_pool_policy_locked()
@staticmethod
def make_spec(client_name, cluster_name):
return f'{client_name}@{cluster_name}'
@staticmethod
def split_spec(spec):
try:
client_id, cluster_name = spec.split('@')
_, client_name = client_id.split('.')
return client_name, cluster_name
except ValueError:
raise MirrorException(-errno.EINVAL, f'invalid cluster spec {spec}')
@staticmethod
def get_metadata_pool(filesystem, fs_map):
for fs in fs_map['filesystems']:
if fs['mdsmap']['fs_name'] == filesystem:
return fs['mdsmap']['metadata_pool']
return None
@staticmethod
def get_filesystem_id(filesystem, fs_map):
for fs in fs_map['filesystems']:
if fs['mdsmap']['fs_name'] == filesystem:
return fs['id']
return None
@staticmethod
def peer_config_key(filesystem, peer_uuid):
return f'{FSSnapshotMirror.PEER_CONFIG_KEY_PREFIX}/{filesystem}/{peer_uuid}'
def config_set(self, key, val=None):
"""set or remove a key from mon config store"""
if val:
cmd = {'prefix': 'config-key set',
'key': key, 'val': val}
else:
cmd = {'prefix': 'config-key rm',
'key': key}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0:
log.error(f'mon command to set/remove config-key {key} failed: {err}')
raise Exception(-errno.EINVAL)
def config_get(self, key):
"""fetch a config key value from mon config store"""
cmd = {'prefix': 'config-key get', 'key': key}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0 and not r == -errno.ENOENT:
log.error(f'mon command to get config-key {key} failed: {err}')
raise Exception(-errno.EINVAL)
val = {}
if r == 0:
val = json.loads(outs)
return val
def filesystem_exist(self, filesystem):
for fs in self.fs_map['filesystems']:
if fs['mdsmap']['fs_name'] == filesystem:
return True
return False
def get_mirrored_filesystems(self):
return [fs['mdsmap']['fs_name'] for fs in self.fs_map['filesystems'] if fs.get('mirror_info', None)]
def get_filesystem_peers(self, filesystem):
"""To be used when mirroring in enabled for the filesystem"""
for fs in self.fs_map['filesystems']:
if fs['mdsmap']['fs_name'] == filesystem:
return fs['mirror_info']['peers']
return None
def peer_exists(self, filesystem, remote_cluster_spec, remote_fs_name):
peers = self.get_filesystem_peers(filesystem)
for _, rem in peers.items():
remote = rem['remote']
spec = FSSnapshotMirror.make_spec(remote['client_name'], remote['cluster_name'])
if spec == remote_cluster_spec and remote['fs_name'] == remote_fs_name:
return True
return False
@staticmethod
def get_mirror_info(fs):
try:
val = fs.getxattr('/', 'ceph.mirror.info')
match = re.search(r'^cluster_id=([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}) fs_id=(\d+)$',
val.decode('utf-8'))
if match and len(match.groups()) == 2:
return {'cluster_id': match.group(1),
'fs_id': int(match.group(2))
}
raise MirrorException(-errno.EINVAL, 'invalid ceph.mirror.info value format')
except cephfs.Error as e:
raise MirrorException(-e.errno, 'error fetching ceph.mirror.info xattr')
@staticmethod
def set_mirror_info(local_cluster_id, local_fsid, remote_fs):
log.info(f'setting {local_cluster_id}::{local_fsid} on remote')
try:
remote_fs.setxattr('/', 'ceph.mirror.info',
f'cluster_id={local_cluster_id} fs_id={local_fsid}'.encode('utf-8'), os.XATTR_CREATE)
except cephfs.Error as e:
if e.errno == errno.EEXIST:
try:
mi = FSSnapshotMirror.get_mirror_info(remote_fs)
cluster_id = mi['cluster_id']
fs_id = mi['fs_id']
if not (cluster_id == local_cluster_id and fs_id == local_fsid):
raise MirrorException(-errno.EEXIST, f'peer mirrorred by: (cluster_id: {cluster_id}, fs_id: {fs_id})')
except MirrorException:
# if mirror info cannot be fetched for some reason, let's just
# fail.
raise MirrorException(-errno.EEXIST, f'already an active peer')
else:
log.error(f'error setting mirrored fsid: {e}')
raise Exception(-e.errno)
def resolve_peer(self, fs_name, peer_uuid):
peers = self.get_filesystem_peers(fs_name)
for peer, rem in peers.items():
if peer == peer_uuid:
return rem['remote']
return None
def purge_mirror_info(self, local_fs_name, peer_uuid):
log.debug(f'local fs={local_fs_name} peer_uuid={peer_uuid}')
# resolve the peer to its spec
rem = self.resolve_peer(local_fs_name, peer_uuid)
if not rem:
return
log.debug(f'peer_uuid={peer_uuid} resolved to {rem}')
_, client_name = rem['client_name'].split('.')
# fetch auth details from config store
remote_conf = self.config_get(FSSnapshotMirror.peer_config_key(local_fs_name, peer_uuid))
remote_cluster, remote_fs = connect_to_filesystem(client_name,
rem['cluster_name'],
rem['fs_name'], 'remote', conf_dct=remote_conf)
try:
remote_fs.removexattr('/', 'ceph.mirror.info')
except cephfs.Error as e:
if not e.errno == errno.ENOENT:
log.error('error removing mirror info')
raise Exception(-e.errno)
finally:
disconnect_from_filesystem(rem['cluster_name'], rem['fs_name'], remote_cluster, remote_fs)
def verify_and_set_mirror_info(self, local_fs_name, remote_cluster_spec, remote_fs_name, remote_conf={}):
log.debug(f'local fs={local_fs_name} remote={remote_cluster_spec}/{remote_fs_name}')
client_name, cluster_name = FSSnapshotMirror.split_spec(remote_cluster_spec)
remote_cluster, remote_fs = connect_to_filesystem(client_name, cluster_name, remote_fs_name,
'remote', conf_dct=remote_conf)
try:
local_cluster_id = self.rados.get_fsid()
remote_cluster_id = remote_cluster.get_fsid()
log.debug(f'local_cluster_id={local_cluster_id} remote_cluster_id={remote_cluster_id}')
if 'fsid' in remote_conf:
if not remote_cluster_id == remote_conf['fsid']:
raise MirrorException(-errno.EINVAL, 'FSID mismatch between bootstrap token and remote cluster')
local_fscid = remote_fscid = None
with open_filesystem(self.local_fs, local_fs_name) as local_fsh:
local_fscid = local_fsh.get_fscid()
remote_fscid = remote_fs.get_fscid()
log.debug(f'local_fscid={local_fscid} remote_fscid={remote_fscid}')
mi = None
try:
mi = FSSnapshotMirror.get_mirror_info(local_fsh)
except MirrorException as me:
if me.args[0] != -errno.ENODATA:
raise Exception(-errno.EINVAL)
if mi and mi['cluster_id'] == remote_cluster_id and mi['fs_id'] == remote_fscid:
raise MirrorException(-errno.EINVAL, f'file system is an active peer for file system: {remote_fs_name}')
if local_cluster_id == remote_cluster_id and local_fscid == remote_fscid:
raise MirrorException(-errno.EINVAL, "'Source and destination cluster fsid and "\
"file-system name can't be the same")
FSSnapshotMirror.set_mirror_info(local_cluster_id, local_fscid, remote_fs)
finally:
disconnect_from_filesystem(cluster_name, remote_fs_name, remote_cluster, remote_fs)
def init_pool_policy(self, filesystem):
metadata_pool_id = FSSnapshotMirror.get_metadata_pool(filesystem, self.fs_map)
if not metadata_pool_id:
log.error(f'cannot find metadata pool-id for filesystem {filesystem}')
raise Exception(-errno.EINVAL)
try:
ioctx = self.rados.open_ioctx2(metadata_pool_id)
# TODO: make async if required
dir_mapping = load_dir_map(ioctx)
instances = load_instances(ioctx)
# init policy
fspolicy = FSPolicy(self.mgr, ioctx)
log.debug(f'init policy for filesystem {filesystem}: pool-id {metadata_pool_id}')
fspolicy.init(dir_mapping, instances)
self.pool_policy[filesystem] = fspolicy
except rados.Error as e:
log.error(f'failed to access pool-id {metadata_pool_id} for filesystem {filesystem}: {e}')
raise Exception(-e.errno)
def refresh_pool_policy_locked(self):
filesystems = self.get_mirrored_filesystems()
log.debug(f'refreshing policy for {filesystems}')
for filesystem in list(self.pool_policy):
if not filesystem in filesystems:
log.info(f'shutdown pool policy for {filesystem}')
fspolicy = self.pool_policy.pop(filesystem)
fspolicy.shutdown()
for filesystem in filesystems:
if not filesystem in self.pool_policy:
log.info(f'init pool policy for {filesystem}')
self.init_pool_policy(filesystem)
def refresh_pool_policy(self):
with self.lock:
self.refresh_pool_policy_locked()
def enable_mirror(self, filesystem):
log.info(f'enabling mirror for filesystem {filesystem}')
with self.lock:
try:
metadata_pool_id = FSSnapshotMirror.get_metadata_pool(filesystem, self.fs_map)
if not metadata_pool_id:
log.error(f'cannot find metadata pool-id for filesystem {filesystem}')
raise Exception(-errno.EINVAL)
create_mirror_object(self.rados, metadata_pool_id)
cmd = {'prefix': 'fs mirror enable', 'fs_name': filesystem}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0:
log.error(f'mon command to enable mirror failed: {err}')
raise Exception(-errno.EINVAL)
return 0, json.dumps({}), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as me:
return me.args[0], '', 'failed to enable mirroring'
def disable_mirror(self, filesystem):
log.info(f'disabling mirror for filesystem {filesystem}')
try:
with self.lock:
cmd = {'prefix': 'fs mirror disable', 'fs_name': filesystem}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0:
log.error(f'mon command to disable mirror failed: {err}')
raise Exception(-errno.EINVAL)
return 0, json.dumps({}), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as e:
return e.args[0], '', 'failed to disable mirroring'
def peer_list(self, filesystem):
try:
with self.lock:
fspolicy = self.pool_policy.get(filesystem, None)
if not fspolicy:
raise MirrorException(-errno.EINVAL, f'filesystem {filesystem} is not mirrored')
peers = self.get_filesystem_peers(filesystem)
peer_res = {}
for peer_uuid, rem in peers.items():
conf = self.config_get(FSSnapshotMirror.peer_config_key(filesystem, peer_uuid))
remote = rem['remote']
peer_res[peer_uuid] = {'client_name': remote['client_name'],
'site_name': remote['cluster_name'],
'fs_name': remote['fs_name']
}
if 'mon_host' in conf:
peer_res[peer_uuid]['mon_host'] = conf['mon_host']
return 0, json.dumps(peer_res), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as e:
return e.args[0], '', 'failed to list peers'
def peer_add(self, filesystem, remote_cluster_spec, remote_fs_name, remote_conf):
try:
if remote_fs_name == None:
remote_fs_name = filesystem
with self.lock:
fspolicy = self.pool_policy.get(filesystem, None)
if not fspolicy:
raise MirrorException(-errno.EINVAL, f'filesystem {filesystem} is not mirrored')
### peer updates for key, site-name are not yet supported
if self.peer_exists(filesystem, remote_cluster_spec, remote_fs_name):
return 0, json.dumps({}), ''
# _own_ the peer
self.verify_and_set_mirror_info(filesystem, remote_cluster_spec, remote_fs_name, remote_conf)
# unique peer uuid
peer_uuid = str(uuid.uuid4())
config_key = FSSnapshotMirror.peer_config_key(filesystem, peer_uuid)
if remote_conf.get('mon_host') and remote_conf.get('key'):
self.config_set(config_key, json.dumps(remote_conf))
cmd = {'prefix': 'fs mirror peer_add',
'fs_name': filesystem,
'uuid': peer_uuid,
'remote_cluster_spec': remote_cluster_spec,
'remote_fs_name': remote_fs_name}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0:
log.error(f'mon command to add peer failed: {err}')
try:
log.debug(f'cleaning up config-key for {peer_uuid}')
self.config_set(config_key)
except:
pass
raise Exception(-errno.EINVAL)
return 0, json.dumps({}), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as e:
return e.args[0], '', 'failed to add peer'
def peer_remove(self, filesystem, peer_uuid):
try:
with self.lock:
fspolicy = self.pool_policy.get(filesystem, None)
if not fspolicy:
raise MirrorException(-errno.EINVAL, f'filesystem {filesystem} is not mirrored')
# ok, this is being a bit lazy. remove mirror info from peer followed
# by purging the peer from fsmap. if the mirror daemon fs map updates
# are laggy, they happily continue to synchronize. ideally, we should
# purge the peer from fsmap here and purge mirror info on fsmap update
# (in notify()). but thats not straightforward -- before purging mirror
# info, we would need to wait for all mirror daemons to catch up with
# fsmap updates. this involves mirror daemons sending the fsmap epoch
# they have seen in reply to a notify request. TODO: fix this.
self.purge_mirror_info(filesystem, peer_uuid)
cmd = {'prefix': 'fs mirror peer_remove',
'fs_name': filesystem,
'uuid': peer_uuid}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0:
log.error(f'mon command to remove peer failed: {err}')
raise Exception(-errno.EINVAL)
self.config_set(FSSnapshotMirror.peer_config_key(filesystem, peer_uuid))
return 0, json.dumps({}), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as e:
return e.args[0], '', 'failed to remove peer'
def peer_bootstrap_create(self, fs_name, client_name, site_name):
"""create a bootstrap token for this peer filesystem"""
try:
with self.lock:
cmd = {'prefix': 'fs authorize',
'filesystem': fs_name,
'entity': client_name,
'caps': ['/', 'rwps']}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0:
log.error(f'mon command to create peer user failed: {err}')
raise Exception(-errno.EINVAL)
cmd = {'prefix': 'auth get',
'entity': client_name,
'format': 'json'}
r, outs, err = self.mgr.mon_command(cmd)
if r < 0:
log.error(f'mon command to fetch keyring failed: {err}')
raise Exception(-errno.EINVAL)
outs = json.loads(outs)
outs0 = outs[0]
token_dct = {'fsid': self.mgr.rados.get_fsid(),
'filesystem': fs_name,
'user': outs0['entity'],
'site_name': site_name,
'key': outs0['key'],
'mon_host': self.mgr.rados.conf_get('mon_host')}
token_str = json.dumps(token_dct).encode('utf-8')
encoded_token = base64.b64encode(token_str)
return 0, json.dumps({'token': encoded_token.decode('utf-8')}), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as e:
return e.args[0], '', 'failed to bootstrap peer'
def peer_bootstrap_import(self, filesystem, token):
try:
token_str = base64.b64decode(token)
token_dct = json.loads(token_str.decode('utf-8'))
except:
return -errno.EINVAL, '', 'failed to parse token'
client_name = token_dct.pop('user')
cluster_name = token_dct.pop('site_name')
remote_fs_name = token_dct.pop('filesystem')
remote_cluster_spec = f'{client_name}@{cluster_name}'
return self.peer_add(filesystem, remote_cluster_spec, remote_fs_name, token_dct)
@staticmethod
def norm_path(dir_path):
if not os.path.isabs(dir_path):
raise MirrorException(-errno.EINVAL, f'{dir_path} should be an absolute path')
return os.path.normpath(dir_path)
def add_dir(self, filesystem, dir_path):
try:
with self.lock:
if not self.filesystem_exist(filesystem):
raise MirrorException(-errno.ENOENT, f'filesystem {filesystem} does not exist')
fspolicy = self.pool_policy.get(filesystem, None)
if not fspolicy:
raise MirrorException(-errno.EINVAL, f'filesystem {filesystem} is not mirrored')
dir_path = FSSnapshotMirror.norm_path(dir_path)
log.debug(f'path normalized to {dir_path}')
fspolicy.add_dir(dir_path)
return 0, json.dumps({}), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as e:
return e.args[0], '', 'failed to add directory'
def remove_dir(self, filesystem, dir_path):
try:
with self.lock:
if not self.filesystem_exist(filesystem):
raise MirrorException(-errno.ENOENT, f'filesystem {filesystem} does not exist')
fspolicy = self.pool_policy.get(filesystem, None)
if not fspolicy:
raise MirrorException(-errno.EINVAL, f'filesystem {filesystem} is not mirrored')
dir_path = FSSnapshotMirror.norm_path(dir_path)
fspolicy.remove_dir(dir_path)
return 0, json.dumps({}), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
except Exception as e:
return e.args[0], '', 'failed to remove directory'
def status(self,filesystem, dir_path):
try:
with self.lock:
if not self.filesystem_exist(filesystem):
raise MirrorException(-errno.ENOENT, f'filesystem {filesystem} does not exist')
fspolicy = self.pool_policy.get(filesystem, None)
if not fspolicy:
raise MirrorException(-errno.EINVAL, f'filesystem {filesystem} is not mirrored')
dir_path = FSSnapshotMirror.norm_path(dir_path)
return fspolicy.status(dir_path)
except MirrorException as me:
return me.args[0], '', me.args[1]
def show_distribution(self, filesystem):
try:
with self.lock:
if not self.filesystem_exist(filesystem):
raise MirrorException(-errno.ENOENT, f'filesystem {filesystem} does not exist')
fspolicy = self.pool_policy.get(filesystem, None)
if not fspolicy:
raise MirrorException(-errno.EINVAL, f'filesystem {filesystem} is not mirrored')
return fspolicy.summary()
except MirrorException as me:
return me.args[0], '', me.args[1]
def daemon_status(self):
try:
with self.lock:
daemons = []
sm = self.mgr.get('service_map')
daemon_entry = sm['services'].get('cephfs-mirror', None)
log.debug(f'daemon_entry: {daemon_entry}')
if daemon_entry is not None:
for daemon_key in daemon_entry.get('daemons', []):
try:
daemon_id = int(daemon_key)
except ValueError:
continue
daemon = {
'daemon_id' : daemon_id,
'filesystems' : []
} # type: Dict[str, Any]
daemon_status = self.mgr.get_daemon_status('cephfs-mirror', daemon_key)
if not daemon_status:
log.debug(f'daemon status not yet availble for cephfs-mirror daemon: {daemon_key}')
continue
status = json.loads(daemon_status['status_json'])
for fs_id, fs_desc in status.items():
fs = {'filesystem_id' : int(fs_id),
'name' : fs_desc['name'],
'directory_count' : fs_desc['directory_count'],
'peers' : []
} # type: Dict[str, Any]
for peer_uuid, peer_desc in fs_desc['peers'].items():
peer = {
'uuid' : peer_uuid,
'remote' : peer_desc['remote'],
'stats' : peer_desc['stats']
}
fs['peers'].append(peer)
daemon['filesystems'].append(fs)
daemons.append(daemon)
return 0, json.dumps(daemons), ''
except MirrorException as me:
return me.args[0], '', me.args[1]
| 36,865 | 45.489281 | 126 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/utils.py | import errno
import logging
import threading
import rados
import cephfs
from .exception import MirrorException
MIRROR_OBJECT_PREFIX = 'cephfs_mirror'
MIRROR_OBJECT_NAME = MIRROR_OBJECT_PREFIX
INSTANCE_ID_PREFIX = "instance_"
DIRECTORY_MAP_PREFIX = "dir_map_"
log = logging.getLogger(__name__)
def connect_to_cluster(client_name, cluster_name, conf_dct, desc=''):
    """Open and return a connected rados handle for `cluster_name` as `client_name`.

    `conf_dct` may supply 'mon_host' and 'key' (e.g. decoded from a
    bootstrap token); otherwise the local ceph.conf for the named cluster
    is read.  Raises MirrorException(-ENOENT) for an unknown cluster.
    """
    try:
        log.debug(f'connecting to {desc} cluster: {client_name}/{cluster_name}')
        mon_host = conf_dct.get('mon_host', '')
        cephx_key = conf_dct.get('key', '')
        if mon_host and cephx_key:
            # explicit connection details (remote/bootstrap case)
            r_rados = rados.Rados(rados_id=client_name, conf={'mon_host': mon_host,
                                                              'key': cephx_key})
        else:
            # fall back to on-disk configuration for the cluster
            r_rados = rados.Rados(rados_id=client_name, clustername=cluster_name)
            r_rados.conf_read_file()
        r_rados.connect()
        log.debug(f'connected to {desc} cluster')
        return r_rados
    except rados.Error as e:
        if e.errno == errno.ENOENT:
            raise MirrorException(-e.errno, f'cluster {cluster_name} does not exist')
        else:
            log.error(f'error connecting to cluster: {e}')
            raise Exception(-e.errno)
def disconnect_from_cluster(cluster_name, cluster):
try:
log.debug(f'disconnecting from cluster {cluster_name}')
cluster.shutdown()
log.debug(f'disconnected from cluster {cluster_name}')
except Exception as e:
log.error(f'error disconnecting: {e}')
def connect_to_filesystem(client_name, cluster_name, fs_name, desc, conf_dct=None):
    """Connect to `fs_name` on the given cluster and mount it.

    Returns a (cluster handle, mounted LibCephFS handle) pair.  Raises
    MirrorException(-ENOENT) if the filesystem does not exist.

    The default for `conf_dct` is now None instead of a shared mutable
    ``{}`` (classic mutable-default pitfall); omitting it behaves exactly
    as before.
    """
    if conf_dct is None:
        conf_dct = {}
    try:
        cluster = connect_to_cluster(client_name, cluster_name, conf_dct, desc)
        log.debug(f'connecting to {desc} filesystem: {fs_name}')
        fs = cephfs.LibCephFS(rados_inst=cluster)
        # mount as root, and skip pool-permission checks (mirror daemons
        # operate with broad caps)
        fs.conf_set("client_mount_uid", "0")
        fs.conf_set("client_mount_gid", "0")
        fs.conf_set("client_check_pool_perm", "false")
        log.debug('CephFS initializing...')
        fs.init()
        log.debug('CephFS mounting...')
        fs.mount(filesystem_name=fs_name.encode('utf-8'))
        log.debug(f'Connection to cephfs {fs_name} complete')
        return (cluster, fs)
    except cephfs.Error as e:
        if e.errno == errno.ENOENT:
            raise MirrorException(-e.errno, f'filesystem {fs_name} does not exist')
        else:
            log.error(f'error connecting to filesystem {fs_name}: {e}')
            raise Exception(-e.errno)
def disconnect_from_filesystem(cluster_name, fs_name, cluster, fs_handle):
try:
log.debug(f'disconnecting from filesystem {fs_name}')
fs_handle.shutdown()
log.debug(f'disconnected from filesystem {fs_name}')
disconnect_from_cluster(cluster_name, cluster)
except Exception as e:
log.error(f'error disconnecting: {e}')
class _ThreadWrapper(threading.Thread):
    """Worker thread that drains a queue of (callback, args) items.

    Callbacks run with the lock dropped so queue() never blocks behind a
    long-running callback.  stop() synchronizes with thread exit.

    Fixes over the previous revision: deprecated ``Condition.notifyAll()``
    replaced by ``notify_all()`` and ``log.warn`` by ``log.warning``.
    """

    def __init__(self, name):
        self.q = []
        self.stopping = threading.Event()
        self.terminated = threading.Event()
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)
        super().__init__(name=name)
        # the thread starts as soon as the wrapper is constructed
        super().start()

    def run(self):
        try:
            with self.lock:
                while True:
                    self.cond.wait_for(lambda: self.q or self.stopping.is_set())
                    if self.stopping.is_set():
                        log.debug('thread exiting')
                        self.terminated.set()
                        self.cond.notify_all()
                        return
                    # snapshot and clear the queue under the lock ...
                    q = self.q.copy()
                    self.q.clear()
                    # ... then invoke callbacks with the lock released
                    self.lock.release()
                    try:
                        for item in q:
                            log.debug(f'calling {item[0]} params {item[1]}')
                            item[0](*item[1])
                    except Exception as e:
                        log.warning(f'callback exception: {e}')
                    self.lock.acquire()
        except Exception as e:
            log.info(f'threading exception: {e}')

    def queue(self, cbk, args):
        """Enqueue `cbk(*args)` for execution on the worker thread."""
        with self.lock:
            self.q.append((cbk, args))
            self.cond.notify_all()

    def stop(self):
        """Signal the worker to exit and wait until it has terminated."""
        with self.lock:
            self.stopping.set()
            self.cond.notify_all()
            self.cond.wait_for(lambda: self.terminated.is_set())
class Finisher:
    """Runs completion callbacks on a single dedicated worker thread."""
    def __init__(self):
        self.lock = threading.Lock()
        self.thread = _ThreadWrapper(name='finisher')

    def queue(self, cbk, args=()):
        """Schedule ``cbk(*args)`` on the finisher thread.

        The default for ``args`` is now an immutable tuple instead of the
        mutable ``[]`` default (shared-mutable-default pitfall); callers
        passing lists are unaffected since args is only unpacked.
        """
        with self.lock:
            self.thread.queue(cbk, args)
class AsyncOpTracker:
    """Counts in-flight async operations and lets callers wait for drain.

    Fixes: ``notifyAll`` (alias removed in Python 3.13) -> ``notify_all``;
    statement-style ``assert`` instead of ``assert(...)``.
    """
    def __init__(self):
        self.ops_in_progress = 0
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)

    def start_async_op(self):
        with self.lock:
            self.ops_in_progress += 1
            log.debug(f'start_async_op: {self.ops_in_progress}')

    def finish_async_op(self):
        with self.lock:
            self.ops_in_progress -= 1
            log.debug(f'finish_async_op: {self.ops_in_progress}')
            # a finish without a matching start is a programming error
            assert self.ops_in_progress >= 0
            self.cond.notify_all()

    def wait_for_ops(self):
        """Block until every started operation has finished."""
        with self.lock:
            log.debug(f'wait_for_ops: {self.ops_in_progress}')
            self.cond.wait_for(lambda: self.ops_in_progress == 0)
            log.debug('done')
| 5,418 | 34.418301 | 85 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/dir_map/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/pybind/mgr/mirroring/fs/dir_map/create.py | import errno
import logging
import rados
from ..exception import MirrorException
from ..utils import MIRROR_OBJECT_NAME
log = logging.getLogger(__name__)
def create_mirror_object(rados_inst, pool_id):
    """Create the (empty) mirror bookkeeping object in the given pool.

    :param rados_inst: connected rados handle
    :param pool_id: id of the pool that holds the mirror object
    :return: ``-EEXIST`` if the object already exists (not treated as a
             failure here), ``None`` on fresh creation
    :raises Exception: wrapping the negated rados error code otherwise
    """
    log.info(f'creating mirror object: {MIRROR_OBJECT_NAME}')
    try:
        with rados_inst.open_ioctx2(pool_id) as ioctx:
            with rados.WriteOpCtx() as write_op:
                # exclusive create: fail with EEXIST rather than clobber
                write_op.new(rados.LIBRADOS_CREATE_EXCLUSIVE)
                ioctx.operate_write_op(write_op, MIRROR_OBJECT_NAME)
    except rados.Error as e:
        if e.errno == errno.EEXIST:
            # be graceful
            return -e.errno
        log.error(f'failed to create mirror object: {e}')
        raise Exception(-e.args[0])
| 724 | 29.208333 | 68 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/dir_map/load.py | import errno
import pickle
import logging
from typing import Dict
import rados
from ..exception import MirrorException
from ..utils import MIRROR_OBJECT_NAME, DIRECTORY_MAP_PREFIX, \
INSTANCE_ID_PREFIX
log = logging.getLogger(__name__)
MAX_RETURN = 256
def handle_dir_load(dir_mapping, dir_map):
    """Decode one batch of directory-map omap entries into ``dir_mapping``.

    Keys are omap keys prefixed with DIRECTORY_MAP_PREFIX; values are
    pickled mapping dicts.
    """
    prefix_len = len(DIRECTORY_MAP_PREFIX)
    for omap_key, raw_value in dir_map.items():
        dir_path = omap_key[prefix_len:]
        decoded_map = pickle.loads(raw_value)
        log.debug(f'{dir_path} -> {decoded_map}')
        dir_mapping[dir_path] = decoded_map
def load_dir_map(ioctx):
    """Read all directory mappings from the mirror object's omap.

    Pages through omap keys with prefix DIRECTORY_MAP_PREFIX in batches
    of MAX_RETURN and returns ``{dir_path: mapping-dict}``.

    :raises Exception: negated errno on any rados failure
    """
    dir_mapping = {} # type: Dict[str, Dict]
    log.info('loading dir map...')
    try:
        with rados.ReadOpCtx() as read_op:
            start = ""
            while True:
                # NOTE: 'iter' shadows the builtin; kept as-is here.
                iter, ret = ioctx.get_omap_vals(read_op, start, DIRECTORY_MAP_PREFIX, MAX_RETURN)
                if not ret == 0:
                    log.error(f'failed to fetch dir mapping omap')
                    raise Exception(-errno.EINVAL)
                ioctx.operate_read_op(read_op, MIRROR_OBJECT_NAME)
                dir_map = dict(iter)
                if not dir_map:
                    break
                handle_dir_load(dir_mapping, dir_map)
                # resume the next page after this batch's last key --
                # relies on get_omap_vals yielding keys in key order so
                # popitem() on the insertion-ordered dict returns the
                # highest key (TODO confirm against librados docs)
                start = dir_map.popitem()[0]
        log.info("loaded {0} directory mapping(s) from disk".format(len(dir_mapping)))
        return dir_mapping
    except rados.Error as e:
        log.error(f'exception when loading directory mapping: {e}')
        raise Exception(-e.errno)
def handle_instance_load(instance_mapping, instance_map):
    """Decode one batch of instance omap entries into ``instance_mapping``.

    Keys are omap keys prefixed with INSTANCE_ID_PREFIX; values are
    pickled instance data dicts.
    """
    prefix_len = len(INSTANCE_ID_PREFIX)
    for omap_key, e_data in instance_map.items():
        instance_id = omap_key[prefix_len:]
        d_data = pickle.loads(e_data)
        log.debug(f'{instance_id} -> {d_data}')
        instance_mapping[instance_id] = d_data
def load_instances(ioctx):
    """Read all mirror-daemon instance records from the mirror object's omap.

    Pages through omap keys with prefix INSTANCE_ID_PREFIX in batches of
    MAX_RETURN and returns ``{instance_id: data-dict}``.

    :raises Exception: negated errno on any rados failure
    """
    instance_mapping = {} # type: Dict[str, Dict]
    log.info('loading instances...')
    try:
        with rados.ReadOpCtx() as read_op:
            start = ""
            while True:
                # NOTE: 'iter' shadows the builtin; kept as-is here.
                iter, ret = ioctx.get_omap_vals(read_op, start, INSTANCE_ID_PREFIX, MAX_RETURN)
                if not ret == 0:
                    log.error(f'failed to fetch instance omap')
                    raise Exception(-errno.EINVAL)
                ioctx.operate_read_op(read_op, MIRROR_OBJECT_NAME)
                instance_map = dict(iter)
                if not instance_map:
                    break
                handle_instance_load(instance_mapping, instance_map)
                # resume the next page after this batch's last key (see
                # the matching note in load_dir_map)
                start = instance_map.popitem()[0]
        log.info("loaded {0} instance(s) from disk".format(len(instance_mapping)))
        return instance_mapping
    except rados.Error as e:
        log.error(f'exception when loading instances: {e}')
        raise Exception(-e.errno)
| 2,809 | 36.466667 | 97 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/dir_map/policy.py | import os
import errno
import logging
import time
from threading import Lock
from typing import Dict
from .state_transition import ActionType, PolicyAction, Transition, \
State, StateTransition
from ..exception import MirrorException
log = logging.getLogger(__name__)
class DirectoryState:
    """Per-directory bookkeeping for the mirror mapping state machine."""
    def __init__(self, instance_id=None, mapped_time=None):
        # daemon instance this directory is mapped to (None when unmapped)
        self.instance_id = instance_id
        # when the mapping was last (re)assigned
        self.mapped_time = mapped_time
        # state-machine bookkeeping (see state_transition.py)
        self.state = State.UNASSOCIATED
        self.transition = Transition(ActionType.NONE)
        self.next_state = None
        # set when no daemon is available to make progress
        self.stalled = False
        # set when the directory is being removed from mirroring
        self.purging = False

    def __str__(self):
        return (f'[instance_id={self.instance_id}, mapped_time={self.mapped_time},'
                f' state={self.state}, transition={self.transition}, next_state={self.next_state},'
                f' purging={self.purging}]')
class Policy:
    """Assigns mirrored directories to cephfs-mirror daemon instances.

    Maintains one DirectoryState per directory (driven by the
    StateTransition machine) plus a reverse map instance_id -> [dir_path].
    All public entry points serialize on ``self.lock``.

    Bug fix: ``can_shuffle_dir`` subscripted a DirectoryState object
    (``dir_state['mapped_time']``), which raised TypeError; it now reads
    the ``mapped_time`` attribute.
    """

    # number of seconds after which a directory can be reshuffled
    # to other mirror daemon instances.
    DIR_SHUFFLE_THROTTLE_INTERVAL = 300

    def __init__(self):
        self.dir_states = {}
        self.instance_to_dir_map = {}
        self.dead_instances = []
        self.lock = Lock()

    @staticmethod
    def is_instance_action(action_type):
        # actions that interact with a daemon instance (vs pure map updates)
        return action_type in (ActionType.ACQUIRE,
                               ActionType.RELEASE)

    def is_dead_instance(self, instance_id):
        return instance_id in self.dead_instances

    def is_state_scheduled(self, dir_state, state):
        # in the state now, or queued to enter it next
        return dir_state.state == state or dir_state.next_state == state

    def is_shuffling(self, dir_path):
        log.debug(f'is_shuffling: {dir_path}')
        return self.is_state_scheduled(self.dir_states[dir_path], State.SHUFFLING)

    def can_shuffle_dir(self, dir_path):
        """Right now, shuffle directories only based on idleness. Later, we
        probably want to avoid shuffling images that were recently shuffled.
        """
        log.debug(f'can_shuffle_dir: {dir_path}')
        dir_state = self.dir_states[dir_path]
        # fixed: dir_state is an object, not a dict -- use attribute access
        return StateTransition.is_idle(dir_state.state) and \
            (time.time() - dir_state.mapped_time) > Policy.DIR_SHUFFLE_THROTTLE_INTERVAL

    def set_state(self, dir_state, state, ignore_current_state=False):
        """Drive dir_state towards ``state``; if a transition is in flight,
        queue it as next_state.  Returns True if the machine advanced now."""
        if not ignore_current_state and dir_state.state == state:
            return False
        elif StateTransition.is_idle(dir_state.state):
            dir_state.state = state
            dir_state.next_state = None
            dir_state.transition = StateTransition.transit(
                dir_state.state, dir_state.transition.action_type)
            return True
        dir_state.next_state = state
        return False

    def init(self, dir_mapping):
        """Seed policy state from the on-disk directory mapping."""
        with self.lock:
            for dir_path, dir_map in dir_mapping.items():
                instance_id = dir_map['instance_id']
                if instance_id:
                    if not instance_id in self.instance_to_dir_map:
                        self.instance_to_dir_map[instance_id] = []
                    self.instance_to_dir_map[instance_id].append(dir_path)
                self.dir_states[dir_path] = DirectoryState(instance_id, dir_map['last_shuffled'])
                dir_state = self.dir_states[dir_path]
                state = State.INITIALIZING if instance_id else State.ASSOCIATING
                purging = dir_map.get('purging', 0)
                if purging:
                    dir_state.purging = True
                    state = State.DISASSOCIATING
                    if not instance_id:
                        dir_state.transition = StateTransition.transit(state,
                                                                       dir_state.transition.action_type)
                log.debug(f'starting state: {dir_path} {state}: {dir_state}')
                self.set_state(dir_state, state)
                log.debug(f'init dir_state: {dir_state}')

    def lookup(self, dir_path):
        """Return {instance_id, mapped_time, purging} for a tracked path,
        or None if the path is not tracked."""
        log.debug(f'looking up {dir_path}')
        with self.lock:
            dir_state = self.dir_states.get(dir_path, None)
            if dir_state:
                return {'instance_id': dir_state.instance_id,
                        'mapped_time': dir_state.mapped_time,
                        'purging': dir_state.purging}
            return None

    def map(self, dir_path, dir_state):
        """Map dir_path to the least-loaded live instance.  Returns False
        when no live instance is available (caller stalls the machine)."""
        log.debug(f'mapping {dir_path}')
        min_instance_id = None
        current_instance_id = dir_state.instance_id
        if current_instance_id and not self.is_dead_instance(current_instance_id):
            return True
        if self.is_dead_instance(current_instance_id):
            self.unmap(dir_path, dir_state)
        for instance_id, dir_paths in self.instance_to_dir_map.items():
            if self.is_dead_instance(instance_id):
                continue
            if not min_instance_id or len(dir_paths) < len(self.instance_to_dir_map[min_instance_id]):
                min_instance_id = instance_id
        if not min_instance_id:
            log.debug(f'instance unavailable for {dir_path}')
            return False
        log.debug(f'dir_path {dir_path} maps to instance {min_instance_id}')
        dir_state.instance_id = min_instance_id
        dir_state.mapped_time = time.time()
        self.instance_to_dir_map[min_instance_id].append(dir_path)
        return True

    def unmap(self, dir_path, dir_state):
        """Drop the dir->instance mapping; garbage-collect a dead instance
        once its last directory is unmapped."""
        instance_id = dir_state.instance_id
        log.debug(f'unmapping {dir_path} from instance {instance_id}')
        self.instance_to_dir_map[instance_id].remove(dir_path)
        dir_state.instance_id = None
        dir_state.mapped_time = None
        if self.is_dead_instance(instance_id) and not self.instance_to_dir_map[instance_id]:
            self.instance_to_dir_map.pop(instance_id)
            self.dead_instances.remove(instance_id)

    def shuffle(self, dirs_per_instance, include_stalled_dirs):
        """Pick directories to move off overloaded instances (and any
        stalled directories when requested)."""
        log.debug(f'directories per instance: {dirs_per_instance}')
        shuffle_dirs = []
        for instance_id, dir_paths in self.instance_to_dir_map.items():
            cut_off = len(dir_paths) - dirs_per_instance
            if cut_off > 0:
                for dir_path in dir_paths:
                    if cut_off == 0:
                        break
                    if self.is_shuffling(dir_path):
                        cut_off -= 1
                    elif self.can_shuffle_dir(dir_path):
                        cut_off -= 1
                        shuffle_dirs.append(dir_path)
        if include_stalled_dirs:
            for dir_path, dir_state in self.dir_states.items():
                if dir_state.stalled:
                    log.debug(f'{dir_path} is stalled: {dir_state} -- triggering kick')
                    dir_state.stalled = False
                    shuffle_dirs.append(dir_path)
        return shuffle_dirs

    def execute_policy_action(self, dir_path, dir_state, policy_action):
        """Apply a PolicyAction (MAP/UNMAP/REMOVE) for dir_path.  Returns
        False only when a MAP could not find a live instance."""
        log.debug(f'executing for directory {dir_path} policy_action {policy_action}')
        done = True
        if policy_action == PolicyAction.MAP:
            done = self.map(dir_path, dir_state)
        elif policy_action == PolicyAction.UNMAP:
            self.unmap(dir_path, dir_state)
        elif policy_action == PolicyAction.REMOVE:
            if dir_state.state == State.UNASSOCIATED:
                self.dir_states.pop(dir_path)
            else:
                raise Exception()
        return done

    def start_action(self, dir_path):
        """Begin the pending transition for dir_path and return the
        ActionType the caller should execute."""
        log.debug(f'start action: {dir_path}')
        with self.lock:
            dir_state = self.dir_states.get(dir_path, None)
            if not dir_state:
                raise Exception()
            log.debug(f'dir_state: {dir_state}')
            if dir_state.transition.start_policy_action:
                stalled = not self.execute_policy_action(dir_path, dir_state,
                                                         dir_state.transition.start_policy_action)
                if stalled:
                    next_action = ActionType.NONE
                    if dir_state.purging:
                        # removal can proceed even without an instance
                        dir_state.next_state = None
                        dir_state.state = State.UNASSOCIATED
                        dir_state.transition = StateTransition.transit(State.DISASSOCIATING, ActionType.NONE)
                        self.set_state(dir_state, State.DISASSOCIATING)
                        next_action = dir_state.transition.action_type
                    else:
                        dir_state.stalled = True
                        log.debug(f'state machine stalled')
                    return next_action
            return dir_state.transition.action_type

    def finish_action(self, dir_path, r):
        """Complete the in-flight action for dir_path (result ``r``) and
        advance the machine.  Returns True if another action is pending."""
        log.debug(f'finish action {dir_path} r={r}')
        with self.lock:
            dir_state = self.dir_states.get(dir_path, None)
            if not dir_state:
                raise Exception()
            # failures are retried unless an instance action failed because
            # the target instance is known dead
            if r < 0 and (not Policy.is_instance_action(dir_state.transition.action_type) or
                          not dir_state.instance_id or
                          not dir_state.instance_id in self.dead_instances):
                return True
            log.debug(f'dir_state: {dir_state}')
            finish_policy_action = dir_state.transition.finish_policy_action
            dir_state.transition = StateTransition.transit(
                dir_state.state, dir_state.transition.action_type)
            log.debug(f'transitioned to dir_state: {dir_state}')
            if dir_state.transition.final_state:
                log.debug('reached final state')
                dir_state.state = dir_state.transition.final_state
                dir_state.transition = Transition(ActionType.NONE)
                log.debug(f'final dir_state: {dir_state}')
            if StateTransition.is_idle(dir_state.state) and dir_state.next_state:
                self.set_state(dir_state, dir_state.next_state)
            pending = not dir_state.transition.action_type == ActionType.NONE
            if finish_policy_action:
                self.execute_policy_action(dir_path, dir_state, finish_policy_action)
            return pending

    def find_tracked_ancestor_or_subtree(self, dir_path):
        """Return (tracked_path, 'ancestor'|'subtree') if dir_path overlaps
        an already-tracked path, else None."""
        for tracked_path, _ in self.dir_states.items():
            comp = [dir_path, tracked_path]
            cpath = os.path.commonpath(comp)
            if cpath in comp:
                what = 'subtree' if cpath == tracked_path else 'ancestor'
                return (tracked_path, what)
        return None

    def add_dir(self, dir_path):
        """Start tracking dir_path.  Returns True if the state machine was
        kicked; raises MirrorException on overlap with a tracked path."""
        log.debug(f'adding dir_path {dir_path}')
        with self.lock:
            if dir_path in self.dir_states:
                return False
            as_info = self.find_tracked_ancestor_or_subtree(dir_path)
            if as_info:
                raise MirrorException(-errno.EINVAL, f'{dir_path} is a {as_info[1]} of tracked path {as_info[0]}')
            self.dir_states[dir_path] = DirectoryState()
            dir_state = self.dir_states[dir_path]
            log.debug(f'add dir_state: {dir_state}')
            if dir_state.state == State.INITIALIZING:
                return False
            return self.set_state(dir_state, State.ASSOCIATING)

    def remove_dir(self, dir_path):
        """Stop tracking dir_path.  Returns True if the state machine was
        kicked towards DISASSOCIATING."""
        log.debug(f'removing dir_path {dir_path}')
        with self.lock:
            dir_state = self.dir_states.get(dir_path, None)
            if not dir_state:
                return False
            log.debug(f'removing dir_state: {dir_state}')
            dir_state.purging = True
            # advance the state machine with DISASSOCIATING state for removal
            if dir_state.stalled:
                dir_state.state = State.UNASSOCIATED
                dir_state.transition = StateTransition.transit(State.DISASSOCIATING, ActionType.NONE)
            r = self.set_state(dir_state, State.DISASSOCIATING)
            log.debug(f'dir_state: {dir_state}')
            return r

    def add_instances_initial(self, instance_ids):
        """Take care of figuring out instances which no longer exist
        and remove them. This is to be done only once on startup to
        identify instances which were previously removed but directories
        are still mapped (on-disk) to them.
        """
        for instance_id in instance_ids:
            if not instance_id in self.instance_to_dir_map:
                self.instance_to_dir_map[instance_id] = []
        dead_instances = []
        for instance_id, _ in self.instance_to_dir_map.items():
            if not instance_id in instance_ids:
                dead_instances.append(instance_id)
        if dead_instances:
            self._remove_instances(dead_instances)

    def add_instances(self, instance_ids, initial_update=False):
        """Register daemon instances and rebalance directories.  Returns
        the directories that should be (re)shuffled now."""
        log.debug(f'adding instances: {instance_ids} initial_update {initial_update}')
        with self.lock:
            if initial_update:
                self.add_instances_initial(instance_ids)
            else:
                nr_instances = len(self.instance_to_dir_map)
                nr_dead_instances = len(self.dead_instances)
                if nr_instances > 0:
                    # adjust dead instances
                    nr_instances -= nr_dead_instances
                # first live instance coming up: also kick stalled dirs
                include_stalled_dirs = nr_instances == 0
                for instance_id in instance_ids:
                    if not instance_id in self.instance_to_dir_map:
                        self.instance_to_dir_map[instance_id] = []
                dirs_per_instance = int(len(self.dir_states) /
                                        (len(self.instance_to_dir_map) - nr_dead_instances))
                if dirs_per_instance == 0:
                    dirs_per_instance += 1
                shuffle_dirs = []
                # super set of directories which are candidates for shuffling -- choose
                # those which can be shuffle rightaway (others will be shuffled when
                # they reach idle state).
                shuffle_dirs_ss = self.shuffle(dirs_per_instance, include_stalled_dirs)
                if include_stalled_dirs:
                    return shuffle_dirs_ss
                for dir_path in shuffle_dirs_ss:
                    dir_state = self.dir_states[dir_path]
                    if self.set_state(dir_state, State.SHUFFLING):
                        shuffle_dirs.append(dir_path)
                log.debug(f'remapping directories: {shuffle_dirs}')
                return shuffle_dirs

    def remove_instances(self, instance_ids):
        with self.lock:
            return self._remove_instances(instance_ids)

    def _remove_instances(self, instance_ids):
        """Mark instances dead and force-shuffle their directories.
        Caller must hold ``self.lock``."""
        log.debug(f'removing instances: {instance_ids}')
        shuffle_dirs = []
        for instance_id in instance_ids:
            if not instance_id in self.instance_to_dir_map:
                continue
            if not self.instance_to_dir_map[instance_id]:
                self.instance_to_dir_map.pop(instance_id)
                continue
            self.dead_instances.append(instance_id)
            dir_paths = self.instance_to_dir_map[instance_id]
            log.debug(f'force shuffling instance_id {instance_id}, directories {dir_paths}')
            for dir_path in dir_paths:
                dir_state = self.dir_states[dir_path]
                if self.is_state_scheduled(dir_state, State.DISASSOCIATING):
                    log.debug(f'dir_path {dir_path} is disassociating, ignoring...')
                    continue
                log.debug(f'shuffling dir_path {dir_path}')
                if self.set_state(dir_state, State.SHUFFLING, True):
                    shuffle_dirs.append(dir_path)
        log.debug(f'shuffling {shuffle_dirs}')
        return shuffle_dirs

    def dir_status(self, dir_path):
        """Return a human-readable status dict for dir_path.

        :raises MirrorException: ENOENT if the path is not tracked
        """
        with self.lock:
            dir_state = self.dir_states.get(dir_path, None)
            if not dir_state:
                raise MirrorException(-errno.ENOENT, f'{dir_path} is not tracked')
            res = {} # type: Dict
            if dir_state.stalled:
                res['state'] = 'stalled'
                res['reason'] = 'no mirror daemons running'
            elif dir_state.state == State.ASSOCIATING:
                res['state'] = 'mapping'
            else:
                state = None
                dstate = dir_state.state
                if dstate == State.ASSOCIATING:
                    state = 'mapping'
                elif dstate == State.DISASSOCIATING:
                    state = 'unmapping'
                elif dstate == State.SHUFFLING:
                    state = 'shuffling'
                elif dstate == State.ASSOCIATED:
                    state = 'mapped'
                elif dstate == State.INITIALIZING:
                    state = 'resolving'
                res['state'] = state
                res['instance_id'] = dir_state.instance_id
                res['last_shuffled'] = dir_state.mapped_time
            return res

    def instance_summary(self):
        """Return a per-instance summary of how many directories each
        daemon currently serves."""
        with self.lock:
            res = {
                'mapping': {}
            } # type: Dict
            for instance_id, dir_paths in self.instance_to_dir_map.items():
                res['mapping'][instance_id] = f'{len(dir_paths)} directories'
            return res
| 17,256 | 44.293963 | 114 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/dir_map/state_transition.py | import logging
from enum import Enum, unique
from typing import Dict
log = logging.getLogger(__name__)
@unique
class State(Enum):
    """Lifecycle states for a mirrored directory's daemon assignment.

    Values are explicit (not auto()) since they are compared/stored by
    the policy layer; do not renumber.
    """
    UNASSOCIATED = 0    # not mapped to any mirror daemon
    INITIALIZING = 1    # loaded from disk with a mapping; reacquiring
    ASSOCIATING = 2     # being mapped to a daemon
    ASSOCIATED = 3      # mapped; steady state
    SHUFFLING = 4       # moving between daemons
    DISASSOCIATING = 5  # being unmapped (removal or rebalance)
@unique
class ActionType(Enum):
    """Asynchronous actions the mirror module runs between states."""
    NONE = 0        # no action pending (also the entry edge of a state)
    MAP_UPDATE = 1  # persist the dir->instance mapping
    MAP_REMOVE = 2  # remove the persisted mapping
    ACQUIRE = 3     # tell the daemon instance to take over the directory
    RELEASE = 4     # tell the daemon instance to give up the directory
@unique
class PolicyAction(Enum):
    """In-memory policy bookkeeping run at transition start/finish."""
    MAP = 0     # assign the directory to an instance
    UNMAP = 1   # drop the directory's instance assignment
    REMOVE = 2  # forget the directory entirely
class TransitionKey:
    """Hashable (state, action_type) pair indexing the transition table."""
    def __init__(self, state, action_type):
        self.transition_key = [state, action_type]

    def __hash__(self):
        return hash(tuple(self.transition_key))

    def __eq__(self, other):
        return self.transition_key == other.transition_key

    def __ne__(self, other):
        # bug fix: this was previously misspelled ``__neq__``, which Python
        # never invokes -- ``!=`` silently fell back to the default
        # inversion of __eq__ and the method was dead code.
        return not (self == other)
class Transition:
    """One edge of the state machine.

    :param action_type: the ActionType to execute next
    :param start_policy_action: PolicyAction to run before the action starts
    :param finish_policy_action: PolicyAction to run after the action finishes
    :param final_state: terminal State reached once the action completes
    """
    def __init__(self, action_type, start_policy_action=None,
                 finish_policy_action=None, final_state=None):
        self.action_type = action_type
        self.start_policy_action = start_policy_action
        self.finish_policy_action = finish_policy_action
        self.final_state = final_state

    def __str__(self):
        # fixed: the closing ']' was missing from this debug string
        return "[action_type={0}, start_policy_action={1}, finish_policy_action={2}, final_state={3}]".format(
            self.action_type, self.start_policy_action, self.finish_policy_action, self.final_state)
class StateTransition:
    """Static lookup into the directory state machine.

    ``transition_table`` is intentionally empty here; it is populated at
    module import time immediately after this class definition (it needs
    the class name to exist first).
    """
    transition_table = {} # type: Dict[TransitionKey, Transition]

    @staticmethod
    def transit(state, action_type):
        """Return the Transition for (state, action_type).

        :raises Exception: when the pair is not a valid edge of the machine
        """
        try:
            return StateTransition.transition_table[TransitionKey(state, action_type)]
        except KeyError:
            raise Exception()

    @staticmethod
    def is_idle(state):
        """True for states with no in-flight action."""
        return state in (State.UNASSOCIATED, State.ASSOCIATED)
# Populate the state machine.  Keys are (current state, action that just
# completed); ActionType.NONE is the entry edge into a state.
StateTransition.transition_table = {
    # INITIALIZING: mapping loaded from disk -- just reacquire the dir.
    TransitionKey(State.INITIALIZING, ActionType.NONE) : Transition(ActionType.ACQUIRE),
    TransitionKey(State.INITIALIZING, ActionType.ACQUIRE) : Transition(ActionType.NONE,
                                                                       final_state=State.ASSOCIATED),
    # ASSOCIATING: map -> persist -> acquire.
    TransitionKey(State.ASSOCIATING, ActionType.NONE) : Transition(ActionType.MAP_UPDATE,
                                                                   start_policy_action=PolicyAction.MAP),
    TransitionKey(State.ASSOCIATING, ActionType.MAP_UPDATE) : Transition(ActionType.ACQUIRE),
    TransitionKey(State.ASSOCIATING, ActionType.ACQUIRE) : Transition(ActionType.NONE,
                                                                      final_state=State.ASSOCIATED),
    # DISASSOCIATING: release -> remove persisted mapping -> forget.
    TransitionKey(State.DISASSOCIATING, ActionType.NONE) : Transition(ActionType.RELEASE,
                                                                      finish_policy_action=PolicyAction.UNMAP),
    TransitionKey(State.DISASSOCIATING, ActionType.RELEASE) : Transition(ActionType.MAP_REMOVE,
                                                                         finish_policy_action=PolicyAction.REMOVE),
    TransitionKey(State.DISASSOCIATING, ActionType.MAP_REMOVE) : Transition(ActionType.NONE,
                                                                            final_state=State.UNASSOCIATED),
    # SHUFFLING: release from old instance, then remap and acquire.
    TransitionKey(State.SHUFFLING, ActionType.NONE) : Transition(ActionType.RELEASE,
                                                                 finish_policy_action=PolicyAction.UNMAP),
    TransitionKey(State.SHUFFLING, ActionType.RELEASE) : Transition(ActionType.MAP_UPDATE,
                                                                    start_policy_action=PolicyAction.MAP),
    TransitionKey(State.SHUFFLING, ActionType.MAP_UPDATE) : Transition(ActionType.ACQUIRE),
    TransitionKey(State.SHUFFLING, ActionType.ACQUIRE) : Transition(ActionType.NONE,
                                                                    final_state=State.ASSOCIATED),
    }
| 3,884 | 39.894737 | 115 | py |
null | ceph-main/src/pybind/mgr/mirroring/fs/dir_map/update.py | import errno
import pickle
import logging
import rados
from ..utils import MIRROR_OBJECT_NAME, DIRECTORY_MAP_PREFIX, \
INSTANCE_ID_PREFIX, MIRROR_OBJECT_PREFIX
log = logging.getLogger(__name__)
MAX_UPDATE = 256
class UpdateDirMapRequest:
    """Asynchronously persist directory-map updates/removals to the mirror
    object's omap, in batches of MAX_UPDATE, then invoke a callback.

    The request is self-driving: send() issues the first batch and each
    aio completion (handle_update) issues the next until both the update
    dict and the removal list are drained.
    """
    def __init__(self, ioctx, update_mapping, removals, on_finish_callback):
        self.ioctx = ioctx
        # {dir_path: mapping-dict} to (re)write
        self.update_mapping = update_mapping
        # [dir_path, ...] to delete
        self.removals = removals
        # called once with the final return code (0 on success)
        self.on_finish_callback = on_finish_callback

    @staticmethod
    def omap_key(dir_path):
        """omap key under which a directory's mapping is stored."""
        return f'{DIRECTORY_MAP_PREFIX}{dir_path}'

    def send(self):
        """Kick off the (possibly multi-batch) update."""
        log.info('updating image map')
        self.send_update()

    def send_update(self):
        """Issue one batch of at most MAX_UPDATE combined sets+removes."""
        log.debug(f'pending updates: {len(self.update_mapping)}+{len(self.removals)}')
        try:
            with rados.WriteOpCtx() as write_op:
                keys = []
                vals = []
                dir_keys = list(self.update_mapping.keys())[0:MAX_UPDATE]
                # gather updates
                for dir_path in dir_keys:
                    mapping = self.update_mapping.pop(dir_path)
                    keys.append(UpdateDirMapRequest.omap_key(dir_path))
                    vals.append(pickle.dumps(mapping))
                self.ioctx.set_omap(write_op, tuple(keys), tuple(vals))
                # gather deletes -- fill the rest of the batch budget
                slicept = MAX_UPDATE - len(dir_keys)
                removals = [UpdateDirMapRequest.omap_key(dir_path) for dir_path in self.removals[0:slicept]]
                self.removals = self.removals[slicept:]
                self.ioctx.remove_omap_keys(write_op, tuple(removals))
                log.debug(f'applying {len(keys)} updates, {len(removals)} deletes')
                self.ioctx.operate_aio_write_op(write_op, MIRROR_OBJECT_NAME, oncomplete=self.handle_update)
        except rados.Error as e:
            log.error(f'UpdateDirMapRequest.send_update exception: {e}')
            self.finish(-e.args[0])

    def handle_update(self, completion):
        """aio completion: send the next batch, or finish."""
        r = completion.get_return_value()
        log.debug(f'handle_update: r={r}')
        if not r == 0:
            self.finish(r)
        elif self.update_mapping or self.removals:
            self.send_update()
        else:
            self.finish(0)

    def finish(self, r):
        """Deliver the final result to the caller's callback."""
        log.info(f'finish: r={r}')
        self.on_finish_callback(r)
class UpdateInstanceRequest:
    """Asynchronously persist mirror-daemon instance additions/removals.

    Two phases, each self-driving through aio completions:
      1. purge the per-instance on-disk objects of removed instances
         (remove_instance_object), then
      2. batch-update the instance omap on the mirror object
         (update_instance_map), MAX_UPDATE entries per batch.
    """
    def __init__(self, ioctx, instances_added, instances_removed, on_finish_callback):
        self.ioctx = ioctx
        # {instance_id: data-dict} to (re)write
        self.instances_added = instances_added
        # purge vs remove: purge list is for purging on-disk instance
        # object. remove is for purging instance map.
        self.instances_removed = instances_removed.copy()
        self.instances_purge = instances_removed.copy()
        # called once with the final return code (0 on success)
        self.on_finish_callback = on_finish_callback

    @staticmethod
    def omap_key(instance_id):
        """omap key under which an instance's record is stored."""
        return f'{INSTANCE_ID_PREFIX}{instance_id}'

    @staticmethod
    def cephfs_mirror_object_name(instance_id):
        """Name of the per-instance on-disk object."""
        assert instance_id != ''
        return f'{MIRROR_OBJECT_PREFIX}.{instance_id}'

    def send(self):
        """Kick off the purge+update sequence."""
        log.info('updating instances')
        self.send_update()

    def send_update(self):
        self.remove_instance_object()

    def remove_instance_object(self):
        """Purge one instance object per call; fall through to the omap
        update once the purge list is drained."""
        log.debug(f'pending purges: {len(self.instances_purge)}')
        if not self.instances_purge:
            self.update_instance_map()
            return
        instance_id = self.instances_purge.pop()
        self.ioctx.aio_remove(
            UpdateInstanceRequest.cephfs_mirror_object_name(instance_id), oncomplete=self.handle_remove)

    def handle_remove(self, completion):
        r = completion.get_return_value()
        log.debug(f'handle_remove: r={r}')
        # cephfs-mirror instances remove their respective instance
        # objects upon termination. so we handle ENOENT here. note
        # that when an instance is blocklisted, it wont be able to
        # purge its instance object, so we do it on its behalf.
        if not r == 0 and not r == -errno.ENOENT:
            self.finish(r)
            return
        self.remove_instance_object()

    def update_instance_map(self):
        """Issue one batch of at most MAX_UPDATE omap sets+removes."""
        log.debug(f'pending updates: {len(self.instances_added)}+{len(self.instances_removed)}')
        try:
            with rados.WriteOpCtx() as write_op:
                keys = []
                vals = []
                instance_ids = list(self.instances_added.keys())[0:MAX_UPDATE]
                # gather updates
                for instance_id in instance_ids:
                    data = self.instances_added.pop(instance_id)
                    keys.append(UpdateInstanceRequest.omap_key(instance_id))
                    vals.append(pickle.dumps(data))
                self.ioctx.set_omap(write_op, tuple(keys), tuple(vals))
                # gather deletes -- fill the rest of the batch budget
                slicept = MAX_UPDATE - len(instance_ids)
                removals = [UpdateInstanceRequest.omap_key(instance_id) \
                            for instance_id in self.instances_removed[0:slicept]]
                self.instances_removed = self.instances_removed[slicept:]
                self.ioctx.remove_omap_keys(write_op, tuple(removals))
                log.debug(f'applying {len(keys)} updates, {len(removals)} deletes')
                self.ioctx.operate_aio_write_op(write_op, MIRROR_OBJECT_NAME, oncomplete=self.handle_update)
        except rados.Error as e:
            log.error(f'UpdateInstanceRequest.update_instance_map exception: {e}')
            self.finish(-e.args[0])

    def handle_update(self, completion):
        """aio completion: send the next batch, or finish."""
        r = completion.get_return_value()
        log.debug(f'handle_update: r={r}')
        if not r == 0:
            self.finish(r)
        elif self.instances_added or self.instances_removed:
            self.update_instance_map()
        else:
            self.finish(0)

    def finish(self, r):
        """Deliver the final result to the caller's callback."""
        log.info(f'finish: r={r}')
        self.on_finish_callback(r)
| 5,996 | 38.453947 | 108 | py |
null | ceph-main/src/pybind/mgr/nfs/__init__.py | # flake8: noqa
import os
if 'UNITTEST' in os.environ:
import tests
from .module import Module
| 100 | 11.625 | 28 | py |
null | ceph-main/src/pybind/mgr/nfs/cluster.py | import ipaddress
import logging
import re
import socket
from typing import cast, Dict, List, Any, Union, Optional, TYPE_CHECKING
from mgr_module import NFS_POOL_NAME as POOL_NAME
from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec, IngressSpec
from object_format import ErrorResponse
import orchestrator
from orchestrator.module import IngressType
from .exception import NFSInvalidOperation, ClusterNotFound
from .utils import (
ManualRestartRequired,
NonFatalError,
available_clusters,
conf_obj_name,
restart_nfs_service,
user_conf_obj_name)
from .export import NFSRados
if TYPE_CHECKING:
from nfs.module import Module
from mgr_module import MgrModule
log = logging.getLogger(__name__)
def resolve_ip(hostname: str) -> str:
    """Resolve *hostname* to an IP address string, preferring IPv4.

    Falls back to the first resolved address (possibly IPv6) when no
    IPv4 entry is returned.

    :raises NFSInvalidOperation: if the name cannot be resolved
    """
    try:
        addrinfo = socket.getaddrinfo(hostname, None, flags=socket.AI_CANONNAME,
                                      type=socket.SOCK_STREAM)
    except socket.gaierror as e:
        raise NFSInvalidOperation(f"Cannot resolve IP for host {hostname}: {e}")
    # pick first v4 IP, if present
    for family, _type, _proto, _canonname, sockaddr in addrinfo:
        if family == socket.AF_INET:
            return sockaddr[0]
    return addrinfo[0][4][0]
def create_ganesha_pool(mgr: 'MgrModule') -> None:
    """Ensure the shared NFS-Ganesha pool (POOL_NAME) exists.

    Creates the pool and enables the 'nfs' application on it if it is
    missing; a no-op when the pool is already present.
    """
    existing_pools = {p['pool_name'] for p in mgr.get_osdmap().dump().get('pools', [])}
    if POOL_NAME in existing_pools:
        return
    mgr.check_mon_command({'prefix': 'osd pool create',
                           'pool': POOL_NAME,
                           'yes_i_really_mean_it': True})
    mgr.check_mon_command({'prefix': 'osd pool application enable',
                           'pool': POOL_NAME,
                           'app': 'nfs'})
    log.debug("Successfully created nfs-ganesha pool %s", POOL_NAME)
class NFSCluster:
def __init__(self, mgr: 'Module') -> None:
self.mgr = mgr
def _call_orch_apply_nfs(
self,
cluster_id: str,
placement: Optional[str] = None,
virtual_ip: Optional[str] = None,
ingress_mode: Optional[IngressType] = None,
port: Optional[int] = None,
) -> None:
if not port:
port = 2049 # default nfs port
if virtual_ip:
# nfs + ingress
# run NFS on non-standard port
if not ingress_mode:
ingress_mode = IngressType.default
ingress_mode = ingress_mode.canonicalize()
pspec = PlacementSpec.from_string(placement)
if ingress_mode == IngressType.keepalive_only:
# enforce count=1 for nfs over keepalive only
pspec.count = 1
ganesha_port = 10000 + port # semi-arbitrary, fix me someday
frontend_port: Optional[int] = port
virtual_ip_for_ganesha: Optional[str] = None
keepalive_only: bool = False
enable_haproxy_protocol: bool = False
if ingress_mode == IngressType.haproxy_protocol:
enable_haproxy_protocol = True
elif ingress_mode == IngressType.keepalive_only:
keepalive_only = True
virtual_ip_for_ganesha = virtual_ip.split('/')[0]
ganesha_port = port
frontend_port = None
spec = NFSServiceSpec(service_type='nfs', service_id=cluster_id,
placement=pspec,
# use non-default port so we don't conflict with ingress
port=ganesha_port,
virtual_ip=virtual_ip_for_ganesha,
enable_haproxy_protocol=enable_haproxy_protocol)
completion = self.mgr.apply_nfs(spec)
orchestrator.raise_if_exception(completion)
ispec = IngressSpec(service_type='ingress',
service_id='nfs.' + cluster_id,
backend_service='nfs.' + cluster_id,
placement=pspec,
frontend_port=frontend_port,
monitor_port=7000 + port, # semi-arbitrary, fix me someday
virtual_ip=virtual_ip,
keepalive_only=keepalive_only,
enable_haproxy_protocol=enable_haproxy_protocol)
completion = self.mgr.apply_ingress(ispec)
orchestrator.raise_if_exception(completion)
else:
# standalone nfs
spec = NFSServiceSpec(service_type='nfs', service_id=cluster_id,
placement=PlacementSpec.from_string(placement),
port=port)
completion = self.mgr.apply_nfs(spec)
orchestrator.raise_if_exception(completion)
log.debug("Successfully deployed nfs daemons with cluster id %s and placement %s",
cluster_id, placement)
def create_empty_rados_obj(self, cluster_id: str) -> None:
common_conf = conf_obj_name(cluster_id)
self._rados(cluster_id).write_obj('', conf_obj_name(cluster_id))
log.info("Created empty object:%s", common_conf)
def delete_config_obj(self, cluster_id: str) -> None:
self._rados(cluster_id).remove_all_obj()
log.info("Deleted %s object and all objects in %s",
conf_obj_name(cluster_id), cluster_id)
def create_nfs_cluster(
self,
cluster_id: str,
placement: Optional[str],
virtual_ip: Optional[str],
ingress: Optional[bool] = None,
ingress_mode: Optional[IngressType] = None,
port: Optional[int] = None,
) -> None:
try:
if virtual_ip:
# validate virtual_ip value: ip_address throws a ValueError
# exception in case it's not a valid ipv4 or ipv6 address
ip = virtual_ip.split('/')[0]
ipaddress.ip_address(ip)
if virtual_ip and not ingress:
raise NFSInvalidOperation('virtual_ip can only be provided with ingress enabled')
if not virtual_ip and ingress:
raise NFSInvalidOperation('ingress currently requires a virtual_ip')
if ingress_mode and not ingress:
raise NFSInvalidOperation('--ingress-mode must be passed along with --ingress')
invalid_str = re.search('[^A-Za-z0-9-_.]', cluster_id)
if invalid_str:
raise NFSInvalidOperation(f"cluster id {cluster_id} is invalid. "
f"{invalid_str.group()} is char not permitted")
create_ganesha_pool(self.mgr)
self.create_empty_rados_obj(cluster_id)
if cluster_id not in available_clusters(self.mgr):
self._call_orch_apply_nfs(cluster_id, placement, virtual_ip, ingress_mode, port)
return
raise NonFatalError(f"{cluster_id} cluster already exists")
except Exception as e:
log.exception(f"NFS Cluster {cluster_id} could not be created")
raise ErrorResponse.wrap(e)
def delete_nfs_cluster(self, cluster_id: str) -> None:
try:
cluster_list = available_clusters(self.mgr)
if cluster_id in cluster_list:
self.mgr.export_mgr.delete_all_exports(cluster_id)
completion = self.mgr.remove_service('ingress.nfs.' + cluster_id)
orchestrator.raise_if_exception(completion)
completion = self.mgr.remove_service('nfs.' + cluster_id)
orchestrator.raise_if_exception(completion)
self.delete_config_obj(cluster_id)
return
raise NonFatalError("Cluster does not exist")
except Exception as e:
log.exception(f"Failed to delete NFS Cluster {cluster_id}")
raise ErrorResponse.wrap(e)
def list_nfs_cluster(self) -> List[str]:
try:
return available_clusters(self.mgr)
except Exception as e:
log.exception("Failed to list NFS Cluster")
raise ErrorResponse.wrap(e)
    def _show_nfs_cluster_info(self, cluster_id: str) -> Dict[str, Any]:
        """Collect backend daemon and ingress details for one cluster.

        Returns a dict with a ``backend`` list (hostname/ip/port per nfs
        daemon) and, when an ingress service fronts this cluster,
        ``virtual_ip``/``port``/``monitor_port``.
        """
        completion = self.mgr.list_daemons(daemon_type='nfs')
        # Here completion.result is a list DaemonDescription objects
        clusters = orchestrator.raise_if_exception(completion)
        backends: List[Dict[str, Any]] = []
        for cluster in clusters:
            if cluster_id == cluster.service_id():
                assert cluster.hostname
                try:
                    if cluster.ip:
                        ip = cluster.ip
                    else:
                        # daemon record carries no ip: resolve it from the
                        # orchestrator host list instead
                        c = self.mgr.get_hosts()
                        orchestrator.raise_if_exception(c)
                        hosts = [h for h in c.result or []
                                 if h.hostname == cluster.hostname]
                        if hosts:
                            ip = resolve_ip(hosts[0].addr)
                        else:
                            # sigh
                            ip = resolve_ip(cluster.hostname)
                    backends.append({
                        "hostname": cluster.hostname,
                        "ip": ip,
                        "port": cluster.ports[0] if cluster.ports else None
                    })
                except orchestrator.OrchestratorError:
                    # skip daemons whose host/ip cannot be determined
                    continue
        r: Dict[str, Any] = {
            'virtual_ip': None,
            'backend': backends,
        }
        sc = self.mgr.describe_service(service_type='ingress')
        services = orchestrator.raise_if_exception(sc)
        for i in services:
            spec = cast(IngressSpec, i.spec)
            if spec.backend_service == f'nfs.{cluster_id}':
                # ports are [frontend, monitor] when both are configured
                r['virtual_ip'] = i.virtual_ip.split('/')[0] if i.virtual_ip else None
                if i.ports:
                    r['port'] = i.ports[0]
                    if len(i.ports) > 1:
                        r['monitor_port'] = i.ports[1]
        log.debug("Successfully fetched %s info: %s", cluster_id, r)
        return r
def show_nfs_cluster_info(self, cluster_id: Optional[str] = None) -> Dict[str, Any]:
try:
if cluster_id and cluster_id not in available_clusters(self.mgr):
raise ClusterNotFound()
info_res = {}
if cluster_id:
cluster_ls = [cluster_id]
else:
cluster_ls = available_clusters(self.mgr)
for cluster_id in cluster_ls:
res = self._show_nfs_cluster_info(cluster_id)
if res:
info_res[cluster_id] = res
return info_res
except Exception as e:
log.exception("Failed to show info for cluster")
raise ErrorResponse.wrap(e)
def get_nfs_cluster_config(self, cluster_id: str) -> str:
try:
if cluster_id in available_clusters(self.mgr):
rados_obj = self._rados(cluster_id)
conf = rados_obj.read_obj(user_conf_obj_name(cluster_id))
return conf or ""
raise ClusterNotFound()
except Exception as e:
log.exception(f"Fetching NFS-Ganesha Config failed for {cluster_id}")
raise ErrorResponse.wrap(e)
    def set_nfs_cluster_config(self, cluster_id: str, nfs_config: str) -> None:
        """Store a user-defined ganesha config block and restart the cluster.

        Refuses to overwrite an existing user config — reset it first.

        :raises ManualRestartRequired: config saved but the orchestrator
            cannot restart the service itself.
        :raises ErrorResponse: wrapping any other failure.
        """
        try:
            if cluster_id in available_clusters(self.mgr):
                rados_obj = self._rados(cluster_id)
                if rados_obj.check_user_config():
                    raise NonFatalError("NFS-Ganesha User Config already exists")
                rados_obj.write_obj(nfs_config, user_conf_obj_name(cluster_id),
                                    conf_obj_name(cluster_id))
                log.debug("Successfully saved %s's user config: \n %s", cluster_id, nfs_config)
                restart_nfs_service(self.mgr, cluster_id)
                return
            raise ClusterNotFound()
        except NotImplementedError:
            # backend cannot restart services; the config itself was saved
            raise ManualRestartRequired("NFS-Ganesha Config Added Successfully")
        except Exception as e:
            log.exception(f"Setting NFS-Ganesha Config failed for {cluster_id}")
            raise ErrorResponse.wrap(e)
    def reset_nfs_cluster_config(self, cluster_id: str) -> None:
        """Remove the user-defined ganesha config and restart the cluster.

        :raises ManualRestartRequired: config removed but the orchestrator
            cannot restart the service itself.
        :raises ErrorResponse: wrapping any other failure.
        """
        try:
            if cluster_id in available_clusters(self.mgr):
                rados_obj = self._rados(cluster_id)
                if not rados_obj.check_user_config():
                    raise NonFatalError("NFS-Ganesha User Config does not exist")
                rados_obj.remove_obj(user_conf_obj_name(cluster_id),
                                     conf_obj_name(cluster_id))
                restart_nfs_service(self.mgr, cluster_id)
                return
            raise ClusterNotFound()
        except NotImplementedError:
            # backend cannot restart services; the config itself was removed
            raise ManualRestartRequired("NFS-Ganesha Config Removed Successfully")
        except Exception as e:
            log.exception(f"Resetting NFS-Ganesha Config failed for {cluster_id}")
            raise ErrorResponse.wrap(e)
def _rados(self, cluster_id: str) -> NFSRados:
"""Return a new NFSRados object for the given cluster id."""
return NFSRados(self.mgr.rados, cluster_id)
| 13,463 | 42.432258 | 97 | py |
null | ceph-main/src/pybind/mgr/nfs/exception.py | import errno
from typing import Optional
class NFSException(Exception):
    """Base class for NFS module errors; carries a message and an errno."""

    def __init__(self, err_msg: str, errno: int = -1) -> None:
        super().__init__(errno, err_msg)
        self.err_msg = err_msg
        self.errno = errno

    def __str__(self) -> str:
        """Render only the human-readable message."""
        return self.err_msg
class NFSInvalidOperation(NFSException):
    """Raised for malformed or disallowed requests (maps to EINVAL)."""

    def __init__(self, err_msg: str) -> None:
        super().__init__(err_msg, -errno.EINVAL)
class NFSObjectNotFound(NFSException):
    """Raised when a requested object is absent (maps to ENOENT)."""

    def __init__(self, err_msg: str) -> None:
        super().__init__(err_msg, -errno.ENOENT)
class FSNotFound(NFSObjectNotFound):
    """Raised when the named CephFS file system does not exist."""

    def __init__(self, fs_name: Optional[str]) -> None:
        super().__init__(f'filesystem {fs_name} not found')
class ClusterNotFound(NFSObjectNotFound):
    """Raised when the requested NFS cluster does not exist."""

    def __init__(self) -> None:
        super().__init__('cluster does not exist')
| 954 | 27.939394 | 75 | py |
null | ceph-main/src/pybind/mgr/nfs/export.py | import errno
import json
import logging
from typing import (
List,
Any,
Dict,
Optional,
TYPE_CHECKING,
TypeVar,
Callable,
Set,
cast)
from os.path import normpath
import cephfs
from rados import TimedOut, ObjectNotFound, Rados, LIBRADOS_ALL_NSPACES
from object_format import ErrorResponse
from orchestrator import NoOrchestrator
from mgr_module import NFS_POOL_NAME as POOL_NAME, NFS_GANESHA_SUPPORTED_FSALS
from .ganesha_conf import (
CephFSFSAL,
Export,
GaneshaConfParser,
RGWFSAL,
RawBlock,
format_block)
from .exception import NFSException, NFSInvalidOperation, FSNotFound, NFSObjectNotFound
from .utils import (
CONF_PREFIX,
EXPORT_PREFIX,
NonFatalError,
USER_CONF_PREFIX,
export_obj_name,
conf_obj_name,
available_clusters,
check_fs,
restart_nfs_service, cephfs_path_is_dir)
if TYPE_CHECKING:
from nfs.module import Module
FuncT = TypeVar('FuncT', bound=Callable)
log = logging.getLogger(__name__)
def known_cluster_ids(mgr: 'Module') -> Set[str]:
    """Return the set of known cluster IDs.

    Prefers the orchestrator's view; without an orchestrator, falls back
    to scanning the NFS pool for per-cluster config objects.
    """
    try:
        clusters = set(available_clusters(mgr))
    except NoOrchestrator:
        clusters = nfs_rados_configs(mgr.rados)
    return clusters
def _check_rados_notify(ioctx: Any, obj: str) -> None:
    """Notify watchers of *obj*; a ganesha-side timeout is logged, not raised."""
    try:
        ioctx.notify(obj)
    except TimedOut:
        log.exception("Ganesha timed out")
def normalize_path(path: str) -> str:
    """Normalize *path*: strip whitespace, collapse separators, and reduce
    a doubled leading slash to a single one. Empty input passes through.
    """
    if not path:
        return path
    normalized = normpath(path.strip())
    # POSIX normpath preserves exactly two leading slashes; fold to one
    if normalized.startswith("//"):
        normalized = normalized[1:]
    return normalized
class NFSRados:
    """RADOS I/O helper scoped to one cluster's config namespace.

    All objects live in the shared NFS pool (POOL_NAME); the namespace is
    the cluster id, so clusters do not see each other's objects.
    """

    def __init__(self, rados: 'Rados', namespace: str) -> None:
        self.rados = rados
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj: str) -> str:
        # ganesha resolves rados:// URLs used inside %url directives
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name: str) -> RawBlock:
        return RawBlock('%url', values={'value': self._make_rados_url(obj_name)})

    def write_obj(self, conf_block: str, obj: str, config_obj: str = '') -> None:
        """Write *conf_block* into *obj* and, when *config_obj* is given,
        append a %url reference to it there and notify its watchers."""
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)
            # Add created obj url to common config obj
            ioctx.append(config_obj, format_block(
                self._create_url_block(obj)).encode('utf-8'))
            _check_rados_notify(ioctx, config_obj)
            log.debug("Added %s url to %s", obj, config_obj)

    def read_obj(self, obj: str) -> Optional[str]:
        """Return the object's contents as text, or None if absent."""
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            try:
                return ioctx.read(obj, 1048576).decode()  # reads at most 1 MiB
            except ObjectNotFound:
                return None

    def update_obj(self, conf_block: str, obj: str, config_obj: str,
                   should_notify: Optional[bool] = True) -> None:
        """Overwrite *obj*; optionally skip the notify (a following service
        restart reloads the config anyway)."""
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)
            if should_notify:
                _check_rados_notify(ioctx, config_obj)
            log.debug("Update export %s in %s", obj, config_obj)

    def remove_obj(self, obj: str, config_obj: str) -> None:
        """Delete *obj* and strip its %url line from *config_obj*."""
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            _check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: %s", url)

    def remove_all_obj(self) -> None:
        """Delete every object in this cluster's namespace."""
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self) -> bool:
        """Return True if a user-defined config object exists."""
        with self.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith(USER_CONF_PREFIX):
                    return True
        return False
def nfs_rados_configs(rados: 'Rados', nfs_pool: str = POOL_NAME) -> Set[str]:
    """Return a set of all the namespaces in the nfs_pool where nfs
    configuration objects are found. The namespaces also correspond
    to the cluster ids.
    """
    ns: Set[str] = set()
    prefixes = (EXPORT_PREFIX, CONF_PREFIX, USER_CONF_PREFIX)
    with rados.open_ioctx(nfs_pool) as ioctx:
        # scan every namespace in one pass rather than per-cluster
        ioctx.set_namespace(LIBRADOS_ALL_NSPACES)
        for obj in ioctx.list_objects():
            if obj.key.startswith(prefixes):
                ns.add(obj.nspace)
    return ns
class AppliedExportResults:
    """Accumulates per-export outcomes from apply_export.

    Tracks whether any export change reported an error so the whole
    batch can surface a failing manager return code.
    """

    def __init__(self) -> None:
        self.changes: List[Dict[str, str]] = []
        self.has_error = False

    def append(self, value: Dict[str, str]) -> None:
        """Record one outcome dict; an 'error' state marks the batch failed."""
        self.has_error = self.has_error or value.get("state", "") == "error"
        self.changes.append(value)

    def to_simplified(self) -> List[Dict[str, str]]:
        """Return the raw list of recorded outcome dicts."""
        return self.changes

    def mgr_return_value(self) -> int:
        """0 on full success, -EIO if any export failed."""
        return -errno.EIO if self.has_error else 0
class ExportMgr:
    def __init__(
            self,
            mgr: 'Module',
            export_ls: Optional[Dict[str, List[Export]]] = None
    ) -> None:
        # _exports is populated lazily from RADOS by the `exports`
        # property; tests may inject a prebuilt mapping via export_ls.
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self._exports: Optional[Dict[str, List[Export]]] = export_ls
    @property
    def exports(self) -> Dict[str, List[Export]]:
        """Mapping of cluster id -> parsed Export objects.

        Parsed lazily from per-cluster RADOS export objects on first
        access and cached for the life of this ExportMgr.
        """
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in known_cluster_ids(self.mgr):
                self.export_conf_objs = []  # type: List[Export]
                self._read_raw_config(cluster_id)
                self._exports[cluster_id] = self.export_conf_objs
            log.info("Exports parsed successfully %s", self.exports.items())
        return self._exports
def _fetch_export(
self,
cluster_id: str,
pseudo_path: str
) -> Optional[Export]:
try:
for ex in self.exports[cluster_id]:
if ex.pseudo == pseudo_path:
return ex
return None
except KeyError:
log.info('no exports for cluster %s', cluster_id)
return None
def _fetch_export_id(
self,
cluster_id: str,
export_id: int
) -> Optional[Export]:
try:
for ex in self.exports[cluster_id]:
if ex.export_id == export_id:
return ex
return None
except KeyError:
log.info(f'no exports for cluster {cluster_id}')
return None
    def _delete_export_user(self, export: Export) -> None:
        """Remove the cephx identity backing a cephfs export (no-op for rgw)."""
        if isinstance(export.fsal, CephFSFSAL):
            assert export.fsal.user_id
            self.mgr.check_mon_command({
                'prefix': 'auth rm',
                'entity': 'client.{}'.format(export.fsal.user_id),
            })
            log.info("Deleted export user %s", export.fsal.user_id)
        elif isinstance(export.fsal, RGWFSAL):
            # do nothing; we're using the bucket owner creds.
            pass
    def _create_export_user(self, export: Export) -> None:
        """Provision credentials for an export.

        cephfs: create a dedicated cephx user named after the export.
        rgw: no user is created — the bucket owner's S3 keys are looked up
        via radosgw-admin and stored on the FSAL.
        """
        if isinstance(export.fsal, CephFSFSAL):
            fsal = cast(CephFSFSAL, export.fsal)
            assert fsal.fs_name
            fsal.user_id = f"nfs.{export.cluster_id}.{export.export_id}"
            fsal.cephx_key = self._create_user_key(
                export.cluster_id, fsal.user_id, export.path, fsal.fs_name
            )
            log.debug("Successfully created user %s for cephfs path %s", fsal.user_id, export.path)

        elif isinstance(export.fsal, RGWFSAL):
            rgwfsal = cast(RGWFSAL, export.fsal)
            if not rgwfsal.user_id:
                # no user specified: use the bucket owner's identity
                assert export.path
                ret, out, err = self.mgr.tool_exec(
                    ['radosgw-admin', 'bucket', 'stats', '--bucket', export.path]
                )
                if ret:
                    raise NFSException(f'Failed to fetch owner for bucket {export.path}')
                j = json.loads(out)
                owner = j.get('owner', '')
                rgwfsal.user_id = owner
            assert rgwfsal.user_id
            ret, out, err = self.mgr.tool_exec([
                'radosgw-admin', 'user', 'info', '--uid', rgwfsal.user_id
            ])
            if ret:
                raise NFSException(
                    f'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
                )
            j = json.loads(out)

            # FIXME: make this more tolerate of unexpected output?
            rgwfsal.access_key_id = j['keys'][0]['access_key']
            rgwfsal.secret_access_key = j['keys'][0]['secret_key']
            log.debug("Successfully fetched user %s for RGW path %s", rgwfsal.user_id, export.path)
def _gen_export_id(self, cluster_id: str) -> int:
exports = sorted([ex.export_id for ex in self.exports[cluster_id]])
nid = 1
for e_id in exports:
if e_id == nid:
nid += 1
else:
break
return nid
    def _read_raw_config(self, rados_namespace: str) -> None:
        """Parse every export object of the namespace into export_conf_objs."""
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith(EXPORT_PREFIX):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s", self.rados_pool,
                              rados_namespace, obj.key)
                    # each export object holds exactly one EXPORT block
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0], rados_namespace))
    def _save_export(self, cluster_id: str, export: Export) -> None:
        """Add *export* to the in-memory cache and persist it to RADOS,
        linking it into the cluster's common config object."""
        self.exports[cluster_id].append(export)
        self._rados(cluster_id).write_obj(
            format_block(export.to_export_block()),
            export_obj_name(export.export_id),
            conf_obj_name(export.cluster_id)
        )
    def _delete_export(
            self,
            cluster_id: str,
            pseudo_path: Optional[str],
            export_obj: Optional[Export] = None
    ) -> None:
        """Delete one export, given either its pseudo path or the Export.

        NOTE(review): with pseudo_path=None (bulk deletion path) the
        per-export RADOS object is not removed here — presumably cleaned
        up with the cluster's namespace; confirm against cluster teardown.
        """
        try:
            if export_obj:
                export: Optional[Export] = export_obj
            else:
                assert pseudo_path
                export = self._fetch_export(cluster_id, pseudo_path)
            if export:
                if pseudo_path:
                    self._rados(cluster_id).remove_obj(
                        export_obj_name(export.export_id), conf_obj_name(cluster_id))
                self.exports[cluster_id].remove(export)
                self._delete_export_user(export)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                    log.debug("Deleted all exports for cluster %s", cluster_id)
                return None
            raise NonFatalError("Export does not exist")
        except Exception as e:
            log.exception(f"Failed to delete {pseudo_path} export for {cluster_id}")
            raise ErrorResponse.wrap(e)
    def _fetch_export_obj(self, cluster_id: str, ex_id: int) -> Optional[Export]:
        """Load export *ex_id* directly from RADOS (bypasses the cache)."""
        try:
            with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
                ioctx.set_namespace(cluster_id)
                export = Export.from_export_block(
                    GaneshaConfParser(
                        ioctx.read(export_obj_name(ex_id)).decode("utf-8")
                    ).parse()[0],
                    cluster_id
                )
                return export
        except ObjectNotFound:
            log.exception("Export ID: %s not found", ex_id)
        return None
    def _update_export(self, cluster_id: str, export: Export,
                       need_nfs_service_restart: bool) -> None:
        """Persist an updated export; notify ganesha or restart the service.

        When a restart is required the rados notify is skipped, since the
        restart itself reloads the configuration.
        """
        self.exports[cluster_id].append(export)
        self._rados(cluster_id).update_obj(
            format_block(export.to_export_block()),
            export_obj_name(export.export_id), conf_obj_name(export.cluster_id),
            should_notify=not need_nfs_service_restart)
        if need_nfs_service_restart:
            restart_nfs_service(self.mgr, export.cluster_id)
def _validate_cluster_id(self, cluster_id: str) -> None:
"""Raise an exception if cluster_id is not valid."""
clusters = known_cluster_ids(self.mgr)
log.debug("checking for %r in known nfs clusters: %r",
cluster_id, clusters)
if cluster_id not in clusters:
raise ErrorResponse(f"Cluster {cluster_id!r} does not exist",
return_value=-errno.ENOENT)
    def create_export(self, addr: Optional[List[str]] = None, **kwargs: Any) -> Dict[str, Any]:
        """Create a cephfs or rgw export, dispatching on kwargs['fsal_type'].

        When *addr* is given, access is restricted to those client
        addresses: the per-client block carries the requested access type
        and squash while the export-level defaults are neutralized.
        """
        self._validate_cluster_id(kwargs['cluster_id'])
        # if addr(s) are provided, construct client list and adjust outer block
        clients = []
        if addr:
            clients = [{
                'addresses': addr,
                'access_type': 'ro' if kwargs['read_only'] else 'rw',
                'squash': kwargs['squash'],
            }]
            kwargs['squash'] = 'none'
        kwargs['clients'] = clients

        if clients:
            kwargs['access_type'] = "none"
        elif kwargs['read_only']:
            kwargs['access_type'] = "RO"
        else:
            kwargs['access_type'] = "RW"

        if kwargs['cluster_id'] not in self.exports:
            self.exports[kwargs['cluster_id']] = []

        try:
            fsal_type = kwargs.pop('fsal_type')
            if fsal_type == 'cephfs':
                return self.create_cephfs_export(**kwargs)
            if fsal_type == 'rgw':
                return self.create_rgw_export(**kwargs)
            raise NotImplementedError()
        except Exception as e:
            log.exception(
                f"Failed to create {kwargs['pseudo_path']} export for {kwargs['cluster_id']}")
            raise ErrorResponse.wrap(e)
def delete_export(self,
cluster_id: str,
pseudo_path: str) -> None:
self._validate_cluster_id(cluster_id)
return self._delete_export(cluster_id, pseudo_path)
    def delete_all_exports(self, cluster_id: str) -> None:
        """Delete every export of *cluster_id* (used by cluster teardown)."""
        try:
            # copy: _delete_export mutates self.exports[cluster_id]
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        for export in export_list:
            try:
                self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                    export_obj=export)
            except Exception as e:
                raise NFSException(f"Failed to delete export {export.export_id}: {e}")
        log.info("All exports successfully deleted for cluster id: %s", cluster_id)
def list_all_exports(self) -> List[Dict[str, Any]]:
r = []
for cluster_id, ls in self.exports.items():
r.extend([e.to_dict() for e in ls])
return r
    def list_exports(self,
                     cluster_id: str,
                     detailed: bool = False) -> List[Any]:
        """List a cluster's exports: pseudo paths, or full dicts if *detailed*."""
        self._validate_cluster_id(cluster_id)
        try:
            if detailed:
                result_d = [export.to_dict() for export in self.exports[cluster_id]]
                return result_d
            else:
                result_ps = [export.pseudo for export in self.exports[cluster_id]]
                return result_ps
        except KeyError:
            # cluster exists but has no cached exports
            log.warning("No exports to list for %s", cluster_id)
            return []
        except Exception as e:
            log.exception(f"Failed to list exports for {cluster_id}")
            raise ErrorResponse.wrap(e)
def _get_export_dict(self, cluster_id: str, pseudo_path: str) -> Optional[Dict[str, Any]]:
export = self._fetch_export(cluster_id, pseudo_path)
if export:
return export.to_dict()
log.warning(f"No {pseudo_path} export to show for {cluster_id}")
return None
    def get_export(
            self,
            cluster_id: str,
            pseudo_path: str,
    ) -> Dict[str, Any]:
        """Return the export at *pseudo_path* as a dict ({} if absent)."""
        self._validate_cluster_id(cluster_id)
        try:
            export_dict = self._get_export_dict(cluster_id, pseudo_path)
            log.info(f"Fetched {export_dict!r} for {cluster_id!r}, {pseudo_path!r}")
            return export_dict if export_dict else {}
        except Exception as e:
            log.exception(f"Failed to get {pseudo_path} export for {cluster_id}")
            raise ErrorResponse.wrap(e)
def get_export_by_id(
self,
cluster_id: str,
export_id: int
) -> Optional[Dict[str, Any]]:
export = self._fetch_export_id(cluster_id, export_id)
return export.to_dict() if export else None
def get_export_by_pseudo(
self,
cluster_id: str,
pseudo_path: str
) -> Optional[Dict[str, Any]]:
export = self._fetch_export(cluster_id, pseudo_path)
return export.to_dict() if export else None
    # This method is used by the dashboard module (../dashboard/controllers/nfs.py)
    # Do not change interface without updating the Dashboard code
    def apply_export(self, cluster_id: str, export_config: str) -> AppliedExportResults:
        """Create/update exports from JSON or ganesha EXPORT block text.

        Accepts a single export or a list; each is applied independently
        and the per-export outcomes are collected in the result.
        """
        try:
            exports = self._read_export_config(cluster_id, export_config)
        except Exception as e:
            log.exception(f'Failed to update export: {e}')
            raise ErrorResponse.wrap(e)
        aeresults = AppliedExportResults()
        for export in exports:
            aeresults.append(self._change_export(cluster_id, export))
        return aeresults
    def _read_export_config(self, cluster_id: str, export_config: str) -> List[Dict]:
        """Parse *export_config* (JSON or ganesha EXPORT text) into dicts."""
        if not export_config:
            raise NFSInvalidOperation("Empty Config!!")
        try:
            j = json.loads(export_config)
        except ValueError:
            # okay, not JSON. is it an EXPORT block?
            try:
                blocks = GaneshaConfParser(export_config).parse()
                exports = [
                    Export.from_export_block(block, cluster_id)
                    for block in blocks
                ]
                j = [export.to_dict() for export in exports]
            except Exception as ex:
                raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")
        # check export type - always return a list
        if isinstance(j, list):
            return j  # j is already a list object
        return [j]  # return a single object list, with j as the only item
    def _change_export(self, cluster_id: str, export: Dict) -> Dict[str, str]:
        """Apply one export dict, converting failures into result dicts."""
        try:
            return self._apply_export(cluster_id, export)
        except NotImplementedError:
            # in theory, the NotImplementedError here may be raised by a hook back to
            # an orchestration module. If the orchestration module supports it the NFS
            # servers may be restarted. If not supported the expectation is that an
            # (unfortunately generic) NotImplementedError will be raised. We then
            # indicate to the user that manual intervention may be needed now that the
            # configuration changes have been applied.
            return {
                "pseudo": export['pseudo'],
                "state": "warning",
                "msg": "changes applied (Manual restart of NFS Pods required)",
            }
        except Exception as ex:
            msg = f'Failed to apply export: {ex}'
            log.exception(msg)
            return {"state": "error", "msg": msg}
    def _update_user_id(
            self,
            cluster_id: str,
            path: str,
            fs_name: str,
            user_id: str
    ) -> None:
        """Re-point an existing cephx export user's caps at a new path/fs."""
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        # NFS-Ganesha can dynamically enforce an export's access type changes, but Ceph server
        # daemons can't dynamically enforce changes in Ceph user caps of the Ceph clients. To
        # allow dynamic updates of CephFS NFS exports, always set FSAL Ceph user's MDS caps with
        # path restricted read-write access. Rely on the ganesha servers to enforce the export
        # access type requested for the NFS clients.
        self.mgr.check_mon_command({
            'prefix': 'auth caps',
            'entity': f'client.{user_id}',
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)],
        })
        log.info("Export user updated %s", user_id)
    def _create_user_key(
            self,
            cluster_id: str,
            entity: str,
            path: str,
            fs_name: str,
    ) -> str:
        """Create (or re-cap) the cephx user for an export; return its key.

        Caps granted: read on mon; rw on the cluster's config namespace and
        the file system's data pool (osd); rw restricted to *path* (mds).
        """
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        nfs_caps = [
            'mon', 'allow r',
            'osd', osd_cap,
            'mds', 'allow rw path={}'.format(path)
        ]
        ret, out, err = self.mgr.mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': nfs_caps,
            'format': 'json',
        })
        if ret == -errno.EINVAL and 'does not match' in err:
            # entity already exists with different caps: update the caps,
            # then fetch the existing key separately
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth caps',
                'entity': 'client.{}'.format(entity),
                'caps': nfs_caps,
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to update caps for {entity}: {err}')
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth get',
                'entity': 'client.{}'.format(entity),
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to fetch caps for {entity}: {err}')
        json_res = json.loads(out)
        log.info("Export user created is %s", json_res[0]['entity'])
        return json_res[0]['key']
    def create_export_from_dict(self,
                                cluster_id: str,
                                ex_id: int,
                                ex_dict: Dict[str, Any]) -> Export:
        """Validate *ex_dict* and build an Export object (not persisted).

        :raises NFSInvalidOperation: missing/invalid fields.
        :raises FSNotFound: cephfs FSAL names an unknown file system.
        """
        pseudo_path = ex_dict.get("pseudo")
        if not pseudo_path:
            raise NFSInvalidOperation("export must specify pseudo path")
        path = ex_dict.get("path")
        if path is None:
            raise NFSInvalidOperation("export must specify path")
        path = normalize_path(path)
        fsal = ex_dict.get("fsal", {})
        fsal_type = fsal.get("name")
        if fsal_type == NFS_GANESHA_SUPPORTED_FSALS[1]:
            # RGW: path is a bucket name, so no separators allowed
            if '/' in path and path != '/':
                raise NFSInvalidOperation('"/" is not allowed in path with bucket name')
        elif fsal_type == NFS_GANESHA_SUPPORTED_FSALS[0]:
            fs_name = fsal.get("fs_name")
            if not fs_name:
                raise NFSInvalidOperation("export FSAL must specify fs_name")
            if not check_fs(self.mgr, fs_name):
                raise FSNotFound(fs_name)
            # the export's cephx user name is derived, never user-supplied
            user_id = f"nfs.{cluster_id}.{ex_id}"
            if "user_id" in fsal and fsal["user_id"] != user_id:
                raise NFSInvalidOperation(f"export FSAL user_id must be '{user_id}'")
        else:
            raise NFSInvalidOperation(f"NFS Ganesha supported FSALs are {NFS_GANESHA_SUPPORTED_FSALS}."
                                      "Export must specify any one of it.")
        ex_dict["fsal"] = fsal
        ex_dict["cluster_id"] = cluster_id
        export = Export.from_dict(ex_id, ex_dict)
        export.validate(self.mgr)
        log.debug("Successfully created %s export-%s from dict for cluster %s",
                  fsal_type, ex_id, cluster_id)
        return export
def create_cephfs_export(self,
fs_name: str,
cluster_id: str,
pseudo_path: str,
read_only: bool,
path: str,
squash: str,
access_type: str,
clients: list = [],
sectype: Optional[List[str]] = None) -> Dict[str, Any]:
try:
cephfs_path_is_dir(self.mgr, fs_name, path)
except NotADirectoryError:
raise NFSException(f"path {path} is not a dir", -errno.ENOTDIR)
except cephfs.ObjectNotFound:
raise NFSObjectNotFound(f"path {path} does not exist")
except cephfs.Error as e:
raise NFSException(e.args[1], -e.args[0])
pseudo_path = normalize_path(pseudo_path)
if not self._fetch_export(cluster_id, pseudo_path):
export = self.create_export_from_dict(
cluster_id,
self._gen_export_id(cluster_id),
{
"pseudo": pseudo_path,
"path": path,
"access_type": access_type,
"squash": squash,
"fsal": {
"name": NFS_GANESHA_SUPPORTED_FSALS[0],
"fs_name": fs_name,
},
"clients": clients,
"sectype": sectype,
}
)
log.debug("creating cephfs export %s", export)
self._create_export_user(export)
self._save_export(cluster_id, export)
result = {
"bind": export.pseudo,
"fs": fs_name,
"path": export.path,
"cluster": cluster_id,
"mode": export.access_type,
}
return result
raise NonFatalError("Export already exists")
def create_rgw_export(self,
cluster_id: str,
pseudo_path: str,
access_type: str,
read_only: bool,
squash: str,
bucket: Optional[str] = None,
user_id: Optional[str] = None,
clients: list = [],
sectype: Optional[List[str]] = None) -> Dict[str, Any]:
pseudo_path = normalize_path(pseudo_path)
if not bucket and not user_id:
raise ErrorResponse("Must specify either bucket or user_id")
if not self._fetch_export(cluster_id, pseudo_path):
export = self.create_export_from_dict(
cluster_id,
self._gen_export_id(cluster_id),
{
"pseudo": pseudo_path,
"path": bucket or '/',
"access_type": access_type,
"squash": squash,
"fsal": {
"name": NFS_GANESHA_SUPPORTED_FSALS[1],
"user_id": user_id,
},
"clients": clients,
"sectype": sectype,
}
)
log.debug("creating rgw export %s", export)
self._create_export_user(export)
self._save_export(cluster_id, export)
result = {
"bind": export.pseudo,
"path": export.path,
"cluster": cluster_id,
"mode": export.access_type,
"squash": export.squash,
}
return result
raise NonFatalError("Export already exists")
    def _apply_export(
            self,
            cluster_id: str,
            new_export_dict: Dict,
    ) -> Dict[str, str]:
        """Create or update one export from a dict; return its outcome.

        Matches an existing export by pseudo path first, then by export_id
        (a pseudo-path rename). Credential handling is FSAL-specific; a
        service restart is needed unless only dynamically-reloadable
        fields changed.
        """
        for k in ['path', 'pseudo']:
            if k not in new_export_dict:
                raise NFSInvalidOperation(f'Export missing required field {k}')
        if cluster_id not in self.exports:
            self.exports[cluster_id] = []
        new_export_dict['path'] = normalize_path(new_export_dict['path'])
        new_export_dict['pseudo'] = normalize_path(new_export_dict['pseudo'])
        old_export = self._fetch_export(cluster_id, new_export_dict['pseudo'])
        if old_export:
            # Check if export id matches
            if new_export_dict.get('export_id'):
                if old_export.export_id != new_export_dict.get('export_id'):
                    raise NFSInvalidOperation('Export ID changed, Cannot update export')
            else:
                new_export_dict['export_id'] = old_export.export_id
        elif new_export_dict.get('export_id'):
            # same id under a different pseudo path: a rename
            old_export = self._fetch_export_obj(cluster_id, new_export_dict['export_id'])
            if old_export:
                # re-fetch via old pseudo
                old_export = self._fetch_export(cluster_id, old_export.pseudo)
                assert old_export
                log.debug("export %s pseudo %s -> %s",
                          old_export.export_id, old_export.pseudo, new_export_dict['pseudo'])
        new_export = self.create_export_from_dict(
            cluster_id,
            new_export_dict.get('export_id', self._gen_export_id(cluster_id)),
            new_export_dict
        )
        if not old_export:
            self._create_export_user(new_export)
            self._save_export(cluster_id, new_export)
            return {"pseudo": new_export.pseudo, "state": "added"}
        need_nfs_service_restart = True
        if old_export.fsal.name != new_export.fsal.name:
            raise NFSInvalidOperation('FSAL change not allowed')
        if old_export.pseudo != new_export.pseudo:
            log.debug('export %s pseudo %s -> %s',
                      new_export.export_id, old_export.pseudo, new_export.pseudo)
        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
            # cephfs: reconcile the cephx user with the new path/fs
            old_fsal = cast(CephFSFSAL, old_export.fsal)
            new_fsal = cast(CephFSFSAL, new_export.fsal)
            if old_fsal.user_id != new_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif (
                old_export.path != new_export.path
                or old_fsal.fs_name != new_fsal.fs_name
            ):
                self._update_user_id(
                    cluster_id,
                    new_export.path,
                    cast(str, new_fsal.fs_name),
                    cast(str, new_fsal.user_id)
                )
                new_fsal.cephx_key = old_fsal.cephx_key
            else:
                # verify the stored mds caps still match the export path
                expected_mds_caps = 'allow rw path={}'.format(new_export.path)
                entity = new_fsal.user_id
                ret, out, err = self.mgr.mon_command({
                    'prefix': 'auth get',
                    'entity': 'client.{}'.format(entity),
                    'format': 'json',
                })
                if ret:
                    raise NFSException(f'Failed to fetch caps for {entity}: {err}')
                actual_mds_caps = json.loads(out)[0]['caps'].get('mds')
                if actual_mds_caps != expected_mds_caps:
                    self._update_user_id(
                        cluster_id,
                        new_export.path,
                        cast(str, new_fsal.fs_name),
                        cast(str, new_fsal.user_id)
                    )
                elif old_export.pseudo == new_export.pseudo:
                    # nothing ganesha cannot reload dynamically changed
                    need_nfs_service_restart = False
                new_fsal.cephx_key = old_fsal.cephx_key
        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
            # rgw: keys follow the user; explicit key changes are rejected
            old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
            new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
            if old_rgw_fsal.user_id != new_rgw_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif old_rgw_fsal.access_key_id != new_rgw_fsal.access_key_id:
                raise NFSInvalidOperation('access_key_id change is not allowed')
            elif old_rgw_fsal.secret_access_key != new_rgw_fsal.secret_access_key:
                raise NFSInvalidOperation('secret_access_key change is not allowed')
        self.exports[cluster_id].remove(old_export)
        self._update_export(cluster_id, new_export, need_nfs_service_restart)
        return {"pseudo": new_export.pseudo, "state": "updated"}
def _rados(self, cluster_id: str) -> NFSRados:
"""Return a new NFSRados object for the given cluster id."""
return NFSRados(self.mgr.rados, cluster_id)
| 33,797 | 38.437573 | 103 | py |
null | ceph-main/src/pybind/mgr/nfs/ganesha_conf.py | from typing import cast, List, Dict, Any, Optional, TYPE_CHECKING
from os.path import isabs
from mgr_module import NFS_GANESHA_SUPPORTED_FSALS
from .exception import NFSInvalidOperation, FSNotFound
from .utils import check_fs
if TYPE_CHECKING:
from nfs.module import Module
def _indentation(depth: int, size: int = 4) -> str:
return " " * (depth * size)
def _format_val(block_name: str, key: str, val: str) -> str:
if isinstance(val, list):
return ', '.join([_format_val(block_name, key, v) for v in val])
if isinstance(val, bool):
return str(val).lower()
if isinstance(val, int) or (block_name == 'CLIENT'
and key == 'clients'):
return '{}'.format(val)
return '"{}"'.format(val)
def _validate_squash(squash: str) -> None:
valid_squash_ls = [
"root", "root_squash", "rootsquash", "rootid", "root_id_squash",
"rootidsquash", "all", "all_squash", "allsquash", "all_anomnymous",
"allanonymous", "no_root_squash", "none", "noidsquash",
]
if squash.lower() not in valid_squash_ls:
raise NFSInvalidOperation(
f"squash {squash} not in valid list {valid_squash_ls}"
)
def _validate_access_type(access_type: str) -> None:
valid_access_types = ['rw', 'ro', 'none']
if not isinstance(access_type, str) or access_type.lower() not in valid_access_types:
raise NFSInvalidOperation(
f'{access_type} is invalid, valid access type are'
f'{valid_access_types}'
)
def _validate_sec_type(sec_type: str) -> None:
valid_sec_types = ["none", "sys", "krb5", "krb5i", "krb5p"]
if not isinstance(sec_type, str) or sec_type not in valid_sec_types:
raise NFSInvalidOperation(
f"SecType {sec_type} invalid, valid types are {valid_sec_types}")
class RawBlock():
    """Parsed ganesha config block: a name, nested blocks and key/values."""

    def __init__(self, block_name: str, blocks: List['RawBlock'] = [], values: Dict[str, Any] = {}):
        # Guard against the shared mutable defaults: give each instance
        # its own containers when the caller supplied none.
        if not values:  # workaround mutable default argument
            values = {}
        if not blocks:  # workaround mutable default argument
            blocks = []
        self.block_name = block_name
        self.blocks = blocks
        self.values = values

    def __eq__(self, other: Any) -> bool:
        """Structural equality on name, sub-blocks and values."""
        if not isinstance(other, RawBlock):
            return False
        same_name = self.block_name == other.block_name
        same_blocks = self.blocks == other.blocks
        same_values = self.values == other.values
        return same_name and same_blocks and same_values

    def __repr__(self) -> str:
        return f'RawBlock({self.block_name!r}, {self.blocks!r}, {self.values!r})'
class GaneshaConfParser:
    def __init__(self, raw_config: str):
        """Pre-process *raw_config* for position-based scanning.

        %url lines keep their newline (minus double quotes); every other
        line has all whitespace removed, so the parser can match tokens
        without caring about layout.
        """
        self.pos = 0
        self.text = ""
        for line in raw_config.split("\n"):
            line = line.lstrip()

            if line.startswith("%"):
                self.text += line.replace('"', "")
                self.text += "\n"
            else:
                self.text += "".join(line.split())
def stream(self) -> str:
return self.text[self.pos:]
def last_context(self) -> str:
return f'"...{self.text[max(0, self.pos - 30):self.pos]}<here>{self.stream()[:30]}"'
def parse_block_name(self) -> str:
idx = self.stream().find('{')
if idx == -1:
raise Exception(f"Cannot find block name at {self.last_context()}")
block_name = self.stream()[:idx]
self.pos += idx + 1
return block_name
def parse_block_or_section(self) -> RawBlock:
if self.stream().startswith("%url "):
# section line
self.pos += 5
idx = self.stream().find('\n')
if idx == -1:
value = self.stream()
self.pos += len(value)
else:
value = self.stream()[:idx]
self.pos += idx + 1
block_dict = RawBlock('%url', values={'value': value})
return block_dict
block_dict = RawBlock(self.parse_block_name().upper())
self.parse_block_body(block_dict)
if self.stream()[0] != '}':
raise Exception("No closing bracket '}' found at the end of block")
self.pos += 1
return block_dict
def parse_parameter_value(self, raw_value: str) -> Any:
if raw_value.find(',') != -1:
return [self.parse_parameter_value(v.strip())
for v in raw_value.split(',')]
try:
return int(raw_value)
except ValueError:
if raw_value == "true":
return True
if raw_value == "false":
return False
if raw_value.find('"') == 0:
return raw_value[1:-1]
return raw_value
def parse_stanza(self, block_dict: RawBlock) -> None:
equal_idx = self.stream().find('=')
if equal_idx == -1:
raise Exception("Malformed stanza: no equal symbol found.")
semicolon_idx = self.stream().find(';')
parameter_name = self.stream()[:equal_idx].lower()
parameter_value = self.stream()[equal_idx + 1:semicolon_idx]
block_dict.values[parameter_name] = self.parse_parameter_value(parameter_value)
self.pos += semicolon_idx + 1
def parse_block_body(self, block_dict: RawBlock) -> None:
while True:
if self.stream().find('}') == 0:
# block end
return
last_pos = self.pos
semicolon_idx = self.stream().find(';')
lbracket_idx = self.stream().find('{')
is_semicolon = (semicolon_idx != -1)
is_lbracket = (lbracket_idx != -1)
is_semicolon_lt_lbracket = (semicolon_idx < lbracket_idx)
if is_semicolon and ((is_lbracket and is_semicolon_lt_lbracket) or not is_lbracket):
self.parse_stanza(block_dict)
elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket)
or (not is_semicolon)):
block_dict.blocks.append(self.parse_block_or_section())
else:
raise Exception("Malformed stanza: no semicolon found.")
if last_pos == self.pos:
raise Exception("Infinite loop while parsing block content")
def parse(self) -> List[RawBlock]:
blocks = []
while self.stream():
blocks.append(self.parse_block_or_section())
return blocks
class FSAL(object):
    """Abstract FSAL (file-system abstraction layer) settings of an export.

    Concrete behaviour lives in CephFSFSAL and RGWFSAL; this base class
    only dispatches construction to the right subclass by FSAL name.
    """

    def __init__(self, name: str) -> None:
        # FSAL type name, e.g. "CEPH" or "RGW"
        self.name = name

    @classmethod
    def from_dict(cls, fsal_dict: Dict[str, Any]) -> 'FSAL':
        """Instantiate the subclass matching ``fsal_dict['name']``."""
        fsal_name = fsal_dict.get('name')
        if fsal_name == NFS_GANESHA_SUPPORTED_FSALS[0]:
            return CephFSFSAL.from_dict(fsal_dict)
        if fsal_name == NFS_GANESHA_SUPPORTED_FSALS[1]:
            return RGWFSAL.from_dict(fsal_dict)
        raise NFSInvalidOperation(f'Unknown FSAL {fsal_name}')

    @classmethod
    def from_fsal_block(cls, fsal_block: RawBlock) -> 'FSAL':
        """Instantiate the subclass matching the block's ``name`` value."""
        fsal_name = fsal_block.values.get('name')
        if fsal_name == NFS_GANESHA_SUPPORTED_FSALS[0]:
            return CephFSFSAL.from_fsal_block(fsal_block)
        if fsal_name == NFS_GANESHA_SUPPORTED_FSALS[1]:
            return RGWFSAL.from_fsal_block(fsal_block)
        raise NFSInvalidOperation(f'Unknown FSAL {fsal_name}')

    def to_fsal_block(self) -> RawBlock:
        raise NotImplementedError

    def to_dict(self) -> Dict[str, Any]:
        raise NotImplementedError
class CephFSFSAL(FSAL):
    """FSAL settings for a CephFS-backed export."""

    def __init__(self,
                 name: str,
                 user_id: Optional[str] = None,
                 fs_name: Optional[str] = None,
                 sec_label_xattr: Optional[str] = None,
                 cephx_key: Optional[str] = None) -> None:
        super().__init__(name)
        assert name == 'CEPH'
        self.fs_name = fs_name
        self.user_id = user_id
        self.sec_label_xattr = sec_label_xattr
        self.cephx_key = cephx_key

    @classmethod
    def from_fsal_block(cls, fsal_block: RawBlock) -> 'CephFSFSAL':
        """Build from a parsed FSAL config block."""
        values = fsal_block.values
        return cls(values['name'],
                   values.get('user_id'),
                   values.get('filesystem'),
                   values.get('sec_label_xattr'),
                   values.get('secret_access_key'))

    def to_fsal_block(self) -> RawBlock:
        """Render as an FSAL RawBlock, omitting unset values."""
        result = RawBlock('FSAL', values={'name': self.name})
        for key, value in (('user_id', self.user_id),
                           ('filesystem', self.fs_name),
                           ('sec_label_xattr', self.sec_label_xattr),
                           ('secret_access_key', self.cephx_key)):
            if value:
                result.values[key] = value
        return result

    @classmethod
    def from_dict(cls, fsal_dict: Dict[str, Any]) -> 'CephFSFSAL':
        """Build from the dict form produced by to_dict()."""
        return cls(fsal_dict['name'],
                   fsal_dict.get('user_id'),
                   fsal_dict.get('fs_name'),
                   fsal_dict.get('sec_label_xattr'),
                   fsal_dict.get('cephx_key'))

    def to_dict(self) -> Dict[str, str]:
        """Serialize to a dict; the cephx key is not included."""
        r = {'name': self.name}
        for key, value in (('user_id', self.user_id),
                           ('fs_name', self.fs_name),
                           ('sec_label_xattr', self.sec_label_xattr)):
            if value:
                r[key] = value
        return r
class RGWFSAL(FSAL):
    """FSAL settings for an RGW (object-gateway)-backed export."""

    def __init__(self,
                 name: str,
                 user_id: Optional[str] = None,
                 access_key_id: Optional[str] = None,
                 secret_access_key: Optional[str] = None
                 ) -> None:
        super().__init__(name)
        assert name == 'RGW'
        # RGW user uid
        self.user_id = user_id
        # S3 credentials
        self.access_key_id = access_key_id
        self.secret_access_key = secret_access_key

    @classmethod
    def from_fsal_block(cls, fsal_block: RawBlock) -> 'RGWFSAL':
        """Build from a parsed FSAL config block."""
        values = fsal_block.values
        return cls(values['name'],
                   values.get('user_id'),
                   values.get('access_key_id'),
                   values.get('secret_access_key'))

    def to_fsal_block(self) -> RawBlock:
        """Render as an FSAL RawBlock, omitting unset values."""
        result = RawBlock('FSAL', values={'name': self.name})
        for key, value in (('user_id', self.user_id),
                           ('access_key_id', self.access_key_id),
                           ('secret_access_key', self.secret_access_key)):
            if value:
                result.values[key] = value
        return result

    @classmethod
    def from_dict(cls, fsal_dict: Dict[str, str]) -> 'RGWFSAL':
        """Build from the dict form produced by to_dict()."""
        return cls(fsal_dict['name'],
                   fsal_dict.get('user_id'),
                   fsal_dict.get('access_key_id'),
                   fsal_dict.get('secret_access_key'))

    def to_dict(self) -> Dict[str, str]:
        """Serialize to a dict, omitting unset values."""
        r = {'name': self.name}
        for key, value in (('user_id', self.user_id),
                           ('access_key_id', self.access_key_id),
                           ('secret_access_key', self.secret_access_key)):
            if value:
                r[key] = value
        return r
class Client:
    """Per-client access overrides of an export (one CLIENT block)."""

    def __init__(self,
                 addresses: List[str],
                 access_type: str,
                 squash: str):
        self.addresses = addresses
        self.access_type = access_type
        self.squash = squash

    @classmethod
    def from_client_block(cls, client_block: RawBlock) -> 'Client':
        """Build from a parsed CLIENT config block."""
        values = client_block.values
        raw_addrs = values.get('clients', [])
        # a single address parses as a bare string; normalize to a list
        addrs = [raw_addrs] if isinstance(raw_addrs, str) else raw_addrs
        return cls(addrs, values.get('access_type'), values.get('squash'))

    def to_client_block(self) -> RawBlock:
        """Render as a CLIENT RawBlock, omitting unset overrides."""
        result = RawBlock('CLIENT', values={'clients': self.addresses})
        for key, value in (('access_type', self.access_type),
                           ('squash', self.squash)):
            if value:
                result.values[key] = value
        return result

    @classmethod
    def from_dict(cls, client_dict: Dict[str, Any]) -> 'Client':
        """Build from the dict form produced by to_dict()."""
        return cls(client_dict['addresses'], client_dict['access_type'],
                   client_dict['squash'])

    def to_dict(self) -> Dict[str, Any]:
        """Serialize all three fields (None values included)."""
        return {
            'addresses': self.addresses,
            'access_type': self.access_type,
            'squash': self.squash
        }
class Export:
    """In-memory model of one NFS-Ganesha EXPORT block.

    Instances convert to/from the parsed RawBlock representation
    (from_export_block/to_export_block) and to/from the JSON-style dict
    used by the mgr command interface (from_dict/to_dict).
    """

    def __init__(
            self,
            export_id: int,
            path: str,
            cluster_id: str,
            pseudo: str,
            access_type: str,
            squash: str,
            security_label: bool,
            protocols: List[int],
            transports: List[str],
            fsal: FSAL,
            clients: Optional[List[Client]] = None,
            sectype: Optional[List[str]] = None) -> None:
        self.export_id = export_id
        self.path = path
        self.fsal = fsal
        self.cluster_id = cluster_id
        self.pseudo = pseudo
        self.access_type = access_type
        self.squash = squash
        # attribute caching is always written out as disabled
        self.attr_expiration_time = 0
        self.security_label = security_label
        self.protocols = protocols
        self.transports = transports
        self.clients: List[Client] = clients or []
        self.sectype = sectype

    @classmethod
    def from_export_block(cls, export_block: RawBlock, cluster_id: str) -> 'Export':
        """Build an Export from a parsed EXPORT RawBlock."""
        fsal_blocks = [b for b in export_block.blocks
                       if b.block_name == "FSAL"]
        client_blocks = [b for b in export_block.blocks
                         if b.block_name == "CLIENT"]
        # a single protocol parses as a scalar; normalize to a list
        protocols = export_block.values.get('protocols')
        if not isinstance(protocols, list):
            protocols = [protocols]
        transports = export_block.values.get('transports')
        if isinstance(transports, str):
            transports = [transports]
        elif not transports:
            transports = []
        # if this module wrote the ganesha conf the param is camelcase
        # "SecType". but for compatiblity with manually edited ganesha confs,
        # accept "sectype" too.
        sectype = (export_block.values.get("SecType")
                   or export_block.values.get("sectype") or None)
        return cls(export_block.values['export_id'],
                   export_block.values['path'],
                   cluster_id,
                   export_block.values['pseudo'],
                   export_block.values.get('access_type', 'none'),
                   export_block.values.get('squash', 'no_root_squash'),
                   export_block.values.get('security_label', True),
                   protocols,
                   transports,
                   FSAL.from_fsal_block(fsal_blocks[0]),
                   [Client.from_client_block(client)
                    for client in client_blocks],
                   sectype=sectype)

    def to_export_block(self) -> RawBlock:
        """Render this export as an EXPORT RawBlock with nested FSAL/CLIENT blocks."""
        values = {
            'export_id': self.export_id,
            'path': self.path,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'attr_expiration_time': self.attr_expiration_time,
            'security_label': self.security_label,
            'protocols': self.protocols,
            'transports': self.transports,
        }
        if self.sectype:
            values['SecType'] = self.sectype
        result = RawBlock("EXPORT", values=values)
        result.blocks = [
            self.fsal.to_fsal_block()
        ] + [
            client.to_client_block()
            for client in self.clients
        ]
        return result

    @classmethod
    def from_dict(cls, export_id: int, ex_dict: Dict[str, Any]) -> 'Export':
        """Build an Export from its dict form, filling in defaults."""
        return cls(export_id,
                   ex_dict.get('path', '/'),
                   ex_dict['cluster_id'],
                   ex_dict['pseudo'],
                   ex_dict.get('access_type', 'RO'),
                   ex_dict.get('squash', 'no_root_squash'),
                   ex_dict.get('security_label', True),
                   ex_dict.get('protocols', [4]),
                   ex_dict.get('transports', ['TCP']),
                   FSAL.from_dict(ex_dict.get('fsal', {})),
                   [Client.from_dict(client) for client in ex_dict.get('clients', [])],
                   sectype=ex_dict.get("sectype"))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to the dict form; protocols/transports are sorted for stable output."""
        values = {
            'export_id': self.export_id,
            'path': self.path,
            'cluster_id': self.cluster_id,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'security_label': self.security_label,
            'protocols': sorted(self.protocols),
            'transports': sorted(self.transports),
            'fsal': self.fsal.to_dict(),
            'clients': [client.to_dict() for client in self.clients]
        }
        if self.sectype:
            values['sectype'] = self.sectype
        return values

    def validate(self, mgr: 'Module') -> None:
        """Raise NFSInvalidOperation (or FSNotFound) if any field is invalid."""
        if not isabs(self.pseudo) or self.pseudo == "/":
            raise NFSInvalidOperation(
                f"pseudo path {self.pseudo} is invalid. It should be an absolute "
                "path and it cannot be just '/'."
            )
        _validate_squash(self.squash)
        _validate_access_type(self.access_type)
        if not isinstance(self.security_label, bool):
            raise NFSInvalidOperation('security_label must be a boolean value')
        for p in self.protocols:
            if p not in [3, 4]:
                raise NFSInvalidOperation(f"Invalid protocol {p}")
        valid_transport = ["UDP", "TCP"]
        for trans in self.transports:
            if trans.upper() not in valid_transport:
                raise NFSInvalidOperation(f'{trans} is not a valid transport protocol')
        for client in self.clients:
            if client.squash:
                _validate_squash(client.squash)
            if client.access_type:
                _validate_access_type(client.access_type)
        if self.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
            fs = cast(CephFSFSAL, self.fsal)
            if not fs.fs_name or not check_fs(mgr, fs.fs_name):
                raise FSNotFound(fs.fs_name)
        elif self.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
            # no RGW-specific validation at the moment
            rgw = cast(RGWFSAL, self.fsal)  # noqa
            pass
        else:
            # bugfix: this message was missing its f-prefix and printed the
            # literal placeholder instead of the FSAL name
            raise NFSInvalidOperation(f'FSAL {self.fsal.name} not supported')
        for st in (self.sectype or []):
            _validate_sec_type(st)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Export):
            return False
        return self.to_dict() == other.to_dict()
def _format_block_body(block: RawBlock, depth: int = 0) -> str:
    """Render the nested blocks and the 'key = value;' stanzas of *block*.

    Nested blocks come first, then one line per non-None value, each
    indented *depth* levels.
    """
    parts = [format_block(sub, depth) for sub in block.blocks]
    for key, val in block.values.items():
        if val is None:
            continue
        rendered = _format_val(block.block_name, key, val)
        parts.append('{}{} = {};\n'.format(_indentation(depth), key, rendered))
    return ''.join(parts)
def format_block(block: RawBlock, depth: int = 0) -> str:
    """Serialize *block* (and its children) into ganesha configuration text.

    The '%url' pseudo-block renders as a directive line; every other block
    renders as 'NAME {\\n <body> }\\n' indented *depth* levels.
    """
    if block.block_name == "%url":
        return '%url "{}"\n\n'.format(block.values['value'])
    indent = _indentation(depth)
    body = _format_block_body(block, depth + 1)
    return '{}{} {{\n{}{}}}\n'.format(indent, block.block_name, body, indent)
| 19,920 | 35.285974 | 100 | py |
null | ceph-main/src/pybind/mgr/nfs/module.py | import logging
import threading
from typing import Tuple, Optional, List, Dict, Any
from mgr_module import MgrModule, CLICommand, Option, CLICheckNonemptyFileInput
import object_format
import orchestrator
from orchestrator.module import IngressType
from .export import ExportMgr, AppliedExportResults
from .cluster import NFSCluster
from .utils import available_clusters
log = logging.getLogger(__name__)
class Module(orchestrator.OrchestratorClientMixin, MgrModule):
    # No module-specific config options are declared.
    MODULE_OPTIONS: List[Option] = []
    def __init__(self, *args: str, **kwargs: Any) -> None:
        # inited flips to True only once the managers below exist
        self.inited = False
        self.lock = threading.Lock()
        super(Module, self).__init__(*args, **kwargs)
        with self.lock:
            # the two managers that back all commands of this module
            self.export_mgr = ExportMgr(self)
            self.nfs = NFSCluster(self)
            self.inited = True
    # ---- export commands (all delegate to self.export_mgr) ----
    @CLICommand('nfs export create cephfs', perm='rw')
    @object_format.Responder()
    def _cmd_nfs_export_create_cephfs(
            self,
            cluster_id: str,
            pseudo_path: str,
            fsname: str,
            path: Optional[str] = '/',
            readonly: Optional[bool] = False,
            client_addr: Optional[List[str]] = None,
            squash: str = 'none',
            sectype: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Create a CephFS export"""
        return self.export_mgr.create_export(
            fsal_type='cephfs',
            fs_name=fsname,
            cluster_id=cluster_id,
            pseudo_path=pseudo_path,
            read_only=readonly,
            path=path,
            squash=squash,
            addr=client_addr,
            sectype=sectype,
        )
    @CLICommand('nfs export create rgw', perm='rw')
    @object_format.Responder()
    def _cmd_nfs_export_create_rgw(
            self,
            cluster_id: str,
            pseudo_path: str,
            bucket: Optional[str] = None,
            user_id: Optional[str] = None,
            readonly: Optional[bool] = False,
            client_addr: Optional[List[str]] = None,
            squash: str = 'none',
            sectype: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Create an RGW export"""
        return self.export_mgr.create_export(
            fsal_type='rgw',
            bucket=bucket,
            user_id=user_id,
            cluster_id=cluster_id,
            pseudo_path=pseudo_path,
            read_only=readonly,
            squash=squash,
            addr=client_addr,
            sectype=sectype,
        )
    @CLICommand('nfs export rm', perm='rw')
    @object_format.EmptyResponder()
    def _cmd_nfs_export_rm(self, cluster_id: str, pseudo_path: str) -> None:
        """Remove a cephfs export"""
        return self.export_mgr.delete_export(cluster_id=cluster_id, pseudo_path=pseudo_path)
    # deprecated alias of 'nfs export rm'
    @CLICommand('nfs export delete', perm='rw')
    @object_format.EmptyResponder()
    def _cmd_nfs_export_delete(self, cluster_id: str, pseudo_path: str) -> None:
        """Delete a cephfs export (DEPRECATED)"""
        return self.export_mgr.delete_export(cluster_id=cluster_id, pseudo_path=pseudo_path)
    @CLICommand('nfs export ls', perm='r')
    @object_format.Responder()
    def _cmd_nfs_export_ls(self, cluster_id: str, detailed: bool = False) -> List[Any]:
        """List exports of a NFS cluster"""
        return self.export_mgr.list_exports(cluster_id=cluster_id, detailed=detailed)
    @CLICommand('nfs export info', perm='r')
    @object_format.Responder()
    def _cmd_nfs_export_info(self, cluster_id: str, pseudo_path: str) -> Dict[str, Any]:
        """Fetch a export of a NFS cluster given the pseudo path/binding"""
        return self.export_mgr.get_export(cluster_id=cluster_id, pseudo_path=pseudo_path)
    # deprecated alias of 'nfs export info'
    @CLICommand('nfs export get', perm='r')
    @object_format.Responder()
    def _cmd_nfs_export_get(self, cluster_id: str, pseudo_path: str) -> Dict[str, Any]:
        """Fetch a export of a NFS cluster given the pseudo path/binding (DEPRECATED)"""
        return self.export_mgr.get_export(cluster_id=cluster_id, pseudo_path=pseudo_path)
    @CLICommand('nfs export apply', perm='rw')
    @CLICheckNonemptyFileInput(desc='Export JSON or Ganesha EXPORT specification')
    @object_format.Responder()
    def _cmd_nfs_export_apply(self, cluster_id: str, inbuf: str) -> AppliedExportResults:
        """Create or update an export by `-i <json_or_ganesha_export_file>`"""
        # inbuf is the file content supplied via `-i`, validated non-empty
        # by the CLICheckNonemptyFileInput decorator
        return self.export_mgr.apply_export(cluster_id, export_config=inbuf)
    # ---- cluster commands (all delegate to self.nfs) ----
    @CLICommand('nfs cluster create', perm='rw')
    @object_format.EmptyResponder()
    def _cmd_nfs_cluster_create(self,
                                cluster_id: str,
                                placement: Optional[str] = None,
                                ingress: Optional[bool] = None,
                                virtual_ip: Optional[str] = None,
                                ingress_mode: Optional[IngressType] = None,
                                port: Optional[int] = None) -> None:
        """Create an NFS Cluster"""
        return self.nfs.create_nfs_cluster(cluster_id=cluster_id, placement=placement,
                                           virtual_ip=virtual_ip, ingress=ingress,
                                           ingress_mode=ingress_mode, port=port)
    @CLICommand('nfs cluster rm', perm='rw')
    @object_format.EmptyResponder()
    def _cmd_nfs_cluster_rm(self, cluster_id: str) -> None:
        """Removes an NFS Cluster"""
        return self.nfs.delete_nfs_cluster(cluster_id=cluster_id)
    # deprecated alias of 'nfs cluster rm'
    @CLICommand('nfs cluster delete', perm='rw')
    @object_format.EmptyResponder()
    def _cmd_nfs_cluster_delete(self, cluster_id: str) -> None:
        """Removes an NFS Cluster (DEPRECATED)"""
        return self.nfs.delete_nfs_cluster(cluster_id=cluster_id)
    @CLICommand('nfs cluster ls', perm='r')
    @object_format.Responder()
    def _cmd_nfs_cluster_ls(self) -> List[str]:
        """List NFS Clusters"""
        return self.nfs.list_nfs_cluster()
    @CLICommand('nfs cluster info', perm='r')
    @object_format.Responder()
    def _cmd_nfs_cluster_info(self, cluster_id: Optional[str] = None) -> Dict[str, Any]:
        """Displays NFS Cluster info"""
        return self.nfs.show_nfs_cluster_info(cluster_id=cluster_id)
    @CLICommand('nfs cluster config get', perm='r')
    @object_format.ErrorResponseHandler()
    def _cmd_nfs_cluster_config_get(self, cluster_id: str) -> Tuple[int, str, str]:
        """Fetch NFS-Ganesha config"""
        conf = self.nfs.get_nfs_cluster_config(cluster_id=cluster_id)
        # (retval, stdout, stderr) tuple expected by ErrorResponseHandler
        return 0, conf, ""
    @CLICommand('nfs cluster config set', perm='rw')
    @CLICheckNonemptyFileInput(desc='NFS-Ganesha Configuration')
    @object_format.EmptyResponder()
    def _cmd_nfs_cluster_config_set(self, cluster_id: str, inbuf: str) -> None:
        """Set NFS-Ganesha config by `-i <config_file>`"""
        return self.nfs.set_nfs_cluster_config(cluster_id=cluster_id, nfs_config=inbuf)
    @CLICommand('nfs cluster config reset', perm='rw')
    @object_format.EmptyResponder()
    def _cmd_nfs_cluster_config_reset(self, cluster_id: str) -> None:
        """Reset NFS-Ganesha Config to default"""
        return self.nfs.reset_nfs_cluster_config(cluster_id=cluster_id)
    # ---- plain-python entry points for other mgr modules ----
    def fetch_nfs_export_obj(self) -> ExportMgr:
        """Return this module's ExportMgr instance."""
        return self.export_mgr
    def export_ls(self) -> List[Dict[Any, Any]]:
        """Return all exports; delegates to ExportMgr.list_all_exports."""
        return self.export_mgr.list_all_exports()
    def export_get(self, cluster_id: str, export_id: int) -> Optional[Dict[str, Any]]:
        """Return one export of *cluster_id* by numeric id, if any."""
        return self.export_mgr.get_export_by_id(cluster_id, export_id)
    def export_rm(self, cluster_id: str, pseudo: str) -> None:
        """Delete the export of *cluster_id* bound at pseudo path *pseudo*."""
        self.export_mgr.delete_export(cluster_id=cluster_id, pseudo_path=pseudo)
    def cluster_ls(self) -> List[str]:
        """Return the ids of all known NFS clusters."""
        return available_clusters(self)
| 7,831 | 40.221053 | 92 | py |
null | ceph-main/src/pybind/mgr/nfs/utils.py | import functools
import logging
import stat
from typing import List, Tuple, TYPE_CHECKING
from object_format import ErrorResponseBase
import orchestrator
import cephfs
from mgr_util import CephfsClient, open_filesystem
if TYPE_CHECKING:
from nfs.module import Module
# Name prefixes of the per-cluster RADOS objects this module works with
# (see export_obj_name/conf_obj_name/user_conf_obj_name below).
EXPORT_PREFIX: str = "export-"          # one object per export
CONF_PREFIX: str = "conf-nfs."          # ganesha config object
USER_CONF_PREFIX: str = "userconf-nfs."  # user-supplied config object
log = logging.getLogger(__name__)
class NonFatalError(ErrorResponseBase):
    """An informative, success-status command response.

    Raised to interrupt a command's flow while still reporting exit status
    0, so non-interactive scripts treat the overall action as completed;
    the message text tells the user what was (or was not) done.
    """

    def __init__(self, msg: str) -> None:
        super().__init__(msg)
        # kept so format_response can echo the message verbatim
        self.msg = msg

    def format_response(self) -> Tuple[int, str, str]:
        """Return (exit code 0, empty stdout, informational message)."""
        return 0, "", self.msg
class ManualRestartRequired(NonFatalError):
    """Non-fatal response telling the user to restart the NFS service by hand.

    Used when every other change succeeded but the running daemons must be
    restarted manually to pick it up.
    """

    def __init__(self, msg: str) -> None:
        super().__init__(f"{msg} (Manual Restart of NFS Pods required)")
def export_obj_name(export_id: int) -> str:
    """Return the name of the RADOS object that stores export *export_id*."""
    return EXPORT_PREFIX + str(export_id)
def conf_obj_name(cluster_id: str) -> str:
    """Return the name of the RADOS object that stores the cluster config."""
    return CONF_PREFIX + cluster_id
def user_conf_obj_name(cluster_id: str) -> str:
    """Return the name of the RADOS object that stores the user config."""
    return USER_CONF_PREFIX + cluster_id
def available_clusters(mgr: 'Module') -> List[str]:
    """Return the ids of every NFS cluster known to the orchestrator.

    A cluster id is the service_id part of the orchestrator service name
    (service_type.service_id), e.g. 'vstart' for service 'nfs.vstart'.
    """
    # TODO check cephadm cluster list with rados pool conf objects
    completion = mgr.describe_service(service_type='nfs')
    orchestrator.raise_if_exception(completion)
    assert completion.result is not None
    return [svc.spec.service_id for svc in completion.result
            if svc.spec.service_id]
def restart_nfs_service(mgr: 'Module', cluster_id: str) -> None:
    """Ask the orchestrator to restart all NFS daemons of *cluster_id*."""
    completion = mgr.service_action(action='restart',
                                    service_name=f'nfs.{cluster_id}')
    orchestrator.raise_if_exception(completion)
def check_fs(mgr: 'Module', fs_name: str) -> bool:
    """Return True if a CephFS filesystem named *fs_name* exists."""
    fs_map = mgr.get('fs_map')
    return any(fs['mdsmap']['fs_name'] == fs_name
               for fs in fs_map['filesystems'])
def cephfs_path_is_dir(mgr: 'Module', fs: str, path: str) -> None:
    """Raise unless *path* inside CephFS filesystem *fs* is a directory.

    Raises NotADirectoryError when the path resolves to a non-directory;
    cephfs errors (e.g. for a missing path) propagate unchanged.
    """
    @functools.lru_cache(maxsize=1)
    def _get_cephfs_client() -> CephfsClient:
        # NOTE(review): this cache is recreated on every call to
        # cephfs_path_is_dir, so it never persists between calls and only
        # dedupes within a single invocation — hoist it if reuse is intended.
        return CephfsClient(mgr)
    cephfs_client = _get_cephfs_client()
    with open_filesystem(cephfs_client, fs) as fs_handle:
        # AT_SYMLINK_NOFOLLOW: a symlink to a directory does not count
        stx = fs_handle.statx(path.encode('utf-8'), cephfs.CEPH_STATX_MODE,
                              cephfs.AT_SYMLINK_NOFOLLOW)
        if not stat.S_ISDIR(stx.get('mode')):
            raise NotADirectoryError()
| 3,382 | 31.219048 | 82 | py |
null | ceph-main/src/pybind/mgr/nfs/tests/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/pybind/mgr/nfs/tests/test_nfs.py | # flake8: noqa
import json
import pytest
from typing import Optional, Tuple, Iterator, List, Any
from contextlib import contextmanager
from unittest import mock
from unittest.mock import MagicMock
from mgr_module import MgrModule, NFS_POOL_NAME
from rados import ObjectNotFound
from ceph.deployment.service_spec import NFSServiceSpec
from nfs import Module
from nfs.export import ExportMgr, normalize_path
from nfs.ganesha_conf import GaneshaConfParser, Export, RawBlock
from nfs.cluster import NFSCluster
from orchestrator import ServiceDescription, DaemonDescription, OrchResult
class TestNFS:
cluster_id = "foo"
export_1 = """
EXPORT {
Export_ID=1;
Protocols = 4;
Path = /;
Pseudo = /cephfs_a/;
Access_Type = RW;
Protocols = 4;
Attr_Expiration_Time = 0;
# Squash = root;
FSAL {
Name = CEPH;
Filesystem = "a";
User_Id = "ganesha";
# Secret_Access_Key = "YOUR SECRET KEY HERE";
}
CLIENT
{
Clients = 192.168.0.10, 192.168.1.0/8;
Squash = None;
}
CLIENT
{
Clients = 192.168.0.0/16;
Squash = All;
Access_Type = RO;
}
}
"""
export_2 = """
EXPORT
{
Export_ID=2;
Path = "/";
Pseudo = "/rgw";
Access_Type = RW;
squash = AllAnonymous;
Protocols = 4, 3;
Transports = TCP, UDP;
FSAL {
Name = RGW;
User_Id = "nfs.foo.bucket";
Access_Key_Id ="the_access_key";
Secret_Access_Key = "the_secret_key";
}
}
"""
export_3 = """
EXPORT {
FSAL {
name = "CEPH";
user_id = "nfs.foo.1";
filesystem = "a";
secret_access_key = "AQCjU+hgjyReLBAAddJa0Dza/ZHqjX5+JiePMA==";
}
export_id = 1;
path = "/";
pseudo = "/a";
access_type = "RW";
squash = "none";
attr_expiration_time = 0;
security_label = true;
protocols = 4;
transports = "TCP";
}
"""
export_4 = """
EXPORT {
FSAL {
name = "CEPH";
user_id = "nfs.foo.1";
filesystem = "a";
secret_access_key = "AQCjU+hgjyReLBAAddJa0Dza/ZHqjX5+JiePMA==";
}
export_id = 1;
path = "/secure/me";
pseudo = "/secure1";
access_type = "RW";
squash = "no_root_squash";
SecType = "krb5p", "krb5i";
attr_expiration_time = 0;
security_label = true;
protocols = 4;
transports = "TCP";
}
"""
conf_nfs_foo = f'''
%url "rados://{NFS_POOL_NAME}/{cluster_id}/export-1"
%url "rados://{NFS_POOL_NAME}/{cluster_id}/export-2"'''
class RObject(object):
def __init__(self, key: str, raw: str) -> None:
self.key = key
self.raw = raw
def read(self, _: Optional[int]) -> bytes:
return self.raw.encode('utf-8')
def stat(self) -> Tuple[int, None]:
return len(self.raw), None
def _ioctx_write_full_mock(self, key: str, content: bytes) -> None:
if key not in self.temp_store[self.temp_store_namespace]:
self.temp_store[self.temp_store_namespace][key] = \
TestNFS.RObject(key, content.decode('utf-8'))
else:
self.temp_store[self.temp_store_namespace][key].raw = content.decode('utf-8')
def _ioctx_remove_mock(self, key: str) -> None:
del self.temp_store[self.temp_store_namespace][key]
def _ioctx_list_objects_mock(self) -> List['TestNFS.RObject']:
r = [obj for _, obj in self.temp_store[self.temp_store_namespace].items()]
return r
def _ioctl_stat_mock(self, key):
return self.temp_store[self.temp_store_namespace][key].stat()
def _ioctl_read_mock(self, key: str, size: Optional[Any] = None) -> bytes:
if key not in self.temp_store[self.temp_store_namespace]:
raise ObjectNotFound
return self.temp_store[self.temp_store_namespace][key].read(size)
def _ioctx_set_namespace_mock(self, namespace: str) -> None:
self.temp_store_namespace = namespace
def _reset_temp_store(self) -> None:
self.temp_store_namespace = None
self.temp_store = {
'foo': {
'export-1': TestNFS.RObject("export-1", self.export_1),
'export-2': TestNFS.RObject("export-2", self.export_2),
'conf-nfs.foo': TestNFS.RObject("conf-nfs.foo", self.conf_nfs_foo)
}
}
@contextmanager
def _mock_orchestrator(self, enable: bool) -> Iterator:
self.io_mock = MagicMock()
self.io_mock.set_namespace.side_effect = self._ioctx_set_namespace_mock
self.io_mock.read = self._ioctl_read_mock
self.io_mock.stat = self._ioctl_stat_mock
self.io_mock.list_objects.side_effect = self._ioctx_list_objects_mock
self.io_mock.write_full.side_effect = self._ioctx_write_full_mock
self.io_mock.remove_object.side_effect = self._ioctx_remove_mock
# mock nfs services
orch_nfs_services = [
ServiceDescription(spec=NFSServiceSpec(service_id=self.cluster_id))
] if enable else []
orch_nfs_daemons = [
DaemonDescription('nfs', 'foo.mydaemon', 'myhostname')
] if enable else []
def mock_exec(cls, args):
if args[1:3] == ['bucket', 'stats']:
bucket_info = {
"owner": "bucket_owner_user",
}
return 0, json.dumps(bucket_info), ''
u = {
"user_id": "abc",
"display_name": "foo",
"email": "",
"suspended": 0,
"max_buckets": 1000,
"subusers": [],
"keys": [
{
"user": "abc",
"access_key": "the_access_key",
"secret_key": "the_secret_key"
}
],
"swift_keys": [],
"caps": [],
"op_mask": "read, write, delete",
"default_placement": "",
"default_storage_class": "",
"placement_tags": [],
"bucket_quota": {
"enabled": False,
"check_on_raw": False,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"user_quota": {
"enabled": False,
"check_on_raw": False,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"temp_url_keys": [],
"type": "rgw",
"mfa_ids": []
}
if args[2] == 'list':
return 0, json.dumps([u]), ''
return 0, json.dumps(u), ''
def mock_describe_service(cls, *args, **kwargs):
if kwargs['service_type'] == 'nfs':
return OrchResult(orch_nfs_services)
return OrchResult([])
def mock_list_daemons(cls, *args, **kwargs):
if kwargs['daemon_type'] == 'nfs':
return OrchResult(orch_nfs_daemons)
return OrchResult([])
with mock.patch('nfs.module.Module.describe_service', mock_describe_service) as describe_service, \
mock.patch('nfs.module.Module.list_daemons', mock_list_daemons) as list_daemons, \
mock.patch('nfs.module.Module.rados') as rados, \
mock.patch('nfs.export.available_clusters',
return_value=[self.cluster_id]), \
mock.patch('nfs.export.restart_nfs_service'), \
mock.patch('nfs.cluster.restart_nfs_service'), \
mock.patch.object(MgrModule, 'tool_exec', mock_exec), \
mock.patch('nfs.export.check_fs', return_value=True), \
mock.patch('nfs.ganesha_conf.check_fs', return_value=True), \
mock.patch('nfs.export.ExportMgr._create_user_key',
return_value='thekeyforclientabc'), \
mock.patch('nfs.export.cephfs_path_is_dir'):
rados.open_ioctx.return_value.__enter__.return_value = self.io_mock
rados.open_ioctx.return_value.__exit__ = mock.Mock(return_value=None)
self._reset_temp_store()
yield
def test_parse_daemon_raw_config(self) -> None:
expected_daemon_config = [
RawBlock('NFS_CORE_PARAM', values={
"enable_nlm": False,
"enable_rquota": False,
"protocols": 4,
"nfs_port": 14000
}),
RawBlock('MDCACHE', values={
"dir_chunk": 0
}),
RawBlock('NFSV4', values={
"recoverybackend": "rados_cluster",
"minor_versions": [1, 2]
}),
RawBlock('RADOS_KV', values={
"pool": NFS_POOL_NAME,
"namespace": "vstart",
"userid": "vstart",
"nodeid": "a"
}),
RawBlock('RADOS_URLS', values={
"userid": "vstart",
"watch_url": f"'rados://{NFS_POOL_NAME}/vstart/conf-nfs.vstart'"
}),
RawBlock('%url', values={
"value": f"rados://{NFS_POOL_NAME}/vstart/conf-nfs.vstart"
})
]
daemon_raw_config = """
NFS_CORE_PARAM {
Enable_NLM = false;
Enable_RQUOTA = false;
Protocols = 4;
NFS_Port = 14000;
}
MDCACHE {
Dir_Chunk = 0;
}
NFSv4 {
RecoveryBackend = rados_cluster;
Minor_Versions = 1, 2;
}
RADOS_KV {
pool = {};
namespace = vstart;
UserId = vstart;
nodeid = a;
}
RADOS_URLS {
Userid = vstart;
watch_url = 'rados://{}/vstart/conf-nfs.vstart';
}
%url rados://{}/vstart/conf-nfs.vstart
""".replace('{}', NFS_POOL_NAME)
daemon_config = GaneshaConfParser(daemon_raw_config).parse()
assert daemon_config == expected_daemon_config
def _validate_export_1(self, export: Export):
assert export.export_id == 1
assert export.path == "/"
assert export.pseudo == "/cephfs_a/"
assert export.access_type == "RW"
# assert export.squash == "root_squash" # probably correct value
assert export.squash == "no_root_squash"
assert export.protocols == [4]
# assert export.transports == {"TCP", "UDP"}
assert export.fsal.name == "CEPH"
assert export.fsal.user_id == "ganesha"
assert export.fsal.fs_name == "a"
assert export.fsal.sec_label_xattr == None
assert len(export.clients) == 2
assert export.clients[0].addresses == \
["192.168.0.10", "192.168.1.0/8"]
# assert export.clients[0].squash == "no_root_squash" # probably correct value
assert export.clients[0].squash == "None"
assert export.clients[0].access_type is None
assert export.clients[1].addresses == ["192.168.0.0/16"]
# assert export.clients[1].squash == "all_squash" # probably correct value
assert export.clients[1].squash == "All"
assert export.clients[1].access_type == "RO"
assert export.cluster_id == 'foo'
assert export.attr_expiration_time == 0
# assert export.security_label == False # probably correct value
assert export.security_label == True
def test_export_parser_1(self) -> None:
blocks = GaneshaConfParser(self.export_1).parse()
assert isinstance(blocks, list)
assert len(blocks) == 1
export = Export.from_export_block(blocks[0], self.cluster_id)
self._validate_export_1(export)
def _validate_export_2(self, export: Export):
assert export.export_id == 2
assert export.path == "/"
assert export.pseudo == "/rgw"
assert export.access_type == "RW"
# assert export.squash == "all_squash" # probably correct value
assert export.squash == "AllAnonymous"
assert export.protocols == [4, 3]
assert set(export.transports) == {"TCP", "UDP"}
assert export.fsal.name == "RGW"
assert export.fsal.user_id == "nfs.foo.bucket"
assert export.fsal.access_key_id == "the_access_key"
assert export.fsal.secret_access_key == "the_secret_key"
assert len(export.clients) == 0
assert export.cluster_id == 'foo'
def test_export_parser_2(self) -> None:
    """Parse the second sample config and validate the resulting Export."""
    parsed = GaneshaConfParser(self.export_2).parse()
    assert isinstance(parsed, list)
    assert len(parsed) == 1
    self._validate_export_2(Export.from_export_block(parsed[0], self.cluster_id))
def test_daemon_conf_parser(self) -> None:
    """The daemon config parses into two %url blocks, one per export."""
    parsed = GaneshaConfParser(self.conf_nfs_foo).parse()
    assert isinstance(parsed, list)
    assert len(parsed) == 2
    for idx, blk in enumerate(parsed, start=1):
        assert blk.block_name == "%url"
        assert blk.values['value'] == \
            f"rados://{NFS_POOL_NAME}/{self.cluster_id}/export-{idx}"
def _do_mock_test(self, func, *args) -> None:
    # Run *func* with the mock orchestrator active, then wipe the fake
    # object store so subsequent tests start from a clean state.
    # NOTE(review): indentation was mangled in this dump; the reset call is
    # placed inside the context manager per upstream — confirm.
    with self._mock_orchestrator(True):
        func(*args)
        self._reset_temp_store()
def test_ganesha_conf(self) -> None:
    self._do_mock_test(self._do_test_ganesha_conf)

def _do_test_ganesha_conf(self) -> None:
    """ExportMgr should load both pre-seeded exports for the cluster."""
    export_mgr = ExportMgr(Module('nfs', '', ''))
    loaded = export_mgr.exports[self.cluster_id]
    assert len(loaded) == 2
    by_id = {e.export_id: e for e in loaded}
    self._validate_export_1(by_id[1])
    self._validate_export_2(by_id[2])
def test_config_dict(self) -> None:
    self._do_mock_test(self._do_test_config_dict)

def _do_test_config_dict(self) -> None:
    """Export.to_dict() must serialize both pre-seeded exports exactly."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)
    export = [e for e in conf.exports['foo'] if e.export_id == 1][0]
    ex_dict = export.to_dict()

    # Expected serialization of the CephFS export (export-1).
    assert ex_dict == {'access_type': 'RW',
                       'clients': [{'access_type': None,
                                    'addresses': ['192.168.0.10', '192.168.1.0/8'],
                                    'squash': 'None'},
                                   {'access_type': 'RO',
                                    'addresses': ['192.168.0.0/16'],
                                    'squash': 'All'}],
                       'cluster_id': self.cluster_id,
                       'export_id': 1,
                       'fsal': {'fs_name': 'a', 'name': 'CEPH', 'user_id': 'ganesha'},
                       'path': '/',
                       'protocols': [4],
                       'pseudo': '/cephfs_a/',
                       'security_label': True,
                       'squash': 'no_root_squash',
                       'transports': []}

    export = [e for e in conf.exports['foo'] if e.export_id == 2][0]
    ex_dict = export.to_dict()
    # Expected serialization of the RGW export (export-2).
    assert ex_dict == {'access_type': 'RW',
                       'clients': [],
                       'cluster_id': self.cluster_id,
                       'export_id': 2,
                       'fsal': {'name': 'RGW',
                                'access_key_id': 'the_access_key',
                                'secret_access_key': 'the_secret_key',
                                'user_id': 'nfs.foo.bucket'},
                       'path': '/',
                       'protocols': [3, 4],
                       'pseudo': '/rgw',
                       'security_label': True,
                       'squash': 'AllAnonymous',
                       'transports': ['TCP', 'UDP']}
def test_config_from_dict(self) -> None:
    self._do_mock_test(self._do_test_config_from_dict)

def _do_test_config_from_dict(self) -> None:
    """Export.from_dict() must populate every field it is given."""
    # CephFS-backed export with two client blocks.
    export = Export.from_dict(1, {
        'export_id': 1,
        'path': '/',
        'cluster_id': self.cluster_id,
        'pseudo': '/cephfs_a',
        'access_type': 'RW',
        'squash': 'root_squash',
        'security_label': True,
        'protocols': [4],
        'transports': ['TCP', 'UDP'],
        'clients': [{
            'addresses': ["192.168.0.10", "192.168.1.0/8"],
            'access_type': None,
            'squash': 'no_root_squash'
        }, {
            'addresses': ["192.168.0.0/16"],
            'access_type': 'RO',
            'squash': 'all_squash'
        }],
        'fsal': {
            'name': 'CEPH',
            'user_id': 'ganesha',
            'fs_name': 'a',
            'sec_label_xattr': 'security.selinux'
        }
    })

    assert export.export_id == 1
    assert export.path == "/"
    assert export.pseudo == "/cephfs_a"
    assert export.access_type == "RW"
    assert export.squash == "root_squash"
    assert set(export.protocols) == {4}
    assert set(export.transports) == {"TCP", "UDP"}
    assert export.fsal.name == "CEPH"
    assert export.fsal.user_id == "ganesha"
    assert export.fsal.fs_name == "a"
    assert export.fsal.sec_label_xattr == 'security.selinux'
    assert len(export.clients) == 2
    assert export.clients[0].addresses == \
        ["192.168.0.10", "192.168.1.0/8"]
    assert export.clients[0].squash == "no_root_squash"
    assert export.clients[0].access_type is None
    assert export.clients[1].addresses == ["192.168.0.0/16"]
    assert export.clients[1].squash == "all_squash"
    assert export.clients[1].access_type == "RO"
    assert export.cluster_id == self.cluster_id
    assert export.attr_expiration_time == 0
    assert export.security_label

    # RGW-backed export without client blocks.
    export = Export.from_dict(2, {
        'export_id': 2,
        'path': 'bucket',
        'pseudo': '/rgw',
        'cluster_id': self.cluster_id,
        'access_type': 'RW',
        'squash': 'all_squash',
        'security_label': False,
        'protocols': [4, 3],
        'transports': ['TCP', 'UDP'],
        'clients': [],
        'fsal': {
            'name': 'RGW',
            'user_id': 'rgw.foo.bucket',
            'access_key_id': 'the_access_key',
            'secret_access_key': 'the_secret_key'
        }
    })

    assert export.export_id == 2
    assert export.path == "bucket"
    assert export.pseudo == "/rgw"
    assert export.access_type == "RW"
    assert export.squash == "all_squash"
    assert set(export.protocols) == {4, 3}
    assert set(export.transports) == {"TCP", "UDP"}
    assert export.fsal.name == "RGW"
    assert export.fsal.user_id == "rgw.foo.bucket"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 0
    assert export.cluster_id == self.cluster_id
@pytest.mark.parametrize(
    "block",
    [
        export_1,
        export_2,
    ]
)
def test_export_from_to_export_block(self, block):
    """Export -> export-block -> Export must round-trip losslessly."""
    first = Export.from_export_block(
        GaneshaConfParser(block).parse()[0], self.cluster_id)
    regenerated = first.to_export_block()
    second = Export.from_export_block(regenerated, self.cluster_id)
    assert regenerated == second.to_export_block()
@pytest.mark.parametrize(
    "block",
    [
        export_1,
        export_2,
    ]
)
def test_export_from_to_dict(self, block):
    """Export -> dict -> Export must round-trip losslessly."""
    export = Export.from_export_block(
        GaneshaConfParser(block).parse()[0], self.cluster_id)
    first_dict = export.to_dict()
    second_dict = Export.from_dict(first_dict['export_id'], first_dict).to_dict()
    assert first_dict == second_dict
@pytest.mark.parametrize(
    "block",
    [
        export_1,
        export_2,
    ]
)
def test_export_validate(self, block):
    """Export.validate() must accept both sample exports."""
    export = Export.from_export_block(
        GaneshaConfParser(block).parse()[0], self.cluster_id)
    nfs_module = Module('nfs', '', '')
    with mock.patch('nfs.ganesha_conf.check_fs', return_value=True):
        export.validate(nfs_module)
def test_update_export(self):
    self._do_mock_test(self._do_test_update_export)

def _do_test_update_export(self):
    """apply_export() must update an existing export in place."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)
    r = conf.apply_export(self.cluster_id, json.dumps({
        'export_id': 2,
        'path': 'bucket',
        'pseudo': '/rgw/bucket',
        'cluster_id': self.cluster_id,
        'access_type': 'RW',
        'squash': 'all_squash',
        'security_label': False,
        'protocols': [4, 3],
        'transports': ['TCP', 'UDP'],
        'clients': [{
            'addresses': ["192.168.0.0/16"],
            'access_type': None,
            'squash': None
        }],
        'fsal': {
            'name': 'RGW',
            'user_id': 'nfs.foo.bucket',
            'access_key_id': 'the_access_key',
            'secret_access_key': 'the_secret_key',
        }
    }))
    assert len(r.changes) == 1

    export = conf._fetch_export('foo', '/rgw/bucket')
    assert export.export_id == 2
    assert export.path == "bucket"
    assert export.pseudo == "/rgw/bucket"
    assert export.access_type == "RW"
    assert export.squash == "all_squash"
    assert export.protocols == [4, 3]
    assert export.transports == ["TCP", "UDP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash is None
    assert export.clients[0].access_type is None
    assert export.cluster_id == self.cluster_id

    # do it again, with changes
    r = conf.apply_export(self.cluster_id, json.dumps({
        'export_id': 2,
        'path': 'newbucket',
        'pseudo': '/rgw/bucket',
        'cluster_id': self.cluster_id,
        'access_type': 'RO',
        'squash': 'root',
        'security_label': False,
        'protocols': [4],
        'transports': ['TCP'],
        'clients': [{
            'addresses': ["192.168.10.0/16"],
            'access_type': None,
            'squash': None
        }],
        'fsal': {
            'name': 'RGW',
            'user_id': 'nfs.foo.newbucket',
            'access_key_id': 'the_access_key',
            'secret_access_key': 'the_secret_key',
        }
    }))
    assert len(r.changes) == 1

    export = conf._fetch_export('foo', '/rgw/bucket')
    assert export.export_id == 2
    assert export.path == "newbucket"
    assert export.pseudo == "/rgw/bucket"
    assert export.access_type == "RO"
    assert export.squash == "root"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash is None
    assert export.clients[0].access_type is None
    assert export.cluster_id == self.cluster_id

    # again, but without export_id: matched on pseudo path, the existing
    # id (2) is kept rather than a new one allocated
    r = conf.apply_export(self.cluster_id, json.dumps({
        'path': 'newestbucket',
        'pseudo': '/rgw/bucket',
        'cluster_id': self.cluster_id,
        'access_type': 'RW',
        'squash': 'root',
        'security_label': False,
        'protocols': [4],
        'transports': ['TCP'],
        'clients': [{
            'addresses': ["192.168.10.0/16"],
            'access_type': None,
            'squash': None
        }],
        'fsal': {
            'name': 'RGW',
            'user_id': 'nfs.foo.newestbucket',
            'access_key_id': 'the_access_key',
            'secret_access_key': 'the_secret_key',
        }
    }))
    assert len(r.changes) == 1

    export = conf._fetch_export(self.cluster_id, '/rgw/bucket')
    assert export.export_id == 2
    assert export.path == "newestbucket"
    assert export.pseudo == "/rgw/bucket"
    assert export.access_type == "RW"
    assert export.squash == "root"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash is None
    assert export.clients[0].access_type is None
    assert export.cluster_id == self.cluster_id
def test_update_export_sectype(self):
    self._do_mock_test(self._test_update_export_sectype)

def _test_update_export_sectype(self):
    """sectype is absent by default and persisted once supplied."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)
    r = conf.apply_export(self.cluster_id, json.dumps({
        'export_id': 2,
        'path': 'bucket',
        'pseudo': '/rgw/bucket',
        'cluster_id': self.cluster_id,
        'access_type': 'RW',
        'squash': 'all_squash',
        'security_label': False,
        'protocols': [4, 3],
        'transports': ['TCP', 'UDP'],
        'clients': [{
            'addresses': ["192.168.0.0/16"],
            'access_type': None,
            'squash': None
        }],
        'fsal': {
            'name': 'RGW',
            'user_id': 'nfs.foo.bucket',
            'access_key_id': 'the_access_key',
            'secret_access_key': 'the_secret_key',
        }
    }))
    assert len(r.changes) == 1

    # no sectype was given, key not present
    info = conf._get_export_dict(self.cluster_id, "/rgw/bucket")
    assert info["export_id"] == 2
    assert info["path"] == "bucket"
    assert "sectype" not in info

    r = conf.apply_export(self.cluster_id, json.dumps({
        'export_id': 2,
        'path': 'bucket',
        'pseudo': '/rgw/bucket',
        'cluster_id': self.cluster_id,
        'access_type': 'RW',
        'squash': 'all_squash',
        'security_label': False,
        'protocols': [4, 3],
        'transports': ['TCP', 'UDP'],
        'clients': [{
            'addresses': ["192.168.0.0/16"],
            'access_type': None,
            'squash': None
        }],
        'sectype': ["krb5p", "krb5i", "sys"],
        'fsal': {
            'name': 'RGW',
            'user_id': 'nfs.foo.bucket',
            'access_key_id': 'the_access_key',
            'secret_access_key': 'the_secret_key',
        }
    }))
    assert len(r.changes) == 1

    # assert sectype matches new value(s)
    info = conf._get_export_dict(self.cluster_id, "/rgw/bucket")
    assert info["export_id"] == 2
    assert info["path"] == "bucket"
    assert info["sectype"] == ["krb5p", "krb5i", "sys"]
def test_update_export_with_ganesha_conf(self):
    self._do_mock_test(self._do_test_update_export_with_ganesha_conf)

def _do_test_update_export_with_ganesha_conf(self):
    """apply_export() also accepts raw ganesha conf text as input."""
    export_mgr = ExportMgr(Module('nfs', '', ''))
    result = export_mgr.apply_export(self.cluster_id, self.export_3)
    assert len(result.changes) == 1
def test_update_export_with_ganesha_conf_sectype(self):
    self._do_mock_test(
        self._do_test_update_export_with_ganesha_conf_sectype,
        self.export_4, ["krb5p", "krb5i"])

def test_update_export_with_ganesha_conf_sectype_lcase(self):
    # A lower-case "sectype" key (and a different flavour list) must parse too.
    export_conf = self.export_4.replace("SecType", "sectype").replace("krb5i", "sys")
    self._do_mock_test(
        self._do_test_update_export_with_ganesha_conf_sectype,
        export_conf, ["krb5p", "sys"])

def _do_test_update_export_with_ganesha_conf_sectype(self, export_conf, expect_sectype):
    """Apply raw conf text containing SecType and verify the stored value."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)
    r = conf.apply_export(self.cluster_id, export_conf)
    assert len(r.changes) == 1

    # assert sectype matches new value(s)
    info = conf._get_export_dict(self.cluster_id, "/secure1")
    assert info["export_id"] == 1
    assert info["path"] == "/secure/me"
    assert info["sectype"] == expect_sectype
def test_update_export_with_list(self):
    self._do_mock_test(self._do_test_update_export_with_list)

def _do_test_update_export_with_list(self):
    """apply_export() accepts a JSON list and creates one export per entry."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)
    r = conf.apply_export(self.cluster_id, json.dumps([
        {
            'path': 'bucket',
            'pseudo': '/rgw/bucket',
            'cluster_id': self.cluster_id,
            'access_type': 'RW',
            'squash': 'root',
            'security_label': False,
            'protocols': [4],
            'transports': ['TCP'],
            'clients': [{
                'addresses': ["192.168.0.0/16"],
                'access_type': None,
                'squash': None
            }],
            'fsal': {
                'name': 'RGW',
                'user_id': 'nfs.foo.bucket',
                'access_key_id': 'the_access_key',
                'secret_access_key': 'the_secret_key',
            }
        },
        {
            'path': 'bucket2',
            'pseudo': '/rgw/bucket2',
            'cluster_id': self.cluster_id,
            'access_type': 'RO',
            'squash': 'root',
            'security_label': False,
            'protocols': [4],
            'transports': ['TCP'],
            'clients': [{
                'addresses': ["192.168.0.0/16"],
                'access_type': None,
                'squash': None
            }],
            'fsal': {
                'name': 'RGW',
                'user_id': 'nfs.foo.bucket2',
                'access_key_id': 'the_access_key',
                'secret_access_key': 'the_secret_key',
            }
        },
    ]))
    # The input object above contains TWO items (two different pseudo paths)
    # therefore we expect the result to report that two changes have been
    # applied, rather than the typical 1 change.
    assert len(r.changes) == 2

    # New ids continue after the two pre-seeded exports (1 and 2).
    export = conf._fetch_export('foo', '/rgw/bucket')
    assert export.export_id == 3
    assert export.path == "bucket"
    assert export.pseudo == "/rgw/bucket"
    assert export.access_type == "RW"
    assert export.squash == "root"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash is None
    assert export.clients[0].access_type is None
    assert export.cluster_id == self.cluster_id

    export = conf._fetch_export('foo', '/rgw/bucket2')
    assert export.export_id == 4
    assert export.path == "bucket2"
    assert export.pseudo == "/rgw/bucket2"
    assert export.access_type == "RO"
    assert export.squash == "root"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash is None
    assert export.clients[0].access_type is None
    assert export.cluster_id == self.cluster_id
def test_remove_export(self) -> None:
    self._do_mock_test(self._do_test_remove_export)

def _do_test_remove_export(self) -> None:
    """Deleting one of the two seeded exports leaves only export 1."""
    export_mgr = ExportMgr(Module('nfs', '', ''))
    assert len(export_mgr.exports[self.cluster_id]) == 2
    export_mgr.delete_export(cluster_id=self.cluster_id, pseudo_path="/rgw")
    remaining = export_mgr.exports[self.cluster_id]
    assert len(remaining) == 1
    assert remaining[0].export_id == 1
def test_create_export_rgw_bucket(self):
    self._do_mock_test(self._do_test_create_export_rgw_bucket)

def _do_test_create_export_rgw_bucket(self):
    """create_export() for an RGW bucket picks up the bucket owner's user."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 2

    r = conf.create_export(
        fsal_type='rgw',
        cluster_id=self.cluster_id,
        bucket='bucket',
        pseudo_path='/mybucket',
        read_only=False,
        squash='root',
        addr=["192.168.0.0/16"]
    )
    assert r["bind"] == "/mybucket"

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 3

    export = conf._fetch_export('foo', '/mybucket')
    assert export.export_id
    assert export.path == "bucket"
    assert export.pseudo == "/mybucket"
    assert export.access_type == "none"
    assert export.squash == "none"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.user_id == "bucket_owner_user"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash == 'root'
    assert export.clients[0].access_type == 'rw'
    assert export.clients[0].addresses == ["192.168.0.0/16"]
    assert export.cluster_id == self.cluster_id
def test_create_export_rgw_bucket_user(self):
    self._do_mock_test(self._do_test_create_export_rgw_bucket_user)

def _do_test_create_export_rgw_bucket_user(self):
    """An explicit user_id overrides the bucket owner for an RGW export."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 2

    r = conf.create_export(
        fsal_type='rgw',
        cluster_id=self.cluster_id,
        bucket='bucket',
        user_id='other_user',
        pseudo_path='/mybucket',
        read_only=False,
        squash='root',
        addr=["192.168.0.0/16"]
    )
    assert r["bind"] == "/mybucket"

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 3

    export = conf._fetch_export('foo', '/mybucket')
    assert export.export_id
    assert export.path == "bucket"
    assert export.pseudo == "/mybucket"
    assert export.access_type == "none"
    assert export.squash == "none"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash == 'root'
    assert export.fsal.user_id == "other_user"
    assert export.clients[0].access_type == 'rw'
    assert export.clients[0].addresses == ["192.168.0.0/16"]
    assert export.cluster_id == self.cluster_id
def test_create_export_rgw_user(self):
    self._do_mock_test(self._do_test_create_export_rgw_user)

def _do_test_create_export_rgw_user(self):
    """An RGW export with only a user (no bucket) exports path "/"."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 2

    r = conf.create_export(
        fsal_type='rgw',
        cluster_id=self.cluster_id,
        user_id='some_user',
        pseudo_path='/mybucket',
        read_only=False,
        squash='root',
        addr=["192.168.0.0/16"]
    )
    assert r["bind"] == "/mybucket"

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 3

    export = conf._fetch_export('foo', '/mybucket')
    assert export.export_id
    assert export.path == "/"
    assert export.pseudo == "/mybucket"
    assert export.access_type == "none"
    assert export.squash == "none"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "RGW"
    assert export.fsal.access_key_id == "the_access_key"
    assert export.fsal.secret_access_key == "the_secret_key"
    assert len(export.clients) == 1
    assert export.clients[0].squash == 'root'
    assert export.fsal.user_id == "some_user"
    assert export.clients[0].access_type == 'rw'
    assert export.clients[0].addresses == ["192.168.0.0/16"]
    assert export.cluster_id == self.cluster_id
def test_create_export_cephfs(self):
    self._do_mock_test(self._do_test_create_export_cephfs)

def _do_test_create_export_cephfs(self):
    """create_export() for CephFS provisions a cephx user for the export."""
    nfs_mod = Module('nfs', '', '')
    conf = ExportMgr(nfs_mod)

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 2

    r = conf.create_export(
        fsal_type='cephfs',
        cluster_id=self.cluster_id,
        fs_name='myfs',
        path='/',
        pseudo_path='/cephfs2',
        read_only=False,
        squash='root',
        addr=["192.168.1.0/8"],
    )
    assert r["bind"] == "/cephfs2"

    ls = conf.list_exports(cluster_id=self.cluster_id)
    assert len(ls) == 3

    export = conf._fetch_export('foo', '/cephfs2')
    assert export.export_id
    assert export.path == "/"
    assert export.pseudo == "/cephfs2"
    assert export.access_type == "none"
    assert export.squash == "none"
    assert export.protocols == [4]
    assert export.transports == ["TCP"]
    assert export.fsal.name == "CEPH"
    assert export.fsal.user_id == "nfs.foo.3"
    assert export.fsal.cephx_key == "thekeyforclientabc"
    assert len(export.clients) == 1
    assert export.clients[0].squash == 'root'
    assert export.clients[0].access_type == 'rw'
    assert export.clients[0].addresses == ["192.168.1.0/8"]
    assert export.cluster_id == self.cluster_id
def _do_test_cluster_ls(self):
    """list_nfs_cluster() reports the single configured cluster."""
    nfs_mod = Module('nfs', '', '')
    cluster = NFSCluster(nfs_mod)

    out = cluster.list_nfs_cluster()
    assert out[0] == self.cluster_id

def test_cluster_ls(self):
    self._do_mock_test(self._do_test_cluster_ls)

def _do_test_cluster_info(self):
    """show_nfs_cluster_info() returns the virtual_ip/backend summary."""
    nfs_mod = Module('nfs', '', '')
    cluster = NFSCluster(nfs_mod)

    out = cluster.show_nfs_cluster_info(self.cluster_id)
    assert out == {"foo": {"virtual_ip": None, "backend": []}}

def test_cluster_info(self):
    self._do_mock_test(self._do_test_cluster_info)

def _do_test_cluster_config(self):
    """User config starts empty, can be set, and resets back to empty."""
    nfs_mod = Module('nfs', '', '')
    cluster = NFSCluster(nfs_mod)

    out = cluster.get_nfs_cluster_config(self.cluster_id)
    assert out == ""

    cluster.set_nfs_cluster_config(self.cluster_id, '# foo\n')
    out = cluster.get_nfs_cluster_config(self.cluster_id)
    assert out == "# foo\n"

    cluster.reset_nfs_cluster_config(self.cluster_id)
    out = cluster.get_nfs_cluster_config(self.cluster_id)
    assert out == ""

def test_cluster_config(self):
    self._do_mock_test(self._do_test_cluster_config)
@pytest.mark.parametrize(
    "path,expected",
    [
        ("/foo/bar/baz", "/foo/bar/baz"),
        ("/foo/bar/baz/", "/foo/bar/baz"),
        ("/foo/bar/baz ", "/foo/bar/baz"),
        ("/foo/./bar/baz", "/foo/bar/baz"),
        ("/foo/bar/baz/..", "/foo/bar"),
        ("//foo/bar/baz", "/foo/bar/baz"),
        ("", ""),
    ]
)
def test_normalize_path(path, expected):
    # Covers trailing slash/whitespace stripping, "."/".." resolution and
    # doubled leading slashes; the empty string maps to itself.
    assert normalize_path(path) == expected
def test_ganesha_validate_squash():
    """A valid squash value passes; an invalid one raises NFSInvalidOperation."""
    from nfs.exception import NFSInvalidOperation
    from nfs.ganesha_conf import _validate_squash

    _validate_squash("root")
    with pytest.raises(NFSInvalidOperation):
        _validate_squash("toot")
def test_ganesha_validate_access_type():
    """Valid access types pass; anything else raises NFSInvalidOperation."""
    from nfs.exception import NFSInvalidOperation
    from nfs.ganesha_conf import _validate_access_type

    for valid in ["rw", "ro", "none"]:
        _validate_access_type(valid)
    with pytest.raises(NFSInvalidOperation):
        _validate_access_type("any")
| 41,758 | 35.092481 | 107 | py |
null | ceph-main/src/pybind/mgr/orchestrator/README.md | # Orchestrator CLI
See also [orchestrator cli doc](https://docs.ceph.com/en/latest/mgr/orchestrator/).
## Running the Teuthology tests
To run the API tests against a real Ceph cluster, we leverage the Teuthology
framework and the `test_orchestrator` backend.
``source`` the script and run the tests manually::
$ pushd ../dashboard ; source ./run-backend-api-tests.sh ; popd
$ run_teuthology_tests tasks.mgr.test_orchestrator_cli
$ cleanup_teuthology
| 467 | 30.2 | 83 | md |
null | ceph-main/src/pybind/mgr/orchestrator/__init__.py | # flake8: noqa
from .module import OrchestratorCli
# usage: E.g. `from orchestrator import StatelessServiceSpec`
from ._interface import \
OrchResult, raise_if_exception, handle_orch_error, \
CLICommand, _cli_write_command, _cli_read_command, CLICommandMeta, \
Orchestrator, OrchestratorClientMixin, \
OrchestratorValidationError, OrchestratorError, NoOrchestrator, \
ServiceDescription, InventoryFilter, HostSpec, \
DaemonDescription, DaemonDescriptionStatus, \
OrchestratorEvent, set_exception_subject, \
InventoryHost, DeviceLightLoc, \
UpgradeStatusSpec, daemon_type_to_service, service_to_daemon_types, KNOWN_DAEMON_TYPES
import os
if 'UNITTEST' in os.environ:
import tests
| 723 | 33.47619 | 90 | py |
null | ceph-main/src/pybind/mgr/orchestrator/_interface.py |
"""
ceph-mgr orchestrator interface
Please see the ceph-mgr module developer's guide for more information.
"""
import copy
import datetime
import enum
import errno
import logging
import pickle
import re
from collections import namedtuple, OrderedDict
from contextlib import contextmanager
from functools import wraps, reduce, update_wrapper
from typing import TypeVar, Generic, List, Optional, Union, Tuple, Iterator, Callable, Any, \
Sequence, Dict, cast, Mapping
try:
from typing import Protocol # Protocol was added in Python 3.8
except ImportError:
class Protocol: # type: ignore
pass
import yaml
from ceph.deployment import inventory
from ceph.deployment.service_spec import (
ArgumentList,
ArgumentSpec,
GeneralArgList,
IngressSpec,
IscsiServiceSpec,
MDSSpec,
NFSServiceSpec,
RGWSpec,
SNMPGatewaySpec,
ServiceSpec,
TunedProfileSpec,
)
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.hostspec import HostSpec, SpecValidationError
from ceph.utils import datetime_to_str, str_to_datetime
from mgr_module import MgrModule, CLICommand, HandleCommandResult
logger = logging.getLogger(__name__)
T = TypeVar('T')
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
class OrchestratorError(Exception):
    """
    General orchestrator specific error.

    Used for deployment, configuration or user errors.

    It's not intended for programming errors or orchestrator internal errors.
    """

    def __init__(self,
                 msg: str,
                 errno: int = -errno.EINVAL,
                 event_kind_subject: Optional[Tuple[str, str]] = None) -> None:
        # NOTE(review): `super(Exception, self)` resolves past Exception in
        # the MRO (to BaseException.__init__) — confirm this is deliberate.
        super(Exception, self).__init__(msg)
        # The `errno` parameter shadows the stdlib `errno` module inside this
        # method; the default `-errno.EINVAL` is evaluated at def time, so it
        # still refers to the module.
        self.errno = errno
        # See OrchestratorEvent.subject
        self.event_subject = event_kind_subject
class NoOrchestrator(OrchestratorError):
    """Raised when no orchestrator backend has been configured."""

    def __init__(self, msg: str = "No orchestrator configured (try `ceph orch set backend`)") -> None:
        super().__init__(msg, errno=-errno.ENOENT)
class OrchestratorValidationError(OrchestratorError):
    """
    Raised when an orchestrator doesn't support a specific feature.
    """
    # Inherits behavior unchanged from OrchestratorError; the distinct type
    # lets callers catch "unsupported feature" separately.
@contextmanager
def set_exception_subject(kind: str, subject: str, overwrite: bool = False) -> Iterator[None]:
    """Tag any OrchestratorError escaping the block with ``(kind, subject)``."""
    try:
        yield
    except OrchestratorError as err:
        # Attach the subject so event reporting knows which entity failed;
        # `overwrite` forces the tag even when one is already present.
        if hasattr(err, 'event_subject') or overwrite:
            err.event_subject = (kind, subject)
        raise
def handle_exception(prefix: str, perm: str, func: FuncT) -> FuncT:
    """
    Wrap a CLI handler so expected failures become clean command results
    instead of tracebacks, and attach the CLICommand metadata that
    CLICommandMeta's dispatch looks for (`_prefix`, `_cli_command`).
    """
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        try:
            return func(*args, **kwargs)
        except (OrchestratorError, SpecValidationError) as e:
            # Do not print Traceback for expected errors.
            return HandleCommandResult(e.errno, stderr=str(e))
        except ImportError as e:
            return HandleCommandResult(-errno.ENOENT, stderr=str(e))
        except NotImplementedError:
            msg = 'This Orchestrator does not support `{}`'.format(prefix)
            return HandleCommandResult(-errno.ENOENT, stderr=msg)

    # misuse lambda to copy `wrapper`
    # NOTE(review): presumably so the attributes below land on a distinct
    # function object rather than on `wrapper`/`func` — confirm.
    wrapper_copy = lambda *l_args, **l_kwargs: wrapper(*l_args, **l_kwargs)  # noqa: E731
    wrapper_copy._prefix = prefix  # type: ignore
    wrapper_copy._cli_command = CLICommand(prefix, perm)  # type: ignore
    wrapper_copy._cli_command.store_func_metadata(func)  # type: ignore
    wrapper_copy._cli_command.func = wrapper_copy  # type: ignore

    return cast(FuncT, wrapper_copy)
def handle_orch_error(f: Callable[..., T]) -> Callable[..., 'OrchResult[T]']:
    """
    Decorator wrapping an Orchestrator method so that its return value —
    or any exception it raises — is delivered as an :class:`OrchResult`.
    """
    @wraps(f)
    def _to_orch_result(*args: Any, **kwargs: Any) -> OrchResult[T]:
        try:
            return OrchResult(f(*args, **kwargs))
        except Exception as e:
            logger.exception(e)
            import os
            # Re-raising keeps unittest tracebacks pointing at the real failure.
            if 'UNITTEST' in os.environ:
                raise
            return OrchResult(None, exception=e)

    return cast(Callable[..., OrchResult[T]], _to_orch_result)
class InnerCliCommandCallable(Protocol):
    # Structural type of what _cli_command() returns: called with the command
    # prefix, it yields a decorator for the handler function.
    def __call__(self, prefix: str) -> Callable[[FuncT], FuncT]:
        ...
def _cli_command(perm: str) -> InnerCliCommandCallable:
    """Build a prefix-taking decorator factory for CLI handlers with *perm*."""
    def _with_prefix(prefix: str) -> Callable[[FuncT], FuncT]:
        def _decorate(func: FuncT) -> FuncT:
            return handle_exception(prefix, perm, func)
        return _decorate
    return _with_prefix


_cli_read_command = _cli_command('r')
_cli_write_command = _cli_command('rw')
class CLICommandMeta(type):
    """
    This is a workaround for the use of a global variable CLICommand.COMMANDS which
    prevents modules from importing any other module.

    We make use of CLICommand, except for the use of the global variable.
    """
    def __init__(cls, name: str, bases: Any, dct: Any) -> None:
        super(CLICommandMeta, cls).__init__(name, bases, dct)
        # Collect every class attribute that handle_exception() tagged with
        # `_prefix`/`_cli_command` into a prefix -> CLICommand map.
        dispatch: Dict[str, CLICommand] = {}
        for v in dct.values():
            try:
                dispatch[v._prefix] = v._cli_command
            except AttributeError:
                # Not a CLI-command attribute; skip it.
                pass

        def handle_command(self: Any, inbuf: Optional[str], cmd: dict) -> Any:
            # Route known prefixes through their CLICommand; fall back to the
            # class's own handle_command for anything else.
            if cmd['prefix'] not in dispatch:
                return self.handle_command(inbuf, cmd)

            return dispatch[cmd['prefix']].call(self, cmd, inbuf)

        cls.COMMANDS = [cmd.dump_cmd() for cmd in dispatch.values()]
        cls.handle_command = handle_command
class OrchResult(Generic[T]):
    """
    Container pairing a result with an optional (pickled) exception.

    Exists mainly to work around MgrModule.remote() hiding exceptions, and
    to let results cross sub-interpreter boundaries.
    """

    __slots__ = 'result', 'serialized_exception', 'exception_str'

    def __init__(self, result: Optional[T], exception: Optional[Exception] = None) -> None:
        self.result = result
        self.serialized_exception: Optional[bytes] = None
        self.exception_str: str = ''
        self.set_exception(exception)

    def set_exception(self, e: Optional[Exception]) -> None:
        """Record *e* both pickled and as a printable string (or clear it)."""
        if e is None:
            self.serialized_exception = None
            self.exception_str = ''
            return

        self.exception_str = f'{type(e)}: {str(e)}'
        try:
            self.serialized_exception = pickle.dumps(e)
        except pickle.PicklingError:
            logger.error(f"failed to pickle {e}")
            if isinstance(e, Exception):
                e = Exception(*e.args)
            else:
                e = Exception(str(e))
            # degenerate to a plain Exception
            self.serialized_exception = pickle.dumps(e)

    def result_str(self) -> str:
        """Force a string."""
        if self.result is None:
            return ''
        if isinstance(self.result, list):
            return '\n'.join(map(str, self.result))
        return str(self.result)
def raise_if_exception(c: OrchResult[T]) -> T:
    """
    Unpack an OrchResult: re-raise its stored exception if any, otherwise
    return its result.

    Due to different sub-interpreters, this MUST not be in the `OrchResult` class.
    """
    if c.serialized_exception is None:
        assert c.result is not None, 'OrchResult should either have an exception or a result'
        return c.result
    try:
        exc = pickle.loads(c.serialized_exception)
    except (KeyError, AttributeError):
        # The exception type can't be reconstructed here; fall back to text.
        raise Exception(c.exception_str)
    raise exc
def _hide_in_features(f: FuncT) -> FuncT:
f._hide_in_features = True # type: ignore
return f
class Orchestrator(object):
"""
Calls in this class may do long running remote operations, with time
periods ranging from network latencies to package install latencies and large
internet downloads. For that reason, all are asynchronous, and return
``Completion`` objects.
Methods should only return the completion and not directly execute
anything, like network calls. Otherwise the purpose of
those completions is defeated.
Implementations are not required to start work on an operation until
the caller waits on the relevant Completion objects. Callers making
multiple updates should not wait on Completions until they're done
sending operations: this enables implementations to batch up a series
of updates when wait() is called on a set of Completion objects.
Implementations are encouraged to keep reasonably fresh caches of
the status of the system: it is better to serve a stale-but-recent
result read of e.g. device inventory than it is to keep the caller waiting
while you scan hosts every time.
"""
@_hide_in_features
def is_orchestrator_module(self) -> bool:
    """
    Enable other modules to interrogate this module to discover
    whether it's usable as an orchestrator module.

    Subclasses do not need to override this.
    """
    # Always True for any class in this hierarchy; hidden from feature sets.
    return True
@_hide_in_features
def available(self) -> Tuple[bool, str, Dict[str, Any]]:
    """
    Report whether we can talk to the orchestrator.  This is the
    place to give the user a meaningful message if the orchestrator
    isn't running or can't be contacted.

    This method may be called frequently (e.g. every page load
    to conditionally display a warning banner), so make sure it's
    not too expensive.  It's okay to give a slightly stale status
    (e.g. based on a periodic background ping of the orchestrator)
    if that's necessary to make this method fast.

    .. note::
        `True` doesn't mean that the desired functionality
        is actually available in the orchestrator. I.e. this
        won't work as expected::

            >>> #doctest: +SKIP
            ... if OrchestratorClientMixin().available()[0]:  # wrong.
            ...     OrchestratorClientMixin().get_hosts()

    :return: boolean representing whether the module is available/usable
    :return: string describing any error
    :return: dict containing any module specific information
    """
    # Abstract hook: every backend must provide its own implementation.
    raise NotImplementedError()
@_hide_in_features
def get_feature_set(self) -> Dict[str, dict]:
    """Describes which methods this orchestrator implements

    .. note::
        `True` doesn't mean that the desired functionality
        is actually possible in the orchestrator. I.e. this
        won't work as expected::

            >>> #doctest: +SKIP
            ... api = OrchestratorClientMixin()
            ... if api.get_feature_set()['get_hosts']['available']:  # wrong.
            ...     api.get_hosts()

        It's better to ask for forgiveness instead::

            >>> #doctest: +SKIP
            ... try:
            ...     OrchestratorClientMixin().get_hosts()
            ... except (OrchestratorError, NotImplementedError):
            ...     ...

    :returns: Dict of API method names to ``{'available': True or False}``
    """
    module = self.__class__
    # A method is "available" when the subclass overrides the base
    # implementation; underscore-prefixed names and members tagged via
    # @_hide_in_features are excluded.
    features = {a: {'available': getattr(Orchestrator, a, None) != getattr(module, a)}
                for a in Orchestrator.__dict__
                if not a.startswith('_') and not getattr(getattr(Orchestrator, a), '_hide_in_features', False)
                }
    return features
def cancel_completions(self) -> None:
    """
    Cancels ongoing completions. Unstuck the mgr.
    """
    raise NotImplementedError()

def pause(self) -> None:
    """Pause the orchestrator's background activity."""
    raise NotImplementedError()

def resume(self) -> None:
    """Resume background activity previously stopped with ``pause()``."""
    raise NotImplementedError()

def add_host(self, host_spec: HostSpec) -> OrchResult[str]:
    """
    Add a host to the orchestrator inventory.

    :param host_spec: host specification (hostname, address, labels, ...)
    """
    raise NotImplementedError()

def remove_host(self, host: str, force: bool, offline: bool) -> OrchResult[str]:
    """
    Remove a host from the orchestrator inventory.

    :param host: hostname
    :param force: proceed even when this may be unsafe
    :param offline: the host is known to be offline
    """
    raise NotImplementedError()

def drain_host(self, hostname: str, force: bool = False) -> OrchResult[str]:
    """
    Drain all daemons from a host.

    :param hostname: hostname
    :param force: proceed even when this may be unsafe
    """
    raise NotImplementedError()

def update_host_addr(self, host: str, addr: str) -> OrchResult[str]:
    """
    Update a host's address.

    :param host: hostname
    :param addr: address (dns name or IP)
    """
    raise NotImplementedError()

def get_hosts(self) -> OrchResult[List[HostSpec]]:
    """
    Report the hosts in the cluster.

    :return: list of HostSpec
    """
    raise NotImplementedError()

def get_facts(self, hostname: Optional[str] = None) -> OrchResult[List[Dict[str, Any]]]:
    """
    Return hosts metadata(gather_facts).

    :param hostname: limit the report to a single host, if given
    """
    raise NotImplementedError()

def add_host_label(self, host: str, label: str) -> OrchResult[str]:
    """
    Add a host label.
    """
    raise NotImplementedError()

def remove_host_label(self, host: str, label: str, force: bool = False) -> OrchResult[str]:
    """
    Remove a host label.
    """
    raise NotImplementedError()

def host_ok_to_stop(self, hostname: str) -> OrchResult:
    """
    Check if the specified host can be safely stopped without reducing availability.

    :param hostname: hostname
    """
    raise NotImplementedError()

def enter_host_maintenance(self, hostname: str, force: bool = False, yes_i_really_mean_it: bool = False) -> OrchResult:
    """
    Place a host in maintenance, stopping daemons and disabling its systemd target.
    """
    raise NotImplementedError()

def exit_host_maintenance(self, hostname: str) -> OrchResult:
    """
    Return a host from maintenance, restarting the cluster's systemd target.
    """
    raise NotImplementedError()

def rescan_host(self, hostname: str) -> OrchResult:
    """Use cephadm to issue a disk rescan on each HBA.

    Some HBAs and external enclosures don't automatically register
    device insertion with the kernel, so for these scenarios we need
    to manually rescan.

    :param hostname: (str) host name
    """
    raise NotImplementedError()

def get_inventory(self, host_filter: Optional['InventoryFilter'] = None, refresh: bool = False) -> OrchResult[List['InventoryHost']]:
    """
    Returns something that was created by `ceph-volume inventory`.

    :param host_filter: restrict the scan to certain hosts/labels
    :param refresh: bypass any cached data and rescan
    :return: list of InventoryHost
    """
    raise NotImplementedError()

def service_discovery_dump_cert(self) -> OrchResult:
    """
    Returns service discovery server root certificate.

    :return: service discovery root certificate
    """
    raise NotImplementedError()

def describe_service(self, service_type: Optional[str] = None, service_name: Optional[str] = None, refresh: bool = False) -> OrchResult[List['ServiceDescription']]:
    """
    Describe a service (of any kind) that is already configured in
    the orchestrator. For example, when viewing an OSD in the dashboard
    we might like to also display information about the orchestrator's
    view of the service (like the kubernetes pod ID).

    When viewing a CephFS filesystem in the dashboard, we would use this
    to display the pods being currently run for MDS daemons.

    :return: list of ServiceDescription objects.
    """
    raise NotImplementedError()

def list_daemons(self, service_name: Optional[str] = None, daemon_type: Optional[str] = None, daemon_id: Optional[str] = None, host: Optional[str] = None, refresh: bool = False) -> OrchResult[List['DaemonDescription']]:
    """
    Describe a daemon (of any kind) that is already configured in
    the orchestrator.

    :return: list of DaemonDescription objects.
    """
    raise NotImplementedError()
@handle_orch_error
def apply(self, specs: Sequence["GenericSpec"], no_overwrite: bool = False) -> List[str]:
    """
    Apply a list of specs by dispatching each one to the matching
    ``apply_*`` method (or ``add_host`` for host specs).

    :param specs: service/host specs to apply
    :param no_overwrite: accepted for interface compatibility; not used here
    :return: list of result strings, one per applied spec
    :raises OrchestratorValidationError: if a spec carries an unknown
        ``service_type`` (previously surfaced as a bare ``KeyError``)
    """
    fns: Dict[str, Callable[..., OrchResult[str]]] = {
        'alertmanager': self.apply_alertmanager,
        'crash': self.apply_crash,
        'grafana': self.apply_grafana,
        'iscsi': self.apply_iscsi,
        'mds': self.apply_mds,
        'mgr': self.apply_mgr,
        'mon': self.apply_mon,
        'nfs': self.apply_nfs,
        'node-exporter': self.apply_node_exporter,
        'ceph-exporter': self.apply_ceph_exporter,
        'osd': lambda dg: self.apply_drivegroups([dg]),  # type: ignore
        'prometheus': self.apply_prometheus,
        'loki': self.apply_loki,
        'promtail': self.apply_promtail,
        'rbd-mirror': self.apply_rbd_mirror,
        'rgw': self.apply_rgw,
        'ingress': self.apply_ingress,
        'snmp-gateway': self.apply_snmp_gateway,
        'host': self.add_host,
    }

    # Validate up front so the user gets a clear error instead of a
    # KeyError from the dispatch below.
    for spec in specs:
        if spec.service_type not in fns:
            raise OrchestratorValidationError(
                'unknown service type {!r}'.format(spec.service_type))

    def merge(l: OrchResult[List[str]], r: OrchResult[str]) -> OrchResult[List[str]]:  # noqa: E741
        # Fold a single-spec result onto the accumulated list of results.
        l_res = raise_if_exception(l)
        r_res = raise_if_exception(r)
        l_res.append(r_res)
        return OrchResult(l_res)

    return raise_if_exception(reduce(merge, [fns[spec.service_type](spec) for spec in specs], OrchResult([])))
def set_unmanaged(self, service_name: str, value: bool) -> OrchResult[str]:
    """
    Set the unmanaged parameter to True/False for a given service.

    :param service_name: name of the service to change
    :param value: new value of the unmanaged flag
    """
    raise NotImplementedError()

def plan(self, spec: Sequence["GenericSpec"]) -> OrchResult[List]:
    """
    Plan (Dry-run, Preview) a List of Specs.
    """
    raise NotImplementedError()

def remove_daemons(self, names: List[str]) -> OrchResult[List[str]]:
    """
    Remove specific daemon(s).

    :param names: daemon names, i.e. ``<type>.<id>``
    """
    raise NotImplementedError()

def remove_service(self, service_name: str, force: bool = False) -> OrchResult[str]:
    """
    Remove a service (a collection of daemons).

    :param service_name: name of the service to remove
    :param force: proceed even when this may be unsafe
    """
    raise NotImplementedError()

def service_action(self, action: str, service_name: str) -> OrchResult[List[str]]:
    """
    Perform an action (start/stop/reload) on a service (i.e., all daemons
    providing the logical service).

    :param action: one of "start", "stop", "restart", "redeploy", "reconfig"
    :param service_name: service_type + '.' + service_id
                         (e.g. "mon", "mgr", "mds.mycephfs", "rgw.realm.zone", ...)
    :rtype: OrchResult
    """
    # assert action in ["start", "stop", "reload, "restart", "redeploy"]
    raise NotImplementedError()

def daemon_action(self, action: str, daemon_name: str, image: Optional[str] = None) -> OrchResult[str]:
    """
    Perform an action (start/stop/reload) on a daemon.

    :param action: one of "start", "stop", "restart", "redeploy", "reconfig"
    :param daemon_name: name of daemon
    :param image: Container image when redeploying that daemon
    :rtype: OrchResult
    """
    # assert action in ["start", "stop", "reload, "restart", "redeploy"]
    raise NotImplementedError()
def create_osds(self, drive_group: DriveGroupSpec) -> OrchResult[str]:
    """
    Create one or more OSDs within a single Drive Group.

    The principal argument here is the drive_group member
    of OsdSpec: other fields are advisory/extensible for any
    finer-grained OSD feature enablement (choice of backing store,
    compression/encryption, etc).
    """
    raise NotImplementedError()

def apply_drivegroups(self, specs: List[DriveGroupSpec]) -> OrchResult[List[str]]:
    """Update OSD cluster"""
    raise NotImplementedError()

def set_unmanaged_flag(self,
                       unmanaged_flag: bool,
                       service_type: str = 'osd',
                       service_name: Optional[str] = None
                       ) -> HandleCommandResult:
    # NOTE(review): undocumented in the original; returns a mgr
    # HandleCommandResult rather than an OrchResult like its siblings.
    raise NotImplementedError()

def preview_osdspecs(self,
                     osdspec_name: Optional[str] = 'osd',
                     osdspecs: Optional[List[DriveGroupSpec]] = None
                     ) -> OrchResult[str]:
    """Get a preview for OSD deployments"""
    raise NotImplementedError()

def remove_osds(self, osd_ids: List[str],
                replace: bool = False,
                force: bool = False,
                zap: bool = False,
                no_destroy: bool = False) -> OrchResult[str]:
    """
    :param osd_ids: list of OSD IDs
    :param replace: marks the OSD as being destroyed. See :ref:`orchestrator-osd-replace`
    :param force: Forces the OSD removal process without waiting for the data to be drained first.
    :param zap: Zap/Erase all devices associated with the OSDs (DESTROYS DATA)
    :param no_destroy: Do not destroy associated VGs/LVs with the OSD.

    .. note:: this can only remove OSDs that were successfully
        created (i.e. got an OSD ID).
    """
    raise NotImplementedError()

def stop_remove_osds(self, osd_ids: List[str]) -> OrchResult:
    """
    TODO: document. Presumably cancels an in-flight removal of the given
    OSDs -- confirm against the cephadm implementation.
    """
    raise NotImplementedError()

def remove_osds_status(self) -> OrchResult:
    """
    Returns a status of the ongoing OSD removal operations.
    """
    raise NotImplementedError()

def blink_device_light(self, ident_fault: str, on: bool, locations: List['DeviceLightLoc']) -> OrchResult[List[str]]:
    """
    Instructs the orchestrator to enable or disable either the ident or the fault LED.

    :param ident_fault: either ``"ident"`` or ``"fault"``
    :param on: ``True`` = on.
    :param locations: See :class:`orchestrator.DeviceLightLoc`
    """
    raise NotImplementedError()

def zap_device(self, host: str, path: str) -> OrchResult[str]:
    """Zap/Erase a device (DESTROYS DATA)"""
    raise NotImplementedError()
def add_daemon(self, spec: ServiceSpec) -> OrchResult[List[str]]:
    """Create daemon(s) for unmanaged services"""
    raise NotImplementedError()

def apply_mon(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update mon cluster"""
    raise NotImplementedError()

def apply_mgr(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update mgr cluster"""
    raise NotImplementedError()

def apply_mds(self, spec: MDSSpec) -> OrchResult[str]:
    """Update MDS cluster"""
    raise NotImplementedError()

def apply_rgw(self, spec: RGWSpec) -> OrchResult[str]:
    """Update RGW cluster"""
    raise NotImplementedError()

def apply_ingress(self, spec: IngressSpec) -> OrchResult[str]:
    """Update ingress daemons"""
    raise NotImplementedError()

def apply_rbd_mirror(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update rbd-mirror cluster"""
    raise NotImplementedError()

def apply_nfs(self, spec: NFSServiceSpec) -> OrchResult[str]:
    """Update NFS cluster"""
    raise NotImplementedError()

def apply_iscsi(self, spec: IscsiServiceSpec) -> OrchResult[str]:
    """Update iscsi cluster"""
    raise NotImplementedError()

def apply_prometheus(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update prometheus cluster"""
    raise NotImplementedError()

def get_prometheus_access_info(self) -> OrchResult[Dict[str, str]]:
    """get prometheus access information"""
    raise NotImplementedError()

def get_alertmanager_access_info(self) -> OrchResult[Dict[str, str]]:
    """get alertmanager access information"""
    raise NotImplementedError()

def apply_node_exporter(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update an existing Node-Exporter daemon(s)"""
    raise NotImplementedError()

def apply_ceph_exporter(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update an existing ceph exporter daemon(s)"""
    raise NotImplementedError()

def apply_loki(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update an existing Loki daemon(s)"""
    raise NotImplementedError()

def apply_promtail(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update an existing Promtail daemon(s)"""
    raise NotImplementedError()

def apply_crash(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update an existing crash daemon(s)"""
    raise NotImplementedError()

def apply_grafana(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update an existing grafana service"""
    raise NotImplementedError()

def apply_alertmanager(self, spec: ServiceSpec) -> OrchResult[str]:
    """Update an existing AlertManager daemon(s)"""
    raise NotImplementedError()

def apply_snmp_gateway(self, spec: SNMPGatewaySpec) -> OrchResult[str]:
    """Update an existing snmp gateway service"""
    raise NotImplementedError()

def apply_tuned_profiles(self, specs: List[TunedProfileSpec], no_overwrite: bool) -> OrchResult[str]:
    """Add or update an existing tuned profile"""
    raise NotImplementedError()

def rm_tuned_profile(self, profile_name: str) -> OrchResult[str]:
    """Remove a tuned profile"""
    raise NotImplementedError()

def tuned_profile_ls(self) -> OrchResult[List[TunedProfileSpec]]:
    """See current tuned profiles"""
    raise NotImplementedError()

def tuned_profile_add_setting(self, profile_name: str, setting: str, value: str) -> OrchResult[str]:
    """Change/Add a specific setting for a tuned profile"""
    raise NotImplementedError()

def tuned_profile_rm_setting(self, profile_name: str, setting: str) -> OrchResult[str]:
    """Remove a specific setting for a tuned profile"""
    raise NotImplementedError()
def upgrade_check(self, image: Optional[str], version: Optional[str]) -> OrchResult[str]:
    # NOTE(review): undocumented in the original; appears to report what
    # an upgrade to the given image/version would entail -- confirm
    # against the cephadm implementation.
    raise NotImplementedError()

def upgrade_ls(self, image: Optional[str], tags: bool, show_all_versions: Optional[bool] = False) -> OrchResult[Dict[Any, Any]]:
    # NOTE(review): undocumented in the original; appears to list
    # available upgrade targets -- confirm against the implementation.
    raise NotImplementedError()

def upgrade_start(self, image: Optional[str], version: Optional[str], daemon_types: Optional[List[str]],
                  hosts: Optional[str], services: Optional[List[str]], limit: Optional[int]) -> OrchResult[str]:
    """Begin an upgrade, optionally limited to certain daemon types,
    hosts or services."""
    raise NotImplementedError()

def upgrade_pause(self) -> OrchResult[str]:
    """Pause an upgrade that is underway."""
    raise NotImplementedError()

def upgrade_resume(self) -> OrchResult[str]:
    """Resume a paused upgrade."""
    raise NotImplementedError()

def upgrade_stop(self) -> OrchResult[str]:
    """Stop an upgrade that is underway."""
    raise NotImplementedError()

def upgrade_status(self) -> OrchResult['UpgradeStatusSpec']:
    """
    If an upgrade is currently underway, report on where
    we are in the process, or if some error has occurred.

    :return: UpgradeStatusSpec instance
    """
    raise NotImplementedError()

@_hide_in_features
def upgrade_available(self) -> OrchResult:
    """
    Report on what versions are available to upgrade to

    :return: List of strings
    """
    raise NotImplementedError()
GenericSpec = Union[ServiceSpec, HostSpec]


def json_to_generic_spec(spec: dict) -> GenericSpec:
    """Deserialize *spec*: a ``service_type`` of ``'host'`` yields a
    HostSpec, anything else a ServiceSpec."""
    if spec.get('service_type') == 'host':
        return HostSpec.from_json(spec)
    return ServiceSpec.from_json(spec)
def daemon_type_to_service(dtype: str) -> str:
    """Return the service type that owns daemons of type *dtype*.

    :raises KeyError: for an unrecognized daemon type.
    """
    # Daemon types whose owning service is named differently.
    renamed = {
        'haproxy': 'ingress',
        'keepalived': 'ingress',
        'crashcollector': 'crash',  # Specific Rook Daemon
    }
    # Daemon types whose service type is simply their own name.
    identity = frozenset([
        'mon', 'mgr', 'mds', 'rgw', 'osd', 'iscsi', 'rbd-mirror',
        'cephfs-mirror', 'nfs', 'grafana', 'alertmanager', 'prometheus',
        'node-exporter', 'ceph-exporter', 'loki', 'promtail', 'crash',
        'container', 'agent', 'snmp-gateway', 'elasticsearch',
        'jaeger-agent', 'jaeger-collector', 'jaeger-query',
    ])
    if dtype in renamed:
        return renamed[dtype]
    if dtype in identity:
        return dtype
    # Same contract as a plain dict lookup on an unknown key.
    raise KeyError(dtype)
def service_to_daemon_types(stype: str) -> List[str]:
    """Return the daemon types deployed on behalf of service type *stype*.

    :raises KeyError: for an unrecognized service type.
    """
    # Services that deploy daemon types other than their own name.
    compound = {
        'ingress': ('haproxy', 'keepalived'),
        'jaeger-tracing': ('elasticsearch', 'jaeger-query',
                           'jaeger-collector', 'jaeger-agent'),
    }
    # Services that deploy exactly one daemon type of the same name.
    simple = frozenset([
        'mon', 'mgr', 'mds', 'rgw', 'osd', 'iscsi', 'rbd-mirror',
        'cephfs-mirror', 'nfs', 'grafana', 'alertmanager', 'prometheus',
        'loki', 'promtail', 'node-exporter', 'ceph-exporter', 'crash',
        'container', 'agent', 'snmp-gateway', 'elasticsearch',
        'jaeger-agent', 'jaeger-collector', 'jaeger-query',
    ])
    if stype in compound:
        # A fresh list per call, matching the original's dict-literal body.
        return list(compound[stype])
    if stype in simple:
        return [stype]
    # Same contract as a plain dict lookup on an unknown key.
    raise KeyError(stype)
# Flattened list of every daemon type belonging to any known service type.
# A flat comprehension avoids the quadratic list concatenation of
# sum(..., []) while producing the same elements in the same order.
KNOWN_DAEMON_TYPES: List[str] = [
    daemon_type
    for service_type in ServiceSpec.KNOWN_SERVICE_TYPES
    for daemon_type in service_to_daemon_types(service_type)
]
class UpgradeStatusSpec(object):
    """Orchestrator's report on what's going on with any ongoing upgrade."""

    def __init__(self) -> None:
        # Is an upgrade underway?
        self.in_progress = False
        self.target_image: Optional[str] = None
        # Which daemon types are fully updated?
        self.services_complete: List[str] = []
        # For if the user specified daemon types, services or hosts.
        self.which: str = '<unknown>'
        # How many of the daemons have we upgraded.
        self.progress: Optional[str] = None
        # Freeform description.
        self.message = ""
        # Is the upgrade paused?
        self.is_paused: bool = False

    def to_json(self) -> dict:
        """Serialize this status report to a plain dict."""
        field_names = ('in_progress', 'target_image', 'which',
                       'services_complete', 'progress', 'message',
                       'is_paused')
        return {name: getattr(self, name) for name in field_names}
def handle_type_error(method: FuncT) -> FuncT:
    """Decorator that converts a ``TypeError`` raised by *method* into an
    :class:`OrchestratorValidationError`, prefixing the message with the
    class name for context."""
    @wraps(method)
    def wrapper(cls: Any, *args: Any, **kwargs: Any) -> Any:
        try:
            return method(cls, *args, **kwargs)
        except TypeError as e:
            raise OrchestratorValidationError('{}: {}'.format(cls.__name__, e))
    return cast(FuncT, wrapper)
class DaemonDescriptionStatus(enum.IntEnum):
    unknown = -2
    error = -1
    stopped = 0
    running = 1
    starting = 2  #: Daemon is deployed, but not yet running

    @staticmethod
    def to_str(status: Optional['DaemonDescriptionStatus']) -> str:
        """Render *status* for display.

        ``None`` is treated as ``unknown``; any value that is not a member
        of this enum renders as ``'<unknown>'``.
        """
        if status is None:
            return DaemonDescriptionStatus.unknown.name
        if isinstance(status, DaemonDescriptionStatus):
            # The member names are exactly the display strings.
            return status.name
        return '<unknown>'
class DaemonDescription(object):
    """
    For responding to queries about the status of a particular daemon,
    stateful or stateless.

    This is not about health or performance monitoring of daemons: it's
    about letting the orchestrator tell Ceph whether and where a
    daemon is scheduled in the cluster. When an orchestrator tells
    Ceph "it's running on host123", that's not a promise that the process
    is literally up this second, it's a description of where the orchestrator
    has decided the daemon should run.
    """

    def __init__(self,
                 daemon_type: Optional[str] = None,
                 daemon_id: Optional[str] = None,
                 hostname: Optional[str] = None,
                 container_id: Optional[str] = None,
                 container_image_id: Optional[str] = None,
                 container_image_name: Optional[str] = None,
                 container_image_digests: Optional[List[str]] = None,
                 version: Optional[str] = None,
                 status: Optional[DaemonDescriptionStatus] = None,
                 status_desc: Optional[str] = None,
                 last_refresh: Optional[datetime.datetime] = None,
                 created: Optional[datetime.datetime] = None,
                 started: Optional[datetime.datetime] = None,
                 last_configured: Optional[datetime.datetime] = None,
                 osdspec_affinity: Optional[str] = None,
                 last_deployed: Optional[datetime.datetime] = None,
                 events: Optional[List['OrchestratorEvent']] = None,
                 is_active: bool = False,
                 memory_usage: Optional[int] = None,
                 memory_request: Optional[int] = None,
                 memory_limit: Optional[int] = None,
                 cpu_percentage: Optional[str] = None,
                 service_name: Optional[str] = None,
                 ports: Optional[List[int]] = None,
                 ip: Optional[str] = None,
                 deployed_by: Optional[List[str]] = None,
                 rank: Optional[int] = None,
                 rank_generation: Optional[int] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 ) -> None:

        #: Host is at the same granularity as InventoryHost
        self.hostname: Optional[str] = hostname

        # Not everyone runs in containers, but enough people do to
        # justify having the container_id (runtime id) and container_image
        # (image name)
        self.container_id = container_id                  # runtime id
        self.container_image_id = container_image_id      # image id locally
        self.container_image_name = container_image_name  # image friendly name
        self.container_image_digests = container_image_digests  # reg hashes

        #: The type of service (osd, mon, mgr, etc.)
        self.daemon_type = daemon_type

        #: The orchestrator will have picked some names for daemons,
        #: typically either based on hostnames or on pod names.
        #: This is the <foo> in mds.<foo>, the ID that will appear
        #: in the FSMap/ServiceMap.
        self.daemon_id: Optional[str] = daemon_id
        # Cached "<type>.<id>" display name (see name()).
        self.daemon_name = self.name()

        #: Some daemon types have a numeric rank assigned
        self.rank: Optional[int] = rank
        self.rank_generation: Optional[int] = rank_generation

        # Explicit service name override; when unset, service_name() is
        # derived from daemon_type/daemon_id.
        self._service_name: Optional[str] = service_name

        #: Service version that was deployed
        self.version = version

        # Service status: -2 unknown, -1 error, 0 stopped, 1 running, 2 starting
        self._status = status

        #: Service status description when status == error.
        self.status_desc = status_desc

        #: datetime when this info was last refreshed
        self.last_refresh: Optional[datetime.datetime] = last_refresh

        self.created: Optional[datetime.datetime] = created
        self.started: Optional[datetime.datetime] = started
        self.last_configured: Optional[datetime.datetime] = last_configured
        self.last_deployed: Optional[datetime.datetime] = last_deployed

        #: Affinity to a certain OSDSpec
        self.osdspec_affinity: Optional[str] = osdspec_affinity

        self.events: List[OrchestratorEvent] = events or []

        self.memory_usage: Optional[int] = memory_usage
        self.memory_request: Optional[int] = memory_request
        self.memory_limit: Optional[int] = memory_limit

        self.cpu_percentage: Optional[str] = cpu_percentage

        self.ports: Optional[List[int]] = ports
        self.ip: Optional[str] = ip

        self.deployed_by = deployed_by

        self.is_active = is_active

        # Initialize to None first so __setattr__'s type guard sees the
        # attribute names; then convert any provided general args.
        self.extra_container_args: Optional[ArgumentList] = None
        self.extra_entrypoint_args: Optional[ArgumentList] = None
        if extra_container_args:
            self.extra_container_args = ArgumentSpec.from_general_args(
                extra_container_args)
        if extra_entrypoint_args:
            self.extra_entrypoint_args = ArgumentSpec.from_general_args(
                extra_entrypoint_args)

    def __setattr__(self, name: str, value: Any) -> None:
        # Guard the two ArgumentSpec-list attributes: every element must be
        # an ArgumentSpec (checked by type name rather than isinstance).
        if value is not None and name in ('extra_container_args', 'extra_entrypoint_args'):
            for v in value:
                tname = str(type(v))
                if 'ArgumentSpec' not in tname:
                    raise TypeError(f"{name} is not all ArgumentSpec values: {v!r}(is {type(v)} in {value!r}")
        super().__setattr__(name, value)

    @property
    def status(self) -> Optional[DaemonDescriptionStatus]:
        return self._status

    @status.setter
    def status(self, new: DaemonDescriptionStatus) -> None:
        # Keep the human-readable description in sync with the status.
        self._status = new
        self.status_desc = DaemonDescriptionStatus.to_str(new)

    def get_port_summary(self) -> str:
        """Render '<ip>:<ports>' for display; '' when no ports are known."""
        if not self.ports:
            return ''
        return f"{self.ip or '*'}:{','.join(map(str, self.ports or []))}"

    def name(self) -> str:
        """Return the daemon name in '<type>.<id>' form."""
        return '%s.%s' % (self.daemon_type, self.daemon_id)

    def matches_service(self, service_name: Optional[str]) -> bool:
        """Whether this daemon belongs to the given service name."""
        assert self.daemon_id is not None
        assert self.daemon_type is not None
        if service_name:
            return (daemon_type_to_service(self.daemon_type) + '.' + self.daemon_id).startswith(service_name + '.')
        return False

    def matches_digests(self, digests: Optional[List[str]]) -> bool:
        # the DaemonDescription class maintains a list of container digests
        # for the container image last reported as being used for the daemons.
        # This function checks if any of those digests match any of the digests
        # in the list of digests provided as an arg to this function
        if not digests or not self.container_image_digests:
            return False
        return any(d in digests for d in self.container_image_digests)

    def matches_image_name(self, image_name: Optional[str]) -> bool:
        # the DaemonDescription class has an attribute that tracks the image
        # name of the container image last reported as being used by the daemon.
        # This function compares if the image name provided as an arg matches
        # the image name in said attribute
        if not image_name or not self.container_image_name:
            return False
        return image_name == self.container_image_name

    def service_id(self) -> str:
        """Derive the service id (the part after 'type.') for this daemon."""
        assert self.daemon_id is not None
        assert self.daemon_type is not None

        if self._service_name:
            if '.' in self._service_name:
                return self._service_name.split('.', 1)[1]
            else:
                return ''

        if self.daemon_type == 'osd':
            if self.osdspec_affinity and self.osdspec_affinity != 'None':
                return self.osdspec_affinity
            return ''

        def _match() -> str:
            # Heuristically strip the hostname out of the daemon_id to
            # recover the service id; raises when the id doesn't follow
            # any of the known naming patterns.
            assert self.daemon_id is not None
            err = OrchestratorError("DaemonDescription: Cannot calculate service_id: "
                                    f"daemon_id='{self.daemon_id}' hostname='{self.hostname}'")

            if not self.hostname:
                # TODO: can a DaemonDescription exist without a hostname?
                raise err

            # use the bare hostname, not the FQDN.
            host = self.hostname.split('.')[0]

            if host == self.daemon_id:
                # daemon_id == "host"
                return self.daemon_id

            elif host in self.daemon_id:
                # daemon_id == "service_id.host"
                # daemon_id == "service_id.host.random"
                pre, post = self.daemon_id.rsplit(host, 1)
                if not pre.endswith('.'):
                    # '.' sep missing at front of host
                    raise err
                elif post and not post.startswith('.'):
                    # '.' sep missing at end of host
                    raise err
                return pre[:-1]

            # daemon_id == "service_id.random"
            if self.daemon_type == 'rgw':
                v = self.daemon_id.split('.')
                if len(v) in [3, 4]:
                    return '.'.join(v[0:2])

            if self.daemon_type == 'iscsi':
                v = self.daemon_id.split('.')
                return '.'.join(v[0:-1])

            # daemon_id == "service_id"
            return self.daemon_id

        if daemon_type_to_service(self.daemon_type) in ServiceSpec.REQUIRES_SERVICE_ID:
            return _match()

        return self.daemon_id

    def service_name(self) -> str:
        """Return the full service name ('type' or 'type.service_id')."""
        if self._service_name:
            return self._service_name
        assert self.daemon_type is not None
        if daemon_type_to_service(self.daemon_type) in ServiceSpec.REQUIRES_SERVICE_ID:
            return f'{daemon_type_to_service(self.daemon_type)}.{self.service_id()}'
        return daemon_type_to_service(self.daemon_type)

    def __repr__(self) -> str:
        return "<DaemonDescription>({type}.{id})".format(type=self.daemon_type,
                                                         id=self.daemon_id)

    def __str__(self) -> str:
        return f"{self.name()} in status {self.status_desc} on {self.hostname}"

    def to_json(self) -> dict:
        """Serialize for persistence; drops None values, stringifies
        datetimes, serializes events via their to_json()."""
        out: Dict[str, Any] = OrderedDict()
        out['daemon_type'] = self.daemon_type
        out['daemon_id'] = self.daemon_id
        out['service_name'] = self._service_name
        out['daemon_name'] = self.name()
        out['hostname'] = self.hostname
        out['container_id'] = self.container_id
        out['container_image_id'] = self.container_image_id
        out['container_image_name'] = self.container_image_name
        out['container_image_digests'] = self.container_image_digests
        out['memory_usage'] = self.memory_usage
        out['memory_request'] = self.memory_request
        out['memory_limit'] = self.memory_limit
        out['cpu_percentage'] = self.cpu_percentage
        out['version'] = self.version
        out['status'] = self.status.value if self.status is not None else None
        out['status_desc'] = self.status_desc
        if self.daemon_type == 'osd':
            out['osdspec_affinity'] = self.osdspec_affinity
        out['is_active'] = self.is_active
        out['ports'] = self.ports
        out['ip'] = self.ip
        out['rank'] = self.rank
        out['rank_generation'] = self.rank_generation

        for k in ['last_refresh', 'created', 'started', 'last_deployed',
                  'last_configured']:
            if getattr(self, k):
                out[k] = datetime_to_str(getattr(self, k))

        if self.events:
            out['events'] = [e.to_json() for e in self.events]

        empty = [k for k, v in out.items() if v is None]
        for e in empty:
            del out[e]
        return out

    def to_dict(self) -> dict:
        """Like to_json(), but serializes events via their to_dict() and
        (unlike to_json) omits service_name/rank/rank_generation."""
        out: Dict[str, Any] = OrderedDict()
        out['daemon_type'] = self.daemon_type
        out['daemon_id'] = self.daemon_id
        out['daemon_name'] = self.name()
        out['hostname'] = self.hostname
        out['container_id'] = self.container_id
        out['container_image_id'] = self.container_image_id
        out['container_image_name'] = self.container_image_name
        out['container_image_digests'] = self.container_image_digests
        out['memory_usage'] = self.memory_usage
        out['memory_request'] = self.memory_request
        out['memory_limit'] = self.memory_limit
        out['cpu_percentage'] = self.cpu_percentage
        out['version'] = self.version
        out['status'] = self.status.value if self.status is not None else None
        out['status_desc'] = self.status_desc
        if self.daemon_type == 'osd':
            out['osdspec_affinity'] = self.osdspec_affinity
        out['is_active'] = self.is_active
        out['ports'] = self.ports
        out['ip'] = self.ip

        for k in ['last_refresh', 'created', 'started', 'last_deployed',
                  'last_configured']:
            if getattr(self, k):
                out[k] = datetime_to_str(getattr(self, k))

        if self.events:
            out['events'] = [e.to_dict() for e in self.events]

        empty = [k for k, v in out.items() if v is None]
        for e in empty:
            del out[e]
        return out

    @classmethod
    @handle_type_error
    def from_json(cls, data: dict) -> 'DaemonDescription':
        """Inverse of to_json(); tolerates input from older cephadm
        versions (numeric 'osd.NNN' service_name, extra 'daemon_name')."""
        c = data.copy()
        event_strs = c.pop('events', [])
        for k in ['last_refresh', 'created', 'started', 'last_deployed',
                  'last_configured']:
            if k in c:
                c[k] = str_to_datetime(c[k])
        events = [OrchestratorEvent.from_json(e) for e in event_strs]
        status_int = c.pop('status', None)
        # 'daemon_name' is derived, not a constructor argument.
        if 'daemon_name' in c:
            del c['daemon_name']
        if 'service_name' in c and c['service_name'].startswith('osd.'):
            # if the service_name is a osd.NNN (numeric osd id) then
            # ignore it -- it is not a valid service_name and
            # (presumably) came from an older version of cephadm.
            try:
                int(c['service_name'][4:])
                del c['service_name']
            except ValueError:
                pass
        status = DaemonDescriptionStatus(status_int) if status_int is not None else None
        return cls(events=events, status=status, **c)

    def __copy__(self) -> 'DaemonDescription':
        # feel free to change this:
        return DaemonDescription.from_json(self.to_json())

    @staticmethod
    def yaml_representer(dumper: 'yaml.SafeDumper', data: 'DaemonDescription') -> Any:
        return dumper.represent_dict(cast(Mapping, data.to_json().items()))


yaml.add_representer(DaemonDescription, DaemonDescription.yaml_representer)
class ServiceDescription(object):
"""
For responding to queries about the status of a particular service,
stateful or stateless.
This is not about health or performance monitoring of services: it's
about letting the orchestrator tell Ceph whether and where a
service is scheduled in the cluster. When an orchestrator tells
Ceph "it's running on host123", that's not a promise that the process
is literally up this second, it's a description of where the orchestrator
has decided the service should run.
"""
def __init__(self,
spec: ServiceSpec,
container_image_id: Optional[str] = None,
container_image_name: Optional[str] = None,
service_url: Optional[str] = None,
last_refresh: Optional[datetime.datetime] = None,
created: Optional[datetime.datetime] = None,
deleted: Optional[datetime.datetime] = None,
size: int = 0,
running: int = 0,
events: Optional[List['OrchestratorEvent']] = None,
virtual_ip: Optional[str] = None,
ports: List[int] = []) -> None:
# Not everyone runs in containers, but enough people do to
# justify having the container_image_id (image hash) and container_image
# (image name)
self.container_image_id = container_image_id # image hash
self.container_image_name = container_image_name # image friendly name
# If the service exposes REST-like API, this attribute should hold
# the URL.
self.service_url = service_url
# Number of daemons
self.size = size
# Number of daemons up
self.running = running
# datetime when this info was last refreshed
self.last_refresh: Optional[datetime.datetime] = last_refresh
self.created: Optional[datetime.datetime] = created
self.deleted: Optional[datetime.datetime] = deleted
self.spec: ServiceSpec = spec
self.events: List[OrchestratorEvent] = events or []
self.virtual_ip = virtual_ip
self.ports = ports
def service_type(self) -> str:
return self.spec.service_type
def __repr__(self) -> str:
return f"<ServiceDescription of {self.spec.one_line_str()}>"
def get_port_summary(self) -> str:
if not self.ports:
return ''
ports = sorted([int(x) for x in self.ports])
return f"{(self.virtual_ip or '?').split('/')[0]}:{','.join(map(str, ports or []))}"
def to_json(self) -> OrderedDict:
out = self.spec.to_json()
status = {
'container_image_id': self.container_image_id,
'container_image_name': self.container_image_name,
'service_url': self.service_url,
'size': self.size,
'running': self.running,
'last_refresh': self.last_refresh,
'created': self.created,
'virtual_ip': self.virtual_ip,
'ports': self.ports if self.ports else None,
}
for k in ['last_refresh', 'created']:
if getattr(self, k):
status[k] = datetime_to_str(getattr(self, k))
status = {k: v for (k, v) in status.items() if v is not None}
out['status'] = status
if self.events:
out['events'] = [e.to_json() for e in self.events]
return out
def to_dict(self) -> OrderedDict:
out = self.spec.to_json()
status = {
'container_image_id': self.container_image_id,
'container_image_name': self.container_image_name,
'service_url': self.service_url,
'size': self.size,
'running': self.running,
'last_refresh': self.last_refresh,
'created': self.created,
'virtual_ip': self.virtual_ip,
'ports': self.ports if self.ports else None,
}
for k in ['last_refresh', 'created']:
if getattr(self, k):
status[k] = datetime_to_str(getattr(self, k))
status = {k: v for (k, v) in status.items() if v is not None}
out['status'] = status
if self.events:
out['events'] = [e.to_dict() for e in self.events]
return out
    @classmethod
    @handle_type_error
    def from_json(cls, data: dict) -> 'ServiceDescription':
        """Inverse of to_json(): rebuild a ServiceDescription from its dict form."""
        c = data.copy()
        status = c.pop('status', {})
        event_strs = c.pop('events', [])
        # Whatever remains in `c` after popping status/events is the spec itself.
        spec = ServiceSpec.from_json(c)
        c_status = status.copy()
        # Timestamps were serialized as strings by to_json(); convert them back.
        for k in ['last_refresh', 'created']:
            if k in c_status:
                c_status[k] = str_to_datetime(c_status[k])
        events = [OrchestratorEvent.from_json(e) for e in event_strs]
        return cls(spec=spec, events=events, **c_status)
@staticmethod
def yaml_representer(dumper: 'yaml.SafeDumper', data: 'ServiceDescription') -> Any:
return dumper.represent_dict(cast(Mapping, data.to_json().items()))
# Register the representer so yaml.dump() knows how to emit ServiceDescription.
yaml.add_representer(ServiceDescription, ServiceDescription.yaml_representer)
class InventoryFilter(object):
    """
    Restrict an inventory query so we do not needlessly scan the whole estate.

    Typical uses:

      filter by host when presenting UI workflow for configuring
      a particular server.

      filter by label when not all of the estate is Ceph servers,
      and we want to only learn about the Ceph servers.

      filter by label when we are interested particularly
      in e.g. OSD servers.
    """

    def __init__(self, labels: Optional[List[str]] = None, hosts: Optional[List[str]] = None) -> None:
        #: Optional: restrict to hosts carrying these labels
        self.labels = labels
        #: Optional: restrict to these named hosts only
        self.hosts = hosts
class InventoryHost(object):
    """
    When fetching inventory, all Devices are grouped inside of an
    InventoryHost.
    """
    def __init__(self, name: str, devices: Optional[inventory.Devices] = None, labels: Optional[List[str]] = None, addr: Optional[str] = None) -> None:
        if devices is None:
            devices = inventory.Devices([])
        if labels is None:
            labels = []
        assert isinstance(devices, inventory.Devices)

        self.name = name  # unique within cluster.  For example a hostname.
        self.addr = addr or name
        self.devices = devices
        self.labels = labels

    def to_json(self) -> dict:
        """Serialize to a plain dict (inverse of from_json)."""
        return {
            'name': self.name,
            'addr': self.addr,
            'devices': self.devices.to_json(),
            'labels': self.labels,
        }

    @classmethod
    def from_json(cls, data: dict) -> 'InventoryHost':
        """Deserialize; raises OrchestratorValidationError on malformed input."""
        try:
            _data = copy.deepcopy(data)
            name = _data.pop('name')
            addr = _data.pop('addr', None) or name
            devices = inventory.Devices.from_json(_data.pop('devices'))
            labels = _data.pop('labels', list())
            if _data:
                error_msg = 'Unknown key(s) in Inventory: {}'.format(','.join(_data.keys()))
                raise OrchestratorValidationError(error_msg)
            return cls(name, devices, labels, addr)
        except KeyError as e:
            error_msg = '{} is required for {}'.format(e, cls.__name__)
            # chain the original exception so the root cause stays visible
            raise OrchestratorValidationError(error_msg) from e
        except TypeError as e:
            raise OrchestratorValidationError('Failed to read inventory: {}'.format(e)) from e

    @classmethod
    def from_nested_items(cls, hosts: List[dict]) -> List['InventoryHost']:
        devs = inventory.Devices.from_json
        return [cls(item[0], devs(item[1].data)) for item in hosts]

    def __repr__(self) -> str:
        return "<InventoryHost>({name})".format(name=self.name)

    @staticmethod
    def get_host_names(hosts: List['InventoryHost']) -> List[str]:
        return [host.name for host in hosts]

    def __eq__(self, other: Any) -> Any:
        # Previously this raised AttributeError when compared against anything
        # that was not an InventoryHost; returning NotImplemented lets Python
        # fall back to its default comparison instead.
        if not isinstance(other, InventoryHost):
            return NotImplemented
        return self.name == other.name and self.devices == other.devices
class DeviceLightLoc(namedtuple('DeviceLightLoc', ['host', 'dev', 'path'])):
    """
    Identifies one device on one host, for enabling or disabling its
    indicator LEDs.

    ``host`` is a hostname as returned by
    :func:`orchestrator.Orchestrator.get_hosts`.

    ``dev`` is a device_id, e.g. ``ABC1234DEF567-1R1234_ABC8DE0Q``.
    See ``ceph osd metadata | jq '.[].device_ids'``
    """
    __slots__ = ()
class OrchestratorEvent:
    """
    Similar to K8s Events.

    Some form of "important" log message attached to something.
    """
    INFO = 'INFO'
    ERROR = 'ERROR'
    regex_v1 = re.compile(r'^([^ ]+) ([^:]+):([^ ]+) \[([^\]]+)\] "((?:.|\n)*)"$', re.MULTILINE)

    def __init__(self, created: Union[str, datetime.datetime], kind: str,
                 subject: str, level: str, message: str) -> None:
        if isinstance(created, str):
            created = str_to_datetime(created)
        self.created: datetime.datetime = created

        assert kind in "service daemon".split()
        self.kind: str = kind

        # service name, or daemon name, or something similar
        self.subject: str = subject

        # Events are not meant for debugging. debugs should end in the log.
        assert level in "INFO ERROR".split()
        self.level = level

        self.message: str = message

    __slots__ = ('created', 'kind', 'subject', 'level', 'message')

    def kind_subject(self) -> str:
        """Combined "<kind>:<subject>" identifier used in the v1 wire format."""
        return '{}:{}'.format(self.kind, self.subject)

    def to_json(self) -> str:
        # Make a long list of events readable.
        created = datetime_to_str(self.created)
        return f'{created} {self.kind_subject()} [{self.level}] "{self.message}"'

    def to_dict(self) -> dict:
        # Convert events data to dict.
        return {
            'created': datetime_to_str(self.created),
            'subject': self.kind_subject(),
            'level': self.level,
            'message': self.message
        }

    @classmethod
    @handle_type_error
    def from_json(cls, data: str) -> "OrchestratorEvent":
        """
        >>> OrchestratorEvent.from_json('''2020-06-10T10:20:25.691255 daemon:crash.ubuntu [INFO] "Deployed crash.ubuntu on host 'ubuntu'"''').to_json()
        '2020-06-10T10:20:25.691255Z daemon:crash.ubuntu [INFO] "Deployed crash.ubuntu on host \\'ubuntu\\'"'

        :param data:
        :return:
        """
        match = cls.regex_v1.match(data)
        if match is None:
            raise ValueError(f'Unable to match: "{data}"')
        return cls(*match.groups())

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, OrchestratorEvent):
            return False
        # `level` is deliberately not part of equality, matching the
        # original implementation.
        return (self.created, self.kind, self.subject, self.message) == \
            (other.created, other.kind, other.subject, other.message)

    def __repr__(self) -> str:
        return 'OrchestratorEvent.from_json({!r})'.format(self.to_json())
def _mk_orch_methods(cls: Any) -> Any:
    """Class decorator: add a remote-call stub for every public Orchestrator method."""
    # Needs to be defined outside of for.
    # Otherwise meth is always bound to last key
    def shim(method_name: str) -> Callable:
        def inner(self: Any, *args: Any, **kwargs: Any) -> Any:
            completion = self._oremote(method_name, args, kwargs)
            return completion
        return inner
    # Copy every public Orchestrator method onto `cls` as a stub that forwards
    # to `_oremote`, preserving the original's metadata via update_wrapper.
    for name, method in Orchestrator.__dict__.items():
        if not name.startswith('_') and name not in ['is_orchestrator_module']:
            remote_call = update_wrapper(shim(name), method)
            setattr(cls, name, remote_call)
    return cls
@_mk_orch_methods
class OrchestratorClientMixin(Orchestrator):
    """
    A module that inherits from `OrchestratorClientMixin` can directly call
    all :class:`Orchestrator` methods without manually calling remote.

    Every interface method from ``Orchestrator`` is converted into a stub method that internally
    calls :func:`OrchestratorClientMixin._oremote`

    >>> class MyModule(OrchestratorClientMixin):
    ...    def func(self):
    ...        completion = self.add_host('somehost')  # calls `_oremote()`
    ...        self.log.debug(completion.result)

    .. note:: Orchestrator implementations should not inherit from `OrchestratorClientMixin`.
        Reason is, that OrchestratorClientMixin magically redirects all methods to the
        "real" implementation of the orchestrator.

    >>> import mgr_module
    >>> #doctest: +SKIP
    ... class MyImplementation(mgr_module.MgrModule, Orchestrator):
    ...     def __init__(self, ...):
    ...         self.orch_client = OrchestratorClientMixin()
    ...         self.orch_client.set_mgr(self.mgr))
    """
    def set_mgr(self, mgr: MgrModule) -> None:
        """
        Useable in the Dashboard that uses a global ``mgr``
        """
        # Double underscore => name-mangled attribute, so we cannot collide
        # with a subclass's own `mgr` attributes.
        self.__mgr = mgr  # Make sure we're not overwriting any other `mgr` properties

    def __get_mgr(self) -> Any:
        # Fall back to `self` when set_mgr() was never called (i.e. this mixin
        # is itself part of a mgr module).
        try:
            return self.__mgr
        except AttributeError:
            return self

    def _oremote(self, meth: Any, args: Any, kwargs: Any) -> Any:
        """
        Helper for invoking `remote` on whichever orchestrator is enabled

        :raises RuntimeError: If the remote method failed.
        :raises OrchestratorError: orchestrator failed to perform
        :raises ImportError: no `orchestrator` module or backend not found.
        """
        mgr = self.__get_mgr()
        # `_select_orchestrator` exists locally when running inside the
        # orchestrator module itself; otherwise ask the module remotely.
        try:
            o = mgr._select_orchestrator()
        except AttributeError:
            o = mgr.remote('orchestrator', '_select_orchestrator')
        if o is None:
            raise NoOrchestrator()
        mgr.log.debug("_oremote {} -> {}.{}(*{}, **{})".format(mgr.module_name, o, meth, args, kwargs))
        try:
            return mgr.remote(o, meth, *args, **kwargs)
        except Exception as e:
            if meth == 'get_feature_set':
                raise  # self.get_feature_set() calls self._oremote()
            # Turn "backend does not support this call" into NotImplementedError
            # rather than a generic remote failure.
            f_set = self.get_feature_set()
            if meth not in f_set or not f_set[meth]['available']:
                raise NotImplementedError(f'{o} does not implement {meth}') from e
            raise
| 61,137 | 35.874548 | 223 | py |
null | ceph-main/src/pybind/mgr/orchestrator/module.py | import enum
import errno
import json
from typing import List, Set, Optional, Iterator, cast, Dict, Any, Union, Sequence, Mapping
import re
import datetime
import math
import yaml
from prettytable import PrettyTable
try:
from natsort import natsorted
except ImportError:
# fallback to normal sort
natsorted = sorted # type: ignore
from ceph.deployment.inventory import Device # noqa: F401; pylint: disable=unused-variable
from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection, OSDMethod
from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, service_spec_allow_invalid_from_json, TracingSpec
from ceph.deployment.hostspec import SpecValidationError
from ceph.utils import datetime_now
from mgr_util import to_pretty_timedelta, format_bytes
from mgr_module import MgrModule, HandleCommandResult, Option
from object_format import Format
from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
raise_if_exception, _cli_write_command, OrchestratorError, \
NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
ServiceDescription, DaemonDescription, IscsiServiceSpec, json_to_generic_spec, \
GenericSpec, DaemonDescriptionStatus, SNMPGatewaySpec, MDSSpec, TunedProfileSpec
def nice_delta(now: datetime.datetime, t: Optional[datetime.datetime], suffix: str = '') -> str:
    """Render the age `now - t` as a short human-readable string, '-' when t is unset."""
    if t is None:
        return '-'
    return to_pretty_timedelta(now - t) + suffix
def nice_bytes(v: Optional[int]) -> str:
    """Render a byte count for humans; '-' when zero or unknown."""
    return format_bytes(v, 5) if v else '-'
class HostDetails:
    """Display/serialization helper joining a HostSpec with gathered hardware facts.

    Constructed either from a live HostSpec (+optional facts dict) or from a
    previously serialized dict (``object_dump``) — exactly one must be given.
    """
    def __init__(self,
                 host: Optional[HostSpec] = None,
                 facts: Optional[Dict[str, Any]] = None,
                 object_dump: Optional[Dict[str, Any]] = None):
        self._hostspec = host
        self._facts = facts
        # Display defaults when no facts are available.
        self.hdd_summary = 'N/A'
        self.ram = 'N/A'
        self.cpu_summary = 'N/A'
        self.server = 'N/A'
        self.os = 'N/A'
        self.ssd_summary = 'N/A'
        self.nic_count = 'N/A'

        assert host or object_dump
        if object_dump:
            self._load(object_dump)
        else:
            self._build()

    def _load(self, object_dump: Dict[str, Any]) -> None:
        """Build the object from predefined dictionary"""
        self.addr = object_dump.get('addr')
        self.hostname = object_dump.get('hostname')
        self.labels = object_dump.get('labels')
        self.status = object_dump.get('status')
        self.location = object_dump.get('location')
        self.server = object_dump.get('server', 'N/A')
        self.hdd_summary = object_dump.get('hdd_summary', 'N/A')
        self.ssd_summary = object_dump.get('ssd_summary', 'N/A')
        self.os = object_dump.get('os', 'N/A')
        self.cpu_summary = object_dump.get('cpu_summary', 'N/A')
        self.ram = object_dump.get('ram', 'N/A')
        self.nic_count = object_dump.get('nic_count', 'N/A')

    def _build(self) -> None:
        """build host details from the HostSpec and facts"""
        # Mirror every HostSpec attribute (hostname, addr, labels, status, ...)
        # onto this object.
        for a in self._hostspec.__dict__:
            setattr(self, a, getattr(self._hostspec, a))

        if self._facts:
            self.server = f"{self._facts.get('vendor', '').strip()} {self._facts.get('model', '').strip()}"
            _cores = self._facts.get('cpu_cores', 0) * self._facts.get('cpu_count', 0)
            # NOTE(review): presumably cpu_threads is threads-per-core, making
            # this total hardware threads — confirm against the facts gatherer.
            _threads = self._facts.get('cpu_threads', 0) * _cores
            self.os = self._facts.get('operating_system', 'N/A')
            self.cpu_summary = f"{_cores}C/{_threads}T" if _cores > 0 else 'N/A'

            _total_bytes = self._facts.get('memory_total_kb', 0) * 1024
            # Pick GiB vs MiB so small hosts don't render as "0 GiB".
            divisor, suffix = (1073741824, 'GiB') if _total_bytes > 1073741824 else (1048576, 'MiB')
            self.ram = f'{math.ceil(_total_bytes / divisor)} {suffix}'

            _hdd_capacity = self._facts.get('hdd_capacity', '')
            _ssd_capacity = self._facts.get('flash_capacity', '')
            # Summaries are "<count>/<capacity>", or '-' when the count is zero.
            if _hdd_capacity:
                if self._facts.get('hdd_count', 0) == 0:
                    self.hdd_summary = '-'
                else:
                    self.hdd_summary = f"{self._facts.get('hdd_count', 0)}/{self._facts.get('hdd_capacity', 0)}"
            if _ssd_capacity:
                if self._facts.get('flash_count', 0) == 0:
                    self.ssd_summary = '-'
                else:
                    self.ssd_summary = f"{self._facts.get('flash_count', 0)}/{self._facts.get('flash_capacity', 0)}"

            self.nic_count = self._facts.get('nic_count', '')

    def to_json(self) -> Dict[str, Any]:
        """Serialize all public attributes (private `_`-prefixed ones are dropped)."""
        return {k: v for k, v in self.__dict__.items() if not k.startswith('_')}

    @classmethod
    def from_json(cls, host_details: dict) -> 'HostDetails':
        """Inverse of to_json()."""
        _cls = cls(object_dump=host_details)
        return _cls

    @staticmethod
    def yaml_representer(dumper: 'yaml.SafeDumper', data: 'HostDetails') -> Any:
        # Dump via the JSON representation so YAML output matches to_json().
        return dumper.represent_dict(cast(Mapping, data.to_json().items()))
# Register the representer so yaml.dump() knows how to emit HostDetails.
yaml.add_representer(HostDetails, HostDetails.yaml_representer)
class ServiceType(enum.Enum):
    """Service types accepted by the `orch` CLI; values are the CLI spellings."""
    mon = 'mon'
    mgr = 'mgr'
    rbd_mirror = 'rbd-mirror'
    cephfs_mirror = 'cephfs-mirror'
    crash = 'crash'
    alertmanager = 'alertmanager'
    grafana = 'grafana'
    node_exporter = 'node-exporter'
    ceph_exporter = 'ceph-exporter'
    prometheus = 'prometheus'
    loki = 'loki'
    promtail = 'promtail'
    mds = 'mds'
    rgw = 'rgw'
    nfs = 'nfs'
    iscsi = 'iscsi'
    snmp_gateway = 'snmp-gateway'
    elasticsearch = 'elasticsearch'
    jaeger_agent = 'jaeger-agent'
    jaeger_collector = 'jaeger-collector'
    jaeger_query = 'jaeger-query'
class ServiceAction(enum.Enum):
    """Actions that can be applied to a whole service via the `orch` CLI."""
    start = 'start'
    stop = 'stop'
    restart = 'restart'
    redeploy = 'redeploy'
    reconfig = 'reconfig'
    rotate_key = 'rotate-key'
class DaemonAction(enum.Enum):
    """Actions that can be applied to a single daemon via the `orch` CLI."""
    start = 'start'
    stop = 'stop'
    restart = 'restart'
    reconfig = 'reconfig'
    rotate_key = 'rotate-key'
class IngressType(enum.Enum):
    """Ingress deployment modes; `default` is an alias for haproxy-standard."""
    default = 'default'
    keepalive_only = 'keepalive-only'
    haproxy_standard = 'haproxy-standard'
    haproxy_protocol = 'haproxy-protocol'

    def canonicalize(self) -> "IngressType":
        """Resolve the `default` alias to its concrete mode; others pass through."""
        if self is IngressType.default:
            return IngressType.haproxy_standard
        return self
def to_format(what: Any, format: Format, many: bool, cls: Any) -> Any:
    """Render `what` (one object or a list) in the requested output format.

    Objects are serialized via their `to_json()` when present. For YAML,
    objects are round-tripped through `cls.from_json` so that PyYAML's
    identity-based representer lookup works (see comment below).
    """
    def to_json_1(obj: Any) -> Any:
        if hasattr(obj, 'to_json'):
            return obj.to_json()
        return obj

    def to_json_n(objs: List) -> List:
        return [to_json_1(o) for o in objs]

    to_json = to_json_n if many else to_json_1

    if format == Format.json:
        return json.dumps(to_json(what), sort_keys=True)
    elif format == Format.json_pretty:
        return json.dumps(to_json(what), indent=2, sort_keys=True)
    elif format == Format.yaml:
        # fun with subinterpreters again. pyyaml depends on object identity.
        # as what originates from a different subinterpreter we have to copy things here.
        if cls:
            flat = to_json(what)
            copy = [cls.from_json(o) for o in flat] if many else cls.from_json(flat)
        else:
            copy = what

        def to_yaml_1(obj: Any) -> Any:
            # Objects with a registered representer can be dumped directly.
            if hasattr(obj, 'yaml_representer'):
                return obj
            return to_json_1(obj)

        def to_yaml_n(objs: list) -> list:
            return [to_yaml_1(o) for o in objs]

        to_yaml = to_yaml_n if many else to_yaml_1

        if many:
            return yaml.dump_all(to_yaml(copy), default_flow_style=False)
        return yaml.dump(to_yaml(copy), default_flow_style=False)
    elif format == Format.xml or format == Format.xml_pretty:
        raise OrchestratorError(f"format '{format.name}' is not implemented.")
    else:
        raise OrchestratorError(f'unsupported format type: {format}')
def generate_preview_tables(data: Any, osd_only: bool = False) -> str:
    """Assemble the `orch apply --dry-run` preview output.

    Errors short-circuit to a JSON error dump; warnings are prepended to
    the rendered tables.
    """
    error = [x.get('error') for x in data if x.get('error')]
    if error:
        return json.dumps(error)
    warning = [x.get('warning') for x in data if x.get('warning')]
    osd_table = preview_table_osd(data)
    service_table = preview_table_services(data)

    if osd_only:
        tables = f"""
{''.join(warning)}

################
OSDSPEC PREVIEWS
################
{osd_table}
"""
        return tables
    else:
        tables = f"""
{''.join(warning)}

####################
SERVICESPEC PREVIEWS
####################
{service_table}

################
OSDSPEC PREVIEWS
################
{osd_table}
"""
        return tables
def preview_table_osd(data: List) -> str:
    """Render the OSD portion of a dry-run preview as a text table.

    Returns the error message string instead of a table if any spec
    reported an error; accumulated per-spec notes are prepended.
    """
    table = PrettyTable(header_style='upper', title='OSDSPEC PREVIEWS', border=True)
    table.field_names = "service name host data db wal".split()
    table.align = 'l'
    table.left_padding_width = 0
    table.right_padding_width = 2
    notes = ''
    for osd_data in data:
        if osd_data.get('service_type') != 'osd':
            continue
        for host, specs in osd_data.get('data').items():
            for spec in specs:
                if spec.get('error'):
                    return spec.get('message')
                dg_name = spec.get('osdspec')
                if spec.get('notes', []):
                    notes += '\n'.join(spec.get('notes')) + '\n'
                for osd in spec.get('data', []):
                    db_path = osd.get('block_db', '-')
                    wal_path = osd.get('block_wal', '-')
                    block_data = osd.get('data', '')
                    # Skip entries without a data device; nothing to deploy.
                    if not block_data:
                        continue
                    table.add_row(('osd', dg_name, host, block_data, db_path, wal_path))
    return notes + table.get_string()
def preview_table_services(data: List) -> str:
    """Render the non-OSD service portion of a dry-run preview as a text table."""
    table = PrettyTable(header_style='upper', title="SERVICESPEC PREVIEW", border=True)
    table.field_names = 'SERVICE NAME ADD_TO REMOVE_FROM'.split()
    table.align = 'l'
    table.left_padding_width = 0
    table.right_padding_width = 2
    for item in data:
        # Items with warnings are shown in the warning banner, not the table.
        if item.get('warning'):
            continue
        if item.get('service_type') != 'osd':
            table.add_row((item.get('service_type'), item.get('service_name'),
                           " ".join(item.get('add')), " ".join(item.get('remove'))))
    return table.get_string()
class OrchestratorCli(OrchestratorClientMixin, MgrModule,
                      metaclass=CLICommandMeta):
    """The `orch` CLI front-end mgr module; forwards to the selected backend."""
    MODULE_OPTIONS = [
        Option(
            'orchestrator',
            type='str',
            default=None,
            desc='Orchestrator backend',
            enum_allowed=['cephadm', 'rook', 'test_orchestrator'],
            runtime=True,
        ),
        Option(
            'fail_fs',
            type='bool',
            default=False,
            desc='Fail filesystem for rapid multi-rank mds upgrade'
        ),
    ]
    NATIVE_OPTIONS = []  # type: List[dict]
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(OrchestratorCli, self).__init__(*args, **kwargs)
        self.ident: Set[str] = set()  # device ids with ident LED turned on
        self.fault: Set[str] = set()  # device ids with fault LED turned on
        self._load()           # restore persisted LED state from the mgr store
        self._refresh_health()  # re-publish health warnings for lit LEDs
def _load(self) -> None:
active = self.get_store('active_devices')
if active:
decoded = json.loads(active)
self.ident = set(decoded.get('ident', []))
self.fault = set(decoded.get('fault', []))
self.log.debug('ident {}, fault {}'.format(self.ident, self.fault))
def _save(self) -> None:
encoded = json.dumps({
'ident': list(self.ident),
'fault': list(self.fault),
})
self.set_store('active_devices', encoded)
def _refresh_health(self) -> None:
h = {}
if self.ident:
h['DEVICE_IDENT_ON'] = {
'severity': 'warning',
'summary': '%d devices have ident light turned on' % len(
self.ident),
'detail': ['{} ident light enabled'.format(d) for d in self.ident]
}
if self.fault:
h['DEVICE_FAULT_ON'] = {
'severity': 'warning',
'summary': '%d devices have fault light turned on' % len(
self.fault),
'detail': ['{} fault light enabled'.format(d) for d in self.ident]
}
self.set_health_checks(h)
def _get_device_locations(self, dev_id):
# type: (str) -> List[DeviceLightLoc]
locs = [d['location'] for d in self.get('devices')['devices'] if d['devid'] == dev_id]
return [DeviceLightLoc(**loc) for loc in sum(locs, [])]
@_cli_read_command(prefix='device ls-lights')
def _device_ls(self) -> HandleCommandResult:
"""List currently active device indicator lights"""
return HandleCommandResult(
stdout=json.dumps({
'ident': list(self.ident),
'fault': list(self.fault)
}, indent=4, sort_keys=True))
def light_on(self, fault_ident, devid):
# type: (str, str) -> HandleCommandResult
assert fault_ident in ("fault", "ident")
locs = self._get_device_locations(devid)
if locs is None:
return HandleCommandResult(stderr='device {} not found'.format(devid),
retval=-errno.ENOENT)
getattr(self, fault_ident).add(devid)
self._save()
self._refresh_health()
completion = self.blink_device_light(fault_ident, True, locs)
return HandleCommandResult(stdout=str(completion.result))
def light_off(self, fault_ident, devid, force):
# type: (str, str, bool) -> HandleCommandResult
assert fault_ident in ("fault", "ident")
locs = self._get_device_locations(devid)
if locs is None:
return HandleCommandResult(stderr='device {} not found'.format(devid),
retval=-errno.ENOENT)
try:
completion = self.blink_device_light(fault_ident, False, locs)
if devid in getattr(self, fault_ident):
getattr(self, fault_ident).remove(devid)
self._save()
self._refresh_health()
return HandleCommandResult(stdout=str(completion.result))
except Exception:
# There are several reasons the try: block might fail:
# 1. the device no longer exist
# 2. the device is no longer known to Ceph
# 3. the host is not reachable
if force and devid in getattr(self, fault_ident):
getattr(self, fault_ident).remove(devid)
self._save()
self._refresh_health()
raise
    class DeviceLightEnable(enum.Enum):
        # CLI argument values for `device light (on|off) ...`
        on = 'on'
        off = 'off'
    class DeviceLightType(enum.Enum):
        # Which LED to drive: identification or fault indicator.
        ident = 'ident'
        fault = 'fault'
@_cli_write_command(prefix='device light')
def _device_light(self,
enable: DeviceLightEnable,
devid: str,
light_type: DeviceLightType = DeviceLightType.ident,
force: bool = False) -> HandleCommandResult:
"""
Enable or disable the device light. Default type is `ident`
'Usage: device light (on|off) <devid> [ident|fault] [--force]'
"""""
if enable == self.DeviceLightEnable.on:
return self.light_on(light_type.value, devid)
else:
return self.light_off(light_type.value, devid, force)
    def _select_orchestrator(self) -> str:
        """Return the configured orchestrator backend name (module option)."""
        return cast(str, self.get_module_option("orchestrator"))
    def _get_fail_fs_value(self) -> bool:
        """Return the `fail_fs` module option coerced to bool."""
        return bool(self.get_module_option("fail_fs"))
@_cli_write_command('orch host add')
def _add_host(self,
hostname: str,
addr: Optional[str] = None,
labels: Optional[List[str]] = None,
maintenance: Optional[bool] = False) -> HandleCommandResult:
"""Add a host"""
_status = 'maintenance' if maintenance else ''
# split multiple labels passed in with --labels=label1,label2
if labels and len(labels) == 1:
labels = labels[0].split(',')
s = HostSpec(hostname=hostname, addr=addr, labels=labels, status=_status)
return self._apply_misc([s], False, Format.plain)
    @_cli_write_command('orch host rm')
    def _remove_host(self, hostname: str, force: bool = False, offline: bool = False) -> HandleCommandResult:
        """Remove a host"""
        completion = self.remove_host(hostname, force, offline)
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch host drain')
    def _drain_host(self, hostname: str, force: bool = False) -> HandleCommandResult:
        """drain all daemons from a host"""
        completion = self.drain_host(hostname, force)
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch host set-addr')
    def _update_set_addr(self, hostname: str, addr: str) -> HandleCommandResult:
        """Update a host address"""
        completion = self.update_host_addr(hostname, addr)
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
    @_cli_read_command('orch host ls')
    def _get_hosts(self,
                   format: Format = Format.plain,
                   host_pattern: str = '',
                   label: str = '',
                   host_status: str = '',
                   detail: bool = False) -> HandleCommandResult:
        """List high level host information"""
        completion = self.get_hosts()
        hosts = raise_if_exception(completion)

        # --detail needs per-host facts, which only the cephadm backend serves.
        cephadm_active = True if self._select_orchestrator() == "cephadm" else False
        show_detail = cephadm_active and detail

        # Apply --host-pattern / --label filtering via PlacementSpec matching.
        filter_spec = PlacementSpec(
            host_pattern=host_pattern,
            label=label
        )
        filtered_hosts: List[str] = filter_spec.filter_matching_hostspecs(hosts)
        hosts = [h for h in hosts if h.hostname in filtered_hosts]

        if host_status:
            hosts = [h for h in hosts if h.status.lower() == host_status]

        if show_detail:
            # switch to a HostDetails based representation
            _hosts = []
            for h in hosts:
                facts_completion = self.get_facts(h.hostname)
                host_facts = raise_if_exception(facts_completion)
                _hosts.append(HostDetails(host=h, facts=host_facts[0]))
            hosts: List[HostDetails] = _hosts  # type: ignore [no-redef]

        if format != Format.plain:
            if show_detail:
                output = to_format(hosts, format, many=True, cls=HostDetails)
            else:
                output = to_format(hosts, format, many=True, cls=HostSpec)
        else:
            if show_detail:
                table_headings = ['HOST', 'ADDR', 'LABELS', 'STATUS',
                                  'VENDOR/MODEL', 'CPU', 'RAM', 'HDD', 'SSD', 'NIC']
            else:
                table_headings = ['HOST', 'ADDR', 'LABELS', 'STATUS']

            table = PrettyTable(
                table_headings,
                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for host in natsorted(hosts, key=lambda h: h.hostname):
                row = (host.hostname, host.addr, ','.join(
                    host.labels), host.status.capitalize())
                if show_detail and isinstance(host, HostDetails):
                    row += (host.server, host.cpu_summary, host.ram,
                            host.hdd_summary, host.ssd_summary, host.nic_count)
                table.add_row(row)
            output = table.get_string()
        if format == Format.plain:
            # Append a human-readable summary describing the active filters.
            output += f'\n{len(hosts)} hosts in cluster'
            if label:
                output += f' who had label {label}'
            if host_pattern:
                output += f' whose hostname matched {host_pattern}'
            if host_status:
                output += f' with status {host_status}'
        return HandleCommandResult(stdout=output)
    @_cli_write_command('orch host label add')
    def _host_label_add(self, hostname: str, label: str) -> HandleCommandResult:
        """Add a host label"""
        completion = self.add_host_label(hostname, label)
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch host label rm')
    def _host_label_rm(self, hostname: str, label: str, force: bool = False) -> HandleCommandResult:
        """Remove a host label"""
        completion = self.remove_host_label(hostname, label, force)
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch host ok-to-stop')
def _host_ok_to_stop(self, hostname: str) -> HandleCommandResult:
"""Check if the specified host can be safely stopped without reducing availability"""""
completion = self.host_ok_to_stop(hostname)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch host maintenance enter')
    def _host_maintenance_enter(self, hostname: str, force: bool = False, yes_i_really_mean_it: bool = False) -> HandleCommandResult:
        """
        Prepare a host for maintenance by shutting down and disabling all Ceph daemons (cephadm only)
        """
        completion = self.enter_host_maintenance(hostname, force=force, yes_i_really_mean_it=yes_i_really_mean_it)
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)

        return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch host maintenance exit')
    def _host_maintenance_exit(self, hostname: str) -> HandleCommandResult:
        """
        Return a host from maintenance, restarting all Ceph daemons (cephadm only)
        """
        completion = self.exit_host_maintenance(hostname)
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)

        return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch host rescan')
    def _host_rescan(self, hostname: str, with_summary: bool = False) -> HandleCommandResult:
        """Perform a disk rescan on a host"""
        completion = self.rescan_host(hostname)
        raise_if_exception(completion)

        if with_summary:
            return HandleCommandResult(stdout=completion.result_str())
        # Without --with-summary only report up to the first sentence.
        return HandleCommandResult(stdout=completion.result_str().split('.')[0])
    @_cli_read_command('orch device ls')
    def _list_devices(self,
                      hostname: Optional[List[str]] = None,
                      format: Format = Format.plain,
                      refresh: bool = False,
                      wide: bool = False) -> HandleCommandResult:
        """
        List devices on a host
        """
        # Provide information about storage devices present in cluster hosts
        #
        # Note: this does not have to be completely synchronous. Slightly out of
        # date hardware inventory is fine as long as hardware ultimately appears
        # in the output of this command.
        nf = InventoryFilter(hosts=hostname) if hostname else None

        completion = self.get_inventory(host_filter=nf, refresh=refresh)

        inv_hosts = raise_if_exception(completion)

        if format != Format.plain:
            return HandleCommandResult(stdout=to_format(inv_hosts,
                                                        format,
                                                        many=True,
                                                        cls=InventoryHost))
        else:
            # Map raw availability / LED values to the strings shown in the table.
            display_map = {
                "Unsupported": "N/A",
                "N/A": "N/A",
                "On": "On",
                "Off": "Off",
                True: "Yes",
                False: "No",
            }

            out = []
            if wide:
                table = PrettyTable(
                    ['HOST', 'PATH', 'TYPE', 'TRANSPORT', 'RPM', 'DEVICE ID', 'SIZE',
                     'HEALTH', 'IDENT', 'FAULT',
                     'AVAILABLE', 'REFRESHED', 'REJECT REASONS'],
                    border=False)
            else:
                table = PrettyTable(
                    ['HOST', 'PATH', 'TYPE', 'DEVICE ID', 'SIZE',
                     'AVAILABLE', 'REFRESHED', 'REJECT REASONS'],
                    border=False)
            table.align = 'l'
            table._align['SIZE'] = 'r'
            table.left_padding_width = 0
            table.right_padding_width = 2
            now = datetime_now()
            for host_ in natsorted(inv_hosts, key=lambda h: h.name):  # type: InventoryHost
                for d in sorted(host_.devices.devices, key=lambda d: d.path):  # type: Device
                    # LED state is only available when libstoragemgmt data is present.
                    led_ident = 'N/A'
                    led_fail = 'N/A'
                    if d.lsm_data.get('ledSupport', None):
                        led_ident = d.lsm_data['ledSupport']['IDENTstatus']
                        led_fail = d.lsm_data['ledSupport']['FAILstatus']

                    if wide:
                        table.add_row(
                            (
                                host_.name,
                                d.path,
                                d.human_readable_type,
                                d.lsm_data.get('transport', ''),
                                d.lsm_data.get('rpm', ''),
                                d.device_id,
                                format_bytes(d.sys_api.get('size', 0), 5),
                                d.lsm_data.get('health', ''),
                                display_map[led_ident],
                                display_map[led_fail],
                                display_map[d.available],
                                nice_delta(now, d.created, ' ago'),
                                ', '.join(d.rejected_reasons)
                            )
                        )
                    else:
                        table.add_row(
                            (
                                host_.name,
                                d.path,
                                d.human_readable_type,
                                d.device_id,
                                format_bytes(d.sys_api.get('size', 0), 5),
                                display_map[d.available],
                                nice_delta(now, d.created, ' ago'),
                                ', '.join(d.rejected_reasons)
                            )
                        )
            out.append(table.get_string())
            return HandleCommandResult(stdout='\n'.join(out))
    @_cli_write_command('orch device zap')
    def _zap_device(self, hostname: str, path: str, force: bool = False) -> HandleCommandResult:
        """
        Zap (erase!) a device so it can be re-used
        """
        # Destructive operation: refuse unless the user explicitly passed --force.
        if not force:
            raise OrchestratorError('must pass --force to PERMANENTLY ERASE DEVICE DATA')
        completion = self.zap_device(hostname, path)
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch sd dump cert')
    def _service_discovery_dump_cert(self) -> HandleCommandResult:
        """
        Returns service discovery server root certificate
        """
        completion = self.service_discovery_dump_cert()
        # Surface any backend failure as an exception before reporting success.
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
    @_cli_read_command('orch ls')
    def _list_services(self,
                       service_type: Optional[str] = None,
                       service_name: Optional[str] = None,
                       export: bool = False,
                       format: Format = Format.plain,
                       refresh: bool = False) -> HandleCommandResult:
        """
        List services known to orchestrator
        """
        # --export defaults to YAML so the output can be re-applied directly.
        if export and format == Format.plain:
            format = Format.yaml

        completion = self.describe_service(service_type,
                                           service_name,
                                           refresh=refresh)

        services = raise_if_exception(completion)

        def ukn(s: Optional[str]) -> str:
            return '<unknown>' if s is None else s

        # Sort the list for display
        services.sort(key=lambda s: (ukn(s.spec.service_name())))

        if len(services) == 0:
            return HandleCommandResult(stdout="No services reported")
        elif format != Format.plain:
            with service_spec_allow_invalid_from_json():
                if export:
                    # Services in the middle of deletion are excluded from export.
                    data = [s.spec for s in services if s.deleted is None]
                    return HandleCommandResult(stdout=to_format(data, format, many=True, cls=ServiceSpec))
                else:
                    return HandleCommandResult(stdout=to_format(services, format, many=True, cls=ServiceDescription))
        else:
            now = datetime_now()
            table = PrettyTable(
                [
                    'NAME', 'PORTS',
                    'RUNNING', 'REFRESHED', 'AGE',
                    'PLACEMENT',
                ],
                border=False)
            table.align['NAME'] = 'l'
            table.align['PORTS'] = 'l'
            table.align['RUNNING'] = 'r'
            table.align['REFRESHED'] = 'l'
            table.align['AGE'] = 'l'
            table.align['PLACEMENT'] = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for s in services:
                if not s.spec:
                    pl = '<no spec>'
                elif s.spec.unmanaged:
                    pl = '<unmanaged>'
                else:
                    pl = s.spec.placement.pretty_str()
                if s.deleted:
                    refreshed = '<deleting>'
                else:
                    refreshed = nice_delta(now, s.last_refresh, ' ago')

                # OSD counts have no fixed target size, so show a bare number.
                if s.spec.service_type == 'osd':
                    running = str(s.running)
                else:
                    running = '{}/{}'.format(s.running, s.size)

                table.add_row((
                    s.spec.service_name(),
                    s.get_port_summary(),
                    running,
                    refreshed,
                    nice_delta(now, s.created),
                    pl,
                ))

            return HandleCommandResult(stdout=table.get_string())
    @_cli_read_command('orch ps')
    def _list_daemons(self,
                      hostname: Optional[str] = None,
                      _end_positional_: int = 0,
                      service_name: Optional[str] = None,
                      daemon_type: Optional[str] = None,
                      daemon_id: Optional[str] = None,
                      format: Format = Format.plain,
                      refresh: bool = False) -> HandleCommandResult:
        """
        List daemons known to orchestrator
        """
        completion = self.list_daemons(service_name,
                                       daemon_type,
                                       daemon_id=daemon_id,
                                       host=hostname,
                                       refresh=refresh)
        daemons = raise_if_exception(completion)

        def ukn(s: Optional[str]) -> str:
            # Placeholder for values the orchestrator could not determine.
            return '<unknown>' if s is None else s
        # Sort the list for display
        daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.hostname), ukn(s.daemon_id)))
        if format != Format.plain:
            return HandleCommandResult(stdout=to_format(daemons, format, many=True, cls=DaemonDescription))
        else:
            if len(daemons) == 0:
                return HandleCommandResult(stdout="No daemons reported")
            now = datetime_now()
            table = PrettyTable(
                ['NAME', 'HOST', 'PORTS',
                 'STATUS', 'REFRESHED', 'AGE',
                 'MEM USE', 'MEM LIM',
                 'VERSION', 'IMAGE ID', 'CONTAINER ID'],
                border=False)
            table.align = 'l'
            # _align is a PrettyTable internal; right-align the numeric-ish
            # columns individually.
            table._align['REFRESHED'] = 'r'
            table._align['AGE'] = 'r'
            table._align['MEM USE'] = 'r'
            table._align['MEM LIM'] = 'r'
            table.left_padding_width = 0
            table.right_padding_width = 2
            # natsorted gives a human-friendly ordering (osd.2 before osd.10).
            for s in natsorted(daemons, key=lambda d: d.name()):
                if s.status_desc:
                    status = s.status_desc
                else:
                    status = DaemonDescriptionStatus.to_str(s.status)
                if s.status == DaemonDescriptionStatus.running and s.started:  # See DDS.starting
                    status += ' (%s)' % to_pretty_timedelta(now - s.started)
                table.add_row((
                    s.name(),
                    ukn(s.hostname),
                    s.get_port_summary(),
                    status,
                    nice_delta(now, s.last_refresh, ' ago'),
                    nice_delta(now, s.created),
                    nice_bytes(s.memory_usage),
                    nice_bytes(s.memory_request),
                    ukn(s.version),
                    ukn(s.container_image_id)[0:12],
                    ukn(s.container_id)))
            # Hide the CONTAINER ID column entirely when no daemon reported one.
            remove_column = 'CONTAINER ID'
            if table.get_string(fields=[remove_column], border=False,
                                header=False).count('<unknown>') == len(daemons):
                try:
                    table.del_column(remove_column)
                except AttributeError as e:
                    # del_column method was introduced in prettytable 2.0
                    if str(e) != "del_column":
                        raise
                    # Fallback for older prettytable: strip the field name and
                    # drop the last cell of every row by hand.
                    table.field_names.remove(remove_column)
                    table._rows = [row[:-1] for row in table._rows]
            return HandleCommandResult(stdout=table.get_string())
@_cli_write_command('orch prometheus access info')
def _get_prometheus_access_info(self) -> HandleCommandResult:
completion = self.get_prometheus_access_info()
access_info = raise_if_exception(completion)
return HandleCommandResult(stdout=json.dumps(access_info))
@_cli_write_command('orch alertmanager access info')
def _get_alertmanager_access_info(self) -> HandleCommandResult:
completion = self.get_alertmanager_access_info()
access_info = raise_if_exception(completion)
return HandleCommandResult(stdout=json.dumps(access_info))
    @_cli_write_command('orch apply osd')
    def _apply_osd(self,
                   all_available_devices: bool = False,
                   format: Format = Format.plain,
                   unmanaged: Optional[bool] = None,
                   dry_run: bool = False,
                   no_overwrite: bool = False,
                   method: Optional[OSDMethod] = None,
                   inbuf: Optional[str] = None  # deprecated. Was deprecated before Quincy
                   ) -> HandleCommandResult:
        """
        Create OSD daemon(s) on all available devices
        """
        # Exactly one of '-i <file>' (deprecated drive-group YAML) and
        # '--all-available-devices' must be given.
        if inbuf and all_available_devices:
            return HandleCommandResult(-errno.EINVAL, '-i infile and --all-available-devices are mutually exclusive')
        if not inbuf and not all_available_devices:
            # one parameter must be present
            return HandleCommandResult(-errno.EINVAL, '--all-available-devices is required')
        if inbuf:
            # Deprecated path: parse one or more DriveGroupSpecs from YAML.
            if unmanaged is not None:
                return HandleCommandResult(-errno.EINVAL, stderr='-i infile and --unmanaged are mutually exclusive')
            try:
                drivegroups = [_dg for _dg in yaml.safe_load_all(inbuf)]
            except yaml.scanner.ScannerError as e:
                msg = f"Invalid YAML received : {str(e)}"
                self.log.exception(e)
                return HandleCommandResult(-errno.EINVAL, stderr=msg)
            dg_specs = []
            for dg in drivegroups:
                spec = DriveGroupSpec.from_json(dg)
                if dry_run:
                    spec.preview_only = True
                dg_specs.append(spec)
            return self._apply_misc(dg_specs, dry_run, format, no_overwrite)
        if all_available_devices:
            if unmanaged is None:
                unmanaged = False
            # Synthesize a catch-all drive group: every available device on
            # every host becomes an OSD data device.
            dg_specs = [
                DriveGroupSpec(
                    service_id='all-available-devices',
                    placement=PlacementSpec(host_pattern='*'),
                    data_devices=DeviceSelection(all=True),
                    unmanaged=unmanaged,
                    preview_only=dry_run,
                    method=method
                )
            ]
            return self._apply_misc(dg_specs, dry_run, format, no_overwrite)
        # Defensive fallback; unreachable given the mutual-exclusion checks above.
        return HandleCommandResult(-errno.EINVAL, stderr='--all-available-devices is required')
@_cli_write_command('orch daemon add osd')
def _daemon_add_osd(self,
svc_arg: Optional[str] = None,
method: Optional[OSDMethod] = None) -> HandleCommandResult:
"""Create OSD daemon(s) on specified host and device(s) (e.g., ceph orch daemon add osd myhost:/dev/sdb)"""
# Create one or more OSDs"""
usage = """
Usage:
ceph orch daemon add osd host:device1,device2,...
ceph orch daemon add osd host:data_devices=device1,device2,db_devices=device3,osds_per_device=2,...
"""
if not svc_arg:
return HandleCommandResult(-errno.EINVAL, stderr=usage)
try:
host_name, raw = svc_arg.split(":")
drive_group_spec = {
'data_devices': []
} # type: Dict
drv_grp_spec_arg = None
values = raw.split(',')
while values:
v = values[0].split(',', 1)[0]
if '=' in v:
drv_grp_spec_arg, value = v.split('=')
if drv_grp_spec_arg in ['data_devices',
'db_devices',
'wal_devices',
'journal_devices']:
drive_group_spec[drv_grp_spec_arg] = []
drive_group_spec[drv_grp_spec_arg].append(value)
else:
drive_group_spec[drv_grp_spec_arg] = value
elif drv_grp_spec_arg is not None:
drive_group_spec[drv_grp_spec_arg].append(v)
else:
drive_group_spec['data_devices'].append(v)
values.remove(v)
for dev_type in ['data_devices', 'db_devices', 'wal_devices', 'journal_devices']:
drive_group_spec[dev_type] = DeviceSelection(
paths=drive_group_spec[dev_type]) if drive_group_spec.get(dev_type) else None
drive_group = DriveGroupSpec(
placement=PlacementSpec(host_pattern=host_name),
method=method,
**drive_group_spec,
)
except (TypeError, KeyError, ValueError) as e:
msg = f"Invalid 'host:device' spec: '{svc_arg}': {e}" + usage
return HandleCommandResult(-errno.EINVAL, stderr=msg)
completion = self.create_osds(drive_group)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch osd rm')
def _osd_rm_start(self,
osd_id: List[str],
replace: bool = False,
force: bool = False,
zap: bool = False,
no_destroy: bool = False) -> HandleCommandResult:
"""Remove OSD daemons"""
completion = self.remove_osds(osd_id, replace=replace, force=force,
zap=zap, no_destroy=no_destroy)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch osd rm stop')
def _osd_rm_stop(self, osd_id: List[str]) -> HandleCommandResult:
"""Cancel ongoing OSD removal operation"""
completion = self.stop_remove_osds(osd_id)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch osd rm status')
    def _osd_rm_status(self, format: Format = Format.plain) -> HandleCommandResult:
        """Status of OSD removal operation"""
        completion = self.remove_osds_status()
        raise_if_exception(completion)
        report = completion.result
        if not report:
            return HandleCommandResult(stdout="No OSD remove/replace operations reported")
        if format != Format.plain:
            return HandleCommandResult(stdout=to_format(report, format, many=True, cls=None))
        else:
            # Plain output: one row per OSD being drained, sorted by id.
            table = PrettyTable(
                ['OSD', 'HOST', 'STATE', 'PGS', 'REPLACE', 'FORCE', 'ZAP',
                 'DRAIN STARTED AT'],
                border=False)
            table.align = 'l'
            # _align is a PrettyTable internal; right-align the PG count.
            table._align['PGS'] = 'r'
            table.left_padding_width = 0
            table.right_padding_width = 2
            for osd in sorted(report, key=lambda o: o.osd_id):
                table.add_row([osd.osd_id, osd.hostname, osd.drain_status_human(),
                               osd.get_pg_count(), osd.replace, osd.force, osd.zap,
                               osd.drain_started_at or ''])
            out = table.get_string()
        return HandleCommandResult(stdout=out)
    @_cli_write_command('orch daemon add')
    def daemon_add_misc(self,
                        daemon_type: Optional[ServiceType] = None,
                        placement: Optional[str] = None,
                        inbuf: Optional[str] = None) -> HandleCommandResult:
        """Add daemon(s)"""
        usage = f"""Usage:
  ceph orch daemon add -i <json_file>
  ceph orch daemon add {daemon_type or '<daemon_type>'} <placement>"""
        if inbuf:
            # Spec file mode: positional arguments are mutually exclusive
            # with -i.
            if daemon_type or placement:
                raise OrchestratorValidationError(usage)
            spec = ServiceSpec.from_json(yaml.safe_load(inbuf))
        else:
            # CLI mode: both the daemon type and a placement are required.
            if not placement or not daemon_type:
                raise OrchestratorValidationError(usage)
            placement_spec = PlacementSpec.from_string(placement)
            spec = ServiceSpec(daemon_type.value, placement=placement_spec)
        return self._daemon_add_misc(spec)
def _daemon_add_misc(self, spec: ServiceSpec) -> HandleCommandResult:
completion = self.add_daemon(spec)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch daemon add mds')
def _mds_add(self,
fs_name: str,
placement: Optional[str] = None,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Start MDS daemon(s)"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = ServiceSpec(
service_type='mds',
service_id=fs_name,
placement=PlacementSpec.from_string(placement),
)
return self._daemon_add_misc(spec)
@_cli_write_command('orch daemon add rgw')
def _rgw_add(self,
svc_id: str,
placement: Optional[str] = None,
_end_positional_: int = 0,
port: Optional[int] = None,
ssl: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Start RGW daemon(s)"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = RGWSpec(
service_id=svc_id,
rgw_frontend_port=port,
ssl=ssl,
placement=PlacementSpec.from_string(placement),
)
return self._daemon_add_misc(spec)
@_cli_write_command('orch daemon add nfs')
def _nfs_add(self,
svc_id: str,
placement: Optional[str] = None,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Start NFS daemon(s)"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = NFSServiceSpec(
service_id=svc_id,
placement=PlacementSpec.from_string(placement),
)
return self._daemon_add_misc(spec)
@_cli_write_command('orch daemon add iscsi')
def _iscsi_add(self,
pool: str,
api_user: str,
api_password: str,
trusted_ip_list: Optional[str] = None,
placement: Optional[str] = None,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Start iscsi daemon(s)"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = IscsiServiceSpec(
service_id='iscsi',
pool=pool,
api_user=api_user,
api_password=api_password,
trusted_ip_list=trusted_ip_list,
placement=PlacementSpec.from_string(placement),
)
return self._daemon_add_misc(spec)
@_cli_write_command('orch')
def _service_action(self, action: ServiceAction, service_name: str) -> HandleCommandResult:
"""Start, stop, restart, redeploy, or reconfig an entire service (i.e. all daemons)"""
completion = self.service_action(action.value, service_name)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch daemon')
def _daemon_action(self, action: DaemonAction, name: str) -> HandleCommandResult:
"""Start, stop, restart, redeploy, reconfig, or rotate-key for a specific daemon"""
if '.' not in name:
raise OrchestratorError('%s is not a valid daemon name' % name)
completion = self.daemon_action(action.value, name)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch daemon redeploy')
def _daemon_action_redeploy(self,
name: str,
image: Optional[str] = None) -> HandleCommandResult:
"""Redeploy a daemon (with a specific image)"""
if '.' not in name:
raise OrchestratorError('%s is not a valid daemon name' % name)
completion = self.daemon_action("redeploy", name, image=image)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch daemon rm')
def _daemon_rm(self,
names: List[str],
force: Optional[bool] = False) -> HandleCommandResult:
"""Remove specific daemon(s)"""
for name in names:
if '.' not in name:
raise OrchestratorError('%s is not a valid daemon name' % name)
(daemon_type) = name.split('.')[0]
if not force and daemon_type in ['osd', 'mon', 'prometheus']:
raise OrchestratorError(
'must pass --force to REMOVE daemon with potentially PRECIOUS DATA for %s' % name)
completion = self.remove_daemons(names)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch rm')
def _service_rm(self,
service_name: str,
force: bool = False) -> HandleCommandResult:
"""Remove a service"""
if service_name in ['mon', 'mgr'] and not force:
raise OrchestratorError('The mon and mgr services cannot be removed')
completion = self.remove_service(service_name, force=force)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
    @_cli_write_command('orch apply')
    def apply_misc(self,
                   service_type: Optional[ServiceType] = None,
                   placement: Optional[str] = None,
                   dry_run: bool = False,
                   format: Format = Format.plain,
                   unmanaged: bool = False,
                   no_overwrite: bool = False,
                   inbuf: Optional[str] = None) -> HandleCommandResult:
        """Update the size or placement for a service or apply a large yaml spec"""
        usage = """Usage:
  ceph orch apply -i <yaml spec> [--dry-run]
  ceph orch apply <service_type> [--placement=<placement_string>] [--unmanaged]
"""
        if inbuf:
            # Spec-file mode: -i is mutually exclusive with the positional /
            # flag arguments.
            if service_type or placement or unmanaged:
                raise OrchestratorValidationError(usage)
            yaml_objs: Iterator = yaml.safe_load_all(inbuf)
            specs: List[Union[ServiceSpec, HostSpec]] = []
            # YAML '---' document separator with no content generates
            # None entries in the output. Let's skip them silently.
            content = [o for o in yaml_objs if o is not None]
            for s in content:
                spec = json_to_generic_spec(s)
                # validate the config (we need MgrModule for that)
                if isinstance(spec, ServiceSpec) and spec.config:
                    for k, v in spec.config.items():
                        try:
                            self.get_foreign_ceph_option('mon', k)
                        except KeyError:
                            raise SpecValidationError(f'Invalid config option {k} in spec')
                # HostSpec has no preview support, so only flag service specs.
                if dry_run and not isinstance(spec, HostSpec):
                    spec.preview_only = dry_run
                # A jaeger-tracing spec expands into multiple component specs.
                if isinstance(spec, TracingSpec) and spec.service_type == 'jaeger-tracing':
                    specs.extend(spec.get_tracing_specs())
                    continue
                specs.append(spec)
        else:
            # CLI mode: a single spec built from service type + placement.
            placementspec = PlacementSpec.from_string(placement)
            if not service_type:
                raise OrchestratorValidationError(usage)
            specs = [ServiceSpec(service_type.value, placement=placementspec,
                                 unmanaged=unmanaged, preview_only=dry_run)]
        return self._apply_misc(specs, dry_run, format, no_overwrite)
def _apply_misc(self, specs: Sequence[GenericSpec], dry_run: bool, format: Format, no_overwrite: bool = False) -> HandleCommandResult:
completion = self.apply(specs, no_overwrite)
raise_if_exception(completion)
out = completion.result_str()
if dry_run:
completion = self.plan(specs)
raise_if_exception(completion)
data = completion.result
if format == Format.plain:
out = generate_preview_tables(data)
else:
out = to_format(data, format, many=True, cls=None)
return HandleCommandResult(stdout=out)
@_cli_write_command('orch apply mds')
def _apply_mds(self,
fs_name: str,
placement: Optional[str] = None,
dry_run: bool = False,
unmanaged: bool = False,
format: Format = Format.plain,
no_overwrite: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Update the number of MDS instances for the given fs_name"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = MDSSpec(
service_type='mds',
service_id=fs_name,
placement=PlacementSpec.from_string(placement),
unmanaged=unmanaged,
preview_only=dry_run)
spec.validate() # force any validation exceptions to be caught correctly
return self._apply_misc([spec], dry_run, format, no_overwrite)
@_cli_write_command('orch apply rgw')
def _apply_rgw(self,
svc_id: str,
placement: Optional[str] = None,
_end_positional_: int = 0,
realm: Optional[str] = None,
zonegroup: Optional[str] = None,
zone: Optional[str] = None,
port: Optional[int] = None,
ssl: bool = False,
dry_run: bool = False,
format: Format = Format.plain,
unmanaged: bool = False,
no_overwrite: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Update the number of RGW instances for the given zone"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
if realm and not zone:
raise OrchestratorValidationError(
'Cannot add RGW: Realm specified but no zone specified')
if zone and not realm:
raise OrchestratorValidationError(
'Cannot add RGW: Zone specified but no realm specified')
spec = RGWSpec(
service_id=svc_id,
rgw_realm=realm,
rgw_zonegroup=zonegroup,
rgw_zone=zone,
rgw_frontend_port=port,
ssl=ssl,
placement=PlacementSpec.from_string(placement),
unmanaged=unmanaged,
preview_only=dry_run
)
spec.validate() # force any validation exceptions to be caught correctly
return self._apply_misc([spec], dry_run, format, no_overwrite)
@_cli_write_command('orch apply nfs')
def _apply_nfs(self,
svc_id: str,
placement: Optional[str] = None,
format: Format = Format.plain,
port: Optional[int] = None,
dry_run: bool = False,
unmanaged: bool = False,
no_overwrite: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Scale an NFS service"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = NFSServiceSpec(
service_id=svc_id,
port=port,
placement=PlacementSpec.from_string(placement),
unmanaged=unmanaged,
preview_only=dry_run
)
spec.validate() # force any validation exceptions to be caught correctly
return self._apply_misc([spec], dry_run, format, no_overwrite)
@_cli_write_command('orch apply iscsi')
def _apply_iscsi(self,
pool: str,
api_user: str,
api_password: str,
trusted_ip_list: Optional[str] = None,
placement: Optional[str] = None,
unmanaged: bool = False,
dry_run: bool = False,
format: Format = Format.plain,
no_overwrite: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Scale an iSCSI service"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = IscsiServiceSpec(
service_id=pool,
pool=pool,
api_user=api_user,
api_password=api_password,
trusted_ip_list=trusted_ip_list,
placement=PlacementSpec.from_string(placement),
unmanaged=unmanaged,
preview_only=dry_run
)
spec.validate() # force any validation exceptions to be caught correctly
return self._apply_misc([spec], dry_run, format, no_overwrite)
@_cli_write_command('orch apply snmp-gateway')
def _apply_snmp_gateway(self,
snmp_version: SNMPGatewaySpec.SNMPVersion,
destination: str,
port: Optional[int] = None,
engine_id: Optional[str] = None,
auth_protocol: Optional[SNMPGatewaySpec.SNMPAuthType] = None,
privacy_protocol: Optional[SNMPGatewaySpec.SNMPPrivacyType] = None,
placement: Optional[str] = None,
unmanaged: bool = False,
dry_run: bool = False,
format: Format = Format.plain,
no_overwrite: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Add a Prometheus to SNMP gateway service (cephadm only)"""
if not inbuf:
raise OrchestratorValidationError(
'missing credential configuration file. Retry with -i <filename>')
try:
# load inbuf
credentials = yaml.safe_load(inbuf)
except (OSError, yaml.YAMLError):
raise OrchestratorValidationError('credentials file must be valid YAML')
spec = SNMPGatewaySpec(
snmp_version=snmp_version,
port=port,
credentials=credentials,
snmp_destination=destination,
engine_id=engine_id,
auth_protocol=auth_protocol,
privacy_protocol=privacy_protocol,
placement=PlacementSpec.from_string(placement),
unmanaged=unmanaged,
preview_only=dry_run
)
spec.validate() # force any validation exceptions to be caught correctly
return self._apply_misc([spec], dry_run, format, no_overwrite)
@_cli_write_command('orch apply jaeger')
def _apply_jaeger(self,
es_nodes: Optional[str] = None,
without_query: bool = False,
placement: Optional[str] = None,
unmanaged: bool = False,
dry_run: bool = False,
format: Format = Format.plain,
no_overwrite: bool = False,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Apply jaeger tracing services"""
if inbuf:
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = TracingSpec(service_type='jaeger-tracing',
es_nodes=es_nodes,
without_query=without_query,
placement=PlacementSpec.from_string(placement),
unmanaged=unmanaged)
specs: List[ServiceSpec] = spec.get_tracing_specs()
return self._apply_misc(specs, dry_run, format, no_overwrite)
@_cli_write_command('orch set-unmanaged')
def _set_unmanaged(self, service_name: str) -> HandleCommandResult:
"""Set 'unmanaged: true' for the given service name"""
completion = self.set_unmanaged(service_name, True)
raise_if_exception(completion)
out = completion.result_str()
return HandleCommandResult(stdout=out)
@_cli_write_command('orch set-managed')
def _set_managed(self, service_name: str) -> HandleCommandResult:
"""Set 'unmanaged: false' for the given service name"""
completion = self.set_unmanaged(service_name, False)
raise_if_exception(completion)
out = completion.result_str()
return HandleCommandResult(stdout=out)
    @_cli_write_command('orch set backend')
    def _set_backend(self, module_name: Optional[str] = None) -> HandleCommandResult:
        """
        Select orchestrator module backend
        """
        # We implement a setter command instead of just having the user
        # modify the setting directly, so that we can validate they're setting
        # it to a module that really exists and is enabled.
        # There isn't a mechanism for ensuring they don't *disable* the module
        # later, but this is better than nothing.
        mgr_map = self.get("mgr_map")
        # Empty/absent name clears the backend selection.
        if module_name is None or module_name == "":
            self.set_module_option("orchestrator", None)
            return HandleCommandResult()
        for module in mgr_map['available_modules']:
            if module['name'] != module_name:
                continue
            # A matching module that cannot run falls through to the final
            # "Module not found" error below.
            if not module['can_run']:
                continue
            enabled = module['name'] in mgr_map['modules']
            if not enabled:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="Module '{module_name}' is not enabled. \n Run "
                                                  "`ceph mgr module enable {module_name}` "
                                                  "to enable.".format(module_name=module_name))
            try:
                # Ask the module itself whether it implements the
                # orchestrator interface.
                is_orchestrator = self.remote(module_name,
                                              "is_orchestrator_module")
            except NameError:
                is_orchestrator = False
            if not is_orchestrator:
                return HandleCommandResult(-errno.EINVAL,
                                           stderr="'{0}' is not an orchestrator module".format(module_name))
            self.set_module_option("orchestrator", module_name)
            return HandleCommandResult()
        return HandleCommandResult(-errno.EINVAL, stderr="Module '{0}' not found".format(module_name))
@_cli_write_command('orch pause')
def _pause(self) -> HandleCommandResult:
"""Pause orchestrator background work"""
self.pause()
return HandleCommandResult()
@_cli_write_command('orch resume')
def _resume(self) -> HandleCommandResult:
"""Resume orchestrator background work (if paused)"""
self.resume()
return HandleCommandResult()
@_cli_write_command('orch cancel')
def _cancel(self) -> HandleCommandResult:
"""
Cancel ongoing background operations
"""
self.cancel_completions()
return HandleCommandResult()
    @_cli_read_command('orch status')
    def _status(self,
                detail: bool = False,
                format: Format = Format.plain) -> HandleCommandResult:
        """Report configured backend and its status"""
        o = self._select_orchestrator()
        if o is None:
            raise NoOrchestrator()
        avail, why, module_details = self.available()
        result: Dict[str, Any] = {
            "available": avail,
            "backend": o,
        }
        if avail:
            # Backend-specific details (e.g. 'paused', 'workers') are merged
            # into the report when the backend is up.
            result.update(module_details)
        else:
            result['reason'] = why
        if format != Format.plain:
            output = to_format(result, format, many=False, cls=None)
        else:
            output = "Backend: {0}".format(result['backend'])
            output += f"\nAvailable: {'Yes' if result['available'] else 'No'}"
            if 'reason' in result:
                output += ' ({0})'.format(result['reason'])
            if 'paused' in result:
                output += f"\nPaused: {'Yes' if result['paused'] else 'No'}"
            # Worker count is only shown with --detail.
            if 'workers' in result and detail:
                output += f"\nHost Parallelism: {result['workers']}"
        return HandleCommandResult(stdout=output)
    @_cli_write_command('orch tuned-profile apply')
    def _apply_tuned_profiles(self,
                              profile_name: Optional[str] = None,
                              placement: Optional[str] = None,
                              settings: Optional[str] = None,
                              no_overwrite: bool = False,
                              inbuf: Optional[str] = None) -> HandleCommandResult:
        """Add or update a tuned profile"""
        usage = """Usage:
  ceph orch tuned-profile apply -i <yaml spec>
  ceph orch tuned-profile apply <profile_name> [--placement=<placement_string>] [--settings='option=value,option2=value2']
"""
        if inbuf:
            # Spec-file mode: -i is mutually exclusive with the CLI arguments.
            if profile_name or placement or settings:
                raise OrchestratorValidationError(usage)
            yaml_objs: Iterator = yaml.safe_load_all(inbuf)
            specs: List[TunedProfileSpec] = []
            # YAML '---' document separator with no content generates
            # None entries in the output. Let's skip them silently.
            content = [o for o in yaml_objs if o is not None]
            for spec in content:
                specs.append(TunedProfileSpec.from_json(spec))
        else:
            # CLI mode: build a single profile from the name, an optional
            # placement (default: all hosts) and 'k=v,k2=v2' settings.
            if not profile_name:
                raise OrchestratorValidationError(usage)
            placement_spec = PlacementSpec.from_string(
                placement) if placement else PlacementSpec(host_pattern='*')
            settings_dict = {}
            if settings:
                settings_list = settings.split(',')
                for setting in settings_list:
                    if '=' not in setting:
                        raise SpecValidationError('settings defined on cli for tuned profile must '
                                                  + 'be of format "setting_name=value,setting_name2=value2" etc.')
                    # Split only on the first '=' so values may contain '='.
                    name, value = setting.split('=', 1)
                    settings_dict[name.strip()] = value.strip()
            tuned_profile_spec = TunedProfileSpec(
                profile_name=profile_name, placement=placement_spec, settings=settings_dict)
            specs = [tuned_profile_spec]
        completion = self.apply_tuned_profiles(specs, no_overwrite)
        res = raise_if_exception(completion)
        return HandleCommandResult(stdout=res)
    @_cli_write_command('orch tuned-profile rm')
    def _rm_tuned_profiles(self, profile_name: str) -> HandleCommandResult:
        # Delete the named tuned profile via the orchestrator backend.
        completion = self.rm_tuned_profile(profile_name)
        res = raise_if_exception(completion)
        return HandleCommandResult(stdout=res)
@_cli_read_command('orch tuned-profile ls')
def _tuned_profile_ls(self, format: Format = Format.plain) -> HandleCommandResult:
completion = self.tuned_profile_ls()
profiles: List[TunedProfileSpec] = raise_if_exception(completion)
if format != Format.plain:
return HandleCommandResult(stdout=to_format(profiles, format, many=True, cls=TunedProfileSpec))
else:
out = ''
for profile in profiles:
out += f'profile_name: {profile.profile_name}\n'
out += f'placement: {profile.placement.pretty_str()}\n'
out += 'settings:\n'
for k, v in profile.settings.items():
out += f' {k}: {v}\n'
out += '---\n'
return HandleCommandResult(stdout=out)
    @_cli_write_command('orch tuned-profile add-setting')
    def _tuned_profile_add_setting(self, profile_name: str, setting: str, value: str) -> HandleCommandResult:
        # Add (or overwrite) a single setting on an existing tuned profile.
        completion = self.tuned_profile_add_setting(profile_name, setting, value)
        res = raise_if_exception(completion)
        return HandleCommandResult(stdout=res)
    @_cli_write_command('orch tuned-profile rm-setting')
    def _tuned_profile_rm_setting(self, profile_name: str, setting: str) -> HandleCommandResult:
        # Remove a single setting from an existing tuned profile.
        completion = self.tuned_profile_rm_setting(profile_name, setting)
        res = raise_if_exception(completion)
        return HandleCommandResult(stdout=res)
    def self_test(self) -> None:
        # Exercised by 'ceph mgr self-test'; sanity-checks backend selection,
        # the fail_fs module option round trip, and cross-module exception
        # propagation through raise_if_exception().
        old_orch = self._select_orchestrator()
        self._set_backend('')
        assert self._select_orchestrator() is None
        self._set_backend(old_orch)
        old_fs_fail_value = self._get_fail_fs_value()
        self.set_module_option("fail_fs", True)
        assert self._get_fail_fs_value() is True
        self.set_module_option("fail_fs", False)
        assert self._get_fail_fs_value() is False
        # Restore the user's original setting.
        self.set_module_option("fail_fs", old_fs_fail_value)
        e1 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "ZeroDivisionError")
        try:
            raise_if_exception(e1)
            assert False
        except ZeroDivisionError as e:
            assert e.args == ('hello, world',)
        e2 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "OrchestratorError")
        try:
            raise_if_exception(e2)
            assert False
        except OrchestratorError as e:
            assert e.args == ('hello, world',)
    @staticmethod
    def _upgrade_check_image_name(image: Optional[str], ceph_version: Optional[str]) -> None:
        """
        >>> OrchestratorCli._upgrade_check_image_name('v15.2.0', None)
        Traceback (most recent call last):
        orchestrator._interface.OrchestratorValidationError: Error: unable to pull image name `v15.2.0`.
        Maybe you meant `--ceph-version 15.2.0`?
        """
        # A bare version string (e.g. "15.2.0" or "v15.2.0") given as --image
        # is almost certainly a mistake; point the user at --ceph-version.
        if image and re.match(r'^v?\d+\.\d+\.\d+$', image) and ceph_version is None:
            ver = image[1:] if image.startswith('v') else image
            s = f"Error: unable to pull image name `{image}`.\n" \
                f"  Maybe you meant `--ceph-version {ver}`?"
            raise OrchestratorValidationError(s)
@_cli_write_command('orch upgrade check')
def _upgrade_check(self,
image: Optional[str] = None,
ceph_version: Optional[str] = None) -> HandleCommandResult:
"""Check service versions vs available and target containers"""
self._upgrade_check_image_name(image, ceph_version)
completion = self.upgrade_check(image=image, version=ceph_version)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_read_command('orch upgrade ls')
def _upgrade_ls(self,
image: Optional[str] = None,
tags: bool = False,
show_all_versions: Optional[bool] = False
) -> HandleCommandResult:
"""Check for available versions (or tags) we can upgrade to"""
completion = self.upgrade_ls(image, tags, show_all_versions)
r = raise_if_exception(completion)
out = json.dumps(r, indent=4)
return HandleCommandResult(stdout=out)
@_cli_write_command('orch upgrade status')
def _upgrade_status(self) -> HandleCommandResult:
"""Check the status of any potential ongoing upgrade operation"""
completion = self.upgrade_status()
status = raise_if_exception(completion)
r = {
'target_image': status.target_image,
'in_progress': status.in_progress,
'which': status.which,
'services_complete': status.services_complete,
'progress': status.progress,
'message': status.message,
'is_paused': status.is_paused,
}
out = json.dumps(r, indent=4)
return HandleCommandResult(stdout=out)
@_cli_write_command('orch upgrade start')
def _upgrade_start(self,
image: Optional[str] = None,
_end_positional_: int = 0,
daemon_types: Optional[str] = None,
hosts: Optional[str] = None,
services: Optional[str] = None,
limit: Optional[int] = None,
ceph_version: Optional[str] = None) -> HandleCommandResult:
"""Initiate upgrade"""
self._upgrade_check_image_name(image, ceph_version)
dtypes = daemon_types.split(',') if daemon_types is not None else None
service_names = services.split(',') if services is not None else None
completion = self.upgrade_start(image, ceph_version, dtypes, hosts, service_names, limit)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch upgrade pause')
def _upgrade_pause(self) -> HandleCommandResult:
"""Pause an in-progress upgrade"""
completion = self.upgrade_pause()
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch upgrade resume')
def _upgrade_resume(self) -> HandleCommandResult:
"""Resume paused upgrade"""
completion = self.upgrade_resume()
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command('orch upgrade stop')
def _upgrade_stop(self) -> HandleCommandResult:
"""Stop an in-progress upgrade"""
completion = self.upgrade_stop()
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
| 73,342 | 40.36661 | 138 | py |
null | ceph-main/src/pybind/mgr/orchestrator/tests/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/pybind/mgr/orchestrator/tests/test_orchestrator.py |
import json
import textwrap
import pytest
import yaml
from ceph.deployment.hostspec import HostSpec
from ceph.deployment.inventory import Devices, Device
from ceph.deployment.service_spec import ServiceSpec
from ceph.deployment import inventory
from ceph.utils import datetime_now
from mgr_module import HandleCommandResult
from test_orchestrator import TestOrchestrator as _TestOrchestrator
from orchestrator import InventoryHost, DaemonDescription, ServiceDescription, DaemonDescriptionStatus, OrchResult
from orchestrator import OrchestratorValidationError
from orchestrator.module import to_format, Format, OrchestratorCli, preview_table_osd
from unittest import mock
def _test_resource(data, resource_class, extra=None):
    """Round-trip *data* through *resource_class* and check stability.

    If *extra* is given, also verify that unknown keys are rejected with
    an OrchestratorValidationError.
    """
    # Deserialize -> serialize -> deserialize must be lossless.
    obj = resource_class.from_json(data)
    round_tripped = resource_class.from_json(obj.to_json())
    assert obj.to_json() == round_tripped.to_json()

    if extra:
        # Inject unexpected keys; parsing must now fail.
        polluted = dict(data)
        polluted.update(extra)
        with pytest.raises(OrchestratorValidationError):
            resource_class.from_json(polluted)
def test_inventory():
    """InventoryHost and Device round-trip JSON; incomplete hosts are rejected."""
    # A minimal but complete host payload with one device.
    json_data = {
        'name': 'host0',
        'addr': '1.2.3.4',
        'devices': [
            {
                'sys_api': {
                    'rotational': '1',
                    'size': 1024,
                },
                'path': '/dev/sda',
                'available': False,
                'rejected_reasons': [],
                'lvs': []
            }
        ]
    }
    # Host round-trips; an unexpected extra key must be rejected.
    _test_resource(json_data, InventoryHost, {'abc': False})
    for devices in json_data['devices']:
        _test_resource(devices, inventory.Device)
    # Payloads missing required fields must raise a validation error.
    json_data = [{}, {'name': 'host0', 'addr': '1.2.3.4'}, {'devices': []}]
    for data in json_data:
        with pytest.raises(OrchestratorValidationError):
            InventoryHost.from_json(data)
def test_daemon_description():
    """DaemonDescription round-trips JSON and maps status -1 to 'error'."""
    payload = {
        'hostname': 'test',
        'daemon_type': 'mon',
        'daemon_id': 'a',
        'status': -1,
    }
    _test_resource(payload, DaemonDescription, {'abc': False})
    # A raw status of -1 must be interpreted as the error state.
    parsed = DaemonDescription.from_json(payload)
    assert parsed.status.value == DaemonDescriptionStatus.error.value
def test_apply():
    """apply() yields one result string per submitted spec."""
    orch = _TestOrchestrator('', 0, 0)
    # Three identical specs -> three identical result strings.
    specs = [ServiceSpec(service_type='nfs', service_id='foo')
             for _ in range(3)]
    completion = orch.apply(specs)
    expected = '<NFSServiceSpec for service_name=nfs.foo>'
    assert completion.result == [expected] * 3
def test_yaml():
    """DaemonDescription/ServiceDescription survive YAML and JSON round-trips."""
    y = """daemon_type: crash
daemon_id: ubuntu
daemon_name: crash.ubuntu
hostname: ubuntu
status: 1
status_desc: starting
is_active: false
events:
- 2020-06-10T10:08:22.933241Z daemon:crash.ubuntu [INFO] "Deployed crash.ubuntu on
  host 'ubuntu'"
---
service_type: crash
service_name: crash
placement:
  host_pattern: '*'
status:
  container_image_id: 74803e884bea289d2d2d3ebdf6d37cd560499e955595695b1390a89800f4e37a
  container_image_name: docker.io/ceph/daemon-base:latest-master-devel
  created: '2020-06-10T10:37:31.051288Z'
  last_refresh: '2020-06-10T10:57:40.715637Z'
  running: 1
  size: 1
events:
- 2020-06-10T10:37:31.139159Z service:crash [INFO] "service was created"
"""
    types = (DaemonDescription, ServiceDescription)
    # NOTE: the loop variables previously shadowed the source string `y`
    # and the builtin `object`; use distinct names instead.
    for doc, cls in zip(y.split('---\n'), types):
        data = yaml.safe_load(doc)
        obj = cls.from_json(data)
        # YAML rendering of a parsed document matches the original text,
        # both as a single object and as a one-element list.
        assert to_format(obj, Format.yaml, False, cls) == doc
        assert to_format([obj], Format.yaml, True, cls) == doc
        # JSON round-trip also reproduces the same YAML.
        j = json.loads(to_format(obj, Format.json, False, cls))
        assert to_format(cls.from_json(j), Format.yaml, False, cls) == doc
def test_event_multiline():
    """OrchestratorEvent serialization handles embedded newlines."""
    from .._interface import OrchestratorEvent
    # Round-trip both a single-line and a multi-line message.
    for message in ('message', 'multiline\nmessage'):
        event = OrchestratorEvent(datetime_now(), 'service', 'subject',
                                  'ERROR', message)
        assert OrchestratorEvent.from_json(event.to_json()) == event
def test_handle_command():
    """With no backend configured, commands fail with a helpful message."""
    cmd = {
        'prefix': 'orch daemon add',
        'daemon_type': 'mon',
        'placement': 'smithi044:[v2:172.21.15.44:3301,v1:172.21.15.44:6790]=c',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    # -2 == -ENOENT: no orchestrator backend has been selected.
    assert r == HandleCommandResult(
        retval=-2, stdout='', stderr='No orchestrator configured (try `ceph orch set backend`)')
# Canned describe_service() result shared by the mocked `orch ls` tests below.
r = OrchResult([ServiceDescription(spec=ServiceSpec(service_type='osd'), running=123)])
@mock.patch("orchestrator.OrchestratorCli.describe_service", return_value=r)
def test_orch_ls(_describe_service):
    """`orch ls` renders the mocked service list as a table and as YAML."""
    cmd = {
        'prefix': 'orch ls',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    # Plain-text table output (whitespace-exact comparison).
    out = 'NAME  PORTS  RUNNING  REFRESHED  AGE  PLACEMENT  \n' \
          'osd                123  -          -    '
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
    cmd = {
        'prefix': 'orch ls',
        'format': 'yaml',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    # YAML output of the same service description.
    out = textwrap.dedent("""
        service_type: osd
        service_name: osd
        spec:
          filter_logic: AND
          objectstore: bluestore
        status:
          running: 123
          size: 0
        """).lstrip()
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
# Daemon ids deliberately out of natural order (1, 10, 2) to exercise sorting.
dlist = OrchResult([DaemonDescription(daemon_type="osd", daemon_id="1"), DaemonDescription(
    daemon_type="osd", daemon_id="10"), DaemonDescription(daemon_type="osd", daemon_id="2")])
@mock.patch("orchestrator.OrchestratorCli.list_daemons", return_value=dlist)
def test_orch_ps(_describe_service):
    # Ensure natural sorting on daemon names (osd.1, osd.2, osd.10)
    cmd = {
        'prefix': 'orch ps'
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    expected_out = 'NAME    HOST       PORTS  STATUS   REFRESHED  AGE  MEM USE  MEM LIM  VERSION    IMAGE ID   \n'\
                   'osd.1   <unknown>         unknown          -    -        -        -  <unknown>  <unknown>  \n'\
                   'osd.2   <unknown>         unknown          -    -        -        -  <unknown>  <unknown>  \n'\
                   'osd.10  <unknown>         unknown          -    -        -        -  <unknown>  <unknown>  '
    # Compare only alphabetic characters so column padding differences
    # don't matter; the letter sequence still pins the row order.
    expected_out = [c for c in expected_out if c.isalpha()]
    actual_out = [c for c in r.stdout if c.isalpha()]
    assert r.retval == 0
    assert expected_out == actual_out
    assert r.stderr == ''
# Hostnames deliberately out of natural order (1, 2, 10) to exercise sorting.
hlist = OrchResult([HostSpec("ceph-node-1"), HostSpec("ceph-node-2"), HostSpec("ceph-node-10")])
@mock.patch("orchestrator.OrchestratorCli.get_hosts", return_value=hlist)
def test_orch_host_ls(_describe_service):
    # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
    cmd = {
        'prefix': 'orch host ls'
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    expected_out = 'HOST          ADDR          LABELS  STATUS  \n'\
                   'ceph-node-1   ceph-node-1                   \n'\
                   'ceph-node-2   ceph-node-2                   \n'\
                   'ceph-node-10  ceph-node-10                  \n'\
                   '3 hosts in cluster'
    # Letters-only comparison: row ordering matters, padding doesn't.
    expected_out = [c for c in expected_out if c.isalpha()]
    actual_out = [c for c in r.stdout if c.isalpha()]
    assert r.retval == 0
    assert expected_out == actual_out
    assert r.stderr == ''
def test_orch_device_ls():
    """`orch device ls` lists hosts in natural sort order."""
    devices = Devices([Device("/dev/vdb", available=True)])
    ilist = OrchResult([InventoryHost("ceph-node-1", devices=devices), InventoryHost("ceph-node-2",
                       devices=devices), InventoryHost("ceph-node-10", devices=devices)])
    with mock.patch("orchestrator.OrchestratorCli.get_inventory", return_value=ilist):
        # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
        cmd = {
            'prefix': 'orch device ls'
        }
        m = OrchestratorCli('orchestrator', 0, 0)
        r = m._handle_command(None, cmd)
        expected_out = 'HOST          PATH      TYPE     DEVICE ID  SIZE  AVAILABLE  REFRESHED  REJECT REASONS  \n'\
                       'ceph-node-1   /dev/vdb  unknown  None          0  Yes        0s ago                     \n'\
                       'ceph-node-2   /dev/vdb  unknown  None          0  Yes        0s ago                     \n'\
                       'ceph-node-10  /dev/vdb  unknown  None          0  Yes        0s ago                     '
        # Letters-only comparison: row ordering matters, padding doesn't.
        expected_out = [c for c in expected_out if c.isalpha()]
        actual_out = [c for c in r.stdout if c.isalpha()]
        assert r.retval == 0
        assert expected_out == actual_out
        assert r.stderr == ''
def test_preview_table_osd_smoke():
    """Smoke test: preview_table_osd() accepts a realistic preview payload."""
    # Shape mirrors what the orchestrator returns for an OSD spec preview:
    # per-host lists of per-spec device layouts.
    data = [
        {
            'service_type': 'osd',
            'data':
            {
                'foo host':
                [
                    {
                        'osdspec': 'foo',
                        'error': '',
                        'data':
                        [
                            {
                                "block_db": "/dev/nvme0n1",
                                "block_db_size": "66.67 GB",
                                "data": "/dev/sdb",
                                "data_size": "300.00 GB",
                                "encryption": "None"
                            },
                            {
                                "block_db": "/dev/nvme0n1",
                                "block_db_size": "66.67 GB",
                                "data": "/dev/sdc",
                                "data_size": "300.00 GB",
                                "encryption": "None"
                            },
                            {
                                "block_db": "/dev/nvme0n1",
                                "block_db_size": "66.67 GB",
                                "data": "/dev/sdd",
                                "data_size": "300.00 GB",
                                "encryption": "None"
                            }
                        ]
                    }
                ]
            }
        }
    ]
    # Only checks that rendering does not raise.
    preview_table_osd(data)
| 10,480 | 34.771331 | 117 | py |
null | ceph-main/src/pybind/mgr/osd_perf_query/__init__.py | from .module import OSDPerfQuery
| 33 | 16 | 32 | py |
null | ceph-main/src/pybind/mgr/osd_perf_query/module.py |
"""
osd_perf_query module
"""
from itertools import groupby
from time import time
import errno
import prettytable
from mgr_module import MgrModule
def get_human_readable(bytes, precision=2):
    """Format a byte count as a short human-readable string, e.g. '1.50Ki'.

    ``bytes`` may be an int or float; ``precision`` is the number of decimal
    places printed.  Values are scaled in steps of 1024 up to 'Ti'.
    """
    suffixes = ['', 'Ki', 'Mi', 'Gi', 'Ti']
    suffix_index = 0
    # Use >= so that exact multiples of 1024 scale up: 1024 renders as
    # '1.00Ki' instead of the inconsistent '1024.00' produced by the old
    # strict comparison (while 1025 already rendered as '1.00Ki').
    while bytes >= 1024 and suffix_index < 4:
        # increment the index of the suffix
        suffix_index += 1
        # apply the division
        bytes = bytes / 1024.0
    return '%.*f%s' % (precision, bytes, suffixes[suffix_index])
class OSDPerfQuery(MgrModule):
    """Expose `osd perf query` commands for ad-hoc OSD performance queries.

    Queries are registered with the OSDs via add_osd_perf_query(); their
    accumulated counters are later fetched and rendered as a table.
    """
    COMMANDS = [
        {
            "cmd": "osd perf query add "
                   "name=query,type=CephChoices,"
                   "strings=client_id|rbd_image_id|all_subkeys",
            "desc": "add osd perf query",
            "perm": "w"
        },
        {
            "cmd": "osd perf query remove "
                   "name=query_id,type=CephInt,req=true",
            "desc": "remove osd perf query",
            "perm": "w"
        },
        {
            "cmd": "osd perf counters get "
                   "name=query_id,type=CephInt,req=true",
            "desc": "fetch osd perf counters",
            "perm": "w"
        },
    ]

    # Top-10 clients by bytes, keyed on client id.
    CLIENT_ID_QUERY = {
        'key_descriptor': [
            {'type': 'client_id', 'regex': '^(.+)$'},
        ],
        'performance_counter_descriptors': [
            'bytes', 'write_ops', 'read_ops', 'write_bytes', 'read_bytes',
            'write_latency', 'read_latency',
        ],
        'limit': {'order_by': 'bytes', 'max_count': 10},
    }

    # Top-10 RBD images by bytes; the object_name regex extracts the
    # (optional) pool id and the image id from RBD data/journal objects.
    RBD_IMAGE_ID_QUERY = {
        'key_descriptor': [
            {'type': 'pool_id', 'regex': '^(.+)$'},
            {'type': 'object_name',
             'regex': '^(?:rbd|journal)_data\.(?:([0-9]+)\.)?([^.]+)\.'},
        ],
        'performance_counter_descriptors': [
            'bytes', 'write_ops', 'read_ops', 'write_bytes', 'read_bytes',
            'write_latency', 'read_latency',
        ],
        'limit': {'order_by': 'bytes', 'max_count': 10},
    }

    # Unlimited query keyed on every available subkey (debug/exploration).
    ALL_SUBKEYS_QUERY = {
        'key_descriptor': [
            {'type': 'client_id', 'regex': '^(.*)$'},
            {'type': 'client_address', 'regex': '^(.*)$'},
            {'type': 'pool_id', 'regex': '^(.*)$'},
            {'type': 'namespace', 'regex': '^(.*)$'},
            {'type': 'osd_id', 'regex': '^(.*)$'},
            {'type': 'pg_id', 'regex': '^(.*)$'},
            {'type': 'object_name', 'regex': '^(.*)$'},
            {'type': 'snap_id', 'regex': '^(.*)$'},
        ],
        'performance_counter_descriptors': [
            'write_ops', 'read_ops',
        ],
    }

    # query_id -> [query dict, time of last counter fetch].
    # NOTE(review): class-level mutable — shared state; fine while the mgr
    # instantiates this module exactly once, but worth confirming.
    queries = {}

    def handle_command(self, inbuf, cmd):
        """Dispatch the module's commands; returns (retval, stdout, stderr)."""
        if cmd['prefix'] == "osd perf query add":
            # Select one of the canned query descriptions.
            if cmd['query'] == 'rbd_image_id':
                query = self.RBD_IMAGE_ID_QUERY
            elif cmd['query'] == 'client_id':
                query = self.CLIENT_ID_QUERY
            else:
                query = self.ALL_SUBKEYS_QUERY
            query_id = self.add_osd_perf_query(query)
            if query_id is None:
                return -errno.EINVAL, "", "Invalid query"
            # Remember when the query started so rates can be computed.
            self.queries[query_id] = [query, time()]
            return 0, str(query_id), "added query " + cmd['query'] + " with id " + str(query_id)
        elif cmd['prefix'] == "osd perf query remove":
            if cmd['query_id'] not in self.queries:
                return -errno.ENOENT, "", "unknown query id " + str(cmd['query_id'])
            self.remove_osd_perf_query(cmd['query_id'])
            del self.queries[cmd['query_id']]
            return 0, "", "removed query with id " + str(cmd['query_id'])
        elif cmd['prefix'] == "osd perf counters get":
            if cmd['query_id'] not in self.queries:
                return -errno.ENOENT, "", "unknown query id " + str(cmd['query_id'])

            query = self.queries[cmd['query_id']][0]
            res = self.get_osd_perf_counters(cmd['query_id'])
            now = time()
            last_update = self.queries[cmd['query_id']][1]
            descriptors = query['performance_counter_descriptors']

            # Build the table header; 'bytes' is used only for ordering,
            # byte counters become rates, latencies become milliseconds.
            if query == self.RBD_IMAGE_ID_QUERY:
                column_names = ["POOL_ID", "RBD IMAGE ID"]
            else:
                column_names = [sk['type'].upper() for sk in query['key_descriptor']]
            for d in descriptors:
                desc = d
                if d in ['bytes']:
                    continue
                elif d in ['write_bytes', 'read_bytes']:
                    desc += '/sec'
                elif d in ['write_latency', 'read_latency']:
                    desc += '(msec)'
                column_names.append(desc.upper())

            table = prettytable.PrettyTable(tuple(column_names),
                                            hrules=prettytable.FRAME)
            table.left_padding_width = 0
            table.right_padding_width = 2

            if query == self.RBD_IMAGE_ID_QUERY:
                # typical output:
                # {'k': [['3'], ['', '16fe5b5a8435e']],
                #  'c': [[1024, 0], [1, 0], ...]}

                # pool id fixup: if the object_name regex has matched pool id
                # use it as the image pool id
                for c in res['counters']:
                    if c['k'][1][0]:
                        c['k'][0][0] = c['k'][1][0]

                # group by (pool_id, image_id), summing counters across
                # the objects that belong to the same image.
                processed = []
                res['counters'].sort(key=lambda c: [c['k'][0][0], c['k'][1][1]])
                for key, group in groupby(res['counters'],
                                          lambda c: [c['k'][0][0], c['k'][1][1]]):
                    counters = [[0, 0] for x in descriptors]
                    for c in group:
                        for i in range(len(counters)):
                            counters[i][0] += c['c'][i][0]
                            counters[i][1] += c['c'][i][1]
                    processed.append({'k' : key, 'c' : counters})
            else:
                # typical output:
                # {'k': [['client.94348']], 'c': [[1024, 0], [1, 0], ...]}
                processed = res['counters']

            # Apply the query's ordering/limit, if any.
            max_count = len(processed)
            if 'limit' in query:
                if 'max_count' in query['limit']:
                    max_count = query['limit']['max_count']
                if 'order_by' in query['limit']:
                    i = descriptors.index(query['limit']['order_by'])
                    processed.sort(key=lambda x: x['c'][i][0], reverse=True)

            for c in processed[:max_count]:
                if query == self.RBD_IMAGE_ID_QUERY:
                    row = c['k']
                else:
                    row = [sk[0] for sk in c['k']]
                counters = c['c']
                for i in range(len(descriptors)):
                    if descriptors[i] in ['bytes']:
                        continue
                    elif descriptors[i] in ['write_bytes', 'read_bytes']:
                        # Convert to bytes/sec over the interval since the
                        # previous fetch.
                        bps = counters[i][0] / (now - last_update)
                        row.append(get_human_readable(bps))
                    elif descriptors[i] in ['write_latency', 'read_latency']:
                        # Counter stores [total nsec, op count]; average
                        # and convert nanoseconds -> milliseconds.
                        lat = 0
                        if counters[i][1] > 0:
                            lat = 1.0 * counters[i][0] / counters[i][1] / 1000000
                        row.append("%.2f" % lat)
                    else:
                        row.append("%d" % counters[i][0])
                table.add_row(row)

            msg = "counters for the query id %d for the last %d sec" % \
                (cmd['query_id'], now - last_update)
            # Reset the interval start for the next rate computation.
            self.queries[cmd['query_id']][1] = now
            return 0, table.get_string() + "\n", msg
        else:
            raise NotImplementedError(cmd['prefix'])
| 7,796 | 38.57868 | 96 | py |
null | ceph-main/src/pybind/mgr/osd_support/__init__.py | from .module import OSDSupport
| 31 | 15 | 30 | py |
null | ceph-main/src/pybind/mgr/osd_support/module.py | from mgr_module import MgrModule
class OSDSupport(MgrModule):
    """Stub module kept so upgrades from older Octopus point releases work.

    This module can be removed as soon as we no longer support upgrades
    from old Octopus point releases.  On the other hand, if you find a use
    for this module, feel free to use it!
    """
    COMMANDS = []
    # These were previously written as bare annotations ("MODULE_OPTIONS: []"),
    # which only annotate the names without assigning anything.  Use real
    # assignments so the class actually exposes empty option lists, matching
    # COMMANDS above.
    MODULE_OPTIONS = []
    NATIVE_OPTIONS = []

    def __init__(self, *args, **kwargs):
        super(OSDSupport, self).__init__(*args, **kwargs)
null | ceph-main/src/pybind/mgr/pg_autoscaler/__init__.py | import os
if 'UNITTEST' in os.environ:
import tests
from .module import PgAutoscaler, effective_target_ratio
| 115 | 15.571429 | 56 | py |
null | ceph-main/src/pybind/mgr/pg_autoscaler/module.py | """
Automatically scale pg_num based on how much data is stored in each pool.
"""
import json
import mgr_util
import threading
from typing import Any, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union
import uuid
from prettytable import PrettyTable
from mgr_module import HealthChecksT, CLIReadCommand, CLIWriteCommand, CRUSHMap, MgrModule, Option, OSDMap
"""
Some terminology is made up for the purposes of this module:
- "raw pgs": pg count after applying replication, i.e. the real resource
consumption of a pool.
- "grow/shrink" - increase/decrease the pg_num in a pool
- "crush subtree" - non-overlapping domains in crush hierarchy: used as
units of resource management.
"""
INTERVAL = 5
# Default lower bound for a pool's pg_num; a pool's own pg_num_min option
# overrides this (see _calc_final_pg_target).
PG_NUM_MIN = 32  # unless specified on a per-pool basis
if TYPE_CHECKING:
    import sys
    # 'Literal' moved into typing in Python 3.8; older interpreters need
    # typing_extensions.  Only evaluated by type checkers.
    if sys.version_info >= (3, 8):
        from typing import Literal
    else:
        from typing_extensions import Literal
    # Names the three passes of _get_pool_pg_targets().
    PassT = Literal['first', 'second', 'third']
def nearest_power_of_two(n: int) -> int:
    """Return the power of two closest to *n*; ties round up.

    *n* may be a float (pg targets are fractional); the bracketing powers
    are derived from int(n) but the distance comparison uses *n* itself.
    The previous bit-twiddling fill only handled 32-bit values;
    int.bit_length() works for arbitrary magnitudes.
    """
    truncated = int(n)
    # Smallest power of two >= int(n); for int(n) <= 1 this is 1.
    upper = 1 << max(truncated - 1, 0).bit_length()
    lower = upper >> 1
    # Pick whichever bound is nearer; on a tie prefer the larger one.
    return lower if (upper - n) > (n - lower) else upper
def effective_target_ratio(target_ratio: float,
                           total_target_ratio: float,
                           total_target_bytes: int,
                           capacity: int) -> float:
    """
    Returns the target ratio after normalizing for ratios across pools and
    adjusting for capacity reserved by pools that have target_size_bytes set.
    """
    ratio = float(target_ratio)
    # Normalize so that all pools' target ratios sum to at most 1.
    if total_target_ratio:
        ratio = ratio / total_target_ratio
    # Scale down by the fraction of capacity not already reserved via
    # target_size_bytes (clamped so reservations can't go negative).
    if total_target_bytes and capacity:
        reserved_fraction = min(1.0, float(total_target_bytes) / capacity)
        ratio *= 1.0 - reserved_fraction
    return ratio
class PgAdjustmentProgress(object):
    """
    Keeps the initial and target pg_num values
    """
    def __init__(self, pool_id: int, pg_num: int, pg_num_target: int) -> None:
        # Handle used to address this event in the progress module.
        self.ev_id = str(uuid.uuid4())
        self.pool_id = pool_id
        self.reset(pg_num, pg_num_target)

    def reset(self, pg_num: int, pg_num_target: int) -> None:
        """Record a new starting point and goal for the adjustment."""
        self.pg_num = pg_num
        self.pg_num_target = pg_num_target

    def update(self, module: MgrModule, progress: float) -> None:
        """Push the current completion fraction to the progress module."""
        if self.pg_num < self.pg_num_target:
            desc = 'increasing'
        else:
            desc = 'decreasing'
        module.remote('progress', 'update', self.ev_id,
                      ev_msg="PG autoscaler %s pool %d PGs from %d to %d" %
                      (desc, self.pool_id, self.pg_num, self.pg_num_target),
                      ev_progress=progress,
                      refs=[("pool", self.pool_id)])
class CrushSubtreeResourceStatus:
    """Aggregated resource usage/targets for one non-overlapping CRUSH subtree."""
    def __init__(self) -> None:
        # Identity: the crush roots, OSDs and pools that make up the subtree.
        self.root_ids: List[int] = []
        self.osds: Set[int] = set()
        self.pool_ids: List[int] = []
        self.pool_names: List[str] = []
        # PG accounting (filled in by get_subtree_resource_status).
        self.osd_count: Optional[int] = None   # Number of OSDs
        self.pg_target: Optional[int] = None   # Ideal full-capacity PG count?
        self.pg_current = 0                    # How many PGs already?
        self.pg_left = 0
        self.capacity: Optional[int] = None    # Total capacity of OSDs in subtree
        # Pool accounting used by the multi-pass target calculation.
        self.pool_count: Optional[int] = None
        self.pool_used = 0
        self.total_target_ratio = 0.0
        self.total_target_bytes = 0  # including replication / EC overhead
class PgAutoscaler(MgrModule):
"""
PG autoscaler.
"""
NATIVE_OPTIONS = [
'mon_target_pg_per_osd',
'mon_max_pg_per_osd',
]
MODULE_OPTIONS = [
Option(
name='sleep_interval',
type='secs',
default=60),
Option(
name='threshold',
type='float',
desc='scaling threshold',
long_desc=('The factor by which the `NEW PG_NUM` must vary from the current'
'`PG_NUM` before being accepted. Cannot be less than 1.0'),
default=3.0,
min=1.0),
Option(
name='noautoscale',
type='bool',
desc='global autoscale flag',
long_desc=('Option to turn on/off the autoscaler for all pools'),
default=False),
]
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Set up shutdown signalling and per-pool progress tracking."""
        super(PgAutoscaler, self).__init__(*args, **kwargs)
        self._shutdown = threading.Event()
        # pool_id -> in-flight pg_num adjustment progress event.
        self._event: Dict[int, PgAdjustmentProgress] = {}
        # So much of what we do peeks at the osdmap that it's easiest
        # to just keep a copy of the pythonized version.
        self._osd_map = None
        if TYPE_CHECKING:
            # Never executed at runtime: declares the attribute types that
            # config_notify() sets dynamically, for mypy's benefit only.
            self.sleep_interval = 60
            self.mon_target_pg_per_osd = 0
            self.threshold = 3.0
            self.noautoscale = False
def config_notify(self) -> None:
for opt in self.NATIVE_OPTIONS:
setattr(self,
opt,
self.get_ceph_option(opt))
self.log.debug(' native option %s = %s', opt, getattr(self, opt))
for opt in self.MODULE_OPTIONS:
setattr(self,
opt['name'],
self.get_module_option(opt['name']))
self.log.debug(' mgr option %s = %s',
opt['name'], getattr(self, opt['name']))
    @CLIReadCommand('osd pool autoscale-status')
    def _command_autoscale_status(self, format: str = 'plain') -> Tuple[int, str, str]:
        """
        report on pool pg_num sizing recommendation and intent

        Renders _get_pool_status() either as JSON or as a plain-text table.
        """
        osdmap = self.get_osdmap()
        pools = osdmap.get_pools_by_name()
        ps, root_map = self._get_pool_status(osdmap, pools)
        if format in ('json', 'json-pretty'):
            return 0, json.dumps(ps, indent=4, sort_keys=True), ''
        else:
            # Column order must match the add_row() call below.
            table = PrettyTable(['POOL', 'SIZE', 'TARGET SIZE',
                                 'RATE', 'RAW CAPACITY',
                                 'RATIO', 'TARGET RATIO',
                                 'EFFECTIVE RATIO',
                                 'BIAS',
                                 'PG_NUM',
                                 #  'IDEAL',
                                 'NEW PG_NUM', 'AUTOSCALE',
                                 'BULK'],
                                border=False)
            table.left_padding_width = 0
            table.right_padding_width = 2
            table.align['POOL'] = 'l'
            table.align['SIZE'] = 'r'
            table.align['TARGET SIZE'] = 'r'
            table.align['RATE'] = 'r'
            table.align['RAW CAPACITY'] = 'r'
            table.align['RATIO'] = 'r'
            table.align['TARGET RATIO'] = 'r'
            table.align['EFFECTIVE RATIO'] = 'r'
            table.align['BIAS'] = 'r'
            table.align['PG_NUM'] = 'r'
            # table.align['IDEAL'] = 'r'
            table.align['NEW PG_NUM'] = 'r'
            table.align['AUTOSCALE'] = 'l'
            table.align['BULK'] = 'l'
            for p in ps:
                # Blank cells for "no recommendation" / unset targets.
                if p['would_adjust']:
                    final = str(p['pg_num_final'])
                else:
                    final = ''
                if p['target_bytes'] > 0:
                    ts = mgr_util.format_bytes(p['target_bytes'], 6)
                else:
                    ts = ''
                if p['target_ratio'] > 0.0:
                    tr = '%.4f' % p['target_ratio']
                else:
                    tr = ''
                if p['effective_target_ratio'] > 0.0:
                    etr = '%.4f' % p['effective_target_ratio']
                else:
                    etr = ''
                table.add_row([
                    p['pool_name'],
                    mgr_util.format_bytes(p['logical_used'], 6),
                    ts,
                    p['raw_used_rate'],
                    mgr_util.format_bytes(p['subtree_capacity'], 6),
                    '%.4f' % p['capacity_ratio'],
                    tr,
                    etr,
                    p['bias'],
                    p['pg_num_target'],
                    # p['pg_num_ideal'],
                    final,
                    p['pg_autoscale_mode'],
                    str(p['bulk'])
                ])
            return 0, table.get_string(), ''
@CLIWriteCommand("osd pool set threshold")
def set_scaling_threshold(self, num: float) -> Tuple[int, str, str]:
"""
set the autoscaler threshold
A.K.A. the factor by which the new PG_NUM must vary from the existing PG_NUM
"""
if num < 1.0:
return 22, "", "threshold cannot be set less than 1.0"
self.set_module_option("threshold", num)
return 0, "threshold updated", ""
def complete_all_progress_events(self) -> None:
for pool_id in list(self._event):
ev = self._event[pool_id]
self.remote('progress', 'complete', ev.ev_id)
del self._event[pool_id]
def set_autoscale_mode_all_pools(self, status: str) -> None:
osdmap = self.get_osdmap()
pools = osdmap.get_pools_by_name()
for pool_name, _ in pools.items():
self.mon_command({
'prefix': 'osd pool set',
'pool': pool_name,
'var': 'pg_autoscale_mode',
'val': status
})
@CLIWriteCommand("osd pool get noautoscale")
def get_noautoscale(self) -> Tuple[int, str, str]:
"""
Get the noautoscale flag to see if all pools
are setting the autoscaler on or off as well
as newly created pools in the future.
"""
if self.noautoscale == None:
raise TypeError("noautoscale cannot be None")
elif self.noautoscale:
return 0, "", "noautoscale is on"
else:
return 0, "", "noautoscale is off"
@CLIWriteCommand("osd pool unset noautoscale")
def unset_noautoscale(self) -> Tuple[int, str, str]:
"""
Unset the noautoscale flag so all pools will
have autoscale enabled (including newly created
pools in the future).
"""
if not self.noautoscale:
return 0, "", "noautoscale is already unset!"
else:
self.set_module_option("noautoscale", False)
self.mon_command({
'prefix': 'config set',
'who': 'global',
'name': 'osd_pool_default_pg_autoscale_mode',
'value': 'on'
})
self.set_autoscale_mode_all_pools("on")
return 0, "", "noautoscale is unset, all pools now have autoscale on"
@CLIWriteCommand("osd pool set noautoscale")
def set_noautoscale(self) -> Tuple[int, str, str]:
"""
set the noautoscale for all pools (including
newly created pools in the future)
and complete all on-going progress events
regarding PG-autoscaling.
"""
if self.noautoscale:
return 0, "", "noautoscale is already set!"
else:
self.set_module_option("noautoscale", True)
self.mon_command({
'prefix': 'config set',
'who': 'global',
'name': 'osd_pool_default_pg_autoscale_mode',
'value': 'off'
})
self.set_autoscale_mode_all_pools("off")
self.complete_all_progress_events()
return 0, "", "noautoscale is set, all pools now have autoscale off"
    def serve(self) -> None:
        """Main loop: periodically evaluate and apply pg_num adjustments."""
        self.config_notify()
        while not self._shutdown.is_set():
            self._maybe_adjust()
            self._update_progress_events()
            # sleep_interval is a module option and may change between loops.
            self._shutdown.wait(timeout=self.sleep_interval)
    def shutdown(self) -> None:
        """Signal serve() to exit at its next wakeup."""
        self.log.info('Stopping pg_autoscaler')
        self._shutdown.set()
    def identify_subtrees_and_overlaps(self,
                                       osdmap: OSDMap,
                                       crush: CRUSHMap,
                                       result: Dict[int, CrushSubtreeResourceStatus],
                                       overlapped_roots: Set[int],
                                       roots: List[CrushSubtreeResourceStatus]) -> \
        Tuple[List[CrushSubtreeResourceStatus],
              Set[int]]:
        """Group pools into CRUSH-subtree resource units and flag overlaps.

        Mutates and returns *roots* (one status object per disjoint OSD set)
        and *overlapped_roots* (root ids whose OSD sets intersect another
        root's — those pools are excluded from scaling later).  *result*
        maps root_id -> status and is used to detect sharing.
        """
        # We identify subtrees and overlapping roots from osdmap
        for pool_id, pool in osdmap.get_pools().items():
            crush_rule = crush.get_rule_by_id(pool['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush.get_rule_root(cr_name)
            assert root_id is not None
            osds = set(crush.get_osds_under(root_id))
            # Are there overlapping roots?
            s = None
            for prev_root_id, prev in result.items():
                if osds & prev.osds:
                    # Shares OSDs with an earlier root: reuse its status
                    # object; different root ids sharing OSDs means overlap.
                    s = prev
                    if prev_root_id != root_id:
                        overlapped_roots.add(prev_root_id)
                        overlapped_roots.add(root_id)
                        self.log.warning("pool %s won't scale due to overlapping roots: %s",
                                         pool['pool_name'], overlapped_roots)
                        self.log.warning("Please See: https://docs.ceph.com/en/"
                                         "latest/rados/operations/placement-groups"
                                         "/#automated-scaling")
                    break
            if not s:
                s = CrushSubtreeResourceStatus()
                roots.append(s)
            result[root_id] = s
            s.root_ids.append(root_id)
            s.osds |= osds
            s.pool_ids.append(pool_id)
            s.pool_names.append(pool['pool_name'])
            # Raw PG count: pg_num_target times replication factor.
            s.pg_current += pool['pg_num_target'] * pool['size']
            # Ratio takes precedence over bytes when both are set.
            target_ratio = pool['options'].get('target_size_ratio', 0.0)
            if target_ratio:
                s.total_target_ratio += target_ratio
            else:
                target_bytes = pool['options'].get('target_size_bytes', 0)
                if target_bytes:
                    s.total_target_bytes += target_bytes * osdmap.pool_raw_used_rate(pool_id)
        return roots, overlapped_roots
    def get_subtree_resource_status(self,
                                    osdmap: OSDMap,
                                    crush: CRUSHMap) -> Tuple[Dict[int, CrushSubtreeResourceStatus],
                                                              Set[int]]:
        """
        For each CRUSH subtree of interest (i.e. the roots under which
        we have pools), calculate the current resource usages and targets,
        such as how many PGs there are, vs. how many PGs we would
        like there to be.
        """
        result: Dict[int, CrushSubtreeResourceStatus] = {}
        roots: List[CrushSubtreeResourceStatus] = []
        overlapped_roots: Set[int] = set()
        # identify subtrees and overlapping roots
        roots, overlapped_roots = self.identify_subtrees_and_overlaps(osdmap,
                                                                      crush, result, overlapped_roots, roots)
        # finish subtrees
        all_stats = self.get('osd_stats')
        for s in roots:
            assert s.osds is not None
            s.osd_count = len(s.osds)
            # Budget of PGs for the whole subtree at the configured
            # target-PGs-per-OSD density.
            s.pg_target = s.osd_count * self.mon_target_pg_per_osd
            s.pg_left = s.pg_target
            s.pool_count = len(s.pool_ids)
            capacity = 0
            for osd_stats in all_stats['osd_stats']:
                if osd_stats['osd'] in s.osds:
                    # Intentionally do not apply the OSD's reweight to
                    # this, because we want to calculate PG counts based
                    # on the physical storage available, not how it is
                    # reweighted right now.
                    capacity += osd_stats['kb'] * 1024
            s.capacity = capacity
            self.log.debug('root_ids %s pools %s with %d osds, pg_target %d',
                           s.root_ids,
                           s.pool_ids,
                           s.osd_count,
                           s.pg_target)
        return result, overlapped_roots
    def _calc_final_pg_target(
            self,
            p: Dict[str, Any],
            pool_name: str,
            root_map: Dict[int, CrushSubtreeResourceStatus],
            root_id: int,
            capacity_ratio: float,
            bias: float,
            even_pools: Dict[str, Dict[str, Any]],
            bulk_pools: Dict[str, Dict[str, Any]],
            func_pass: 'PassT',
            bulk: bool,
    ) -> Union[Tuple[float, int, int], Tuple[None, None, None]]:
        """
        `profile` determines behaviour of the autoscaler.
        `first_pass` flag used to determine if this is the first
        pass where the caller tries to calculate/adjust pools that has
        used_ratio > even_ratio else this is the second pass,
        we calculate final_ratio by giving it 1 / pool_count
        of the root we are currently looking at.

        Returns (final_ratio, raw pool_pg_target, quantized final_pg_target),
        or (None, None, None) when the pool is deferred to a later pass
        (recorded into *bulk_pools* or *even_pools* as a side effect).
        """
        if func_pass == 'first':
            # first pass to deal with small pools (no bulk flag)
            # calculating final_pg_target based on capacity ratio
            # we also keep track of bulk_pools to be used in second pass
            if not bulk:
                final_ratio = capacity_ratio
                # Consume this pool's share of the subtree's PG budget.
                pg_left = root_map[root_id].pg_left
                assert pg_left is not None
                used_pg = final_ratio * pg_left
                root_map[root_id].pg_left -= int(used_pg)
                root_map[root_id].pool_used += 1
                pool_pg_target = used_pg / p['size'] * bias
            else:
                bulk_pools[pool_name] = p
                return None, None, None
        elif func_pass == 'second':
            # second pass we calculate the final_pg_target
            # for pools that have used_ratio > even_ratio
            # and we keep track of even pools to be used in third pass
            pool_count = root_map[root_id].pool_count
            assert pool_count is not None
            even_ratio = 1 / (pool_count - root_map[root_id].pool_used)
            used_ratio = capacity_ratio
            if used_ratio > even_ratio:
                root_map[root_id].pool_used += 1
            else:
                even_pools[pool_name] = p
                return None, None, None
            final_ratio = max(used_ratio, even_ratio)
            pg_left = root_map[root_id].pg_left
            assert pg_left is not None
            used_pg = final_ratio * pg_left
            root_map[root_id].pg_left -= int(used_pg)
            pool_pg_target = used_pg / p['size'] * bias
        else:
            # third pass we just split the pg_left to all even_pools
            pool_count = root_map[root_id].pool_count
            assert pool_count is not None
            final_ratio = 1 / (pool_count - root_map[root_id].pool_used)
            pool_pg_target = (final_ratio * root_map[root_id].pg_left) / p['size'] * bias
        # Quantize to the nearest power of two, clamped to the pool's
        # pg_num_min/pg_num_max options (module default floor otherwise).
        min_pg = p.get('options', {}).get('pg_num_min', PG_NUM_MIN)
        max_pg = p.get('options', {}).get('pg_num_max')
        final_pg_target = max(min_pg, nearest_power_of_two(pool_pg_target))
        if max_pg and max_pg < final_pg_target:
            final_pg_target = max_pg
        self.log.info("Pool '{0}' root_id {1} using {2} of space, bias {3}, "
                      "pg target {4} quantized to {5} (current {6})".format(
                          p['pool_name'],
                          root_id,
                          capacity_ratio,
                          bias,
                          pool_pg_target,
                          final_pg_target,
                          p['pg_num_target']
                      ))
        return final_ratio, pool_pg_target, final_pg_target
    def _get_pool_pg_targets(
            self,
            osdmap: OSDMap,
            pools: Dict[str, Dict[str, Any]],
            crush_map: CRUSHMap,
            root_map: Dict[int, CrushSubtreeResourceStatus],
            pool_stats: Dict[int, Dict[str, int]],
            ret: List[Dict[str, Any]],
            threshold: float,
            func_pass: 'PassT',
            overlapped_roots: Set[int],
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Dict[str, Any]] , Dict[str, Dict[str, Any]]]:
        """
        Calculates final_pg_target of each pools and determine if it needs
        scaling, this depends on the profile of the autoscaler. For scale-down,
        we start out with a full complement of pgs and only descrease it when other
        pools needs more pgs due to increased usage. For scale-up, we start out with
        the minimal amount of pgs and only scale when there is increase in usage.

        Appends one status dict per evaluated pool to *ret*; pools deferred
        by _calc_final_pg_target are returned in bulk_pools / even_pools.
        """
        even_pools: Dict[str, Dict[str, Any]] = {}
        bulk_pools: Dict[str, Dict[str, Any]] = {}
        for pool_name, p in pools.items():
            pool_id = p['pool']
            if pool_id not in pool_stats:
                # race with pool deletion; skip
                continue
            # FIXME: we assume there is only one take per pool, but that
            # may not be true.
            crush_rule = crush_map.get_rule_by_id(p['crush_rule'])
            assert crush_rule is not None
            cr_name = crush_rule['rule_name']
            root_id = crush_map.get_rule_root(cr_name)
            assert root_id is not None
            if root_id in overlapped_roots:
                # skip pools
                # with overlapping roots
                self.log.warn("pool %d contains an overlapping root %d"
                              "... skipping scaling", pool_id, root_id)
                continue
            capacity = root_map[root_id].capacity
            assert capacity is not None
            if capacity == 0:
                self.log.debug("skipping empty subtree {0}".format(cr_name))
                continue
            raw_used_rate = osdmap.pool_raw_used_rate(pool_id)
            pool_logical_used = pool_stats[pool_id]['stored']
            bias = p['options'].get('pg_autoscale_bias', 1.0)
            target_bytes = 0
            # ratio takes precedence if both are set
            if p['options'].get('target_size_ratio', 0.0) == 0.0:
                target_bytes = p['options'].get('target_size_bytes', 0)
            # What proportion of space are we using?
            actual_raw_used = pool_logical_used * raw_used_rate
            actual_capacity_ratio = float(actual_raw_used) / capacity
            # Planned usage: the larger of actual usage and target_size_bytes.
            pool_raw_used = max(pool_logical_used, target_bytes) * raw_used_rate
            capacity_ratio = float(pool_raw_used) / capacity
            self.log.info("effective_target_ratio {0} {1} {2} {3}".format(
                p['options'].get('target_size_ratio', 0.0),
                root_map[root_id].total_target_ratio,
                root_map[root_id].total_target_bytes,
                capacity))
            target_ratio = effective_target_ratio(p['options'].get('target_size_ratio', 0.0),
                                                  root_map[root_id].total_target_ratio,
                                                  root_map[root_id].total_target_bytes,
                                                  capacity)
            # determine if the pool is a bulk
            bulk = False
            flags = p['flags_names'].split(",")
            if "bulk" in flags:
                bulk = True
            capacity_ratio = max(capacity_ratio, target_ratio)
            final_ratio, pool_pg_target, final_pg_target = self._calc_final_pg_target(
                p, pool_name, root_map, root_id,
                capacity_ratio, bias, even_pools,
                bulk_pools, func_pass, bulk)
            # None => pool was deferred to a later pass; no status row yet.
            if final_ratio is None:
                continue
            # Only recommend a change when it differs from the current
            # pg_num_target by more than the configured threshold factor
            # and autoscaling is enabled for the pool.
            adjust = False
            if (final_pg_target > p['pg_num_target'] * threshold or
                    final_pg_target < p['pg_num_target'] / threshold) and \
                    final_ratio >= 0.0 and \
                    final_ratio <= 1.0 and \
                    p['pg_autoscale_mode'] == 'on':
                adjust = True
            assert pool_pg_target is not None
            ret.append({
                'pool_id': pool_id,
                'pool_name': p['pool_name'],
                'crush_root_id': root_id,
                'pg_autoscale_mode': p['pg_autoscale_mode'],
                'pg_num_target': p['pg_num_target'],
                'logical_used': pool_logical_used,
                'target_bytes': target_bytes,
                'raw_used_rate': raw_used_rate,
                'subtree_capacity': capacity,
                'actual_raw_used': actual_raw_used,
                'raw_used': pool_raw_used,
                'actual_capacity_ratio': actual_capacity_ratio,
                'capacity_ratio': capacity_ratio,
                'target_ratio': p['options'].get('target_size_ratio', 0.0),
                'effective_target_ratio': target_ratio,
                'pg_num_ideal': int(pool_pg_target),
                'pg_num_final': final_pg_target,
                'would_adjust': adjust,
                'bias': p.get('options', {}).get('pg_autoscale_bias', 1.0),
                'bulk': bulk,
            })
        return ret, bulk_pools, even_pools
def _get_pool_status(
    self,
    osdmap: OSDMap,
    pools: Dict[str, Dict[str, Any]],
) -> Tuple[List[Dict[str, Any]],
           Dict[int, CrushSubtreeResourceStatus]]:
    """Build per-pool pg sizing reports for every pool in ``pools``.

    Returns a tuple of (list of per-pool report dicts, map of crush root
    id -> CrushSubtreeResourceStatus).
    """
    threshold = self.threshold
    assert threshold >= 1.0

    crush = osdmap.get_crush()
    root_map, overlapped_roots = self.get_subtree_resource_status(osdmap, crush)
    usage = self.get('df')
    # Index the raw df stats by pool id for quick lookup below.
    pool_stats = {pool['id']: pool['stats'] for pool in usage['pools']}

    reports: List[Dict[str, Any]] = []
    # Three passes over the pools:
    #   1. find/adjust pools that use more capacity than the even ratio
    #      of the other pools; collect the rest as "bulk" pools.
    #   2. re-examine the bulk pools collected by the first pass.
    #   3. give each remaining "even" pool 1/pool_count of the total pgs.
    reports, bulk_pools, _ = self._get_pool_pg_targets(
        osdmap, pools, crush, root_map,
        pool_stats, reports, threshold, 'first', overlapped_roots)
    reports, _, even_pools = self._get_pool_pg_targets(
        osdmap, bulk_pools, crush, root_map,
        pool_stats, reports, threshold, 'second', overlapped_roots)
    reports, _, _ = self._get_pool_pg_targets(
        osdmap, even_pools, crush, root_map,
        pool_stats, reports, threshold, 'third', overlapped_roots)
    return (reports, root_map)
def _update_progress_events(self) -> None:
if self.noautoscale:
return
osdmap = self.get_osdmap()
pools = osdmap.get_pools()
for pool_id in list(self._event):
ev = self._event[pool_id]
pool_data = pools.get(pool_id)
if pool_data is None or pool_data['pg_num'] == pool_data['pg_num_target'] or ev.pg_num == ev.pg_num_target:
# pool is gone or we've reached our target
self.remote('progress', 'complete', ev.ev_id)
del self._event[pool_id]
continue
ev.update(self, (ev.pg_num - pool_data['pg_num']) / (ev.pg_num - ev.pg_num_target))
def _maybe_adjust(self) -> None:
    """Scan every pool once: raise health warnings for pools in 'warn'
    mode, apply new pg_num values for pools in 'on' mode, and flag
    overcommitted or conflicting target-size settings.
    """
    if self.noautoscale:
        return
    self.log.info('_maybe_adjust')
    osdmap = self.get_osdmap()
    if osdmap.get_require_osd_release() < 'nautilus':
        # nothing to do on clusters that predate nautilus
        return
    pools = osdmap.get_pools_by_name()
    self.log.debug("pool: {0}".format(json.dumps(pools, indent=4,
                                                 sort_keys=True)))
    ps, root_map = self._get_pool_status(osdmap, pools)

    # Anyone in 'warn', set the health message for them and then
    # drop them from consideration.
    too_few = []
    too_many = []
    bytes_and_ratio = []
    health_checks: Dict[str, Dict[str, Union[int, str, List[str]]]] = {}

    # Per-crush-root accumulators used for the overcommit checks below.
    total_bytes = dict([(r, 0) for r in iter(root_map)])
    total_target_bytes = dict([(r, 0.0) for r in iter(root_map)])
    # root id -> names of the pools that have an explicit target_size_bytes
    target_bytes_pools: Dict[int, List[str]] = dict([(r, []) for r in iter(root_map)])

    for p in ps:
        pool_id = p['pool_id']
        pool_opts = pools[p['pool_name']]['options']
        # Setting both target_size_bytes and target_size_ratio is
        # contradictory; collect for a dedicated health warning.
        if pool_opts.get('target_size_ratio', 0) > 0 and pool_opts.get('target_size_bytes', 0) > 0:
            bytes_and_ratio.append(
                'Pool %s has target_size_bytes and target_size_ratio set' % p['pool_name'])
        total_bytes[p['crush_root_id']] += max(
            p['actual_raw_used'],
            p['target_bytes'] * p['raw_used_rate'])
        if p['target_bytes'] > 0:
            total_target_bytes[p['crush_root_id']] += p['target_bytes'] * p['raw_used_rate']
            target_bytes_pools[p['crush_root_id']].append(p['pool_name'])
        if p['pg_autoscale_mode'] == 'warn':
            msg = 'Pool %s has %d placement groups, should have %d' % (
                p['pool_name'],
                p['pg_num_target'],
                p['pg_num_final'])
            if p['pg_num_final'] > p['pg_num_target']:
                too_few.append(msg)
            else:
                too_many.append(msg)
        if not p['would_adjust']:
            continue
        if p['pg_autoscale_mode'] == 'on':
            # Note that setting pg_num actually sets pg_num_target (see
            # OSDMonitor.cc)
            r = self.mon_command({
                'prefix': 'osd pool set',
                'pool': p['pool_name'],
                'var': 'pg_num',
                'val': str(p['pg_num_final'])
            })
            # create new event or update existing one to reflect
            # progress from current state to the new pg_num_target
            pool_data = pools[p['pool_name']]
            pg_num = pool_data['pg_num']
            new_target = p['pg_num_final']
            if pool_id in self._event:
                self._event[pool_id].reset(pg_num, new_target)
            else:
                self._event[pool_id] = PgAdjustmentProgress(pool_id, pg_num, new_target)
            self._event[pool_id].update(self, 0.0)

            if r[0] != 0:
                # FIXME: this is a serious and unexpected thing,
                # we should expose it as a cluster log error once
                # the hook for doing that from ceph-mgr modules is
                # in.
                self.log.error("pg_num adjustment on {0} to {1} failed: {2}"
                               .format(p['pool_name'],
                                       p['pg_num_final'], r))

    if too_few:
        summary = "{0} pools have too few placement groups".format(
            len(too_few))
        health_checks['POOL_TOO_FEW_PGS'] = {
            'severity': 'warning',
            'summary': summary,
            'count': len(too_few),
            'detail': too_few
        }

    if too_many:
        summary = "{0} pools have too many placement groups".format(
            len(too_many))
        health_checks['POOL_TOO_MANY_PGS'] = {
            'severity': 'warning',
            'summary': summary,
            'count': len(too_many),
            'detail': too_many
        }

    # Warn when the sum of target sizes (or actual usage) on a crush
    # subtree exceeds that subtree's capacity.
    too_much_target_bytes = []
    for root_id, total in total_bytes.items():
        total_target = int(total_target_bytes[root_id])
        capacity = root_map[root_id].capacity
        assert capacity is not None
        if total_target > 0 and total > capacity and capacity:
            too_much_target_bytes.append(
                'Pools %s overcommit available storage by %.03fx due to '
                'target_size_bytes %s on pools %s' % (
                    root_map[root_id].pool_names,
                    total / capacity,
                    mgr_util.format_bytes(total_target, 5, colored=False),
                    target_bytes_pools[root_id]
                )
            )
        elif total_target > capacity and capacity:
            too_much_target_bytes.append(
                'Pools %s overcommit available storage by %.03fx due to '
                'collective target_size_bytes of %s' % (
                    root_map[root_id].pool_names,
                    total / capacity,
                    mgr_util.format_bytes(total_target, 5, colored=False),
                )
            )
    if too_much_target_bytes:
        health_checks['POOL_TARGET_SIZE_BYTES_OVERCOMMITTED'] = {
            'severity': 'warning',
            'summary': "%d subtrees have overcommitted pool target_size_bytes" % len(too_much_target_bytes),
            'count': len(too_much_target_bytes),
            'detail': too_much_target_bytes,
        }

    if bytes_and_ratio:
        health_checks['POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO'] = {
            'severity': 'warning',
            'summary': "%d pools have both target_size_bytes and target_size_ratio set" % len(bytes_and_ratio),
            'count': len(bytes_and_ratio),
            'detail': bytes_and_ratio,
        }

    self.set_health_checks(health_checks)
| 33,980 | 40.289186 | 119 | py |
null | ceph-main/src/pybind/mgr/pg_autoscaler/tests/__init__.py | 0 | 0 | 0 | py | |
null | ceph-main/src/pybind/mgr/pg_autoscaler/tests/test_cal_final_pg_target.py | # python unit test
import unittest
from tests import mock
import pytest
import json
from pg_autoscaler import module
class RootMapItem:
    """Minimal stand-in for one crush-root resource entry used by the
    autoscaler tests: a pool count plus a pg budget."""

    def __init__(self, pool_count, pg_target, pg_left):
        self.pool_count = pool_count
        self.pg_target = pg_target
        self.pg_left = pg_left
        # No pgs consumed from this root yet.
        self.pool_used = 0
class TestPgAutoscaler(object):
    """Unit tests for PgAutoscaler._calc_final_pg_target()."""

    def setup_method(self):
        # a bunch of attributes for testing.
        self.autoscaler = module.PgAutoscaler('module_name', 0, 0)

    def helper_test(self, pools, root_map, bias, overlapped_roots):
        """Drive _calc_final_pg_target() through the same three passes as
        _get_pool_pg_target() and check every pool against the expectations
        embedded in its fixture dict."""
        # Here we simulate how _get_pool_pg_target() works.

        bulk_pools = {}
        even_pools = {}

        # first pass
        for pool_name, p in pools.items():
            root_id = p['root_id']
            if root_id in overlapped_roots:
                # skip pools with overlapping roots
                assert p['no_scale']
                continue

            final_ratio, pool_pg_target, final_pg_target = self.autoscaler._calc_final_pg_target(
                p, pool_name, root_map,
                p['root_id'], p['capacity_ratio'],
                bias, even_pools, bulk_pools, 'first', p['bulk'])

            # BUG FIX: use identity comparison with None (PEP 8), not ==.
            if final_ratio is None:
                # no final_ratio means current pool is an even pool
                # and we do not have to do any assertion on it.
                continue

            assert p['expected_final_pg_target'] == final_pg_target
            assert p['expected_final_ratio'] == final_ratio
            assert not p['expected_bulk_pool'] and pool_name not in bulk_pools

        # second pass
        for pool_name, p in bulk_pools.items():
            final_ratio, pool_pg_target, final_pg_target = self.autoscaler._calc_final_pg_target(
                p, pool_name, root_map,
                p['root_id'], p['capacity_ratio'],
                bias, even_pools, bulk_pools, 'second', p['bulk'])

            if final_ratio is None:
                # no final_ratio means current pool is an even pool
                # and we do not have to do any assertion on it.
                continue

            assert p['expected_final_pg_target'] == final_pg_target
            assert p['expected_final_ratio'] == final_ratio
            assert not p['even_pools'] and pool_name not in even_pools

        # third pass
        for pool_name, p in even_pools.items():
            final_ratio, pool_pg_target, final_pg_target = self.autoscaler._calc_final_pg_target(
                p, pool_name, root_map,
                p['root_id'], p['capacity_ratio'],
                bias, even_pools, bulk_pools, 'third', p['bulk'])

            assert p['expected_final_pg_target'] == final_pg_target
            assert p['expected_final_ratio'] == final_ratio
            assert p['even_pools'] and pool_name in even_pools

    def test_even_pools_one_meta_three_bulk(self):
        pools = {
            "meta_0": {
                "pool": 0,
                "pool_name": "meta_0",
                "pg_num_target": 32,
                "capacity_ratio": 0.2,
                "root_id": 0,
                "expected_final_pg_target": 64,
                "expected_final_ratio": 0.2,
                "expected_bulk_pool": False,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "bulk_0": {
                "pool": 1,
                "pool_name": "bulk_0",
                "pg_num_target": 32,
                "capacity_ratio": 0.2,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 1/3,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk_1": {
                "pool": 2,
                "pool_name": "bulk_1",
                "pg_num_target": 32,
                "capacity_ratio": 0.2,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 1/3,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk_2": {
                "pool": 3,
                "pool_name": "bulk_2",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 1/3,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
        }

        root_map = {
            0: RootMapItem(4, 400, 400),
            1: RootMapItem(4, 400, 400),
        }

        bias = 1
        overlapped_roots = set()
        self.helper_test(pools, root_map, bias, overlapped_roots)

    def test_even_pools_two_meta_two_bulk(self):
        pools = {
            "meta0": {
                "pool": 0,
                "pool_name": "meta0",
                "pg_num_target": 32,
                "capacity_ratio": 0.2,
                "root_id": 0,
                "expected_final_pg_target": 64,
                "expected_final_ratio": 0.2,
                "expected_bulk_pool": False,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "meta1": {
                "pool": 1,
                "pool_name": "meta1",
                "pg_num_target": 32,
                "capacity_ratio": 0.2,
                "root_id": 0,
                "expected_final_pg_target": 64,
                "expected_final_ratio": 0.2,
                "expected_bulk_pool": False,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "bulk0": {
                "pool": 2,
                "pool_name": "bulk0",
                "pg_num_target": 32,
                "capacity_ratio": 0.2,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk1": {
                "pool": 3,
                "pool_name": "test3",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
        }

        root_map = {
            0: RootMapItem(4, 400, 400),
            1: RootMapItem(4, 400, 400),
        }

        bias = 1
        overlapped_roots = set()
        self.helper_test(pools, root_map, bias, overlapped_roots)

    def test_uneven_pools_one_meta_three_bulk(self):
        pools = {
            "meta0": {
                "pool": 0,
                "pool_name": "meta0",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 32,
                "expected_final_ratio": 0.1,
                "expected_bulk_pool": False,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "bulk0": {
                "pool": 1,
                "pool_name": "bulk0",
                "pg_num_target": 32,
                "capacity_ratio": 0.5,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk1": {
                "pool": 2,
                "pool_name": "bulk1",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 64,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk2": {
                "pool": 3,
                "pool_name": "bulk2",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 64,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
        }

        root_map = {
            0: RootMapItem(4, 400, 400),
            1: RootMapItem(4, 400, 400),
        }

        bias = 1
        overlapped_roots = set()
        self.helper_test(pools, root_map, bias, overlapped_roots)

    def test_uneven_pools_two_meta_two_bulk(self):
        pools = {
            "meta0": {
                "pool": 0,
                "pool_name": "meta0",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 32,
                "expected_final_ratio": 0.1,
                "expected_bulk_pool": False,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "meta1": {
                "pool": 1,
                "pool_name": "meta1",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 32,
                "expected_final_ratio": 0.1,
                "expected_bulk_pool": False,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "bulk0": {
                "pool": 2,
                "pool_name": "bulk0",
                "pg_num_target": 32,
                "capacity_ratio": 0.5,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk1": {
                "pool": 3,
                "pool_name": "bulk1",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 128,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
        }

        root_map = {
            0: RootMapItem(4, 400, 400),
            1: RootMapItem(4, 400, 400),
        }

        bias = 1
        overlapped_roots = set()
        self.helper_test(pools, root_map, bias, overlapped_roots)

    def test_uneven_pools_with_diff_roots(self):
        pools = {
            "meta0": {
                "pool": 0,
                "pool_name": "meta0",
                "pg_num_target": 32,
                "capacity_ratio": 0.3,
                "root_id": 0,
                "expected_final_pg_target": 1024,
                "expected_final_ratio": 0.3,
                "expected_bulk_pool": False,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "meta1": {
                "pool": 1,
                "pool_name": "meta1",
                "pg_num_target": 32,
                "capacity_ratio": 0.6,
                "root_id": 1,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 0.6,
                "expected_bulk_pool": False,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "bulk2": {
                "pool": 2,
                "pool_name": "bulk2",
                "pg_num_target": 32,
                "capacity_ratio": 0.6,
                "root_id": 0,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 0.6,
                "expected_bulk_pool": True,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk3": {
                "pool": 3,
                "pool_name": "test3",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 1024,
                "expected_final_ratio": 1,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk4": {
                "pool": 4,
                "pool_name": "bulk4",
                "pg_num_target": 32,
                "capacity_ratio": 0.4,
                "root_id": 1,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 1,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
        }

        root_map = {
            0: RootMapItem(3, 5000, 5000),
            1: RootMapItem(2, 5000, 5000),
        }

        bias = 1
        overlapped_roots = set()
        self.helper_test(pools, root_map, bias, overlapped_roots)

    def test_even_pools_with_diff_roots(self):
        pools = {
            "meta0": {
                "pool": 0,
                "pool_name": "meta0",
                "pg_num_target": 32,
                "capacity_ratio": 0.4,
                "root_id": 0,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 0.4,
                "expected_bulk_pool": False,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "meta1": {
                "pool": 1,
                "pool_name": "meta1",
                "pg_num_target": 32,
                "capacity_ratio": 0.6,
                "root_id": 1,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 0.6,
                "expected_bulk_pool": False,
                "even_pools": False,
                "size": 1,
                "no_scale": False,
                "bulk": False,
            },
            "bulk1": {
                "pool": 2,
                "pool_name": "bulk1",
                "pg_num_target": 32,
                "capacity_ratio": 0.2,
                "root_id": 0,
                "expected_final_pg_target": 1024,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk2": {
                "pool": 3,
                "pool_name": "bulk2",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 1024,
                "expected_final_ratio": 0.5,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
            "bulk3": {
                "pool": 4,
                "pool_name": "bulk4",
                "pg_num_target": 32,
                "capacity_ratio": 0.25,
                "root_id": 1,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 1,
                "expected_bulk_pool": True,
                "even_pools": True,
                "size": 1,
                "no_scale": False,
                "bulk": True,
            },
        }

        root_map = {
            0: RootMapItem(3, 5000, 5000),
            1: RootMapItem(2, 5000, 5000),
        }

        bias = 1
        overlapped_roots = set()
        self.helper_test(pools, root_map, bias, overlapped_roots)

    def test_uneven_pools_with_overlapped_roots(self):
        # All pools sit on overlapping roots, so every pool must be
        # skipped ("no_scale") by the first pass.
        pools = {
            "test0": {
                "pool": 0,
                "pool_name": "test0",
                "pg_num_target": 32,
                "capacity_ratio": 0.4,
                "root_id": 0,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 0.4,
                "even_pools": False,
                "size": 1,
                "no_scale": True,
            },
            "test1": {
                "pool": 1,
                "pool_name": "test1",
                "pg_num_target": 32,
                "capacity_ratio": 0.6,
                "root_id": 1,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 0.6,
                "even_pools": False,
                "size": 1,
                "no_scale": True,
            },
            "test2": {
                "pool": 2,
                "pool_name": "test2",
                "pg_num_target": 32,
                "capacity_ratio": 0.5,
                "root_id": 0,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 0.5,
                "even_pools": False,
                "size": 1,
                "no_scale": True,
            },
            "test3": {
                "pool": 3,
                "pool_name": "test3",
                "pg_num_target": 32,
                "capacity_ratio": 0.1,
                "root_id": 0,
                "expected_final_pg_target": 512,
                "expected_final_ratio": 1,
                "even_pools": True,
                "size": 1,
                "no_scale": True,
            },
            "test4": {
                "pool": 4,
                "pool_name": "test4",
                "pg_num_target": 32,
                "capacity_ratio": 0.4,
                "root_id": 1,
                "expected_final_pg_target": 2048,
                "expected_final_ratio": 1,
                "even_pools": True,
                "size": 1,
                "no_scale": True,
            },
        }

        root_map = {
            0: RootMapItem(3, 5000, 5000),
            1: RootMapItem(2, 5000, 5000),
        }

        bias = 1
        overlapped_roots = {0, 1}
        self.helper_test(pools, root_map, bias, overlapped_roots)
| 19,491 | 27.791728 | 97 | py |
null | ceph-main/src/pybind/mgr/pg_autoscaler/tests/test_cal_ratio.py | from pg_autoscaler import effective_target_ratio
from pytest import approx
def check_simple_ratio(target_ratio, tot_ratio):
    """With no target bytes in play, the effective ratio must equal
    target_ratio / tot_ratio; return the computed value."""
    result = effective_target_ratio(target_ratio, tot_ratio, 0, 0)
    expected = target_ratio / tot_ratio
    assert expected == approx(result)
    return result
def test_simple():
    """Scaling both ratios by a common factor must not change the result."""
    base = check_simple_ratio(0.2, 0.9)
    for scaled in (check_simple_ratio(2, 9), check_simple_ratio(20, 90)):
        assert base == approx(scaled)

    # Equal ratios collapse to exactly 1.0.
    assert check_simple_ratio(0.9, 0.9) == approx(1.0)

    half_a = check_simple_ratio(1, 2)
    half_b = check_simple_ratio(0.5, 1.0)
    assert half_a == approx(half_b)
def test_total_bytes():
    """Exercise the target_bytes branch: (args, expected effective ratio)."""
    cases = [
        ((1, 10, 5, 10), 0.05),
        ((0.1, 1, 5, 10), 0.05),
        ((1, 1, 5, 10), 0.5),
        ((1, 1, 0, 10), 1.0),
        ((0, 1, 5, 10), 0.0),
        ((1, 1, 10, 10), 0.0),
    ]
    for args, expected in cases:
        assert effective_target_ratio(*args) == approx(expected)
| 1,123 | 28.578947 | 63 | py |
null | ceph-main/src/pybind/mgr/pg_autoscaler/tests/test_overlapping_roots.py | # python unit test
import unittest
from tests import mock
import pytest
import json
from pg_autoscaler import module
class OSDMAP:
    """Tiny OSDMap stand-in exposing just what the autoscaler touches."""

    def __init__(self, pools):
        self.pools = pools

    def get_pools(self):
        """Return the pool-id -> pool-data mapping supplied at construction."""
        return self.pools

    def pool_raw_used_rate(self, pool_id):
        # BUG FIX: the original definition omitted ``self``, so calling this
        # as an instance method raised TypeError. Every pool is treated as
        # replicating 1x in these tests.
        return 1
class CRUSH:
    """Minimal crush-map double: a rule list plus a root-id -> osd-list map."""

    def __init__(self, rules, osd_dic):
        self.rules = rules
        self.osd_dic = osd_dic

    def get_rule_by_id(self, rule_id):
        """Return the rule dict whose 'rule_id' matches, else None."""
        return next((r for r in self.rules if r['rule_id'] == rule_id), None)

    def get_rule_root(self, rule_name):
        """Return the 'root_id' of the rule named ``rule_name``, else None."""
        match = next((r for r in self.rules if r['rule_name'] == rule_name), None)
        return None if match is None else match['root_id']

    def get_osds_under(self, root_id):
        """Return the osd ids that live under the given crush root."""
        return self.osd_dic[root_id]
class TestPgAutoscaler(object):
    """Unit tests for PgAutoscaler.identify_subtrees_and_overlaps()."""

    def setup_method(self):
        # a bunch of attributes for testing.
        self.autoscaler = module.PgAutoscaler('module_name', 0, 0)

    def helper_test(self, osd_dic, rules, pools, expected_overlapped_roots):
        """Build fake osdmap/crush objects and verify which crush roots get
        reported as overlapping."""
        result = {}
        roots = []
        overlapped_roots = set()
        osdmap = OSDMAP(pools)
        crush = CRUSH(rules, osd_dic)
        roots, overlapped_roots = self.autoscaler.identify_subtrees_and_overlaps(
            osdmap, crush, result, overlapped_roots, roots)
        assert overlapped_roots == expected_overlapped_roots

    def test_subtrees_and_overlaps(self):
        osd_dic = {
            -1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            -40: [11, 12, 13, 14, 15],
            # -5 spans the osds of both other roots -> everything overlaps.
            -5: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
        }

        rules = [
            {
                "rule_id": 0,
                "rule_name": "data",
                "ruleset": 0,
                "type": 1,
                "min_size": 1,
                "max_size": 10,
                "root_id": -1,
            },
            {
                "rule_id": 1,
                "rule_name": "teuthology-data-ec",
                "ruleset": 1,
                "type": 3,
                "min_size": 3,
                "max_size": 6,
                "root_id": -5,
            },
            {
                "rule_id": 4,
                "rule_name": "rep-ssd",
                "ruleset": 4,
                "type": 1,
                "min_size": 1,
                "max_size": 10,
                "root_id": -40,
            },
        ]

        pools = {
            0: {
                "pool_name": "data",
                "pg_num_target": 1024,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.1624,
                "options": {
                    "pg_num_min": 1024,
                },
                "expected_final_pg_target": 1024,
            },
            1: {
                "pool_name": "metadata",
                "pg_num_target": 64,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.0144,
                "options": {
                    "pg_num_min": 64,
                },
                "expected_final_pg_target": 64,
            },
            4: {
                "pool_name": "libvirt-pool",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0001,
                "options": {},
                "expected_final_pg_target": 128,
            },
            93: {
                "pool_name": ".rgw.root",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            94: {
                "pool_name": "default.rgw.control",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            95: {
                "pool_name": "default.rgw.meta",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            96: {
                "pool_name": "default.rgw.log",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            97: {
                "pool_name": "default.rgw.buckets.index",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.0002,
                "options": {},
                "expected_final_pg_target": 32,
            },
            98: {
                "pool_name": "default.rgw.buckets.data",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0457,
                "options": {},
                "expected_final_pg_target": 128,
            },
            99: {
                "pool_name": "default.rgw.buckets.non-ec",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            100: {
                "pool_name": "device_health_metrics",
                "pg_num_target": 1,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0,
                "options": {
                    "pg_num_min": 1
                },
                "expected_final_pg_target": 1,
            },
            113: {
                "pool_name": "cephfs.teuthology.meta",
                "pg_num_target": 64,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.1389,
                "options": {
                    "pg_autoscale_bias": 4,
                    "pg_num_min": 64,
                },
                "expected_final_pg_target": 512,
            },
            114: {
                "pool_name": "cephfs.teuthology.data",
                "pg_num_target": 256,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0006,
                "options": {
                    "pg_num_min": 128,
                },
                # BUG FIX: this dict used to define "expected_final_pg_target"
                # twice (1024 then 256); the first value was silently dead.
                "expected_final_pg_target": 256,
            },
            117: {
                "pool_name": "cephfs.scratch.meta",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.0027,
                "options": {
                    "pg_autoscale_bias": 4,
                    "pg_num_min": 16,
                },
                "expected_final_pg_target": 64,
            },
            118: {
                "pool_name": "cephfs.scratch.data",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0027,
                "options": {},
                "expected_final_pg_target": 128,
            },
            119: {
                "pool_name": "cephfs.teuthology.data-ec",
                "pg_num_target": 1024,
                "size": 6,
                "crush_rule": 1,
                "capacity_ratio": 0.8490,
                "options": {
                    "pg_num_min": 1024
                },
                "expected_final_pg_target": 1024,
            },
            121: {
                "pool_name": "cephsqlite",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 128,
            },
        }

        expected_overlapped_roots = {-40, -1, -5}
        self.helper_test(osd_dic, rules, pools, expected_overlapped_roots)

    def test_no_overlaps(self):
        osd_dic = {
            -1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            -40: [11, 12, 13, 14, 15],
            # Disjoint osd sets -> no overlapping roots.
            -5: [16, 17, 18],
        }

        rules = [
            {
                "rule_id": 0,
                "rule_name": "data",
                "ruleset": 0,
                "type": 1,
                "min_size": 1,
                "max_size": 10,
                "root_id": -1,
            },
            {
                "rule_id": 1,
                "rule_name": "teuthology-data-ec",
                "ruleset": 1,
                "type": 3,
                "min_size": 3,
                "max_size": 6,
                "root_id": -5,
            },
            {
                "rule_id": 4,
                "rule_name": "rep-ssd",
                "ruleset": 4,
                "type": 1,
                "min_size": 1,
                "max_size": 10,
                "root_id": -40,
            },
        ]

        pools = {
            0: {
                "pool_name": "data",
                "pg_num_target": 1024,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.1624,
                "options": {
                    "pg_num_min": 1024,
                },
                "expected_final_pg_target": 1024,
            },
            1: {
                "pool_name": "metadata",
                "pg_num_target": 64,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.0144,
                "options": {
                    "pg_num_min": 64,
                },
                "expected_final_pg_target": 64,
            },
            4: {
                "pool_name": "libvirt-pool",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0001,
                "options": {},
                "expected_final_pg_target": 128,
            },
            93: {
                "pool_name": ".rgw.root",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            94: {
                "pool_name": "default.rgw.control",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            95: {
                "pool_name": "default.rgw.meta",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            96: {
                "pool_name": "default.rgw.log",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            97: {
                "pool_name": "default.rgw.buckets.index",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.0002,
                "options": {},
                "expected_final_pg_target": 32,
            },
            98: {
                "pool_name": "default.rgw.buckets.data",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0457,
                "options": {},
                "expected_final_pg_target": 128,
            },
            99: {
                "pool_name": "default.rgw.buckets.non-ec",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 32,
            },
            100: {
                "pool_name": "device_health_metrics",
                "pg_num_target": 1,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0,
                "options": {
                    "pg_num_min": 1
                },
                "expected_final_pg_target": 1,
            },
            113: {
                "pool_name": "cephfs.teuthology.meta",
                "pg_num_target": 64,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.1389,
                "options": {
                    "pg_autoscale_bias": 4,
                    "pg_num_min": 64,
                },
                "expected_final_pg_target": 512,
            },
            114: {
                "pool_name": "cephfs.teuthology.data",
                "pg_num_target": 256,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0006,
                "options": {
                    "pg_num_min": 128,
                },
                # BUG FIX: this dict used to define "expected_final_pg_target"
                # twice (1024 then 256); the first value was silently dead.
                "expected_final_pg_target": 256,
            },
            117: {
                "pool_name": "cephfs.scratch.meta",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0.0027,
                "options": {
                    "pg_autoscale_bias": 4,
                    "pg_num_min": 16,
                },
                "expected_final_pg_target": 64,
            },
            118: {
                "pool_name": "cephfs.scratch.data",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 0,
                "capacity_ratio": 0.0027,
                "options": {},
                "expected_final_pg_target": 128,
            },
            119: {
                "pool_name": "cephfs.teuthology.data-ec",
                "pg_num_target": 1024,
                "size": 6,
                "crush_rule": 1,
                "capacity_ratio": 0.8490,
                "options": {
                    "pg_num_min": 1024
                },
                "expected_final_pg_target": 1024,
            },
            121: {
                "pool_name": "cephsqlite",
                "pg_num_target": 32,
                "size": 3,
                "crush_rule": 4,
                "capacity_ratio": 0,
                "options": {},
                "expected_final_pg_target": 128,
            },
        }

        expected_overlapped_roots = set()
        self.helper_test(osd_dic, rules, pools, expected_overlapped_roots)
| 15,051 | 30.358333 | 120 | py |
null | ceph-main/src/pybind/mgr/progress/__init__.py | import os
if 'UNITTEST' in os.environ:
import tests
from .module import Module
| 86 | 9.875 | 28 | py |
null | ceph-main/src/pybind/mgr/progress/module.py | try:
from typing import List, Dict, Union, Any, Optional
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
from mgr_module import MgrModule, OSDMap, Option
from mgr_util import to_pretty_timedelta
from datetime import timedelta
import os
import threading
import datetime
import uuid
import time
import logging
import json
ENCODING_VERSION = 2
# keep a global reference to the module so we can use it from Event methods
_module = None # type: Optional["Module"]
class Event(object):
"""
A generic "event" that has a start time, completion percentage,
and a list of "refs" that are (type, id) tuples describing which
objects (osds, pools) this relates to.
"""
def __init__(self, id: str,
message: str,
refs: List[str],
add_to_ceph_s: bool,
started_at: Optional[float] = None):
self._message = message
self._refs = refs
self.started_at = started_at if started_at else time.time()
self.id = id
self._add_to_ceph_s = add_to_ceph_s
def _refresh(self):
global _module
assert _module
_module.log.debug('refreshing mgr for %s (%s) at %f' % (self.id, self._message,
self.progress))
_module.update_progress_event(
self.id, self.twoline_progress(6), self.progress, self._add_to_ceph_s)
@property
def message(self):
# type: () -> str
return self._message
@property
def refs(self):
# type: () -> List[str]
return self._refs
@property
def add_to_ceph_s(self):
# type: () -> bool
return self._add_to_ceph_s
@property
def progress(self):
# type: () -> float
raise NotImplementedError()
@property
def duration_str(self):
duration = time.time() - self.started_at
return "(%s)" % (
to_pretty_timedelta(timedelta(seconds=duration)))
@property
def failed(self):
return False
@property
def failure_message(self):
return None
def summary(self):
# type: () -> str
return "{0} {1} {2}".format(self.progress, self.message,
self.duration_str)
def _progress_str(self, width):
inner_width = width - 2
out = "["
done_chars = int(self.progress * inner_width)
out += done_chars * '='
out += (inner_width - done_chars) * '.'
out += "]"
return out
def twoline_progress(self, indent=4):
"""
e.g.
- Eating my delicious strudel (since: 30s)
[===============..............] (remaining: 04m)
"""
time_remaining = self.estimated_time_remaining()
if time_remaining:
remaining = "(remaining: %s)" % (
to_pretty_timedelta(timedelta(seconds=time_remaining)))
else:
remaining = ''
return "{0} {1}\n{2}{3} {4}".format(self._message,
self.duration_str,
" " * indent,
self._progress_str(30),
remaining)
def to_json(self):
# type: () -> Dict[str, Any]
return {
"id": self.id,
"message": self.message,
"duration": self.duration_str,
"refs": self._refs,
"progress": self.progress,
"started_at": self.started_at,
"time_remaining": self.estimated_time_remaining()
}
def estimated_time_remaining(self):
elapsed = time.time() - self.started_at
progress = self.progress
if progress == 0.0:
return None
return int(elapsed * (1 - progress) / progress)
class GhostEvent(Event):
    """
    The ghost of a completed event: these are the fields that we persist
    after the event is complete.
    """

    def __init__(self, my_id, message, refs, add_to_ceph_s, started_at, finished_at=None,
                 failed=False, failure_message=None):
        super().__init__(my_id, message, refs, add_to_ceph_s, started_at)
        self.finished_at = finished_at if finished_at else time.time()

        if failed:
            self._failed = True
            self._failure_message = failure_message
        else:
            self._failed = False
            # Always define the attribute so _failure_message can never raise
            # AttributeError if _failed is toggled later.
            self._failure_message = None

    @property
    def progress(self):
        # A persisted event is, by definition, complete.
        return 1.0

    @property
    def failed(self):
        return self._failed

    @property
    def failure_message(self):
        return self._failure_message if self._failed else None

    def to_json(self):
        """Serialize for persistence; failure fields included only when failed."""
        d = {
            "id": self.id,
            "message": self.message,
            "refs": self._refs,
            "started_at": self.started_at,
            "finished_at": self.finished_at,
            # BUG FIX: key used to be "add_to_ceph_s:" (stray trailing colon).
            # NOTE(review): confirm any reader of the persisted form accepts
            # the corrected key.
            "add_to_ceph_s": self.add_to_ceph_s
        }
        if self._failed:
            d["failed"] = True
            d["failure_message"] = self._failure_message
        return d
class GlobalRecoveryEvent(Event):
    """
    An event whose completion is determined by active+clean/total_pg_num
    """

    def __init__(self, message, refs, add_to_ceph_s, start_epoch, active_clean_num):
        # type: (str, List[Any], bool, int, int) -> None
        super().__init__(str(uuid.uuid4()), message, refs, add_to_ceph_s)
        self._add_to_ceph_s = add_to_ceph_s
        self._progress = 0.0
        # Epoch at which recovery started; pgs reported before it are ignored.
        self._start_epoch = start_epoch
        # active+clean count observed at the previous update.
        self._active_clean_num = active_clean_num
        self._refresh()

    def global_event_update_progress(self, log):
        # type: (logging.Logger) -> None
        "Update progress of Global Recovery Event"
        global _module
        assert _module
        skipped_pgs = 0
        active_clean_pgs = _module.get("active_clean_pgs")
        total_pg_num = active_clean_pgs["total_num_pgs"]
        new_active_clean_pgs = active_clean_pgs["pg_stats"]
        new_active_clean_num = len(new_active_clean_pgs)
        # NOTE(review): this loop only counts stale pgs; the trailing
        # `continue` is redundant since nothing follows it in the loop body.
        for pg in new_active_clean_pgs:
            # Disregard PGs that are not being reported
            # if the states are active+clean. Since it is
            # possible that some pgs might not have any movement
            # even before the start of the event.
            if pg['reported_epoch'] < self._start_epoch:
                log.debug("Skipping pg {0} since reported_epoch {1} < start_epoch {2}"
                          .format(pg['pgid'], pg['reported_epoch'], self._start_epoch))
                skipped_pgs += 1
                continue

        if self._active_clean_num != new_active_clean_num:
            # Have this case to know when need to update
            # the progress
            try:
                # Might be that total_pg_num is 0
                self._progress = float(new_active_clean_num) / (total_pg_num - skipped_pgs)
            except ZeroDivisionError:
                self._progress = 0.0
        else:
            # No need to update since there is no change
            return

        log.debug("Updated progress to %s", self.summary())
        self._refresh()

    @property
    def progress(self):
        return self._progress
class RemoteEvent(Event):
    """
    An event published by another mgr module.  We know nothing about its
    internals; the owning module drives it by repeatedly calling the
    setters below as progress information emerges.
    """

    def __init__(self, my_id, message, refs, add_to_ceph_s):
        # type: (str, str, List[str], bool) -> None
        super().__init__(my_id, message, refs, add_to_ceph_s)
        self._progress = 0.0
        self._failed = False
        self._refresh()

    def set_progress(self, progress):
        # type: (float) -> None
        """Record the latest completion fraction and republish the event."""
        self._progress = progress
        self._refresh()

    def set_failed(self, message):
        """Mark the event failed (and therefore complete) with a reason."""
        self._failed = True
        self._failure_message = message
        self._progress = 1.0
        self._refresh()

    def set_message(self, message):
        """Replace the human-readable description and republish."""
        self._message = message
        self._refresh()

    @property
    def progress(self):
        return self._progress

    @property
    def failed(self):
        return self._failed

    @property
    def failure_message(self):
        return self._failure_message if self._failed else None
class PgRecoveryEvent(Event):
    """
    An event whose completion is determined by the recovery of a set of
    PGs to a healthy state.

    Always call update() immediately after construction.
    """

    def __init__(self, message, refs, which_pgs, which_osds, start_epoch, add_to_ceph_s):
        # type: (str, List[Any], List[PgId], List[str], int, bool) -> None
        super().__init__(str(uuid.uuid4()), message, refs, add_to_ceph_s)
        # PGs still being tracked; shrinks as they reach active+clean.
        self._pgs = which_pgs
        self._which_osds = which_osds
        # Denominator for progress: how many PGs we started with.
        self._original_pg_count = len(self._pgs)
        # Baseline of bytes already recovered per PG, captured lazily on the
        # first pg_update() call so later deltas measure only this event's work.
        self._original_bytes_recovered = None  # type: Optional[Dict[PgId, float]]
        self._progress = 0.0
        # Ignore PG reports older than this OSDMap epoch.
        self._start_epoch = start_epoch
        self._refresh()

    @property
    def which_osds(self):
        return self. _which_osds

    def pg_update(self, pg_progress: Dict, log: Any) -> None:
        # FIXME: O(pg_num) in python
        # Sanity check to see if there are any missing PGs and to assign
        # empty array and dictionary if there hasn't been any recovery
        pg_to_state: Dict[str, Any] = pg_progress["pgs"]
        pg_ready: bool = pg_progress["pg_ready"]
        if self._original_bytes_recovered is None:
            # First update: capture the recovered-bytes baseline and drop
            # any PGs that are no longer being reported (once pg stats are
            # ready, a missing PG will never appear again).
            self._original_bytes_recovered = {}
            missing_pgs = []
            for pg in self._pgs:
                pg_str = str(pg)
                if pg_str in pg_to_state:
                    self._original_bytes_recovered[pg] = \
                        pg_to_state[pg_str]['num_bytes_recovered']
                else:
                    missing_pgs.append(pg)
            if pg_ready:
                for pg in missing_pgs:
                    self._pgs.remove(pg)

        complete_accumulate = 0.0

        # Calculating progress as the number of PGs recovered divided by the
        # original where partially completed PGs count for something
        # between 0.0-1.0. This is perhaps less faithful than looking at the
        # total number of bytes recovered, but it does a better job of
        # representing the work still to do if there are a number of very
        # few-bytes PGs that still need the housekeeping of their recovery
        # to be done. This is subjective...
        complete = set()
        for pg in self._pgs:
            pg_str = str(pg)
            try:
                info = pg_to_state[pg_str]
            except KeyError:
                # The PG is gone!  Probably a pool was deleted. Drop it.
                complete.add(pg)
                continue
            # Only checks the state of each PGs when it's epoch >= the OSDMap's epoch
            if info['reported_epoch'] < self._start_epoch:
                continue

            state = info['state']

            states = state.split("+")

            if "active" in states and "clean" in states:
                complete.add(pg)
            else:
                if info['num_bytes'] == 0:
                    # Empty PGs are considered 0% done until they are
                    # in the correct state.
                    pass
                else:
                    recovered = info['num_bytes_recovered']
                    total_bytes = info['num_bytes']
                    if total_bytes > 0:
                        # Fraction recovered since this event's baseline.
                        ratio = float(recovered -
                                      self._original_bytes_recovered[pg]) / \
                            total_bytes

                        # Since the recovered bytes (over time) could perhaps
                        # exceed the contents of the PG (moment in time), we
                        # must clamp this
                        ratio = min(ratio, 1.0)
                        ratio = max(ratio, 0.0)

                    else:
                        # Dataless PGs (e.g. containing only OMAPs) count
                        # as half done.
                        ratio = 0.5

                    complete_accumulate += ratio

        # Remove the finished PGs from the tracked set (symmetric difference
        # with ``complete``, which is always a subset of ``self._pgs``).
        self._pgs = list(set(self._pgs) ^ complete)
        completed_pgs = self._original_pg_count - len(self._pgs)
        # Guard against going negative if the PG set changed underneath us.
        completed_pgs = max(completed_pgs, 0)
        try:
            prog = (completed_pgs + complete_accumulate)\
                / self._original_pg_count
        except ZeroDivisionError:
            prog = 0.0
        # Clamp to [0, 1] before publishing.
        self._progress = min(max(prog, 0.0), 1.0)

        self._refresh()
        log.info("Updated progress to %s", self.summary())

    @property
    def progress(self):
        # type: () -> float
        return self._progress
class PgId(object):
    """Identifier for a placement group, rendered as ``<pool_id>.<ps-hex>``."""

    def __init__(self, pool_id, ps):
        # type: (int, int) -> None
        # NOTE(review): callers pass the integer pool id from the OSDMap
        # dump, although the old annotation claimed str.
        self.pool_id = pool_id
        self.ps = ps

    # ``__cmp__`` was a Python 2 relic that Python 3 never calls (and it
    # returned an equality bool, which was wrong for __cmp__ anyway).
    # Provide a consistent __eq__/__hash__ pair instead, since PgId objects
    # are used in sets and as dict keys by PgRecoveryEvent.
    def __eq__(self, other):
        return (self.pool_id, self.ps) == (other.pool_id, other.ps)

    def __hash__(self):
        return hash((self.pool_id, self.ps))

    def __lt__(self, other):
        return (self.pool_id, self.ps) < (other.pool_id, other.ps)

    def __str__(self):
        return "{0}.{1:x}".format(self.pool_id, self.ps)
class Module(MgrModule):
    """Track long-running operations (PG recovery, events published by other
    mgr modules) and surface them via ``ceph progress`` and ``ceph -s``."""

    COMMANDS = [
        {"cmd": "progress",
         "desc": "Show progress of recovery operations",
         "perm": "r"},
        {"cmd": "progress json",
         "desc": "Show machine readable progress information",
         "perm": "r"},
        {"cmd": "progress clear",
         "desc": "Reset progress tracking",
         "perm": "rw"},
        {"cmd": "progress on",
         "desc": "Enable progress tracking",
         "perm": "rw"},
        {"cmd": "progress off",
         "desc": "Disable progress tracking",
         "perm": "rw"}
    ]

    MODULE_OPTIONS = [
        Option(
            'max_completed_events',
            default=50,
            type='int',
            desc='number of past completed events to remember',
            runtime=True
        ),
        Option(
            'sleep_interval',
            default=5,
            type='secs',
            desc='how long the module is going to sleep',
            runtime=True
        ),
        Option(
            'enabled',
            default=True,
            type='bool',
        ),
        Option(
            'allow_pg_recovery_event',
            default=False,
            type='bool',
            desc='allow the module to show pg recovery progress',
            runtime=True
        )
    ]

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)

        # Live events keyed by event id.
        self._events = {}  # type: Dict[str, Union[RemoteEvent, PgRecoveryEvent, GlobalRecoveryEvent]]
        # Finished events we keep around (bounded by max_completed_events).
        self._completed_events = []  # type: List[GhostEvent]

        self._old_osd_map = None  # type: Optional[OSDMap]

        self._ready = threading.Event()
        self._shutdown = threading.Event()

        self._latest_osdmap = None  # type: Optional[OSDMap]

        # Set when completed events changed and must be written back to the
        # KV store by the serve() loop.
        self._dirty = False

        global _module
        _module = self

        # only for mypy
        if TYPE_CHECKING:
            self.max_completed_events = 0
            self.sleep_interval = 0
            self.enabled = True
            self.allow_pg_recovery_event = False

    def config_notify(self):
        """Re-read all MODULE_OPTIONS into attributes of the same name."""
        for opt in self.MODULE_OPTIONS:
            setattr(self,
                    opt['name'],
                    self.get_module_option(opt['name']))
            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))

    def _osd_in_out(self, old_map, old_dump, new_map, osd_id, marked):
        # type: (OSDMap, Dict, OSDMap, str, str) -> None
        # A function that will create or complete an event when an
        # OSD is marked in or out according to the affected PGs
        affected_pgs = []
        for pool in old_dump['pools']:
            pool_id = pool['pool']  # type: str
            for ps in range(0, pool['pg_num']):

                # Was this OSD affected by the OSD coming in/out?
                # Compare old and new osds using
                # data from the json dump
                old_up_acting = old_map.pg_to_up_acting_osds(pool['pool'], ps)
                old_osds = set(old_up_acting['acting'])

                new_up_acting = new_map.pg_to_up_acting_osds(pool['pool'], ps)
                new_osds = set(new_up_acting['acting'])

                # Check the osd_id being in the acting set for both old
                # and new maps to cover both out and in cases
                was_on_out_or_in_osd = osd_id in old_osds or osd_id in new_osds
                if not was_on_out_or_in_osd:
                    continue

                self.log.debug("pool_id, ps = {0}, {1}".format(
                    pool_id, ps
                ))

                self.log.debug(
                    "old_up_acting: {0}".format(json.dumps(old_up_acting, indent=4, sort_keys=True)))

                # Has this OSD been assigned a new location?
                # (it might not be if there is no suitable place to move
                #  after an OSD is marked in/out)
                is_relocated = old_osds != new_osds

                self.log.debug(
                    "new_up_acting: {0}".format(json.dumps(new_up_acting,
                                                           indent=4,
                                                           sort_keys=True)))

                if was_on_out_or_in_osd and is_relocated:
                    # This PG is now in motion, track its progress
                    affected_pgs.append(PgId(pool_id, ps))

        # In the case that we ignored some PGs, log the reason why (we may
        # not end up creating a progress event)
        self.log.warning("{0} PGs affected by osd.{1} being marked {2}".format(
            len(affected_pgs), osd_id, marked))

        # In the case of the osd coming back in, we might need to cancel
        # previous recovery event for that osd
        if marked == "in":
            for ev_id in list(self._events):
                try:
                    ev = self._events[ev_id]
                    if isinstance(ev, PgRecoveryEvent) and osd_id in ev.which_osds:
                        self.log.info("osd.{0} came back in, cancelling event".format(
                            osd_id
                        ))
                        self._complete(ev)
                except KeyError:
                    self.log.warning("_osd_in_out: ev {0} does not exist".format(ev_id))

        if len(affected_pgs) > 0:
            r_ev = PgRecoveryEvent(
                "Rebalancing after osd.{0} marked {1}".format(osd_id, marked),
                refs=[("osd", osd_id)],
                which_pgs=affected_pgs,
                which_osds=[osd_id],
                start_epoch=self.get_osdmap().get_epoch(),
                add_to_ceph_s=False
            )
            r_ev.pg_update(self.get("pg_progress"), self.log)
            self._events[r_ev.id] = r_ev

    def _osdmap_changed(self, old_osdmap, new_osdmap):
        # type: (OSDMap, OSDMap) -> None
        """Detect OSDs whose 'in' weight flipped between the two maps and
        create the corresponding in/out recovery events."""
        old_dump = old_osdmap.dump()
        new_dump = new_osdmap.dump()

        old_osds = dict([(o['osd'], o) for o in old_dump['osds']])

        for osd in new_dump['osds']:
            osd_id = osd['osd']
            new_weight = osd['in']
            if osd_id in old_osds:
                old_weight = old_osds[osd_id]['in']

                if new_weight == 0.0 and old_weight > new_weight:
                    self.log.warning("osd.{0} marked out".format(osd_id))
                    self._osd_in_out(old_osdmap, old_dump, new_osdmap, osd_id, "out")
                elif new_weight >= 1.0 and old_weight == 0.0:
                    # Only consider weight>=1.0 as "in" to avoid spawning
                    # individual recovery events on every adjustment
                    # in a gradual weight-in
                    self.log.warning("osd.{0} marked in".format(osd_id))
                    self._osd_in_out(old_osdmap, old_dump, new_osdmap, osd_id, "in")

    def _pg_state_changed(self):
        # This function both constructs and updates
        # the global recovery event if one of the
        # PGs is not at active+clean state
        active_clean_pgs = self.get("active_clean_pgs")
        total_pg_num = active_clean_pgs["total_num_pgs"]
        active_clean_num = len(active_clean_pgs["pg_stats"])
        try:
            # There might be a case where there is no pg_num
            progress = float(active_clean_num) / total_pg_num
        except ZeroDivisionError:
            return
        if progress < 1.0:
            self.log.warning(("Starting Global Recovery Event,"
                              "%d pgs not in active + clean state"),
                             total_pg_num - active_clean_num)
            ev = GlobalRecoveryEvent("Global Recovery Event",
                                     refs=[("global", "")],
                                     add_to_ceph_s=True,
                                     start_epoch=self.get_osdmap().get_epoch(),
                                     active_clean_num=active_clean_num)
            ev.global_event_update_progress(self.log)
            self._events[ev.id] = ev

    def _process_osdmap(self):
        """Swap in the newest OSDMap and diff it against the previous one."""
        old_osdmap = self._latest_osdmap
        self._latest_osdmap = self.get_osdmap()
        assert old_osdmap
        assert self._latest_osdmap

        self.log.info(("Processing OSDMap change %d..%d"),
                      old_osdmap.get_epoch(), self._latest_osdmap.get_epoch())
        self._osdmap_changed(old_osdmap, self._latest_osdmap)

    def _process_pg_summary(self):
        # if there are no events we will skip this here to avoid
        # expensive get calls
        if len(self._events) == 0:
            return

        global_event = False
        data = self.get("pg_progress")
        for ev_id in list(self._events):
            try:
                ev = self._events[ev_id]
                # Check for types of events
                # we have to update
                if isinstance(ev, PgRecoveryEvent):
                    ev.pg_update(data, self.log)
                    self.maybe_complete(ev)
                elif isinstance(ev, GlobalRecoveryEvent):
                    global_event = True
                    ev.global_event_update_progress(self.log)
                    self.maybe_complete(ev)
            except KeyError:
                self.log.warning("_process_pg_summary: ev {0} does not exist".format(ev_id))
                continue

        if not global_event:
            # If there is no global event
            # we create one
            self._pg_state_changed()

    def maybe_complete(self, event):
        # type: (Event) -> None
        """Complete *event* once it reports full progress."""
        if event.progress >= 1.0:
            self._complete(event)

    def _save(self):
        """Persist the completed-event list to the mon KV store."""
        self.log.info("Writing back {0} completed events".format(
            len(self._completed_events)
        ))
        # TODO: bound the number we store.
        encoded = json.dumps({
            "events": [ev.to_json() for ev in self._completed_events],
            "version": ENCODING_VERSION,
            "compat_version": ENCODING_VERSION
        })
        self.set_store("completed", encoded)

    def _load(self):
        """Load previously completed events from the mon KV store."""
        stored = self.get_store("completed")

        if stored is None:
            self.log.info("No stored events to load")
            return

        decoded = json.loads(stored)
        if decoded['compat_version'] > ENCODING_VERSION:
            raise RuntimeError("Cannot decode version {0}".format(
                decoded['compat_version']))

        if decoded['compat_version'] < ENCODING_VERSION:
            # we need to add the "started_at" and "finished_at" attributes to the events
            for ev in decoded['events']:
                ev['started_at'] = None
                ev['finished_at'] = None

        for ev in decoded['events']:
            # BUG FIX: the arguments were previously shifted by one because
            # ``add_to_ceph_s`` was omitted, so ``started_at`` landed in the
            # add_to_ceph_s slot, ``finished_at`` in started_at, and so on.
            # Accept both the correct key and the historical typo'd key
            # ("add_to_ceph_s:") written by older versions of to_json().
            add_to_ceph_s = ev.get('add_to_ceph_s',
                                   ev.get('add_to_ceph_s:', False))
            self._completed_events.append(GhostEvent(ev['id'], ev['message'],
                                                     ev['refs'],
                                                     add_to_ceph_s,
                                                     ev['started_at'],
                                                     ev['finished_at'],
                                                     ev.get('failed', False),
                                                     ev.get('failure_message')))

        self._prune_completed_events()

    def _prune_completed_events(self):
        """Trim the completed-event list down to max_completed_events."""
        length = len(self._completed_events)
        if length > self.max_completed_events:
            self._completed_events = self._completed_events[length - self.max_completed_events : length]
            self._dirty = True

    def serve(self):
        """Main loop: load state, then periodically process OSDMap changes
        and PG summaries until shutdown."""
        self.config_notify()
        self.clear_all_progress_events()
        self.log.info("Loading...")

        self._load()
        # BUG FIX: log the count, not the repr of the whole list.
        self.log.info("Loaded {0} historic events".format(len(self._completed_events)))

        self._latest_osdmap = self.get_osdmap()
        self.log.info("Loaded OSDMap, ready.")

        self._ready.set()

        while not self._shutdown.is_set():
            # Lazy periodic write back of completed events
            if self._dirty:
                self._save()
                self._dirty = False

            if self.enabled:
                if self.allow_pg_recovery_event:
                    self._process_osdmap()
                self._process_pg_summary()

            self._shutdown.wait(timeout=self.sleep_interval)

        self._shutdown.wait()

    def shutdown(self):
        self._shutdown.set()
        self.clear_all_progress_events()

    def update(self, ev_id, ev_msg, ev_progress, refs=None, add_to_ceph_s=False):
        # type: (str, str, float, Optional[list], bool) -> None
        """
        For calling from other mgr modules
        """
        if not self.enabled:
            return
        if refs is None:
            refs = []
        try:
            ev = self._events[ev_id]
            assert isinstance(ev, RemoteEvent)
        except KeyError:
            # if key doesn't exist we create an event
            ev = RemoteEvent(ev_id, ev_msg, refs, add_to_ceph_s)
            self._events[ev_id] = ev
            self.log.info("update: starting ev {0} ({1})".format(
                ev_id, ev_msg))
        else:
            self.log.debug("update: {0} on {1}".format(
                ev_progress, ev_msg))

        ev.set_progress(ev_progress)
        ev.set_message(ev_msg)

    def _complete(self, ev):
        # type: (Event) -> None
        """Move a live event to the completed list and persist it lazily."""
        duration = (time.time() - ev.started_at)
        self.log.info("Completed event {0} ({1}) in {2} seconds".format(
            ev.id, ev.message, int(round(duration))
        ))
        self.complete_progress_event(ev.id)

        self._completed_events.append(
            GhostEvent(ev.id, ev.message, ev.refs, ev.add_to_ceph_s, ev.started_at,
                       failed=ev.failed, failure_message=ev.failure_message))
        assert ev.id
        del self._events[ev.id]
        self._prune_completed_events()
        self._dirty = True

    def complete(self, ev_id):
        """
        For calling from other mgr modules
        """
        if not self.enabled:
            return
        try:
            ev = self._events[ev_id]
            assert isinstance(ev, RemoteEvent)
            ev.set_progress(1.0)
            self.log.info("complete: finished ev {0} ({1})".format(ev_id,
                                                                   ev.message))
            self._complete(ev)
        except KeyError:
            self.log.warning("complete: ev {0} does not exist".format(ev_id))

    def fail(self, ev_id, message):
        """
        For calling from other mgr modules to mark an event as failed (and
        complete)
        """
        try:
            ev = self._events[ev_id]
            assert isinstance(ev, RemoteEvent)
            ev.set_failed(message)
            self.log.info("fail: finished ev {0} ({1}): {2}".format(ev_id,
                                                                    ev.message,
                                                                    message))
            self._complete(ev)
        except KeyError:
            self.log.warning("fail: ev {0} does not exist".format(ev_id))

    def on(self):
        self.set_module_option('enabled', "true")

    def off(self):
        self.set_module_option('enabled', "false")

    def _handle_ls(self):
        """Render live and completed events for the plain ``progress`` command."""
        if len(self._events) or len(self._completed_events):
            out = ""
            chrono_order = sorted(self._events.values(),
                                  key=lambda x: x.started_at, reverse=True)
            for ev in chrono_order:
                out += ev.twoline_progress()
                out += "\n"

            if len(self._completed_events):
                # TODO: limit number of completed events to show
                out += "\n"
                for ghost_ev in self._completed_events:
                    out += "[{0}]: {1}\n".format("Complete" if not ghost_ev.failed else "Failed",
                                                 ghost_ev.twoline_progress())

            return 0, out, ""
        else:
            return 0, "", "Nothing in progress"

    def _json(self):
        return {
            'events': [ev.to_json() for ev in self._events.values()],
            'completed': [ev.to_json() for ev in self._completed_events]
        }

    def clear(self):
        """Drop all live and completed events and wipe the stored state."""
        self._events = {}
        self._completed_events = []
        self._dirty = True
        self._save()
        self.clear_all_progress_events()

    def _handle_clear(self):
        self.clear()
        return 0, "", ""

    def handle_command(self, _, cmd):
        """Dispatch ``progress ...`` CLI commands."""
        if cmd['prefix'] == "progress":
            return self._handle_ls()
        elif cmd['prefix'] == "progress clear":
            # The clear command isn't usually needed - it's to enable
            # the admin to "kick" this module if it seems to have done
            # something wrong (e.g. we have a bug causing a progress event
            # that never finishes)
            return self._handle_clear()
        elif cmd['prefix'] == "progress json":
            return 0, json.dumps(self._json(), indent=4, sort_keys=True), ""
        elif cmd['prefix'] == "progress on":
            if self.enabled:
                return 0, "", "progress already enabled!"
            self.on()
            return 0, "", "progress enabled"
        elif cmd['prefix'] == "progress off":
            if not self.enabled:
                return 0, "", "progress already disabled!"
            self.off()
            self.clear()
            return 0, "", "progress disabled"
        else:
            raise NotImplementedError(cmd['prefix'])
| 30,853 | 33.942242 | 104 | py |
null | ceph-main/src/pybind/mgr/progress/test_progress.py | #python unit test
import unittest
import os
import sys
from tests import mock

import pytest
import json

# Tell the ceph python bindings we are running under unit tests (avoids
# requiring a live cluster) and make the mgr plugin packages importable.
os.environ['UNITTEST'] = "1"
sys.path.insert(0, "../../pybind/mgr")
from progress import module
class TestPgRecoveryEvent(object):
    # Exercises module.PgRecoveryEvent in isolation.
    def setup_method(self):
        # Mock the module-level singleton so Event._refresh() is a no-op,
        # then build an event tracking pgs 1.0, 1.1 and 1.2 on osd.0.
        module._module = mock.Mock()  # just so Event._refresh() works
        pgs = [module.PgId(1, i) for i in range(3)]
        self.test_event = module.PgRecoveryEvent(None, None, pgs, [0], 30, False)

    def test_pg_update(self):
        # All three PGs report active+clean with all bytes recovered, so the
        # event must land at exactly 100% progress.
        clean_pg = {
            "state": "active+clean",
            "num_bytes": 10,
            "num_bytes_recovered": 10,
            "reported_epoch": 30,
        }
        pg_progress = {
            "pgs": {"1.{0}".format(i): dict(clean_pg) for i in range(3)},
            "pg_ready": True,
        }
        self.test_event.pg_update(pg_progress, mock.Mock())
        assert self.test_event._progress == 1.0
class OSDMap:
    # An artificial stand-in for mgr_module's OSDMap wrapper: it carries a
    # pre-canned osd dump dict plus pg stats and answers the handful of
    # queries the progress module makes.  Some of the functions are copied
    # from mgr_module.
    def __init__(self, dump, pg_stats):
        self._dump = dump
        self._pg_stats = pg_stats

    def _pg_to_up_acting_osds(self, pool_id, ps):
        pg_id = str(pool_id) + "." + str(ps)
        for pg in self._pg_stats["pg_stats"]:
            if pg["pg_id"] == pg_id:
                return {
                    "up_primary": pg["up_primary"],
                    "acting_primary": pg["acting_primary"],
                    "up": pg["up"],
                    "acting": pg["acting"]
                }
        # BUG FIX: an unknown pg id previously fell off the end of the loop
        # and raised UnboundLocalError on ``ret``; return an empty mapping.
        return {}

    def dump(self):
        return self._dump

    def get_pools(self):
        # BUG FIX: ``self._dump`` is a dict, not a callable; the old code
        # did ``self._dump()`` which raised TypeError whenever called.
        d = self.dump()
        return dict([(p['pool'], p) for p in d['pools']])

    def get_pools_by_name(self):
        d = self.dump()
        return dict([(p['pool_name'], p) for p in d['pools']])

    def pg_to_up_acting_osds(self, pool_id, ps):
        return self._pg_to_up_acting_osds(pool_id, ps)
class TestModule(object):
    # Testing Module Class
    def setup_method(self):
        # Creating the class and Mocking a
        # bunch of attributes for testing
        module.PgRecoveryEvent.pg_update = mock.Mock()
        module.Module._ceph_get_option = mock.Mock()  # .__init__
        module.Module._configure_logging = lambda *args: ...  # .__init__
        # so we can see if an event gets created
        self.test_module = module.Module('module_name', 0, 0)
        self.test_module.get = mock.Mock()  # so we can call pg_update
        # we want just to see if this event gets called
        self.test_module._complete = mock.Mock()
        # so that self.get_osdmap().get_epoch() works
        self.test_module.get_osdmap = mock.Mock()
        module._module = mock.Mock()  # so that Event.refresh() works

    def test_osd_in_out(self):
        # test for the correct event being
        # triggered and completed.
        # Before: pg 1.0 acts on osds [3, 0]; after: [0, 2] — i.e. osd.3
        # left the acting set, which should spawn one recovery event.
        old_pg_stats = {
            "pg_stats":[
                {
                "pg_id": "1.0",
                "up_primary": 3,
                "acting_primary": 3,
                "up": [
                    3,
                    0
                    ],
                "acting": [
                    3,
                    0
                    ]

                },

                ]
            }
        new_pg_stats = {
            "pg_stats":[
                {
                "pg_id": "1.0",
                "up_primary": 0,
                "acting_primary": 0,
                "up": [
                    0,
                    2
                    ],
                "acting": [
                    0,
                    2
                    ]
                },
                ]
            }

        old_dump ={
            "pools": [
                {
                    "pool": 1,
                    "pg_num": 1
                    }
                ]
            }

        new_dump = {
                "pools": [
                    {
                        "pool": 1,
                        "pg_num": 1
                        }
                    ]
                }

        new_map = OSDMap(new_dump, new_pg_stats)
        old_map = OSDMap(old_dump, old_pg_stats)
        self.test_module._osd_in_out(old_map, old_dump, new_map, 3, "out")
        # check if only one event is created
        assert len(self.test_module._events) == 1
        self.test_module._osd_in_out(old_map, old_dump, new_map, 3, "in")
        # check if complete function is called
        assert self.test_module._complete.call_count == 1
        # check if a PgRecovery Event was created and pg_update gets triggered
        assert module.PgRecoveryEvent.pg_update.call_count == 2
| 5,364 | 29.657143 | 114 | py |
null | ceph-main/src/pybind/mgr/prometheus/__init__.py | # flake8: noqa
from .module import Module, StandbyModule
| 57 | 18.333333 | 41 | py |
null | ceph-main/src/pybind/mgr/prometheus/module.py | import cherrypy
import yaml
from collections import defaultdict
from pkg_resources import packaging # type: ignore
import json
import math
import os
import re
import threading
import time
import enum
from collections import namedtuple
from mgr_module import CLIReadCommand, MgrModule, MgrStandbyModule, PG_STATES, Option, ServiceInfoT, HandleCommandResult, CLIWriteCommand
from mgr_util import get_default_addr, profile_method, build_url
from rbd import RBD
from typing import DefaultDict, Optional, Dict, Any, Set, cast, Tuple, Union, List, Callable
# Type aliases used throughout the exporter: a metric sample is keyed by
# the tuple of its label values and maps to a numeric sample value.
LabelValues = Tuple[str, ...]
Number = Union[int, float]
MetricValue = Dict[LabelValues, Number]

# Defaults for the Prometheus HTTP server.  Can also set in config-key
# see https://github.com/prometheus/prometheus/wiki/Default-port-allocations
# for Prometheus exporter port registry

DEFAULT_PORT = 9283
# that the ports its listening on are in fact bound. When using the any address
# "::" it tries both ipv4 and ipv6, and in some environments (e.g. kubernetes)
# ipv6 isn't yet configured / supported and CherryPy throws an uncaught
# exception.
if cherrypy is not None:
Version = packaging.version.Version
v = Version(cherrypy.__version__)
# the issue was fixed in 3.2.3. it's present in 3.2.2 (current version on
# centos:7) and back to at least 3.0.0.
if Version("3.1.2") <= v < Version("3.2.3"):
# https://github.com/cherrypy/cherrypy/issues/1100
from cherrypy.process import servers
servers.wait_for_occupied_port = lambda host, port: None
# cherrypy likes to sys.exit on error. don't let it take us down too!
def os_exit_noop(status: int) -> None:
pass
os._exit = os_exit_noop # type: ignore
# to access things in class Module from subclass Root. Because
# it's a dict, the writer doesn't need to declare 'global' for access
_global_instance = None # type: Optional[Module]
cherrypy.config.update({
'response.headers.server': 'Ceph-Prometheus'
})
def health_status_to_number(status: str) -> int:
    """Map a Ceph health status string to its numeric severity.

    HEALTH_OK -> 0, HEALTH_WARN -> 1, HEALTH_ERR -> 2; anything else
    raises ValueError.
    """
    mapping = {
        'HEALTH_OK': 0,
        'HEALTH_WARN': 1,
        'HEALTH_ERR': 2,
    }
    if status in mapping:
        return mapping[status]
    raise ValueError(f'unknown status "{status}"')
# Cluster-wide ``df`` fields exported as gauges.
DF_CLUSTER = ['total_bytes', 'total_used_bytes', 'total_used_raw_bytes']

OSD_BLOCKLIST = ['osd_blocklist_count']

# Per-pool ``df`` fields exported as gauges.
DF_POOL = ['max_avail', 'avail_raw', 'stored', 'stored_raw', 'objects', 'dirty',
           'quota_bytes', 'quota_objects', 'rd', 'rd_bytes', 'wr', 'wr_bytes',
           'compress_bytes_used', 'compress_under_bytes', 'bytes_used', 'percent_used']

# NOTE(review): 'num_bytes_recovered' appears twice in this tuple — looks
# like a copy/paste slip; harmless but worth confirming upstream.
OSD_POOL_STATS = ('recovering_objects_per_sec', 'recovering_bytes_per_sec',
                  'recovering_keys_per_sec', 'num_objects_recovered',
                  'num_bytes_recovered', 'num_bytes_recovered')

# OSDMap flags exported as 0/1 gauges.
OSD_FLAGS = ('noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norebalance',
             'norecover', 'noscrub', 'nodeep-scrub')

# Label sets for the various *_metadata series.
FS_METADATA = ('data_pools', 'fs_id', 'metadata_pool', 'name')

MDS_METADATA = ('ceph_daemon', 'fs_id', 'hostname', 'public_addr', 'rank',
                'ceph_version')

MON_METADATA = ('ceph_daemon', 'hostname',
                'public_addr', 'rank', 'ceph_version')

MGR_METADATA = ('ceph_daemon', 'hostname', 'ceph_version')

MGR_STATUS = ('ceph_daemon',)

MGR_MODULE_STATUS = ('name',)

MGR_MODULE_CAN_RUN = ('name',)

OSD_METADATA = ('back_iface', 'ceph_daemon', 'cluster_addr', 'device_class',
                'front_iface', 'hostname', 'objectstore', 'public_addr',
                'ceph_version')

OSD_STATUS = ['weight', 'up', 'in']

OSD_STATS = ['apply_latency_ms', 'commit_latency_ms']

POOL_METADATA = ('pool_id', 'name', 'type', 'description', 'compression_mode')

RGW_METADATA = ('ceph_daemon', 'hostname', 'ceph_version', 'instance_id')

RBD_MIRROR_METADATA = ('ceph_daemon', 'id', 'instance_id', 'hostname',
                       'ceph_version')

# Labels relating each OSD daemon to the physical devices backing it.
DISK_OCCUPATION = ('ceph_daemon', 'device', 'db_device',
                   'wal_device', 'instance', 'devices', 'device_ids')

NUM_OBJECTS = ['degraded', 'misplaced', 'unfound']

# Healthchecks that get their own dedicated metric.
alert_metric = namedtuple('alert_metric', 'name description')
HEALTH_CHECKS = [
    alert_metric('SLOW_OPS', 'OSD or Monitor requests taking a long time to process'),
]

HEALTHCHECK_DETAIL = ('name', 'severity')
class Severity(enum.Enum):
    """Health severities as reported by Ceph's health checks."""
    ok = "HEALTH_OK"
    warn = "HEALTH_WARN"
    error = "HEALTH_ERR"
class Format(enum.Enum):
    """Output formats accepted by the CLI commands of this module."""
    plain = 'plain'
    json = 'json'
    json_pretty = 'json-pretty'
    yaml = 'yaml'
class HealthCheckEvent:
    """A single tracked healthcheck occurrence and its lifetime metadata."""

    def __init__(self, name: str, severity: Severity, first_seen: float, last_seen: float, count: int, active: bool = True):
        # Assignment order matters: as_dict() exposes __dict__ directly, so
        # the insertion order here is the order callers will see.
        self.name, self.severity = name, severity
        self.first_seen, self.last_seen = first_seen, last_seen
        self.count, self.active = count, active

    def as_dict(self) -> Dict[str, Any]:
        """Return the instance as a dictionary."""
        return self.__dict__
class HealthHistory:
    """Persist a history of observed health checks in the mon KV store so the
    'when was this alert last seen' data survives mgr restarts/failovers."""

    kv_name = 'health_history'
    titles = "{healthcheck_name:<24} {first_seen:<20} {last_seen:<20} {count:>5} {active:^6}"
    date_format = "%Y/%m/%d %H:%M:%S"

    def __init__(self, mgr: MgrModule):
        self.mgr = mgr
        # Guards concurrent mutation of self.healthcheck and the KV store.
        self.lock = threading.Lock()
        self.healthcheck: Dict[str, HealthCheckEvent] = {}
        self._load()

    def _load(self) -> None:
        """Load the current state from the mons KV store."""
        data = self.mgr.get_store(self.kv_name)
        if data:
            try:
                healthcheck_data = json.loads(data)
            except json.JSONDecodeError:
                # BUG FIX: use .warning — .warn is the deprecated logging alias.
                self.mgr.log.warning(
                    f"INVALID data read from mgr/prometheus/{self.kv_name}. Resetting")
                self.reset()
                return
            else:
                for k, v in healthcheck_data.items():
                    self.healthcheck[k] = HealthCheckEvent(
                        name=k,
                        severity=v.get('severity'),
                        first_seen=v.get('first_seen', 0),
                        last_seen=v.get('last_seen', 0),
                        count=v.get('count', 1),
                        active=v.get('active', True))
        else:
            self.reset()

    def reset(self) -> None:
        """Reset the healthcheck history."""
        with self.lock:
            self.mgr.set_store(self.kv_name, "{}")
            self.healthcheck = {}

    def save(self) -> None:
        """Save the current in-memory healthcheck history to the KV store."""
        with self.lock:
            self.mgr.set_store(self.kv_name, self.as_json())

    def check(self, health_checks: Dict[str, Any]) -> None:
        """Look at the current health checks and compare existing the history.

        Args:
            health_checks (Dict[str, Any]): current health check data
        """

        current_checks = health_checks.get('checks', {})
        changes_made = False

        # first turn off any active states we're tracking
        for seen_check in self.healthcheck:
            check = self.healthcheck[seen_check]
            if check.active and seen_check not in current_checks:
                check.active = False
                changes_made = True

        # now look for any additions to track
        now = time.time()
        for name, info in current_checks.items():
            if name not in self.healthcheck:
                # this healthcheck is new, so start tracking it
                changes_made = True
                self.healthcheck[name] = HealthCheckEvent(
                    name=name,
                    severity=info.get('severity'),
                    first_seen=now,
                    last_seen=now,
                    count=1,
                    active=True
                )
            else:
                # seen it before, so update its metadata
                check = self.healthcheck[name]
                if check.active:
                    # check has been registered as active already, so skip
                    continue
                else:
                    check.last_seen = now
                    check.count += 1
                    check.active = True
                    changes_made = True

        if changes_made:
            self.save()

    def __str__(self) -> str:
        """Print the healthcheck history.

        Returns:
            str: Human readable representation of the healthcheck history
        """
        out = []

        if len(self.healthcheck.keys()) == 0:
            out.append("No healthchecks have been recorded")
        else:
            out.append(self.titles.format(
                healthcheck_name="Healthcheck Name",
                first_seen="First Seen (UTC)",
                last_seen="Last seen (UTC)",
                count="Count",
                active="Active")
            )
            for k in sorted(self.healthcheck.keys()):
                check = self.healthcheck[k]
                out.append(self.titles.format(
                    healthcheck_name=check.name,
                    first_seen=time.strftime(self.date_format, time.localtime(check.first_seen)),
                    last_seen=time.strftime(self.date_format, time.localtime(check.last_seen)),
                    count=check.count,
                    active="Yes" if check.active else "No")
                )
            out.extend([f"{len(self.healthcheck)} health check(s) listed", ""])

        return "\n".join(out)

    def as_dict(self) -> Dict[str, Any]:
        """Return the history in a dictionary.

        Returns:
            Dict[str, Any]: dictionary indexed by the healthcheck name
        """
        return {name: self.healthcheck[name].as_dict() for name in self.healthcheck}

    def as_json(self, pretty: bool = False) -> str:
        """Return the healthcheck history object as a dict (JSON).

        Args:
            pretty (bool, optional): whether to json pretty print the history. Defaults to False.

        Returns:
            str: str representation of the healthcheck in JSON format
        """
        if pretty:
            return json.dumps(self.as_dict(), indent=2)
        else:
            return json.dumps(self.as_dict())

    def as_yaml(self) -> str:
        """Return the healthcheck history in yaml format.

        Returns:
            str: YAML representation of the healthcheck history
        """
        return yaml.safe_dump(self.as_dict(), explicit_start=True, default_flow_style=False)
class Metric(object):
    def __init__(self, mtype: str, name: str, desc: str, labels: Optional[LabelValues] = None) -> None:
        # Prometheus metric type ('counter', 'gauge', ...), metric name and
        # HELP text as emitted in the exposition format.
        self.mtype = mtype
        self.name = name
        self.desc = desc
        self.labelnames = labels  # tuple if present
        # Samples keyed by the tuple of label values.
        self.value: Dict[LabelValues, Number] = {}
def clear(self) -> None:
self.value = {}
def set(self, value: Number, labelvalues: Optional[LabelValues] = None) -> None:
# labelvalues must be a tuple
labelvalues = labelvalues or ('',)
self.value[labelvalues] = value
def str_expfmt(self) -> str:
# Must be kept in sync with promethize() in src/exporter/util.cc
def promethize(path: str) -> str:
''' replace illegal metric name characters '''
result = re.sub(r'[./\s]|::', '_', path).replace('+', '_plus')
# Hyphens usually turn into underscores, unless they are
# trailing
if result.endswith("-"):
result = result[0:-1] + "_minus"
else:
result = result.replace("-", "_")
return "ceph_{0}".format(result)
def floatstr(value: float) -> str:
''' represent as Go-compatible float '''
if value == float('inf'):
return '+Inf'
if value == float('-inf'):
return '-Inf'
if math.isnan(value):
return 'NaN'
return repr(float(value))
name = promethize(self.name)
expfmt = '''
# HELP {name} {desc}
# TYPE {name} {mtype}'''.format(
name=name,
desc=self.desc,
mtype=self.mtype,
)
for labelvalues, value in self.value.items():
if self.labelnames:
labels_list = zip(self.labelnames, labelvalues)
labels = ','.join('%s="%s"' % (k, v) for k, v in labels_list)
else:
labels = ''
if labels:
fmtstr = '\n{name}{{{labels}}} {value}'
else:
fmtstr = '\n{name} {value}'
expfmt += fmtstr.format(
name=name,
labels=labels,
value=floatstr(value),
)
return expfmt
def group_by(
self,
keys: List[str],
joins: Dict[str, Callable[[List[str]], str]],
name: Optional[str] = None,
) -> "Metric":
"""
Groups data by label names.
Label names not passed are being removed from the resulting metric but
by providing a join function, labels of metrics can be grouped.
The purpose of this method is to provide a version of a metric that can
be used in matching where otherwise multiple results would be returned.
As grouping is possible in Prometheus, the only additional value of this
method is the possibility to join labels when grouping. For that reason,
passing joins is required. Please use PromQL expressions in all other
cases.
>>> m = Metric('type', 'name', '', labels=('label1', 'id'))
>>> m.value = {
... ('foo', 'x'): 1,
... ('foo', 'y'): 1,
... }
>>> m.group_by(['label1'], {'id': lambda ids: ','.join(ids)}).value
{('foo', 'x,y'): 1}
The functionality of group by could roughly be compared with Prometheus'
group (ceph_disk_occupation) by (device, instance)
with the exception that not all labels which aren't used as a condition
to group a metric are discarded, but their values can are joined and the
label is thereby preserved.
This function takes the value of the first entry of a found group to be
used for the resulting value of the grouping operation.
>>> m = Metric('type', 'name', '', labels=('label1', 'id'))
>>> m.value = {
... ('foo', 'x'): 555,
... ('foo', 'y'): 10,
... }
>>> m.group_by(['label1'], {'id': lambda ids: ','.join(ids)}).value
{('foo', 'x,y'): 555}
"""
assert self.labelnames, "cannot match keys without label names"
for key in keys:
assert key in self.labelnames, "unknown key: {}".format(key)
assert joins, "joins must not be empty"
assert all(callable(c) for c in joins.values()), "joins must be callable"
# group
grouped: Dict[LabelValues, List[Tuple[Dict[str, str], Number]]] = defaultdict(list)
for label_values, metric_value in self.value.items():
labels = dict(zip(self.labelnames, label_values))
if not all(k in labels for k in keys):
continue
group_key = tuple(labels[k] for k in keys)
grouped[group_key].append((labels, metric_value))
# as there is nothing specified on how to join labels that are not equal
# and Prometheus `group` aggregation functions similarly, we simply drop
# those labels.
labelnames = tuple(
label for label in self.labelnames if label in keys or label in joins
)
superfluous_labelnames = [
label for label in self.labelnames if label not in labelnames
]
# iterate and convert groups with more than one member into a single
# entry
values: MetricValue = {}
for group in grouped.values():
labels, metric_value = group[0]
for label in superfluous_labelnames:
del labels[label]
if len(group) > 1:
for key, fn in joins.items():
labels[key] = fn(list(labels[key] for labels, _ in group))
values[tuple(labels.values())] = metric_value
new_metric = Metric(self.mtype, name if name else self.name, self.desc, labelnames)
new_metric.value = values
return new_metric
class MetricCounter(Metric):
    """A monotonically increasing metric.

    Values accumulate via add() and deliberately survive clear(), matching
    Prometheus counter semantics.
    """

    def __init__(self,
                 name: str,
                 desc: str,
                 labels: Optional[LabelValues] = None) -> None:
        super(MetricCounter, self).__init__('counter', name, desc, labels)
        # unseen label tuples implicitly start counting from zero
        self.value = defaultdict(int)

    def clear(self) -> None:
        # Skip calls to clear as we want to keep the counters here.
        pass

    def set(self,
            value: Number,
            labelvalues: Optional[LabelValues] = None) -> None:
        msg = 'This method must not be used for instances of MetricCounter class'
        raise NotImplementedError(msg)

    def add(self,
            value: Number,
            labelvalues: Optional[LabelValues] = None) -> None:
        """Increase the sample for *labelvalues* by *value*."""
        # labelvalues must be a tuple
        key = labelvalues if labelvalues else ('',)
        self.value[key] += value
class MetricCollectionThread(threading.Thread):
    """Background thread that refreshes the module's metric cache once per
    scrape interval."""

    def __init__(self, module: 'Module') -> None:
        self.mod = module
        self.active = True
        self.event = threading.Event()
        super(MetricCollectionThread, self).__init__(target=self.collect)

    def collect(self) -> None:
        """Loop until stop() is called: gather metrics and publish them to
        the module's cache under the collection lock."""
        self.mod.log.info('starting metric collection thread')
        while self.active:
            self.mod.log.debug('collecting cache in thread')
            if not self.mod.have_mon_connection():
                self.mod.log.error('No MON connection')
                self.event.wait(self.mod.scrape_interval)
                continue

            t0 = time.time()
            try:
                data = self.mod.collect()
            except Exception:
                # Log any issues encountered during the data collection and continue
                self.mod.log.exception("failed to collect metrics:")
                self.event.wait(self.mod.scrape_interval)
                continue
            elapsed = time.time() - t0

            self.mod.log.debug('collecting cache in thread done')

            delay = self.mod.scrape_interval - elapsed
            if delay < 0:
                self.mod.log.warning(
                    'Collecting data took more time than configured scrape interval. '
                    'This possibly results in stale data. Please check the '
                    '`stale_cache_strategy` configuration option. '
                    'Collecting data took {:.2f} seconds but scrape interval is configured '
                    'to be {:.0f} seconds.'.format(
                        elapsed,
                        self.mod.scrape_interval,
                    )
                )
                delay = 0

            with self.mod.collect_lock:
                self.mod.collect_cache = data
                self.mod.collect_time = elapsed

            self.event.wait(delay)

    def stop(self) -> None:
        """Ask the collection loop to exit and wake it immediately."""
        self.active = False
        self.event.set()
class Module(MgrModule):
# Options configurable at runtime via `ceph config set mgr mgr/prometheus/<name>`.
MODULE_OPTIONS = [
    Option(
        'server_addr',
        default=get_default_addr(),
        desc='the IPv4 or IPv6 address on which the module listens for HTTP requests',
    ),
    Option(
        'server_port',
        type='int',
        default=DEFAULT_PORT,
        desc='the port on which the module listens for HTTP requests',
        runtime=True
    ),
    # interval (seconds) at which MetricCollectionThread refreshes the cache
    Option(
        'scrape_interval',
        type='float',
        default=15.0
    ),
    # what to do when the cached metrics are older than scrape_interval;
    # valid values appear to include the STALE_CACHE_FAIL / STALE_CACHE_RETURN
    # constants below plus the 'log' default — confirm in the scrape handler
    Option(
        'stale_cache_strategy',
        default='log'
    ),
    Option(
        'cache',
        type='bool',
        default=True,
    ),
    # comma/space separated list of pool[/namespace] entries for per-image
    # RBD statistics; parsed in get_rbd_stats()
    Option(
        'rbd_stats_pools',
        default=''
    ),
    Option(
        name='rbd_stats_pools_refresh_interval',
        type='int',
        default=300
    ),
    # how a standby mgr answers scrapes: redirect-style default or an error
    Option(
        name='standby_behaviour',
        type='str',
        default='default',
        enum_allowed=['default', 'error'],
        runtime=True
    ),
    Option(
        name='standby_error_status_code',
        type='int',
        default=500,
        min=400,
        max=599,
        runtime=True
    ),
    Option(
        name='exclude_perf_counters',
        type='bool',
        default=True,
        desc='Do not include perf-counters in the metrics output',
        long_desc='Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. This should only be disabled when no ceph-exporters are deployed.',
        runtime=True
    )
]

# Stale-cache strategies referenced by stale_cache_strategy handling.
STALE_CACHE_FAIL = 'fail'
STALE_CACHE_RETURN = 'return'
def __init__(self, *args: Any, **kwargs: Any) -> None:
    """Initialize metric storage, cache state, RBD stats state and the
    background metric collection thread."""
    super(Module, self).__init__(*args, **kwargs)
    self.metrics = self._setup_static_metrics()
    self.shutdown_event = threading.Event()
    # guards collect_cache / collect_time against MetricCollectionThread
    self.collect_lock = threading.Lock()
    self.collect_time = 0.0
    # defaults here; presumably refreshed from MODULE_OPTIONS at runtime —
    # confirm where the options are (re)loaded outside this view
    self.scrape_interval: float = 15.0
    self.cache = True
    self.stale_cache_strategy: str = self.STALE_CACHE_FAIL
    self.collect_cache: Optional[str] = None
    # Per-image RBD statistics state: watched pools, the registered OSD perf
    # query (added later in get_rbd_stats) and the exported counter set.
    self.rbd_stats = {
        'pools': {},
        'pools_refresh_time': 0,
        'counters_info': {
            'write_ops': {'type': self.PERFCOUNTER_COUNTER,
                          'desc': 'RBD image writes count'},
            'read_ops': {'type': self.PERFCOUNTER_COUNTER,
                         'desc': 'RBD image reads count'},
            'write_bytes': {'type': self.PERFCOUNTER_COUNTER,
                            'desc': 'RBD image bytes written'},
            'read_bytes': {'type': self.PERFCOUNTER_COUNTER,
                           'desc': 'RBD image bytes read'},
            'write_latency': {'type': self.PERFCOUNTER_LONGRUNAVG,
                              'desc': 'RBD image writes latency (msec)'},
            'read_latency': {'type': self.PERFCOUNTER_LONGRUNAVG,
                             'desc': 'RBD image reads latency (msec)'},
        },
    }  # type: Dict[str, Any]
    # module-level singleton handle used by code outside this instance
    global _global_instance
    _global_instance = self
    self.metrics_thread = MetricCollectionThread(_global_instance)
    self.health_history = HealthHistory(self)
def _setup_static_metrics(self) -> Dict[str, Metric]:
    """Create the fixed set of Metric objects this module always exports.

    Returns:
        Dict[str, Metric]: metrics keyed by path (e.g. 'osd_up'); the
        collector methods fill in sample values on every scrape.
    """
    metrics = {}
    metrics['health_status'] = Metric(
        'untyped',
        'health_status',
        'Cluster health status'
    )
    metrics['mon_quorum_status'] = Metric(
        'gauge',
        'mon_quorum_status',
        'Monitors in quorum',
        ('ceph_daemon',)
    )
    metrics['fs_metadata'] = Metric(
        'untyped',
        'fs_metadata',
        'FS Metadata',
        FS_METADATA
    )
    metrics['mds_metadata'] = Metric(
        'untyped',
        'mds_metadata',
        'MDS Metadata',
        MDS_METADATA
    )
    metrics['mon_metadata'] = Metric(
        'untyped',
        'mon_metadata',
        'MON Metadata',
        MON_METADATA
    )
    metrics['mgr_metadata'] = Metric(
        'gauge',
        'mgr_metadata',
        'MGR metadata',
        MGR_METADATA
    )
    metrics['mgr_status'] = Metric(
        'gauge',
        'mgr_status',
        'MGR status (0=standby, 1=active)',
        MGR_STATUS
    )
    metrics['mgr_module_status'] = Metric(
        'gauge',
        'mgr_module_status',
        'MGR module status (0=disabled, 1=enabled, 2=auto-enabled)',
        MGR_MODULE_STATUS
    )
    metrics['mgr_module_can_run'] = Metric(
        'gauge',
        'mgr_module_can_run',
        'MGR module runnable state i.e. can it run (0=no, 1=yes)',
        MGR_MODULE_CAN_RUN
    )
    metrics['osd_metadata'] = Metric(
        'untyped',
        'osd_metadata',
        'OSD Metadata',
        OSD_METADATA
    )

    # The reason for having this separate to OSD_METADATA is
    # so that we can stably use the same tag names that
    # the Prometheus node_exporter does
    metrics['disk_occupation'] = Metric(
        'untyped',
        'disk_occupation',
        'Associate Ceph daemon with disk used',
        DISK_OCCUPATION
    )

    metrics['disk_occupation_human'] = Metric(
        'untyped',
        'disk_occupation_human',
        'Associate Ceph daemon with disk used for displaying to humans,'
        ' not for joining tables (vector matching)',
        DISK_OCCUPATION,  # label names are automatically decimated on grouping
    )

    metrics['pool_metadata'] = Metric(
        'untyped',
        'pool_metadata',
        'POOL Metadata',
        POOL_METADATA
    )
    metrics['rgw_metadata'] = Metric(
        'untyped',
        'rgw_metadata',
        'RGW Metadata',
        RGW_METADATA
    )
    metrics['rbd_mirror_metadata'] = Metric(
        'untyped',
        'rbd_mirror_metadata',
        'RBD Mirror Metadata',
        RBD_MIRROR_METADATA
    )
    metrics['pg_total'] = Metric(
        'gauge',
        'pg_total',
        'PG Total Count per Pool',
        ('pool_id',)
    )
    metrics['health_detail'] = Metric(
        'gauge',
        'health_detail',
        'healthcheck status by type (0=inactive, 1=active)',
        HEALTHCHECK_DETAIL
    )
    metrics['pool_objects_repaired'] = Metric(
        'counter',
        'pool_objects_repaired',
        'Number of objects repaired in a pool',
        ('pool_id',)
    )
    metrics['daemon_health_metrics'] = Metric(
        'gauge',
        'daemon_health_metrics',
        'Health metrics for Ceph daemons',
        ('type', 'ceph_daemon',)
    )

    # One boolean-style metric per OSD map flag (noout, norebalance, ...).
    for flag in OSD_FLAGS:
        path = 'osd_flag_{}'.format(flag)
        metrics[path] = Metric(
            'untyped',
            path,
            'OSD Flag {}'.format(flag)
        )
    # Per-daemon OSD status metrics (up/in/...).
    for state in OSD_STATUS:
        path = 'osd_{}'.format(state)
        metrics[path] = Metric(
            'untyped',
            path,
            'OSD status {}'.format(state),
            ('ceph_daemon',)
        )
    for stat in OSD_STATS:
        path = 'osd_{}'.format(stat)
        metrics[path] = Metric(
            'gauge',
            path,
            'OSD stat {}'.format(stat),
            ('ceph_daemon',)
        )
    for stat in OSD_POOL_STATS:
        path = 'pool_{}'.format(stat)
        metrics[path] = Metric(
            'gauge',
            path,
            "OSD pool stats: {}".format(stat),
            ('pool_id',)
        )
    for state in PG_STATES:
        path = 'pg_{}'.format(state)
        metrics[path] = Metric(
            'gauge',
            path,
            'PG {} per pool'.format(state),
            ('pool_id',)
        )
    # `ceph df` stats: one cluster-wide metric and one per device class.
    for state in DF_CLUSTER:
        path = 'cluster_{}'.format(state)
        metrics[path] = Metric(
            'gauge',
            path,
            'DF {}'.format(state),
        )
        path = 'cluster_by_class_{}'.format(state)
        metrics[path] = Metric(
            'gauge',
            path,
            'DF {}'.format(state),
            ('device_class',)
        )
    for state in DF_POOL:
        path = 'pool_{}'.format(state)
        metrics[path] = Metric(
            # read/write op and byte totals only ever grow, so export
            # them as counters; everything else is a gauge
            'counter' if state in ('rd', 'rd_bytes', 'wr', 'wr_bytes') else 'gauge',
            path,
            'DF pool {}'.format(state),
            ('pool_id',)
        )
    for state in OSD_BLOCKLIST:
        path = 'cluster_{}'.format(state)
        metrics[path] = Metric(
            'gauge',
            path,
            'OSD Blocklist Count {}'.format(state),
        )
    for state in NUM_OBJECTS:
        path = 'num_objects_{}'.format(state)
        metrics[path] = Metric(
            'gauge',
            path,
            'Number of {} objects'.format(state),
        )

    # One gauge per known healthcheck type (see get_health()).
    for check in HEALTH_CHECKS:
        path = 'healthcheck_{}'.format(check.name.lower())
        metrics[path] = Metric(
            'gauge',
            path,
            check.description,
        )

    return metrics
def get_server_addr(self) -> str:
    """Return the current mgr server IP.

    Wildcard bind addresses are resolved to the mgr's actual IP.
    """
    addr = cast(str, self.get_localized_module_option('server_addr', get_default_addr()))
    return self.get_mgr_ip() if addr in ('::', '0.0.0.0') else addr
def config_notify(self) -> None:
    """
    This method is called whenever one of our config options is changed.

    Restarts the cherrypy engine so the new address/port take effect.
    """
    # https://stackoverflow.com/questions/7254845/change-cherrypy-port-and-restart-web-server
    # if we omit the line: cherrypy.server.httpserver = None
    # then the cherrypy server is not restarted correctly
    self.log.info('Restarting engine...')
    cherrypy.engine.stop()
    cherrypy.server.httpserver = None
    addr = cast(str, self.get_localized_module_option('server_addr', get_default_addr()))
    port = cast(int, self.get_localized_module_option('server_port', DEFAULT_PORT))
    self.configure(addr, port)
    cherrypy.engine.start()
    self.log.info('Engine started.')
@profile_method()
def get_health(self) -> None:
    """Export overall cluster health plus one metric per known healthcheck."""

    def _parse_count(message: str, delim: str = ' ', word_pos: int = 0) -> Tuple[int, int]:
        """Extract value from message (default is 1st field)"""
        token = message.split(delim)[word_pos]
        if token.isdigit():
            return int(token), 0
        return 0, 1

    health = json.loads(self.get('health')['json'])
    # set overall health
    self.metrics['health_status'].set(
        health_status_to_number(health['status'])
    )

    # Examine the health to see if any health checks triggered need to
    # become a specific metric with a value from the health detail
    active_checks = health.get('checks', {})
    for check in HEALTH_CHECKS:
        path = 'healthcheck_{}'.format(check.name.lower())
        if path not in self.metrics:
            continue
        if check.name not in active_checks:
            # health check is not active, so give it a default of 0
            self.metrics[path].set(0)
            continue
        message = active_checks[check.name]['summary'].get('message', '')
        value, err = 0, 0
        if check.name == "SLOW_OPS":
            # 42 slow ops, oldest one blocked for 12 sec, daemons [osd.0, osd.3] have
            # slow ops.
            value, err = _parse_count(message)
        if err:
            self.log.error(
                "healthcheck %s message format is incompatible and has been dropped",
                check.name)
            # drop the metric, so it's no longer emitted
            del self.metrics[path]
        else:
            self.metrics[path].set(value)

    # Track activation history so previously seen checks stay visible.
    self.health_history.check(health)
    for name, info in self.health_history.healthcheck.items():
        self.metrics['health_detail'].set(
            1 if info.active else 0,
            (name,
             str(info.severity)))
@profile_method()
def get_pool_stats(self) -> None:
    """Export per-pool recovery rate statistics.

    osd_pool_stats has been available from the mgr since Mimic.
    """
    stats = self.get('osd_pool_stats')
    for pool in stats['pool_stats']:
        rate = pool['recovery_rate']
        for stat in OSD_POOL_STATS:
            self.metrics['pool_{}'.format(stat)].set(
                rate.get(stat, 0),
                (pool['pool_id'],))
@profile_method()
def get_df(self) -> None:
    """Export cluster-wide, per-device-class and per-pool `ceph df` stats."""
    # maybe get the to-be-exported metrics from a config?
    df = self.get('df')
    for stat in DF_CLUSTER:
        self.metrics['cluster_{}'.format(stat)].set(df['stats'][stat])
        for device_class, class_stats in df['stats_by_class'].items():
            self.metrics['cluster_by_class_{}'.format(stat)].set(
                class_stats[stat], (device_class,))

    for pool in df['pools']:
        pool_stats = pool['stats']
        for stat in DF_POOL:
            self.metrics['pool_{}'.format(stat)].set(
                pool_stats[stat],
                (pool['id'],))
@profile_method()
def get_osd_blocklisted_entries(self) -> None:
    """Export the number of entries on the OSD blocklist."""
    r = self.mon_command({
        'prefix': 'osd blocklist ls',
        'format': 'json'
    })
    # status string of the command looks like "listed <N> entries"
    count = int(r[2].split(' ')[1])
    for stat in OSD_BLOCKLIST:
        self.metrics['cluster_{}'.format(stat)].set(count)
@profile_method()
def get_fs(self) -> None:
    """Export metadata for every CephFS filesystem and MDS daemon."""
    fs_map = self.get('fs_map')
    servers = self.get_service_list()
    self.log.debug('standbys: {}'.format(fs_map['standbys']))
    # export standby mds metadata, default standby fs_id is '-1'
    for standby in fs_map['standbys']:
        mds_id = standby['name']
        host, version, _ = servers.get((mds_id, 'mds'), ('', '', ''))
        self.metrics['mds_metadata'].set(1, (
            'mds.{}'.format(mds_id), '-1',
            cast(str, host),
            cast(str, standby['addr']),
            cast(str, standby['rank']),
            cast(str, version)
        ))
    for fs in fs_map['filesystems']:
        # collect fs metadata
        data_pools = ",".join(str(pool)
                              for pool in fs['mdsmap']['data_pools'])
        self.metrics['fs_metadata'].set(1, (
            data_pools,
            fs['id'],
            fs['mdsmap']['metadata_pool'],
            fs['mdsmap']['fs_name']
        ))
        self.log.debug('mdsmap: {}'.format(fs['mdsmap']))
        for gid, daemon in fs['mdsmap']['info'].items():
            mds_id = daemon['name']
            host, version, _ = servers.get((mds_id, 'mds'), ('', '', ''))
            self.metrics['mds_metadata'].set(1, (
                'mds.{}'.format(mds_id), fs['id'],
                host, daemon['addr'],
                daemon['rank'], version
            ))
@profile_method()
def get_quorum_status(self) -> None:
    """Export monitor metadata and per-monitor quorum membership."""
    mon_status = json.loads(self.get('mon_status')['json'])
    servers = self.get_service_list()
    for mon in mon_status['monmap']['mons']:
        rank = mon['rank']
        mon_name = mon['name']
        host, version, _ = servers.get((mon_name, 'mon'), ('', '', ''))
        # strip the port from e.g. "10.0.0.1:6789"
        public_ip = mon['public_addr'].rsplit(':', 1)[0]
        self.metrics['mon_metadata'].set(1, (
            'mon.{}'.format(mon_name), host,
            public_ip, rank,
            version
        ))
        self.metrics['mon_quorum_status'].set(
            int(rank in mon_status['quorum']),
            ('mon.{}'.format(mon_name),))
@profile_method()
def get_mgr_status(self) -> None:
    """Export mgr daemon metadata plus per-module status and runnability."""
    mgr_map = self.get('mgr_map')
    servers = self.get_service_list()

    active = mgr_map['active_name']
    all_mgrs = [s.get('name') for s in mgr_map['standbys']]
    all_mgrs.append(active)

    all_modules = {module.get('name'): module.get('can_run')
                   for module in mgr_map['available_modules']}

    for mgr in all_mgrs:
        host, version, _ = servers.get((mgr, 'mgr'), ('', '', ''))
        self.metrics['mgr_metadata'].set(1, (
            f'mgr.{mgr}', host, version
        ))
        # 1 for the active mgr, 0 for standbys
        self.metrics['mgr_status'].set(1 if mgr == active else 0, (
            f'mgr.{mgr}',))

    always_on = mgr_map['always_on_modules'].get(self.release_name, [])
    enabled = list(always_on) + list(mgr_map['modules'])
    for mod_name, can_run in all_modules.items():
        if mod_name in always_on:
            state = 2    # auto-enabled
        elif mod_name in enabled:
            state = 1    # explicitly enabled
        else:
            state = 0    # disabled

        self.metrics['mgr_module_status'].set(state, (mod_name,))
        self.metrics['mgr_module_can_run'].set(1 if can_run else 0, (mod_name,))
@profile_method()
def get_pg_status(self) -> None:
    """Export per-pool PG counts broken down by PG state."""
    pg_summary = self.get('pg_summary')

    for pool, state_counts in pg_summary['by_pool'].items():
        counts: DefaultDict[str, int] = defaultdict(int)
        # make sure every known state is reported, even when zero
        for state in PG_STATES:
            counts[state] = 0
        # composite states like "active+clean" count towards each component
        for state_name, count in state_counts.items():
            for state in state_name.split('+'):
                counts[state] += count
            counts['total'] += count

        for state, num in counts.items():
            try:
                self.metrics["pg_{}".format(state)].set(num, (pool,))
            except KeyError:
                self.log.warning("skipping pg in unknown state {}".format(state))
@profile_method()
def get_osd_stats(self) -> None:
    """Export per-OSD perf statistics."""
    for osd in self.get('osd_stats')['osd_stats']:
        daemon = 'osd.{}'.format(osd['osd'])
        perf = osd['perf_stat']
        for stat in OSD_STATS:
            self.metrics['osd_{}'.format(stat)].set(perf[stat], (daemon,))
def get_service_list(self) -> Dict[Tuple[str, str], Tuple[str, str, str]]:
    """Map (service_id, service_type) to (hostname, ceph_version, name)
    for every service known to the mgr."""
    ret = {}
    for server in self.list_servers():
        host = cast(str, server.get('hostname', ''))
        for svc in cast(List[ServiceInfoT], server.get('services', [])):
            ret[(svc['id'], svc['type'])] = (
                host,
                svc.get('ceph_version', 'unknown'),
                svc.get('name', ''),
            )
    return ret
@profile_method()
def get_metadata_and_osd_status(self) -> None:
    """Export OSD flags, per-OSD metadata/status, disk occupation,
    pool metadata, and RGW / rbd-mirror daemon metadata."""
    osd_map = self.get('osd_map')
    osd_flags = osd_map['flags'].split(',')
    for flag in OSD_FLAGS:
        self.metrics['osd_flag_{}'.format(flag)].set(
            int(flag in osd_flags)
        )

    osd_devices = self.get('osd_map_crush')['devices']
    servers = self.get_service_list()
    for osd in osd_map['osds']:
        # id can be used to link osd metrics and metadata
        id_ = osd['osd']
        # collect osd metadata
        p_addr = osd['public_addr'].rsplit(':', 1)[0]
        c_addr = osd['cluster_addr'].rsplit(':', 1)[0]
        if p_addr == "-" or c_addr == "-":
            self.log.info(
                "Missing address metadata for osd {0}, skipping occupation"
                " and metadata records for this osd".format(id_)
            )
            continue

        # look up the OSD's device class in the CRUSH map
        dev_class = None
        for osd_device in osd_devices:
            if osd_device['id'] == id_:
                dev_class = osd_device.get('class', '')
                break

        if dev_class is None:
            self.log.info("OSD {0} is missing from CRUSH map, "
                          "skipping output".format(id_))
            continue

        # (host, ceph_version, name) tuple; '' entries when unknown
        osd_version = servers.get((str(id_), 'osd'), ('', '', ''))

        # collect disk occupation metadata
        osd_metadata = self.get_metadata("osd", str(id_))
        if osd_metadata is None:
            continue

        obj_store = osd_metadata.get('osd_objectstore', '')
        f_iface = osd_metadata.get('front_iface', '')
        b_iface = osd_metadata.get('back_iface', '')

        self.metrics['osd_metadata'].set(1, (
            b_iface,
            'osd.{}'.format(id_),
            c_addr,
            dev_class,
            f_iface,
            osd_version[0],
            obj_store,
            p_addr,
            osd_version[1]
        ))

        # collect osd status
        for state in OSD_STATUS:
            status = osd[state]
            self.metrics['osd_{}'.format(state)].set(status, (
                'osd.{}'.format(id_),
            ))

        # device nodes differ between the filestore and bluestore backends
        osd_dev_node = None
        osd_wal_dev_node = ''
        osd_db_dev_node = ''
        if obj_store == "filestore":
            # collect filestore backend device
            osd_dev_node = osd_metadata.get(
                'backend_filestore_dev_node', None)
            # collect filestore journal device
            osd_wal_dev_node = osd_metadata.get('osd_journal', '')
            osd_db_dev_node = ''
        elif obj_store == "bluestore":
            # collect bluestore backend device
            osd_dev_node = osd_metadata.get(
                'bluestore_bdev_dev_node', None)
            # collect bluestore wal backend
            osd_wal_dev_node = osd_metadata.get('bluefs_wal_dev_node', '')
            # collect bluestore db backend
            osd_db_dev_node = osd_metadata.get('bluefs_db_dev_node', '')
        if osd_dev_node and osd_dev_node == "unknown":
            osd_dev_node = None

        # fetch the devices and ids (vendor, model, serial) from the
        # osd_metadata
        osd_devs = osd_metadata.get('devices', '') or 'N/A'
        osd_dev_ids = osd_metadata.get('device_ids', '') or 'N/A'

        osd_hostname = osd_metadata.get('hostname', None)
        if osd_dev_node and osd_hostname:
            self.log.debug("Got dev for osd {0}: {1}/{2}".format(
                id_, osd_hostname, osd_dev_node))
            self.metrics['disk_occupation'].set(1, (
                "osd.{0}".format(id_),
                osd_dev_node,
                osd_db_dev_node,
                osd_wal_dev_node,
                osd_hostname,
                osd_devs,
                osd_dev_ids,
            ))
        else:
            self.log.info("Missing dev node metadata for osd {0}, skipping "
                          "occupation record for this osd".format(id_))

    # Derive a human-readable variant that joins daemons sharing a device,
    # so it can be displayed without breaking vector matching.
    if 'disk_occupation' in self.metrics:
        try:
            self.metrics['disk_occupation_human'] = \
                self.metrics['disk_occupation'].group_by(
                    ['device', 'instance'],
                    {'ceph_daemon': lambda daemons: ', '.join(daemons)},
                    name='disk_occupation_human',
                )
        except Exception as e:
            self.log.error(e)

    ec_profiles = osd_map.get('erasure_code_profiles', {})

    def _get_pool_info(pool: Dict[str, Any]) -> Tuple[str, str]:
        """Return (pool_type, description), e.g. ('replicated', 'replica:3')
        or ('erasure', 'ec:4+2')."""
        pool_type = 'unknown'
        description = 'unknown'

        if pool['type'] == 1:
            pool_type = "replicated"
            description = f"replica:{pool['size']}"
        elif pool['type'] == 3:
            pool_type = "erasure"
            name = pool.get('erasure_code_profile', '')
            profile = ec_profiles.get(name, {})
            if profile:
                description = f"ec:{profile['k']}+{profile['m']}"
            else:
                description = "ec:unknown"

        return pool_type, description

    for pool in osd_map['pools']:
        compression_mode = 'none'
        pool_type, pool_description = _get_pool_info(pool)

        if 'options' in pool:
            compression_mode = pool['options'].get('compression_mode', 'none')

        self.metrics['pool_metadata'].set(
            1, (
                pool['pool'],
                pool['pool_name'],
                pool_type,
                pool_description,
                compression_mode)
        )

    # Populate other servers metadata
    for key, value in servers.items():
        service_id, service_type = key
        if service_type == 'rgw':
            hostname, version, name = value
            self.metrics['rgw_metadata'].set(
                1,
                ('{}.{}'.format(service_type, name),
                 hostname, version, service_id)
            )
        elif service_type == 'rbd-mirror':
            mirror_metadata = self.get_metadata('rbd-mirror', service_id)
            if mirror_metadata is None:
                continue
            mirror_metadata['ceph_daemon'] = '{}.{}'.format(service_type,
                                                            service_id)
            rbd_mirror_metadata = cast(LabelValues,
                                       (mirror_metadata.get(k, '')
                                        for k in RBD_MIRROR_METADATA))
            self.metrics['rbd_mirror_metadata'].set(
                1, rbd_mirror_metadata
            )
@profile_method()
def get_num_objects(self) -> None:
    """Export cluster-wide object counts from the PG summary."""
    pg_sum = self.get('pg_summary')['pg_stats_sum']['stat_sum']
    for obj in NUM_OBJECTS:
        metric_name = 'num_objects_{}'.format(obj)
        self.metrics[metric_name].set(pg_sum[metric_name])
@profile_method()
def get_rbd_stats(self) -> None:
    """Export per-RBD-image I/O statistics via a dynamic OSD perf query."""
    # Per RBD image stats is collected by registering a dynamic osd perf
    # stats query that tells OSDs to group stats for requests associated
    # with RBD objects by pool, namespace, and image id, which are
    # extracted from the request object names or other attributes.
    # The RBD object names have the following prefixes:
    # - rbd_data.{image_id}. (data stored in the same pool as metadata)
    # - rbd_data.{pool_id}.{image_id}. (data stored in a dedicated data pool)
    # - journal_data.{pool_id}.{image_id}. (journal if journaling is enabled)
    # The pool_id in the object name is the id of the pool with the image
    # metdata, and should be used in the image spec. If there is no pool_id
    # in the object name, the image pool is the pool where the object is
    # located.

    # Parse rbd_stats_pools option, which is a comma or space separated
    # list of pool[/namespace] entries. If no namespace is specifed the
    # stats are collected for every namespace in the pool. The wildcard
    # '*' can be used to indicate all pools or namespaces
    pools_string = cast(str, self.get_localized_module_option('rbd_stats_pools'))
    pool_keys = set()
    osd_map = self.get('osd_map')
    rbd_pools = [pool['pool_name'] for pool in osd_map['pools']
                 if 'rbd' in pool.get('application_metadata', {})]
    for x in re.split(r'[\s,]+', pools_string):
        if not x:
            continue

        s = x.split('/', 2)
        pool_name = s[0]
        namespace_name = None
        if len(s) == 2:
            namespace_name = s[1]

        if pool_name == "*":
            # collect for all pools
            for pool in rbd_pools:
                pool_keys.add((pool, namespace_name))
        else:
            if pool_name in rbd_pools:
                pool_keys.add((pool_name, namespace_name))  # avoids adding deleted pool

    pools = {}  # type: Dict[str, Set[str]]
    for pool_key in pool_keys:
        pool_name = pool_key[0]
        namespace_name = pool_key[1]
        if not namespace_name or namespace_name == "*":
            # empty set means collect for all namespaces
            pools[pool_name] = set()
            continue

        if pool_name not in pools:
            pools[pool_name] = set()
        elif not pools[pool_name]:
            continue
        pools[pool_name].add(namespace_name)

    rbd_stats_pools = {}
    # FIX: iterate over a snapshot of the keys. Entries for pools that are
    # no longer watched are deleted inside the loop, and deleting from a
    # dict while iterating over it raises
    # "RuntimeError: dictionary changed size during iteration".
    for pool_id in list(self.rbd_stats['pools'].keys()):
        name = self.rbd_stats['pools'][pool_id]['name']
        if name not in pools:
            del self.rbd_stats['pools'][pool_id]
        else:
            rbd_stats_pools[name] = \
                self.rbd_stats['pools'][pool_id]['ns_names']

    pools_refreshed = False
    if pools:
        next_refresh = self.rbd_stats['pools_refresh_time'] + \
            self.get_localized_module_option(
                'rbd_stats_pools_refresh_interval', 300)
        if rbd_stats_pools != pools or time.time() >= next_refresh:
            self.refresh_rbd_stats_pools(pools)
            pools_refreshed = True

    pool_ids = list(self.rbd_stats['pools'])
    pool_ids.sort()
    pool_id_regex = '^(' + '|'.join([str(x) for x in pool_ids]) + ')$'

    # Build a namespace regex; an empty list means "match any namespace".
    nspace_names = []
    for pool_id, pool in self.rbd_stats['pools'].items():
        if pool['ns_names']:
            nspace_names.extend(pool['ns_names'])
        else:
            nspace_names = []
            break
    if nspace_names:
        namespace_regex = '^(' + \
                          "|".join([re.escape(x)
                                    for x in set(nspace_names)]) + ')$'
    else:
        namespace_regex = '^(.*)$'

    # Re-register the query if the watched pools/namespaces changed.
    if ('query' in self.rbd_stats
            and (pool_id_regex != self.rbd_stats['query']['key_descriptor'][0]['regex']
                 or namespace_regex != self.rbd_stats['query']['key_descriptor'][1]['regex'])):
        self.remove_osd_perf_query(self.rbd_stats['query_id'])
        del self.rbd_stats['query_id']
        del self.rbd_stats['query']

    if not self.rbd_stats['pools']:
        return

    counters_info = self.rbd_stats['counters_info']

    if 'query_id' not in self.rbd_stats:
        query = {
            'key_descriptor': [
                {'type': 'pool_id', 'regex': pool_id_regex},
                {'type': 'namespace', 'regex': namespace_regex},
                {'type': 'object_name',
                 'regex': r'^(?:rbd|journal)_data\.(?:([0-9]+)\.)?([^.]+)\.'},
            ],
            'performance_counter_descriptors': list(counters_info),
        }
        query_id = self.add_osd_perf_query(query)
        if query_id is None:
            self.log.error('failed to add query %s' % query)
            return
        self.rbd_stats['query'] = query
        self.rbd_stats['query_id'] = query_id

    res = self.get_osd_perf_counters(self.rbd_stats['query_id'])
    assert res
    for c in res['counters']:
        # if the pool id is not found in the object name use id of the
        # pool where the object is located
        if c['k'][2][0]:
            pool_id = int(c['k'][2][0])
        else:
            pool_id = int(c['k'][0][0])
        if pool_id not in self.rbd_stats['pools'] and not pools_refreshed:
            self.refresh_rbd_stats_pools(pools)
            pools_refreshed = True
        if pool_id not in self.rbd_stats['pools']:
            continue
        pool = self.rbd_stats['pools'][pool_id]
        nspace_name = c['k'][1][0]
        if nspace_name not in pool['images']:
            continue
        image_id = c['k'][2][1]
        if image_id not in pool['images'][nspace_name] and \
           not pools_refreshed:
            self.refresh_rbd_stats_pools(pools)
            pool = self.rbd_stats['pools'][pool_id]
            pools_refreshed = True
        if image_id not in pool['images'][nspace_name]:
            continue
        # accumulate (value, count) pairs per counter
        counters = pool['images'][nspace_name][image_id]['c']
        for i in range(len(c['c'])):
            counters[i][0] += c['c'][i][0]
            counters[i][1] += c['c'][i][1]

    label_names = ("pool", "namespace", "image")
    for pool_id, pool in self.rbd_stats['pools'].items():
        pool_name = pool['name']
        for nspace_name, images in pool['images'].items():
            for image_id in images:
                image_name = images[image_id]['n']
                counters = images[image_id]['c']
                i = 0
                for key in counters_info:
                    counter_info = counters_info[key]
                    stattype = self._stattype_to_str(counter_info['type'])
                    labels = (pool_name, nspace_name, image_name)
                    if counter_info['type'] == self.PERFCOUNTER_COUNTER:
                        path = 'rbd_' + key
                        if path not in self.metrics:
                            self.metrics[path] = Metric(
                                stattype,
                                path,
                                counter_info['desc'],
                                label_names,
                            )
                        self.metrics[path].set(counters[i][0], labels)
                    elif counter_info['type'] == self.PERFCOUNTER_LONGRUNAVG:
                        # long-running averages export both a _sum and a _count
                        path = 'rbd_' + key + '_sum'
                        if path not in self.metrics:
                            self.metrics[path] = Metric(
                                stattype,
                                path,
                                counter_info['desc'] + ' Total',
                                label_names,
                            )
                        self.metrics[path].set(counters[i][0], labels)
                        path = 'rbd_' + key + '_count'
                        if path not in self.metrics:
                            self.metrics[path] = Metric(
                                'counter',
                                path,
                                counter_info['desc'] + ' Count',
                                label_names,
                            )
                        self.metrics[path].set(counters[i][1], labels)
                    i += 1
def refresh_rbd_stats_pools(self, pools: Dict[str, Set[str]]) -> None:
    """Synchronize self.rbd_stats['pools'] with the watched pools.

    Args:
        pools: pool name -> set of namespace names to watch; an empty set
            means "all namespaces in the pool".

    Errors while listing a pool are logged and the pool is skipped.
    """
    self.log.debug('refreshing rbd pools %s' % (pools))

    rbd = RBD()
    counters_info = self.rbd_stats['counters_info']
    for pool_name, cfg_ns_names in pools.items():
        try:
            pool_id = self.rados.pool_lookup(pool_name)
            with self.rados.open_ioctx(pool_name) as ioctx:
                if pool_id not in self.rbd_stats['pools']:
                    self.rbd_stats['pools'][pool_id] = {'images': {}}
                pool = self.rbd_stats['pools'][pool_id]
                pool['name'] = pool_name
                pool['ns_names'] = cfg_ns_names
                if cfg_ns_names:
                    nspace_names = list(cfg_ns_names)
                else:
                    nspace_names = [''] + rbd.namespace_list(ioctx)
                # FIX: iterate over a snapshot of the keys. Deleting from a
                # dict while iterating over it raises
                # "RuntimeError: dictionary changed size during iteration".
                for nspace_name in list(pool['images']):
                    if nspace_name not in nspace_names:
                        del pool['images'][nspace_name]
                for nspace_name in nspace_names:
                    if nspace_name and\
                            not rbd.namespace_exists(ioctx, nspace_name):
                        self.log.debug('unknown namespace %s for pool %s' %
                                       (nspace_name, pool_name))
                        continue
                    ioctx.set_namespace(nspace_name)
                    if nspace_name not in pool['images']:
                        pool['images'][nspace_name] = {}
                    namespace = pool['images'][nspace_name]
                    images = {}
                    for image_meta in RBD().list2(ioctx):
                        image = {'n': image_meta['name']}
                        image_id = image_meta['id']
                        if image_id in namespace:
                            # keep the accumulated counters of known images
                            image['c'] = namespace[image_id]['c']
                        else:
                            image['c'] = [[0, 0] for x in counters_info]
                        images[image_id] = image
                    pool['images'][nspace_name] = images
        except Exception as e:
            self.log.error('failed listing pool %s: %s' % (pool_name, e))
    self.rbd_stats['pools_refresh_time'] = time.time()
def shutdown_rbd_stats(self) -> None:
    """Tear down the dynamic OSD perf query and drop cached RBD stats."""
    query_id = self.rbd_stats.pop('query_id', None)
    if query_id is not None:
        self.remove_osd_perf_query(query_id)
        del self.rbd_stats['query']
    self.rbd_stats['pools'].clear()
def add_fixed_name_metrics(self) -> None:
"""
Add fixed name metrics from existing ones that have details in their names
that should be in labels (not in name).
For backward compatibility, a new fixed name metric is created (instead of replacing)
and details are put in new labels.
Intended for RGW sync perf. counters but extendable as required.
See: https://tracker.ceph.com/issues/45311
"""
new_metrics = {}
for metric_path, metrics in self.metrics.items():
# Address RGW sync perf. counters.
match = re.search(r'^data-sync-from-(.*)\.', metric_path)
if match:
new_path = re.sub('from-([^.]*)', 'from-zone', metric_path)
if new_path not in new_metrics:
new_metrics[new_path] = Metric(
metrics.mtype,
new_path,
metrics.desc,
cast(LabelValues, metrics.labelnames) + ('source_zone',)
)
for label_values, value in metrics.value.items():
new_metrics[new_path].set(value, label_values + (match.group(1),))
self.metrics.update(new_metrics)
    def get_collect_time_metrics(self) -> None:
        """Export how long each profiled gatherer took as sum/count counters.

        Creates (once) the two 'prometheus_collect_duration_seconds_*' counters
        and adds the per-method execution times recorded by @profile_method.
        """
        sum_metric = self.metrics.get('prometheus_collect_duration_seconds_sum')
        count_metric = self.metrics.get('prometheus_collect_duration_seconds_count')
        if sum_metric is None:
            sum_metric = MetricCounter(
                'prometheus_collect_duration_seconds_sum',
                'The sum of seconds took to collect all metrics of this exporter',
                ('method',))
            self.metrics['prometheus_collect_duration_seconds_sum'] = sum_metric
        if count_metric is None:
            count_metric = MetricCounter(
                'prometheus_collect_duration_seconds_count',
                'The amount of metrics gathered for this exporter',
                ('method',))
            self.metrics['prometheus_collect_duration_seconds_count'] = count_metric
        # Collect all timing data and make it available as metric, excluding the
        # `collect` method because it has not finished at this point and hence
        # there's no `_execution_duration` attribute to be found. The
        # `_execution_duration` attribute is added by the `profile_method`
        # decorator.
        for method_name, method in Module.__dict__.items():
            duration = getattr(method, '_execution_duration', None)
            if duration is not None:
                cast(MetricCounter, sum_metric).add(duration, (method_name,))
                cast(MetricCounter, count_metric).add(1, (method_name,))
def get_pool_repaired_objects(self) -> None:
dump = self.get('pg_dump')
for stats in dump['pool_stats']:
path = 'pool_objects_repaired'
self.metrics[path].set(stats['stat_sum']['num_objects_repaired'],
labelvalues=(stats['poolid'],))
def get_all_daemon_health_metrics(self) -> None:
daemon_metrics = self.get_daemon_health_metrics()
self.log.debug('metrics jeje %s' % (daemon_metrics))
for daemon_name, health_metrics in daemon_metrics.items():
for health_metric in health_metrics:
path = 'daemon_health_metrics'
self.metrics[path].set(health_metric['value'], labelvalues=(
health_metric['type'], daemon_name,))
    def get_perf_counters(self) -> None:
        """
        Get the perf counters for all daemons
        """
        for daemon, counters in self.get_unlabeled_perf_counters().items():
            for path, counter_info in counters.items():
                # Skip histograms, they are represented by long running avgs
                stattype = self._stattype_to_str(counter_info['type'])
                if not stattype or stattype == 'histogram':
                    self.log.debug('ignoring %s, type %s' % (path, stattype))
                    continue
                # Rewrite the perf path into a metric name plus label names/values.
                path, label_names, labels = self._perfpath_to_path_labels(
                    daemon, path)
                # Get the value of the counter
                value = self._perfvalue_to_value(
                    counter_info['type'], counter_info['value'])
                # Represent the long running avgs as sum/count pairs
                if counter_info['type'] & self.PERFCOUNTER_LONGRUNAVG:
                    _path = path + '_sum'
                    if _path not in self.metrics:
                        self.metrics[_path] = Metric(
                            stattype,
                            _path,
                            counter_info['description'] + ' Total',
                            label_names,
                        )
                    self.metrics[_path].set(value, labels)
                    # The number of samples is always exported as a counter.
                    _path = path + '_count'
                    if _path not in self.metrics:
                        self.metrics[_path] = Metric(
                            'counter',
                            _path,
                            counter_info['description'] + ' Count',
                            label_names,
                        )
                    self.metrics[_path].set(counter_info['count'], labels,)
                else:
                    if path not in self.metrics:
                        self.metrics[path] = Metric(
                            stattype,
                            path,
                            counter_info['description'],
                            label_names,
                        )
                    self.metrics[path].set(value, labels)
        # Re-derive fixed-name variants (e.g. RGW sync counters) from the above.
        self.add_fixed_name_metrics()
@profile_method(True)
def collect(self) -> str:
# Clear the metrics before scraping
for k in self.metrics.keys():
self.metrics[k].clear()
self.get_health()
self.get_df()
self.get_osd_blocklisted_entries()
self.get_pool_stats()
self.get_fs()
self.get_osd_stats()
self.get_quorum_status()
self.get_mgr_status()
self.get_metadata_and_osd_status()
self.get_pg_status()
self.get_pool_repaired_objects()
self.get_num_objects()
self.get_all_daemon_health_metrics()
if not self.get_module_option('exclude_perf_counters'):
self.get_perf_counters()
self.get_rbd_stats()
self.get_collect_time_metrics()
# Return formatted metrics and clear no longer used data
_metrics = [m.str_expfmt() for m in self.metrics.values()]
for k in self.metrics.keys():
self.metrics[k].clear()
return ''.join(_metrics) + '\n'
@CLIReadCommand('prometheus file_sd_config')
def get_file_sd_config(self) -> Tuple[int, str, str]:
'''
Return file_sd compatible prometheus config for mgr cluster
'''
servers = self.list_servers()
targets = []
for server in servers:
hostname = server.get('hostname', '')
for service in cast(List[ServiceInfoT], server.get('services', [])):
if service['type'] != 'mgr':
continue
id_ = service['id']
port = self._get_module_option('server_port', DEFAULT_PORT, id_)
targets.append(f'{hostname}:{port}')
ret = [
{
"targets": targets,
"labels": {}
}
]
return 0, json.dumps(ret), ""
    def self_test(self) -> None:
        # Smoke test: a full metrics collection and a file_sd config generation
        # must both complete without raising.
        self.collect()
        self.get_file_sd_config()
def configure(self, server_addr: str, server_port: int) -> None:
# cephadm deployments have a TLS monitoring stack setup option.
# If the cephadm module is on and the setting is true (defaults to false)
# we should have prometheus be set up to interact with that
cephadm_secure_monitoring_stack = self.get_module_option_ex(
'cephadm', 'secure_monitoring_stack', False)
if cephadm_secure_monitoring_stack:
try:
self.setup_cephadm_tls_config(server_addr, server_port)
return
except Exception as e:
self.log.exception(f'Failed to setup cephadm based secure monitoring stack: {e}\n',
'Falling back to default configuration')
self.setup_default_config(server_addr, server_port)
    def setup_default_config(self, server_addr: str, server_port: int) -> None:
        """Configure cherrypy to serve plain HTTP (no TLS)."""
        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': server_port,
            'engine.autoreload.on': False,
            # Explicitly clear any SSL settings a previous (TLS) configuration
            # may have left behind.
            'server.ssl_module': None,
            'server.ssl_certificate': None,
            'server.ssl_private_key': None,
        })
        # Publish the URI that others may use to access the service we're about to start serving
        self.set_uri(build_url(scheme='http', host=self.get_server_addr(),
                               port=server_port, path='/'))
    def setup_cephadm_tls_config(self, server_addr: str, server_port: int) -> None:
        """Configure cherrypy to serve HTTPS using the cephadm SSL cert utils.

        Reuses a previously stored root cert/key from the module store when
        available; otherwise generates and persists a new root credential, then
        derives a host certificate for the cherrypy listener.
        """
        from cephadm.ssl_cert_utils import SSLCerts
        # the ssl certs utils uses a NamedTemporaryFile for the cert files
        # generated with generate_cert_files function. We need the SSLCerts
        # object to not be cleaned up in order to have those temp files not
        # be cleaned up, so making it an attribute of the module instead
        # of just a standalone object
        self.cephadm_monitoring_tls_ssl_certs = SSLCerts()
        host = self.get_mgr_ip()
        try:
            old_cert = self.get_store('root/cert')
            old_key = self.get_store('root/key')
            if not old_cert or not old_key:
                raise Exception('No old credentials for mgr-prometheus endpoint')
            self.cephadm_monitoring_tls_ssl_certs.load_root_credentials(old_cert, old_key)
        except Exception:
            # No (valid) stored credentials: generate and persist new ones.
            self.cephadm_monitoring_tls_ssl_certs.generate_root_cert(host)
            self.set_store('root/cert', self.cephadm_monitoring_tls_ssl_certs.get_root_cert())
            self.set_store('root/key', self.cephadm_monitoring_tls_ssl_certs.get_root_key())
        cert_file_path, key_file_path = self.cephadm_monitoring_tls_ssl_certs.generate_cert_files(
            self.get_hostname(), host)
        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': server_port,
            'engine.autoreload.on': False,
            'server.ssl_module': 'builtin',
            'server.ssl_certificate': cert_file_path,
            'server.ssl_private_key': key_file_path,
        })
        # Publish the URI that others may use to access the service we're about to start serving
        self.set_uri(build_url(scheme='https', host=self.get_server_addr(),
                               port=server_port, path='/'))
    def serve(self) -> None:
        """Main loop: configure and start the cherrypy exporter, then block
        until shutdown() signals self.shutdown_event."""
        class Root(object):
            # collapse everything to '/'
            def _cp_dispatch(self, vpath: str) -> 'Root':
                cherrypy.request.path = ''
                return self
            @cherrypy.expose
            def index(self) -> str:
                return '''<!DOCTYPE html>
<html>
    <head><title>Ceph Exporter</title></head>
    <body>
        <h1>Ceph Exporter</h1>
        <p><a href='/metrics'>Metrics</a></p>
    </body>
</html>'''
            @cherrypy.expose
            def metrics(self) -> Optional[str]:
                # Lock the function execution
                assert isinstance(_global_instance, Module)
                with _global_instance.collect_lock:
                    return self._metrics(_global_instance)
            @staticmethod
            def _metrics(instance: 'Module') -> Optional[str]:
                # NOTE: 'self' below is NOT the Root instance -- it is the
                # Module instance captured by closure from the enclosing
                # serve() scope.
                if not self.cache:
                    self.log.debug('Cache disabled, collecting and returning without cache')
                    cherrypy.response.headers['Content-Type'] = 'text/plain'
                    return self.collect()
                # Return cached data if available
                if not instance.collect_cache:
                    raise cherrypy.HTTPError(503, 'No cached data available yet')
                def respond() -> Optional[str]:
                    assert isinstance(instance, Module)
                    cherrypy.response.headers['Content-Type'] = 'text/plain'
                    return instance.collect_cache
                if instance.collect_time < instance.scrape_interval:
                    # Respond if cache isn't stale
                    return respond()
                if instance.stale_cache_strategy == instance.STALE_CACHE_RETURN:
                    # Respond even if cache is stale
                    instance.log.info(
                        'Gathering data took {:.2f} seconds, metrics are stale for {:.2f} seconds, '
                        'returning metrics from stale cache.'.format(
                            instance.collect_time,
                            instance.collect_time - instance.scrape_interval
                        )
                    )
                    return respond()
                if instance.stale_cache_strategy == instance.STALE_CACHE_FAIL:
                    # Fail if cache is stale
                    msg = (
                        'Gathering data took {:.2f} seconds, metrics are stale for {:.2f} seconds, '
                        'returning "service unavailable".'.format(
                            instance.collect_time,
                            instance.collect_time - instance.scrape_interval,
                        )
                    )
                    instance.log.error(msg)
                    raise cherrypy.HTTPError(503, msg)
                return None
        # Make the cache timeout for collecting configurable
        self.scrape_interval = cast(float, self.get_localized_module_option('scrape_interval'))
        self.stale_cache_strategy = cast(
            str, self.get_localized_module_option('stale_cache_strategy'))
        if self.stale_cache_strategy not in [self.STALE_CACHE_FAIL,
                                             self.STALE_CACHE_RETURN]:
            self.stale_cache_strategy = self.STALE_CACHE_FAIL
        server_addr = cast(str, self.get_localized_module_option('server_addr', get_default_addr()))
        server_port = cast(int, self.get_localized_module_option('server_port', DEFAULT_PORT))
        self.log.info(
            "server_addr: %s server_port: %s" %
            (server_addr, server_port)
        )
        self.cache = cast(bool, self.get_localized_module_option('cache', True))
        if self.cache:
            self.log.info('Cache enabled')
            self.metrics_thread.start()
        else:
            self.log.info('Cache disabled')
        self.configure(server_addr, server_port)
        cherrypy.tree.mount(Root(), "/")
        self.log.info('Starting engine...')
        cherrypy.engine.start()
        self.log.info('Engine started.')
        # wait for the shutdown event
        self.shutdown_event.wait()
        self.shutdown_event.clear()
        # tell metrics collection thread to stop collecting new metrics
        self.metrics_thread.stop()
        cherrypy.engine.stop()
        cherrypy.server.httpserver = None
        self.log.info('Engine stopped.')
        self.shutdown_rbd_stats()
        # wait for the metrics collection thread to stop
        self.metrics_thread.join()
    def shutdown(self) -> None:
        """Wake serve() via the shutdown event so it can stop the server."""
        self.log.info('Stopping engine...')
        self.shutdown_event.set()
@CLIReadCommand('healthcheck history ls')
def _list_healthchecks(self, format: Format = Format.plain) -> HandleCommandResult:
"""List all the healthchecks being tracked
The format options are parsed in ceph_argparse, before they get evaluated here so
we can safely assume that what we have to process is valid. ceph_argparse will throw
a ValueError if the cast to our Format class fails.
Args:
format (Format, optional): output format. Defaults to Format.plain.
Returns:
HandleCommandResult: return code, stdout and stderr returned to the caller
"""
out = ""
if format == Format.plain:
out = str(self.health_history)
elif format == Format.yaml:
out = self.health_history.as_yaml()
else:
out = self.health_history.as_json(format == Format.json_pretty)
return HandleCommandResult(retval=0, stdout=out)
    @CLIWriteCommand('healthcheck history clear')
    def _clear_healthchecks(self) -> HandleCommandResult:
        """Clear the healthcheck history"""
        # Wipe the tracked history and acknowledge on stdout.
        self.health_history.reset()
        return HandleCommandResult(retval=0, stdout="healthcheck history cleared")
class StandbyModule(MgrStandbyModule):
    """Served while this mgr is a standby: depending on 'standby_behaviour',
    either links to the active mgr's exporter or returns a configurable
    HTTP error status."""
    MODULE_OPTIONS = Module.MODULE_OPTIONS
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(StandbyModule, self).__init__(*args, **kwargs)
        self.shutdown_event = threading.Event()
    def serve(self) -> None:
        """Start a minimal cherrypy app and block until shutdown()."""
        server_addr = self.get_localized_module_option(
            'server_addr', get_default_addr())
        server_port = self.get_localized_module_option(
            'server_port', DEFAULT_PORT)
        self.log.info("server_addr: %s server_port: %s" %
                      (server_addr, server_port))
        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': server_port,
            'engine.autoreload.on': False,
            'request.show_tracebacks': False
        })
        # Bind the module instance for use inside the nested handler class.
        module = self
        class Root(object):
            @cherrypy.expose
            def index(self) -> str:
                standby_behaviour = module.get_module_option('standby_behaviour')
                if standby_behaviour == 'default':
                    # Point the visitor at the active mgr's exporter.
                    active_uri = module.get_active_uri()
                    return '''<!DOCTYPE html>
<html>
    <head><title>Ceph Exporter</title></head>
    <body>
        <h1>Ceph Exporter</h1>
        <p><a href='{}metrics'>Metrics</a></p>
    </body>
</html>'''.format(active_uri)
                else:
                    status = module.get_module_option('standby_error_status_code')
                    raise cherrypy.HTTPError(status, message="Keep on looking")
            @cherrypy.expose
            def metrics(self) -> str:
                # Standby serves an empty document so scrapers don't error out.
                cherrypy.response.headers['Content-Type'] = 'text/plain'
                return ''
        cherrypy.tree.mount(Root(), '/', {})
        self.log.info('Starting engine...')
        cherrypy.engine.start()
        self.log.info('Engine started.')
        # Wait for shutdown event
        self.shutdown_event.wait()
        self.shutdown_event.clear()
        cherrypy.engine.stop()
        cherrypy.server.httpserver = None
        self.log.info('Engine stopped.')
    def shutdown(self) -> None:
        """Wake serve() so it can stop the cherrypy engine."""
        self.log.info("Stopping engine...")
        self.shutdown_event.set()
        self.log.info("Stopped engine")
| 78,062 | 37.740943 | 295 | py |
null | ceph-main/src/pybind/mgr/prometheus/test_module.py | from typing import Dict
from unittest import TestCase
from prometheus.module import Metric, LabelValues, Number
class MetricGroupTest(TestCase):
    """Unit tests for Metric.group_by()."""
    def setUp(self):
        # Label schema shared by the disk-occupation fixtures below.
        self.DISK_OCCUPATION = (
            "ceph_daemon",
            "device",
            "db_device",
            "wal_device",
            "instance",
        )
        # Two daemons (osd.5/osd.6) share the same device+instance so they can
        # be merged by the join function in the grouping tests.
        self.metric_value: Dict[LabelValues, Number] = {
            ("osd.0", "/dev/dm-0", "", "", "node1"): 1,
            ("osd.1", "/dev/dm-0", "", "", "node3"): 1,
            ("osd.2", "/dev/dm-0", "", "", "node2"): 1,
            ("osd.3", "/dev/dm-1", "", "", "node1"): 1,
            ("osd.4", "/dev/dm-1", "", "", "node3"): 1,
            ("osd.5", "/dev/dm-1", "", "", "node2"): 1,
            ("osd.6", "/dev/dm-1", "", "", "node2"): 1,
        }
    def test_metric_group_by(self):
        # Grouping by (device, instance) joins the colliding daemons with '+'
        # and drops the unused db/wal label columns.
        m = Metric("untyped", "disk_occupation", "", self.DISK_OCCUPATION)
        m.value = self.metric_value
        grouped_metric = m.group_by(
            ["device", "instance"],
            {"ceph_daemon": lambda xs: "+".join(xs)},
            name="disk_occupation_display",
        )
        self.assertEqual(
            grouped_metric.value,
            {
                ("osd.0", "/dev/dm-0", "node1"): 1,
                ("osd.1", "/dev/dm-0", "node3"): 1,
                ("osd.2", "/dev/dm-0", "node2"): 1,
                ("osd.3", "/dev/dm-1", "node1"): 1,
                ("osd.4", "/dev/dm-1", "node3"): 1,
                ("osd.5+osd.6", "/dev/dm-1", "node2"): 1,
            },
        )
        self.maxDiff = None
        self.assertEqual(
            grouped_metric.str_expfmt(),
            """
# HELP ceph_disk_occupation_display 
# TYPE ceph_disk_occupation_display untyped
ceph_disk_occupation_display{ceph_daemon="osd.0",device="/dev/dm-0",instance="node1"} 1.0
ceph_disk_occupation_display{ceph_daemon="osd.1",device="/dev/dm-0",instance="node3"} 1.0
ceph_disk_occupation_display{ceph_daemon="osd.2",device="/dev/dm-0",instance="node2"} 1.0
ceph_disk_occupation_display{ceph_daemon="osd.3",device="/dev/dm-1",instance="node1"} 1.0
ceph_disk_occupation_display{ceph_daemon="osd.4",device="/dev/dm-1",instance="node3"} 1.0
ceph_disk_occupation_display{ceph_daemon="osd.5+osd.6",device="/dev/dm-1",instance="node2"} 1.0""",  # noqa: W291
        )
        self.assertEqual(
            grouped_metric.labelnames, ("ceph_daemon", "device", "instance")
        )
    def test_metric_group_by__no_value(self):
        # Grouping an empty metric yields an empty value map and a header-only
        # exposition block.
        m = Metric("metric_type", "name", "desc", labels=('foo', 'bar'))
        grouped = m.group_by(['foo'], {'bar': lambda bars: ', '.join(bars)})
        self.assertEqual(grouped.value, {})
        self.assertEqual(grouped.str_expfmt(),
                         '\n# HELP ceph_name desc\n# TYPE ceph_name metric_type')
    def test_metric_group_by__no_labels(self):
        # group_by() asserts when the metric has no label names at all.
        m = Metric("metric_type", "name", "desc", labels=None)
        with self.assertRaises(AssertionError) as cm:
            m.group_by([], {})
        self.assertEqual(str(cm.exception), "cannot match keys without label names")
    def test_metric_group_by__key_not_in_labels(self):
        # Keys must be a subset of the metric's label names.
        m = Metric("metric_type", "name", "desc", labels=("foo", "bar"))
        m.value = self.metric_value
        with self.assertRaises(AssertionError) as cm:
            m.group_by(["baz"], {})
        self.assertEqual(str(cm.exception), "unknown key: baz")
    def test_metric_group_by__empty_joins(self):
        # At least one join function is required.
        m = Metric("", "", "", ("foo", "bar"))
        with self.assertRaises(AssertionError) as cm:
            m.group_by(["foo"], joins={})
        self.assertEqual(str(cm.exception), "joins must not be empty")
    def test_metric_group_by__joins_not_callable(self):
        # Every join value must be a callable.
        m = Metric("", "", "", ("foo", "bar"))
        m.value = self.metric_value
        with self.assertRaises(AssertionError) as cm:
            m.group_by(["foo"], {"bar": "not callable str"})
        self.assertEqual(str(cm.exception), "joins must be callable")
| 3,979 | 41.340426 | 113 | py |
null | ceph-main/src/pybind/mgr/rbd_support/__init__.py | # flake8: noqa
from .module import Module
| 42 | 13.333333 | 26 | py |
null | ceph-main/src/pybind/mgr/rbd_support/common.py | import re
from typing import Dict, Optional, Tuple, TYPE_CHECKING, Union
# Key returned by extract_pool_key() when no pool spec is given, i.e. a
# setting that applies to every pool/namespace.
GLOBAL_POOL_KEY = (None, None)
class NotAuthorizedError(Exception):
    """Raised when the caller's caps do not allow the requested pool/namespace."""
    pass


if TYPE_CHECKING:
    from rbd_support.module import Module


def is_authorized(module: 'Module',
                  pool: Optional[str],
                  namespace: Optional[str]) -> bool:
    """Return whether the current session may act on the given pool/namespace."""
    scope = {"pool": pool if pool else '',
             "namespace": namespace if namespace else ''}
    return module.is_authorized(scope)


def authorize_request(module: 'Module',
                      pool: Optional[str],
                      namespace: Optional[str]) -> None:
    """Raise NotAuthorizedError unless the pool/namespace scope is permitted."""
    if is_authorized(module, pool, namespace):
        return
    raise NotAuthorizedError(
        "not authorized on pool={}, namespace={}".format(pool, namespace))
# A (pool, namespace) pair; the all-None variant corresponds to GLOBAL_POOL_KEY.
PoolKeyT = Union[Tuple[str, str], Tuple[None, None]]


def extract_pool_key(pool_spec: Optional[str]) -> PoolKeyT:
    """Parse a "pool[/namespace]" spec into a (pool, namespace) key.

    An empty/None spec maps to GLOBAL_POOL_KEY; a spec without a namespace
    yields an empty-string namespace. Raises ValueError on a malformed spec.
    """
    if not pool_spec:
        return GLOBAL_POOL_KEY
    parsed = re.match(r'^([^/]+)(?:/([^/]+))?$', pool_spec)
    if parsed is None:
        raise ValueError("Invalid pool spec: {}".format(pool_spec))
    pool, namespace = parsed.group(1), parsed.group(2)
    return (pool, namespace if namespace is not None else '')
def get_rbd_pools(module: 'Module') -> Dict[int, str]:
    """Map pool id -> pool name for every pool with the 'rbd' application enabled."""
    result: Dict[int, str] = {}
    for pool in module.get('osd_map')['pools']:
        if 'rbd' in pool.get('application_metadata', {}):
            result[pool['pool']] = pool['pool_name']
    return result
| 1,364 | 26.857143 | 82 | py |
null | ceph-main/src/pybind/mgr/rbd_support/mirror_snapshot_schedule.py | import errno
import json
import rados
import rbd
import traceback
from datetime import datetime
from threading import Condition, Lock, Thread
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, Union
from .common import get_rbd_pools
from .schedule import LevelSpec, Schedules
def namespace_validator(ioctx: rados.Ioctx) -> None:
    """Reject a pool/namespace that is not configured for per-image mirroring."""
    mode = rbd.RBD().mirror_mode_get(ioctx)
    if mode != rbd.RBD_MIRROR_MODE_IMAGE:
        raise ValueError("namespace {} is not in mirror image mode".format(
            ioctx.get_namespace()))
def image_validator(image: rbd.Image) -> None:
    """Reject an image that is not mirrored in snapshot mode."""
    mode = image.mirror_image_get_mode()
    if mode != rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT:
        raise rbd.InvalidArgument("Invalid mirror image mode")
class ImageSpec(NamedTuple):
    """Identifies one image: (pool id, namespace name, image id)."""
    pool_id: str
    namespace: str
    image_id: str
class CreateSnapshotRequests:
    """Callback-driven creation of mirror snapshots for many images at once.

    Per-image pipeline: open image -> get mirror mode -> get mirror info ->
    create snapshot -> close image.  The number of in-flight requests is
    capped by the 'max_concurrent_snap_create' module option; excess requests
    wait in self.queue and are started as earlier ones finish.
    """
    # Class-level lock/condition guarding pending, queue and ioctxs.
    lock = Lock()
    condition = Condition(lock)
    def __init__(self, handler: Any) -> None:
        self.handler = handler
        self.rados = handler.module.rados
        self.log = handler.log
        # Accepted requests that have not finished yet (in flight or queued).
        self.pending: Set[ImageSpec] = set()
        # Requests waiting for a free in-flight slot.
        self.queue: List[ImageSpec] = []
        # (pool_id, namespace) -> (shared ioctx, image specs currently using it).
        self.ioctxs: Dict[Tuple[str, str], Tuple[rados.Ioctx, Set[ImageSpec]]] = {}
    def __del__(self) -> None:
        self.wait_for_pending()
    def wait_for_pending(self) -> None:
        """Block until every outstanding snapshot request has finished."""
        with self.lock:
            while self.pending:
                self.log.debug(
                    "CreateSnapshotRequests.wait_for_pending: "
                    "{} images".format(len(self.pending)))
                self.condition.wait()
        self.log.debug("CreateSnapshotRequests.wait_for_pending: done")
    def add(self, pool_id: str, namespace: str, image_id: str) -> None:
        """Accept a snapshot request: start it now or queue it if at capacity."""
        image_spec = ImageSpec(pool_id, namespace, image_id)
        self.log.debug("CreateSnapshotRequests.add: {}/{}/{}".format(
            pool_id, namespace, image_id))
        max_concurrent = self.handler.module.get_localized_module_option(
            self.handler.MODULE_OPTION_NAME_MAX_CONCURRENT_SNAP_CREATE)
        with self.lock:
            if image_spec in self.pending:
                # A request for this exact image is already outstanding.
                self.log.info(
                    "CreateSnapshotRequests.add: {}/{}/{}: {}".format(
                        pool_id, namespace, image_id,
                        "previous request is still in progress"))
                return
            self.pending.add(image_spec)
            if len(self.pending) > max_concurrent:
                self.queue.append(image_spec)
                return
        self.open_image(image_spec)
    def open_image(self, image_spec: ImageSpec) -> None:
        """Start the async pipeline by opening the image by id."""
        pool_id, namespace, image_id = image_spec
        self.log.debug("CreateSnapshotRequests.open_image: {}/{}/{}".format(
            pool_id, namespace, image_id))
        try:
            ioctx = self.get_ioctx(image_spec)
            def cb(comp: rados.Completion, image: rbd.Image) -> None:
                self.handle_open_image(image_spec, comp, image)
            rbd.RBD().aio_open_image(cb, ioctx, image_id=image_id)
        except Exception as e:
            self.log.error(
                "exception when opening {}/{}/{}: {}".format(
                    pool_id, namespace, image_id, e))
            self.finish(image_spec)
    def handle_open_image(self,
                          image_spec: ImageSpec,
                          comp: rados.Completion,
                          image: rbd.Image) -> None:
        """Completion of aio_open_image; continue with the mirror mode check."""
        pool_id, namespace, image_id = image_spec
        self.log.debug(
            "CreateSnapshotRequests.handle_open_image {}/{}/{}: r={}".format(
                pool_id, namespace, image_id, comp.get_return_value()))
        if comp.get_return_value() < 0:
            # ENOENT just means the image vanished; log only other errors.
            if comp.get_return_value() != -errno.ENOENT:
                self.log.error(
                    "error when opening {}/{}/{}: {}".format(
                        pool_id, namespace, image_id, comp.get_return_value()))
            self.finish(image_spec)
            return
        self.get_mirror_mode(image_spec, image)
    def get_mirror_mode(self, image_spec: ImageSpec, image: rbd.Image) -> None:
        """Asynchronously query the image's mirror mode."""
        pool_id, namespace, image_id = image_spec
        self.log.debug("CreateSnapshotRequests.get_mirror_mode: {}/{}/{}".format(
            pool_id, namespace, image_id))
        def cb(comp: rados.Completion, mode: int) -> None:
            self.handle_get_mirror_mode(image_spec, image, comp, mode)
        try:
            image.aio_mirror_image_get_mode(cb)
        except Exception as e:
            self.log.error(
                "exception when getting mirror mode for {}/{}/{}: {}".format(
                    pool_id, namespace, image_id, e))
            self.close_image(image_spec, image)
    def handle_get_mirror_mode(self,
                               image_spec: ImageSpec,
                               image: rbd.Image,
                               comp: rados.Completion,
                               mode: int) -> None:
        """Skip images not mirrored in snapshot mode; else check mirror info."""
        pool_id, namespace, image_id = image_spec
        self.log.debug(
            "CreateSnapshotRequests.handle_get_mirror_mode {}/{}/{}: r={} mode={}".format(
                pool_id, namespace, image_id, comp.get_return_value(), mode))
        if comp.get_return_value() < 0:
            if comp.get_return_value() != -errno.ENOENT:
                self.log.error(
                    "error when getting mirror mode for {}/{}/{}: {}".format(
                        pool_id, namespace, image_id, comp.get_return_value()))
            self.close_image(image_spec, image)
            return
        if mode != rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT:
            self.log.debug(
                "CreateSnapshotRequests.handle_get_mirror_mode: {}/{}/{}: {}".format(
                    pool_id, namespace, image_id,
                    "snapshot mirroring is not enabled"))
            self.close_image(image_spec, image)
            return
        self.get_mirror_info(image_spec, image)
    def get_mirror_info(self, image_spec: ImageSpec, image: rbd.Image) -> None:
        """Asynchronously query the image's mirror info (primary flag etc.)."""
        pool_id, namespace, image_id = image_spec
        self.log.debug("CreateSnapshotRequests.get_mirror_info: {}/{}/{}".format(
            pool_id, namespace, image_id))
        def cb(comp: rados.Completion, info: Dict[str, Union[str, int]]) -> None:
            self.handle_get_mirror_info(image_spec, image, comp, info)
        try:
            image.aio_mirror_image_get_info(cb)
        except Exception as e:
            self.log.error(
                "exception when getting mirror info for {}/{}/{}: {}".format(
                    pool_id, namespace, image_id, e))
            self.close_image(image_spec, image)
    def handle_get_mirror_info(self,
                               image_spec: ImageSpec,
                               image: rbd.Image,
                               comp: rados.Completion,
                               info: Dict[str, Union[str, int]]) -> None:
        """Only primary images get snapshots; non-primary ones are skipped."""
        pool_id, namespace, image_id = image_spec
        self.log.debug(
            "CreateSnapshotRequests.handle_get_mirror_info {}/{}/{}: r={} info={}".format(
                pool_id, namespace, image_id, comp.get_return_value(), info))
        if comp.get_return_value() < 0:
            if comp.get_return_value() != -errno.ENOENT:
                self.log.error(
                    "error when getting mirror info for {}/{}/{}: {}".format(
                        pool_id, namespace, image_id, comp.get_return_value()))
            self.close_image(image_spec, image)
            return
        if not info['primary']:
            self.log.debug(
                "CreateSnapshotRequests.handle_get_mirror_info: {}/{}/{}: {}".format(
                    pool_id, namespace, image_id,
                    "is not primary"))
            self.close_image(image_spec, image)
            return
        self.create_snapshot(image_spec, image)
    def create_snapshot(self, image_spec: ImageSpec, image: rbd.Image) -> None:
        """Asynchronously create the mirror snapshot."""
        pool_id, namespace, image_id = image_spec
        self.log.debug(
            "CreateSnapshotRequests.create_snapshot for {}/{}/{}".format(
                pool_id, namespace, image_id))
        def cb(comp: rados.Completion, snap_id: int) -> None:
            self.handle_create_snapshot(image_spec, image, comp, snap_id)
        try:
            image.aio_mirror_image_create_snapshot(0, cb)
        except Exception as e:
            self.log.error(
                "exception when creating snapshot for {}/{}/{}: {}".format(
                    pool_id, namespace, image_id, e))
            self.close_image(image_spec, image)
    def handle_create_snapshot(self,
                               image_spec: ImageSpec,
                               image: rbd.Image,
                               comp: rados.Completion,
                               snap_id: int) -> None:
        """Final pipeline step: log failures (except ENOENT) and close the image."""
        pool_id, namespace, image_id = image_spec
        self.log.debug(
            "CreateSnapshotRequests.handle_create_snapshot for {}/{}/{}: r={}, snap_id={}".format(
                pool_id, namespace, image_id, comp.get_return_value(), snap_id))
        if comp.get_return_value() < 0 and \
           comp.get_return_value() != -errno.ENOENT:
            self.log.error(
                "error when creating snapshot for {}/{}/{}: {}".format(
                    pool_id, namespace, image_id, comp.get_return_value()))
        self.close_image(image_spec, image)
    def close_image(self, image_spec: ImageSpec, image: rbd.Image) -> None:
        """Asynchronously close the image; finish() runs from the completion."""
        pool_id, namespace, image_id = image_spec
        self.log.debug(
            "CreateSnapshotRequests.close_image {}/{}/{}".format(
                pool_id, namespace, image_id))
        def cb(comp: rados.Completion) -> None:
            self.handle_close_image(image_spec, comp)
        try:
            image.aio_close(cb)
        except Exception as e:
            self.log.error(
                "exception when closing {}/{}/{}: {}".format(
                    pool_id, namespace, image_id, e))
            self.finish(image_spec)
    def handle_close_image(self,
                           image_spec: ImageSpec,
                           comp: rados.Completion) -> None:
        """Completion of aio_close; always finishes the request."""
        pool_id, namespace, image_id = image_spec
        self.log.debug(
            "CreateSnapshotRequests.handle_close_image {}/{}/{}: r={}".format(
                pool_id, namespace, image_id, comp.get_return_value()))
        if comp.get_return_value() < 0:
            self.log.error(
                "error when closing {}/{}/{}: {}".format(
                    pool_id, namespace, image_id, comp.get_return_value()))
        self.finish(image_spec)
    def finish(self, image_spec: ImageSpec) -> None:
        """Mark a request complete and start the next queued one, if any."""
        pool_id, namespace, image_id = image_spec
        self.log.debug("CreateSnapshotRequests.finish: {}/{}/{}".format(
            pool_id, namespace, image_id))
        self.put_ioctx(image_spec)
        with self.lock:
            self.pending.remove(image_spec)
            self.condition.notify()
            if not self.queue:
                return
            image_spec = self.queue.pop(0)
        # Started outside the lock: open_image may call finish() recursively.
        self.open_image(image_spec)
    def get_ioctx(self, image_spec: ImageSpec) -> rados.Ioctx:
        """Return a shared ioctx for the image's pool/namespace.

        The ioctx is refcounted via the set of image specs using it.
        """
        pool_id, namespace, image_id = image_spec
        nspec = (pool_id, namespace)
        with self.lock:
            ioctx, images = self.ioctxs.get(nspec, (None, None))
            if not ioctx:
                ioctx = self.rados.open_ioctx2(int(pool_id))
                ioctx.set_namespace(namespace)
                images = set()
                self.ioctxs[nspec] = (ioctx, images)
            assert images is not None
            images.add(image_spec)
        return ioctx
    def put_ioctx(self, image_spec: ImageSpec) -> None:
        """Drop the image's reference on its shared ioctx; release when unused."""
        pool_id, namespace, image_id = image_spec
        nspec = (pool_id, namespace)
        with self.lock:
            ioctx, images = self.ioctxs[nspec]
            images.remove(image_spec)
            if not images:
                del self.ioctxs[nspec]
class MirrorSnapshotScheduleHandler:
    # Module option holding the mirror snapshot schedule configuration.
    MODULE_OPTION_NAME = "mirror_snapshot_schedule"
    # Module option capping in-flight snapshot creations (see CreateSnapshotRequests).
    MODULE_OPTION_NAME_MAX_CONCURRENT_SNAP_CREATE = "max_concurrent_snap_create"
    # NOTE(review): presumably the RADOS object name used to persist schedules;
    # not referenced in this chunk -- verify against the Schedules class.
    SCHEDULE_OID = "rbd_mirror_snapshot_schedule"
    # Minimum interval between image map refreshes (see refresh_images()).
    REFRESH_DELAY_SECONDS = 60.0
    # Class-level lock/condition protecting queue/images and waking run().
    lock = Lock()
    condition = Condition(lock)
    def __init__(self, module: Any) -> None:
        self.module = module
        self.log = module.log
        # Epoch start forces a full image refresh on the first run() iteration.
        self.last_refresh_images = datetime(1970, 1, 1)
        self.create_snapshot_requests = CreateSnapshotRequests(self)
        self.stop_thread = False
        self.thread = Thread(target=self.run)
    def setup(self) -> None:
        """Build the initial schedule queue, then start the scheduler thread."""
        self.init_schedule_queue()
        self.thread.start()
    def shutdown(self) -> None:
        """Stop the scheduler thread and wait for in-flight snapshot requests."""
        self.log.info("MirrorSnapshotScheduleHandler: shutting down")
        self.stop_thread = True
        if self.thread.is_alive():
            self.log.debug("MirrorSnapshotScheduleHandler: joining thread")
            self.thread.join()
        self.create_snapshot_requests.wait_for_pending()
        self.log.info("MirrorSnapshotScheduleHandler: shut down")
    def run(self) -> None:
        """Scheduler thread body: refresh images, pop due entries and request
        snapshot creation, re-enqueueing each image for its next run."""
        try:
            self.log.info("MirrorSnapshotScheduleHandler: starting")
            while not self.stop_thread:
                refresh_delay = self.refresh_images()
                with self.lock:
                    (image_spec, wait_time) = self.dequeue()
                    if not image_spec:
                        # Nothing due yet: sleep until the next entry or refresh.
                        self.condition.wait(min(wait_time, refresh_delay))
                        continue
                pool_id, namespace, image_id = image_spec
                self.create_snapshot_requests.add(pool_id, namespace, image_id)
                with self.lock:
                    self.enqueue(datetime.now(), pool_id, namespace, image_id)
        except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
            # The client was blocklisted: tell the module so it can recover.
            self.log.exception("MirrorSnapshotScheduleHandler: client blocklisted")
            self.module.client_blocklisted.set()
        except Exception as ex:
            self.log.fatal("Fatal runtime error: {}\n{}".format(
                ex, traceback.format_exc()))
    def init_schedule_queue(self) -> None:
        """Initialize the queue/image maps and load schedules and images."""
        # schedule_time => image_spec
        self.queue: Dict[str, List[ImageSpec]] = {}
        # pool_id => {namespace => image_id}
        self.images: Dict[str, Dict[str, Dict[str, str]]] = {}
        self.schedules = Schedules(self)
        self.refresh_images()
        self.log.debug("MirrorSnapshotScheduleHandler: queue is initialized")
    def load_schedules(self) -> None:
        """Reload the schedule set, validating that targets are snapshot-mirrored."""
        self.log.info("MirrorSnapshotScheduleHandler: load_schedules")
        self.schedules.load(namespace_validator, image_validator)
    def refresh_images(self) -> float:
        """Refresh schedules and the mirrored image map, at most once every
        REFRESH_DELAY_SECONDS.

        Returns the number of seconds until the next refresh is due.
        """
        elapsed = (datetime.now() - self.last_refresh_images).total_seconds()
        if elapsed < self.REFRESH_DELAY_SECONDS:
            return self.REFRESH_DELAY_SECONDS - elapsed
        self.log.debug("MirrorSnapshotScheduleHandler: refresh_images")
        with self.lock:
            self.load_schedules()
            if not self.schedules:
                # No schedules at all: clear everything and try again later.
                self.log.debug("MirrorSnapshotScheduleHandler: no schedules")
                self.images = {}
                self.queue = {}
                self.last_refresh_images = datetime.now()
                return self.REFRESH_DELAY_SECONDS
        images: Dict[str, Dict[str, Dict[str, str]]] = {}
        for pool_id, pool_name in get_rbd_pools(self.module).items():
            # Skip pools no schedule could possibly apply to.
            if not self.schedules.intersects(
                    LevelSpec.from_pool_spec(pool_id, pool_name)):
                continue
            with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
                self.load_pool_images(ioctx, images)
        with self.lock:
            self.refresh_queue(images)
            self.images = images
        self.last_refresh_images = datetime.now()
        return self.REFRESH_DELAY_SECONDS
    def load_pool_images(self,
                         ioctx: rados.Ioctx,
                         images: Dict[str, Dict[str, Dict[str, str]]]) -> None:
        """Scan one pool and record its primary snapshot-mirrored images.

        Results are added to `images` as
        pool_id -> namespace -> image_id -> display name.
        """
        pool_id = str(ioctx.get_pool_id())
        pool_name = ioctx.get_pool_name()
        images[pool_id] = {}
        self.log.debug("load_pool_images: pool={}".format(pool_name))
        try:
            # '' is the default namespace.
            namespaces = [''] + rbd.RBD().namespace_list(ioctx)
            for namespace in namespaces:
                if not self.schedules.intersects(
                        LevelSpec.from_pool_spec(int(pool_id), pool_name, namespace)):
                    continue
                self.log.debug("load_pool_images: pool={}, namespace={}".format(
                    pool_name, namespace))
                images[pool_id][namespace] = {}
                ioctx.set_namespace(namespace)
                mirror_images = dict(rbd.RBD().mirror_image_info_list(
                    ioctx, rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT))
                if not mirror_images:
                    continue
                # Resolve ids -> names, restricted to the mirrored images.
                image_names = dict(
                    [(x['id'], x['name']) for x in filter(
                        lambda x: x['id'] in mirror_images,
                        rbd.RBD().list2(ioctx))])
                for image_id, info in mirror_images.items():
                    if not info['primary']:
                        continue
                    image_name = image_names.get(image_id)
                    if not image_name:
                        continue
                    if namespace:
                        name = "{}/{}/{}".format(pool_name, namespace,
                                                 image_name)
                    else:
                        name = "{}/{}".format(pool_name, image_name)
                    self.log.debug(
                        "load_pool_images: adding image {}".format(name))
                    images[pool_id][namespace][image_id] = name
        except rbd.ConnectionShutdown:
            # Propagate blocklisting so run() can handle it.
            raise
        except Exception as e:
            self.log.error(
                "load_pool_images: exception when scanning pool {}: {}".format(
                    pool_name, e))
def rebuild_queue(self) -> None:
now = datetime.now()
# don't remove from queue "due" images
now_string = datetime.strftime(now, "%Y-%m-%d %H:%M:00")
for schedule_time in list(self.queue):
if schedule_time > now_string:
del self.queue[schedule_time]
if not self.schedules:
return
for pool_id in self.images:
for namespace in self.images[pool_id]:
for image_id in self.images[pool_id][namespace]:
self.enqueue(now, pool_id, namespace, image_id)
self.condition.notify()
def refresh_queue(self,
current_images: Dict[str, Dict[str, Dict[str, str]]]) -> None:
now = datetime.now()
for pool_id in self.images:
for namespace in self.images[pool_id]:
for image_id in self.images[pool_id][namespace]:
if pool_id not in current_images or \
namespace not in current_images[pool_id] or \
image_id not in current_images[pool_id][namespace]:
self.remove_from_queue(pool_id, namespace, image_id)
for pool_id in current_images:
for namespace in current_images[pool_id]:
for image_id in current_images[pool_id][namespace]:
if pool_id not in self.images or \
namespace not in self.images[pool_id] or \
image_id not in self.images[pool_id][namespace]:
self.enqueue(now, pool_id, namespace, image_id)
self.condition.notify()
def enqueue(self, now: datetime, pool_id: str, namespace: str, image_id: str) -> None:
schedule = self.schedules.find(pool_id, namespace, image_id)
if not schedule:
self.log.debug(
"MirrorSnapshotScheduleHandler: no schedule for {}/{}/{}".format(
pool_id, namespace, image_id))
return
schedule_time = schedule.next_run(now)
if schedule_time not in self.queue:
self.queue[schedule_time] = []
self.log.debug(
"MirrorSnapshotScheduleHandler: scheduling {}/{}/{} at {}".format(
pool_id, namespace, image_id, schedule_time))
image_spec = ImageSpec(pool_id, namespace, image_id)
if image_spec not in self.queue[schedule_time]:
self.queue[schedule_time].append(image_spec)
def dequeue(self) -> Tuple[Optional[ImageSpec], float]:
if not self.queue:
return None, 1000.0
now = datetime.now()
schedule_time = sorted(self.queue)[0]
if datetime.strftime(now, "%Y-%m-%d %H:%M:%S") < schedule_time:
wait_time = (datetime.strptime(schedule_time,
"%Y-%m-%d %H:%M:%S") - now)
return None, wait_time.total_seconds()
images = self.queue[schedule_time]
image = images.pop(0)
if not images:
del self.queue[schedule_time]
return image, 0.0
def remove_from_queue(self, pool_id: str, namespace: str, image_id: str) -> None:
self.log.debug(
"MirrorSnapshotScheduleHandler: descheduling {}/{}/{}".format(
pool_id, namespace, image_id))
empty_slots = []
image_spec = ImageSpec(pool_id, namespace, image_id)
for schedule_time, images in self.queue.items():
if image_spec in images:
images.remove(image_spec)
if not images:
empty_slots.append(schedule_time)
for schedule_time in empty_slots:
del self.queue[schedule_time]
def add_schedule(self,
level_spec: LevelSpec,
interval: str,
start_time: Optional[str]) -> Tuple[int, str, str]:
self.log.debug(
"MirrorSnapshotScheduleHandler: add_schedule: level_spec={}, interval={}, start_time={}".format(
level_spec.name, interval, start_time))
# TODO: optimize to rebuild only affected part of the queue
with self.lock:
self.schedules.add(level_spec, interval, start_time)
self.rebuild_queue()
return 0, "", ""
def remove_schedule(self,
level_spec: LevelSpec,
interval: Optional[str],
start_time: Optional[str]) -> Tuple[int, str, str]:
self.log.debug(
"MirrorSnapshotScheduleHandler: remove_schedule: level_spec={}, interval={}, start_time={}".format(
level_spec.name, interval, start_time))
# TODO: optimize to rebuild only affected part of the queue
with self.lock:
self.schedules.remove(level_spec, interval, start_time)
self.rebuild_queue()
return 0, "", ""
def list(self, level_spec: LevelSpec) -> Tuple[int, str, str]:
self.log.debug(
"MirrorSnapshotScheduleHandler: list: level_spec={}".format(
level_spec.name))
with self.lock:
result = self.schedules.to_list(level_spec)
return 0, json.dumps(result, indent=4, sort_keys=True), ""
def status(self, level_spec: LevelSpec) -> Tuple[int, str, str]:
self.log.debug(
"MirrorSnapshotScheduleHandler: status: level_spec={}".format(
level_spec.name))
scheduled_images = []
with self.lock:
for schedule_time in sorted(self.queue):
for pool_id, namespace, image_id in self.queue[schedule_time]:
if not level_spec.matches(pool_id, namespace, image_id):
continue
image_name = self.images[pool_id][namespace][image_id]
scheduled_images.append({
'schedule_time': schedule_time,
'image': image_name
})
return 0, json.dumps({'scheduled_images': scheduled_images},
indent=4, sort_keys=True), ""
"""
RBD support module
"""
import enum
import errno
import functools
import inspect
import rados
import rbd
import traceback
from typing import cast, Any, Callable, Optional, Tuple, TypeVar
from mgr_module import CLIReadCommand, CLIWriteCommand, MgrModule, Option
from threading import Thread, Event
from .common import NotAuthorizedError
from .mirror_snapshot_schedule import image_validator, namespace_validator, \
LevelSpec, MirrorSnapshotScheduleHandler
from .perf import PerfHandler, OSD_PERF_QUERY_COUNTERS
from .task import TaskHandler
from .trash_purge_schedule import TrashPurgeScheduleHandler
class ImageSortBy(enum.Enum):
    # Accepted values for the optional ``sort_by`` argument of the
    # ``rbd perf image stats``/``rbd perf image counters`` commands.
    # Member names mirror the per-image OSD perf query counters
    # (see OSD_PERF_QUERY_COUNTERS, whose first entry is the default).
    write_ops = 'write_ops'
    write_bytes = 'write_bytes'
    write_latency = 'write_latency'
    read_ops = 'read_ops'
    read_bytes = 'read_bytes'
    read_latency = 'read_latency'
FuncT = TypeVar('FuncT', bound=Callable)
def with_latest_osdmap(func: FuncT) -> FuncT:
    """Decorator for CLI command handlers.

    Refuses to run until the module is ready, refreshes the OSD map first,
    and maps the exceptions raised by the wrapped handler onto the
    ``(retval, stdout, stderr)`` triple expected by the mgr CLI framework.
    A connection shutdown (client blocklisting) additionally wakes the
    recovery thread via ``client_blocklisted``.
    """
    @functools.wraps(func)
    def wrapper(self: 'Module', *args: Any, **kwargs: Any) -> Tuple[int, str, str]:
        if not self.module_ready:
            return (-errno.EAGAIN, "",
                    "rbd_support module is not ready, try again")
        # ensure we have latest pools available
        self.rados.wait_for_latest_osdmap()
        try:
            try:
                return func(self, *args, **kwargs)
            except NotAuthorizedError:
                # handled below without logging a traceback
                raise
            except Exception:
                # log the full traceback but don't send it to the CLI user
                self.log.exception("Fatal runtime error: ")
                raise
        except (rados.ConnectionShutdown, rbd.ConnectionShutdown) as ex:
            # RADOS client was blocklisted; trigger recovery in run()
            self.log.debug("with_latest_osdmap: client blocklisted")
            self.client_blocklisted.set()
            return -errno.EAGAIN, "", str(ex)
        except rados.Error as ex:
            return -ex.errno, "", str(ex)
        except rbd.OSError as ex:
            # must precede rbd.Error: OSError is its subclass with an errno
            return -ex.errno, "", str(ex)
        except rbd.Error as ex:
            return -errno.EINVAL, "", str(ex)
        except KeyError as ex:
            return -errno.ENOENT, "", str(ex)
        except ValueError as ex:
            return -errno.EINVAL, "", str(ex)
        except NotAuthorizedError as ex:
            return -errno.EACCES, "", str(ex)
    # expose the wrapped function's signature so the CLI framework can
    # derive the command's argument list from it
    wrapper.__signature__ = inspect.signature(func)  # type: ignore[attr-defined]
    return cast(FuncT, wrapper)
class Module(MgrModule):
    # ceph-mgr plugin entry point for RBD support: mirror snapshot
    # scheduling, trash purge scheduling, perf queries and async image tasks.
    MODULE_OPTIONS = [
        Option(name=MirrorSnapshotScheduleHandler.MODULE_OPTION_NAME),
        Option(name=MirrorSnapshotScheduleHandler.MODULE_OPTION_NAME_MAX_CONCURRENT_SNAP_CREATE,
               type='int',
               default=10),
        Option(name=TrashPurgeScheduleHandler.MODULE_OPTION_NAME),
    ]
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(Module, self).__init__(*args, **kwargs)
        # set when the RADOS client gets blocklisted; wakes the recovery thread
        self.client_blocklisted = Event()
        # flipped to True by setup_handlers(); gates all CLI commands
        # (see with_latest_osdmap)
        self.module_ready = False
        self.init_handlers()
        # background thread that performs handler setup and recovers from
        # client blocklisting
        self.recovery_thread = Thread(target=self.run)
        self.recovery_thread.start()
    def init_handlers(self) -> None:
        # construct (but do not start) the feature handlers
        self.mirror_snapshot_schedule = MirrorSnapshotScheduleHandler(self)
        self.perf = PerfHandler(self)
        self.task = TaskHandler(self)
        self.trash_purge_schedule = TrashPurgeScheduleHandler(self)
    def setup_handlers(self) -> None:
        # start all handlers; runs on the recovery thread, not in __init__
        self.log.info("starting setup")
        # new RADOS client is created and registered in the MgrMap
        # implicitly here as 'rados' is a property attribute.
        self.rados.wait_for_latest_osdmap()
        self.mirror_snapshot_schedule.setup()
        self.perf.setup()
        self.task.setup()
        self.trash_purge_schedule.setup()
        self.log.info("setup complete")
        self.module_ready = True
    def run(self) -> None:
        # recovery thread body: keep handlers alive across RADOS client
        # blocklisting; each loop iteration is one setup/teardown cycle
        self.log.info("recovery thread starting")
        try:
            while True:
                try:
                    self.setup_handlers()
                except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
                    # blocklisted again while setting up; tear down and retry
                    self.log.exception("setup_handlers: client blocklisted")
                    self.log.info("recovering from double blocklisting")
                else:
                    # block until RADOS client is blocklisted
                    self.client_blocklisted.wait()
                    self.log.info("recovering from blocklisting")
                self.shutdown()
                self.client_blocklisted.clear()
                self.init_handlers()
        except Exception as ex:
            self.log.fatal("Fatal runtime error: {}\n{}".format(
                ex, traceback.format_exc()))
    def shutdown(self) -> None:
        # stop accepting commands first, then stop the handlers
        self.module_ready = False
        self.mirror_snapshot_schedule.shutdown()
        self.trash_purge_schedule.shutdown()
        self.task.shutdown()
        self.perf.shutdown()
        # shut down client and deregister it from MgrMap
        super().shutdown()
    # ---- CLI commands -------------------------------------------------
    # NOTE: the docstrings below are the user-visible command help text;
    # do not edit them casually.
    @CLIWriteCommand('rbd mirror snapshot schedule add')
    @with_latest_osdmap
    def mirror_snapshot_schedule_add(self,
                                     level_spec: str,
                                     interval: str,
                                     start_time: Optional[str] = None) -> Tuple[int, str, str]:
        """
        Add rbd mirror snapshot schedule
        """
        spec = LevelSpec.from_name(self, level_spec, namespace_validator, image_validator)
        return self.mirror_snapshot_schedule.add_schedule(spec, interval, start_time)
    @CLIWriteCommand('rbd mirror snapshot schedule remove')
    @with_latest_osdmap
    def mirror_snapshot_schedule_remove(self,
                                        level_spec: str,
                                        interval: Optional[str] = None,
                                        start_time: Optional[str] = None) -> Tuple[int, str, str]:
        """
        Remove rbd mirror snapshot schedule
        """
        spec = LevelSpec.from_name(self, level_spec, namespace_validator, image_validator)
        return self.mirror_snapshot_schedule.remove_schedule(spec, interval, start_time)
    @CLIReadCommand('rbd mirror snapshot schedule list')
    @with_latest_osdmap
    def mirror_snapshot_schedule_list(self,
                                      level_spec: str = '') -> Tuple[int, str, str]:
        """
        List rbd mirror snapshot schedule
        """
        spec = LevelSpec.from_name(self, level_spec, namespace_validator, image_validator)
        return self.mirror_snapshot_schedule.list(spec)
    @CLIReadCommand('rbd mirror snapshot schedule status')
    @with_latest_osdmap
    def mirror_snapshot_schedule_status(self,
                                        level_spec: str = '') -> Tuple[int, str, str]:
        """
        Show rbd mirror snapshot schedule status
        """
        spec = LevelSpec.from_name(self, level_spec, namespace_validator, image_validator)
        return self.mirror_snapshot_schedule.status(spec)
    @CLIReadCommand('rbd perf image stats')
    @with_latest_osdmap
    def perf_image_stats(self,
                         pool_spec: Optional[str] = None,
                         sort_by: Optional[ImageSortBy] = None) -> Tuple[int, str, str]:
        """
        Retrieve current RBD IO performance stats
        """
        with self.perf.lock:
            # default sort key is the first OSD perf query counter
            sort_by_name = sort_by.name if sort_by else OSD_PERF_QUERY_COUNTERS[0]
            return self.perf.get_perf_stats(pool_spec, sort_by_name)
    @CLIReadCommand('rbd perf image counters')
    @with_latest_osdmap
    def perf_image_counters(self,
                            pool_spec: Optional[str] = None,
                            sort_by: Optional[ImageSortBy] = None) -> Tuple[int, str, str]:
        """
        Retrieve current RBD IO performance counters
        """
        with self.perf.lock:
            # default sort key is the first OSD perf query counter
            sort_by_name = sort_by.name if sort_by else OSD_PERF_QUERY_COUNTERS[0]
            return self.perf.get_perf_counters(pool_spec, sort_by_name)
    @CLIWriteCommand('rbd task add flatten')
    @with_latest_osdmap
    def task_add_flatten(self, image_spec: str) -> Tuple[int, str, str]:
        """
        Flatten a cloned image asynchronously in the background
        """
        with self.task.lock:
            return self.task.queue_flatten(image_spec)
    @CLIWriteCommand('rbd task add remove')
    @with_latest_osdmap
    def task_add_remove(self, image_spec: str) -> Tuple[int, str, str]:
        """
        Remove an image asynchronously in the background
        """
        with self.task.lock:
            return self.task.queue_remove(image_spec)
    @CLIWriteCommand('rbd task add trash remove')
    @with_latest_osdmap
    def task_add_trash_remove(self, image_id_spec: str) -> Tuple[int, str, str]:
        """
        Remove an image from the trash asynchronously in the background
        """
        with self.task.lock:
            return self.task.queue_trash_remove(image_id_spec)
    @CLIWriteCommand('rbd task add migration execute')
    @with_latest_osdmap
    def task_add_migration_execute(self, image_spec: str) -> Tuple[int, str, str]:
        """
        Execute an image migration asynchronously in the background
        """
        with self.task.lock:
            return self.task.queue_migration_execute(image_spec)
    @CLIWriteCommand('rbd task add migration commit')
    @with_latest_osdmap
    def task_add_migration_commit(self, image_spec: str) -> Tuple[int, str, str]:
        """
        Commit an executed migration asynchronously in the background
        """
        with self.task.lock:
            return self.task.queue_migration_commit(image_spec)
    @CLIWriteCommand('rbd task add migration abort')
    @with_latest_osdmap
    def task_add_migration_abort(self, image_spec: str) -> Tuple[int, str, str]:
        """
        Abort a prepared migration asynchronously in the background
        """
        with self.task.lock:
            return self.task.queue_migration_abort(image_spec)
    @CLIWriteCommand('rbd task cancel')
    @with_latest_osdmap
    def task_cancel(self, task_id: str) -> Tuple[int, str, str]:
        """
        Cancel a pending or running asynchronous task
        """
        with self.task.lock:
            return self.task.task_cancel(task_id)
    @CLIReadCommand('rbd task list')
    @with_latest_osdmap
    def task_list(self, task_id: Optional[str] = None) -> Tuple[int, str, str]:
        """
        List pending or running asynchronous tasks
        """
        with self.task.lock:
            return self.task.task_list(task_id)
    @CLIWriteCommand('rbd trash purge schedule add')
    @with_latest_osdmap
    def trash_purge_schedule_add(self,
                                 level_spec: str,
                                 interval: str,
                                 start_time: Optional[str] = None) -> Tuple[int, str, str]:
        """
        Add rbd trash purge schedule
        """
        spec = LevelSpec.from_name(self, level_spec, allow_image_level=False)
        return self.trash_purge_schedule.add_schedule(spec, interval, start_time)
    @CLIWriteCommand('rbd trash purge schedule remove')
    @with_latest_osdmap
    def trash_purge_schedule_remove(self,
                                    level_spec: str,
                                    interval: Optional[str] = None,
                                    start_time: Optional[str] = None) -> Tuple[int, str, str]:
        """
        Remove rbd trash purge schedule
        """
        spec = LevelSpec.from_name(self, level_spec, allow_image_level=False)
        return self.trash_purge_schedule.remove_schedule(spec, interval, start_time)
    @CLIReadCommand('rbd trash purge schedule list')
    @with_latest_osdmap
    def trash_purge_schedule_list(self,
                                  level_spec: str = '') -> Tuple[int, str, str]:
        """
        List rbd trash purge schedule
        """
        spec = LevelSpec.from_name(self, level_spec, allow_image_level=False)
        return self.trash_purge_schedule.list(spec)
    @CLIReadCommand('rbd trash purge schedule status')
    @with_latest_osdmap
    def trash_purge_schedule_status(self,
                                    level_spec: str = '') -> Tuple[int, str, str]:
        """
        Show rbd trash purge schedule status
        """
        spec = LevelSpec.from_name(self, level_spec, allow_image_level=False)
        return self.trash_purge_schedule.status(spec)