From 3939201f53d6c6eae23eec1ab83a47d987c18987 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Sun, 5 Nov 2023 16:03:34 -0500 Subject: [PATCH 01/29] cephadm: add a make_run_dir function This function is roughly the same as make_var_run only it doesn't rely on shelling out to the install command. Eventually, it will be used to replace make_var_run in certain locations. Signed-off-by: John Mulligan --- src/cephadm/cephadmlib/file_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cephadm/cephadmlib/file_utils.py b/src/cephadm/cephadmlib/file_utils.py index 7c9e6f69e4341..1b9f11499a49f 100644 --- a/src/cephadm/cephadmlib/file_utils.py +++ b/src/cephadm/cephadmlib/file_utils.py @@ -139,3 +139,7 @@ def get_file_timestamp(fn): ).strftime(DATEFMT) except Exception: return None + + +def make_run_dir(fsid: str, uid: int, gid: int) -> None: + makedirs(f'/var/run/ceph/{fsid}', uid, gid, 0o770) From 3aa16497b3ab88c0c88fa60f58d32fbb8156e4d4 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Sun, 20 Aug 2023 13:50:00 -0400 Subject: [PATCH 02/29] cephadm: add a new funkypatch fixture based on mock.patch and pytest This fixture acts like a combination of mock.patch and pytest's monkeypatch fixture. It has the additional feature of automatically finding and patching the same object imported in other modules. If you have 'from x import y', where y is a function or class, in both a.py and b.py it will patch both instances (so long as both a and b are already imported). This behavior is useful for cephadm because of the heavy use of the `from x import y` idiom and how cephadm is being actively refactored. 
Signed-off-by: John Mulligan --- src/cephadm/tests/fixtures.py | 82 ++++++++++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/src/cephadm/tests/fixtures.py b/src/cephadm/tests/fixtures.py index d25dffa9e3b44..572c1f9969d66 100644 --- a/src/cephadm/tests/fixtures.py +++ b/src/cephadm/tests/fixtures.py @@ -6,7 +6,7 @@ from contextlib import contextmanager from pyfakefs import fake_filesystem -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Any def import_cephadm(): @@ -183,3 +183,83 @@ def with_cephadm_ctx( else: yield ctx + +@pytest.fixture() +def funkypatch(monkeypatch): + """Defines the funkypatch fixtures that acts like a mixture between + mock.patch and pytest's monkeypatch fixture. + """ + fp = FunkyPatcher(monkeypatch) + yield fp + + +class FunkyPatcher: + """FunkyPatcher monkeypatches all imported instances of an object. + + Use `patch` to patch the canonical location of an object and FunkyPatcher + will automatically replace other imports of that object. + """ + + def __init__(self, monkeypatcher): + self._mp = monkeypatcher + # keep track of objects we've already patched. this dictionary + # maps a (module-name, object-name) tuple to the original object + # before patching. This could be used to determine if a name has + # already been patched or compare a patched object to the original. + self._originals: Dict[Tuple[str, str], Any] = {} + + def patch( + self, + mod: str, + name: str = '', + *, + dest: Any = None, + force: bool = False, + ) -> Any: + """Patch an object and all existing imports of that object. + Specify mod as `my.mod.name.obj` where obj is name of the object to be + patched or as `my.mod.name` and specify `name` as the name of the + object to be patched. + If the object to be patched is not imported as the same name in `mod` + it will *not* be automatically patched. 
In other words, `from + my.mod.name import foo` will work, but `from my.mod.name import foo as + _foo` will not. + Use the keyword-only argument `dest` to specify the new object to be + used. A MagicMock will be created and used if dest is None. + Use the keyword-only argument `force` to override checks that a mocked + objects are the same across modules. This can be used in the case that + some other code already patched an object and you want funkypatch to + override that patch (use with caution). + Returns the patched object (the MagicMock or supplied dest). + """ + import sys + import importlib + + if not name: + mod, name = mod.rsplit('.', 1) + modname = (mod, name) + # We don't strictly need the check but patching already patched objs is + # confusing to think about. It's better to block it for now and perhaps + # later we can relax these restrictions or be clever in some way. + if modname in self._originals: + raise KeyError(f'{modname} already patched') + + if dest is None: + dest = mock.MagicMock() + + imod = importlib.import_module(mod) + self._originals[modname] = getattr(imod, name) + + for mname, imod in sys.modules.items(): + try: + obj = getattr(imod, name) + except AttributeError: + # no matching name in module + continue + # make sure that the module imported the same object as the + # one we want to patch out, and not just some naming collision. 
+ # ensure the original object and the one in the module are the + # same object + if obj is self._originals[modname] or force: + self._mp.setattr(imod, name, dest) + return dest From 40206912a4752ec7eae91a0948797a8cab54b93d Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Sun, 20 Aug 2023 13:50:31 -0400 Subject: [PATCH 03/29] cephadm: update test to use funkypatch fixture Signed-off-by: John Mulligan --- src/cephadm/tests/test_cephadm.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py index 8db8edd0c1bf0..4bb1fac432f23 100644 --- a/src/cephadm/tests/test_cephadm.py +++ b/src/cephadm/tests/test_cephadm.py @@ -16,6 +16,7 @@ with_cephadm_ctx, mock_bad_firewalld, import_cephadm, + funkypatch, ) from pyfakefs import fake_filesystem @@ -2113,16 +2114,12 @@ def test_http_validation(self, _logger, _find_executable, values, cephadm_fs): class TestPull: - - @mock.patch('time.sleep') - @mock.patch('cephadm.get_image_info_from_inspect', return_value={}) - @mock.patch('cephadm.logger') - def test_error(self, _logger, _get_image_info_from_inspect, _sleep, monkeypatch): - # manually create a mock and use pytest's monkeypatch fixture to set - # multiple targets to the *same* mock - _call = mock.MagicMock() - monkeypatch.setattr('cephadm.call', _call) - monkeypatch.setattr('cephadmlib.call_wrappers.call', _call) + def test_error(self, funkypatch): + funkypatch.patch('time.sleep') + funkypatch.patch('cephadm.logger') + _giifi = funkypatch.patch('cephadm.get_image_info_from_inspect') + _giifi.return_value = {} + _call = funkypatch.patch('cephadmlib.call_wrappers.call') ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() ctx.insecure = False From f25048f876b10705077b8c661469558d25be72f3 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 14:31:12 -0500 Subject: [PATCH 04/29] cephadm: convert test_mon_crush_location to use funkypatch fixture The 
test_mon_crush_location test always seems to have me tinkering with it during refactoring. Re-do the fixtures to use funkpatch instead of mock.patch and normal monkeypatch. This looks nicer (IMO) and should avoid having to frequently mess with it when moving functions during future refactoring. Signed-off-by: John Mulligan --- src/cephadm/tests/test_cephadm.py | 44 ++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py index 4bb1fac432f23..6379ce28a1d2e 100644 --- a/src/cephadm/tests/test_cephadm.py +++ b/src/cephadm/tests/test_cephadm.py @@ -365,25 +365,37 @@ def test_to_deployment_container(self, _get_container, _get_config, _logger): assert os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str') in c.volume_mounts assert c.volume_mounts[os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str')] == '/etc/testing.str' - @mock.patch('cephadm.logger') - @mock.patch('cephadm.FileLock') - @mock.patch('cephadm.deploy_daemon') - @mock.patch('cephadm.make_var_run') - @mock.patch('cephadm.migrate_sysctl_dir') - @mock.patch('cephadm.check_unit', lambda *args, **kwargs: (None, 'running', None)) - @mock.patch('cephadm.get_unit_name', lambda *args, **kwargs: 'mon-unit-name') - @mock.patch('cephadm.extract_uid_gid', lambda *args, **kwargs: (0, 0)) - @mock.patch('cephadm.get_container') - @mock.patch('cephadm.apply_deploy_config_to_ctx', lambda d, c: None) - def test_mon_crush_location(self, _get_container, _migrate_sysctl, _make_var_run, _deploy_daemon, _file_lock, _logger, monkeypatch): + def test_mon_crush_location(self, funkypatch): """ test that crush location for mon is set if it is included in config_json """ - _fetch_configs = mock.MagicMock() - monkeypatch.setattr('cephadmlib.context_getters.fetch_configs', _fetch_configs) - 
monkeypatch.setattr('cephadm.fetch_configs', _fetch_configs) - monkeypatch.setattr('cephadm.read_configuration_source', lambda c: {}) - monkeypatch.setattr('cephadm.fetch_custom_config_files', mock.MagicMock()) + funkypatch.patch('cephadm.logger') + funkypatch.patch('cephadm.FileLock') + _deploy_daemon = funkypatch.patch('cephadm.deploy_daemon') + _make_var_run = funkypatch.patch('cephadm.make_var_run') + _migrate_sysctl = funkypatch.patch('cephadm.migrate_sysctl_dir') + funkypatch.patch( + 'cephadm.check_unit', + dest=lambda *args, **kwargs: (None, 'running', None), + ) + funkypatch.patch( + 'cephadm.get_unit_name', + dest=lambda *args, **kwargs: 'mon-unit-name', + ) + funkypatch.patch( + 'cephadm.extract_uid_gid', dest=lambda *args, **kwargs: (0, 0) + ) + _get_container = funkypatch.patch('cephadm.get_container') + funkypatch.patch( + 'cephadm.apply_deploy_config_to_ctx', dest=lambda d, c: None + ) + _fetch_configs = funkypatch.patch( + 'cephadmlib.context_getters.fetch_configs' + ) + funkypatch.patch( + 'cephadm.read_configuration_source', dest=lambda c: {} + ) + funkypatch.patch('cephadm.fetch_custom_config_files') ctx = _cephadm.CephadmContext() ctx.name = 'mon.test' From eca9be6544d86d1ee1a384898dcc86c098405d24 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Sun, 5 Nov 2023 16:03:53 -0500 Subject: [PATCH 05/29] cephadm: create deployment_utils module Create a deployment_utils module for deployment related functions that don't have a better home. 
Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 31 +------------------ src/cephadm/cephadmlib/deployment_utils.py | 35 ++++++++++++++++++++++ src/cephadm/tests/test_cephadm.py | 14 +++++---- 3 files changed, 45 insertions(+), 35 deletions(-) create mode 100644 src/cephadm/cephadmlib/deployment_utils.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index 959676ba3af17..14b064a95428b 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -176,6 +176,7 @@ from cephadmlib.sysctl import install_sysctl, migrate_sysctl_dir from cephadmlib.firewalld import Firewalld, update_firewalld from cephadmlib import templating +from cephadmlib.deployment_utils import to_deployment_container FuncT = TypeVar('FuncT', bound=Callable) @@ -5257,36 +5258,6 @@ def command_registry_login(ctx: CephadmContext) -> int: ################################## -def to_deployment_container( - ctx: CephadmContext, ctr: CephContainer -) -> CephContainer: - """Given a standard ceph container instance return a CephContainer - prepared for a deployment as a daemon, having the extra args and - custom configurations added. - NOTE: The `ctr` object is mutated before being returned. 
- """ - if 'extra_container_args' in ctx and ctx.extra_container_args: - ctr.container_args.extend(ctx.extra_container_args) - if 'extra_entrypoint_args' in ctx and ctx.extra_entrypoint_args: - ctr.args.extend(ctx.extra_entrypoint_args) - ccfiles = fetch_custom_config_files(ctx) - if ccfiles: - mandatory_keys = ['mount_path', 'content'] - for conf in ccfiles: - if all(k in conf for k in mandatory_keys): - mount_path = conf['mount_path'] - assert ctr.identity - file_path = os.path.join( - ctx.data_dir, - ctr.identity.fsid, - 'custom_config_files', - ctr.identity.daemon_name, - os.path.basename(mount_path) - ) - ctr.volume_mounts[file_path] = mount_path - return ctr - - def get_deployment_type( ctx: CephadmContext, ident: 'DaemonIdentity', ) -> DeploymentType: diff --git a/src/cephadm/cephadmlib/deployment_utils.py b/src/cephadm/cephadmlib/deployment_utils.py new file mode 100644 index 0000000000000..908fa979f1a56 --- /dev/null +++ b/src/cephadm/cephadmlib/deployment_utils.py @@ -0,0 +1,35 @@ +import os + +from .container_types import CephContainer +from .context import CephadmContext +from cephadmlib.context_getters import fetch_custom_config_files + + +def to_deployment_container( + ctx: CephadmContext, ctr: CephContainer +) -> CephContainer: + """Given a standard ceph container instance return a CephContainer + prepared for a deployment as a daemon, having the extra args and + custom configurations added. + NOTE: The `ctr` object is mutated before being returned. 
+ """ + if 'extra_container_args' in ctx and ctx.extra_container_args: + ctr.container_args.extend(ctx.extra_container_args) + if 'extra_entrypoint_args' in ctx and ctx.extra_entrypoint_args: + ctr.args.extend(ctx.extra_entrypoint_args) + ccfiles = fetch_custom_config_files(ctx) + if ccfiles: + mandatory_keys = ['mount_path', 'content'] + for conf in ccfiles: + if all(k in conf for k in mandatory_keys): + mount_path = conf['mount_path'] + assert ctr.identity + file_path = os.path.join( + ctx.data_dir, + ctr.identity.fsid, + 'custom_config_files', + ctr.identity.daemon_name, + os.path.basename(mount_path), + ) + ctr.volume_mounts[file_path] = mount_path + return ctr diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py index 6379ce28a1d2e..c5d8d19f26d68 100644 --- a/src/cephadm/tests/test_cephadm.py +++ b/src/cephadm/tests/test_cephadm.py @@ -318,13 +318,17 @@ def test_skip_firewalld(self, _logger, cephadm_fs): with pytest.raises(Exception): _cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None) - @mock.patch('cephadm.logger') - @mock.patch('cephadm.fetch_custom_config_files') - @mock.patch('cephadm.get_container') - def test_to_deployment_container(self, _get_container, _get_config, _logger): + def test_to_deployment_container(self, funkypatch): """ test to_deployment_container properly makes use of extra container args and custom conf files """ + from cephadmlib.deployment_utils import to_deployment_container + + funkypatch.patch('cephadm.logger') + _get_config = funkypatch.patch( + 'cephadmlib.deployment_utils.fetch_custom_config_files' + ) + _get_container = funkypatch.patch('cephadm.get_container') ctx = _cephadm.CephadmContext() ctx.config_json = '-' @@ -358,7 +362,7 @@ def test_to_deployment_container(self, _get_container, _get_config, _logger): host_network=True, ) c = _cephadm.get_container(ctx, ident) - c = _cephadm.to_deployment_container(ctx, c) + c = to_deployment_container(ctx, c) 
assert '--pids-limit=12345' in c.container_args assert '--something' in c.container_args From 700ea8d109c71f31079d239dd256a7562e9961f2 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 19:21:10 -0500 Subject: [PATCH 06/29] cephadm: use funkypatch for setting up common patches in deploy tests Add a shim function and convert to the use of the FunkyPatcher class in the test_deploy.py test functions. Use a shim as to not have to change all the tests (yet). Signed-off-by: John Mulligan --- src/cephadm/tests/test_deploy.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/cephadm/tests/test_deploy.py b/src/cephadm/tests/test_deploy.py index c77b243dfa774..9d82b2055b918 100644 --- a/src/cephadm/tests/test_deploy.py +++ b/src/cephadm/tests/test_deploy.py @@ -8,6 +8,7 @@ import_cephadm, mock_podman, with_cephadm_ctx, + FunkyPatcher, ) @@ -15,25 +16,24 @@ def _common_mp(monkeypatch): + return _common_patches(FunkyPatcher(monkeypatch)) + + +def _common_patches(funkypatch): mocks = {} - _call = mock.MagicMock(return_value=('', '', 0)) - monkeypatch.setattr('cephadmlib.container_types.call', _call) + _call = funkypatch.patch('cephadmlib.container_types.call') + _call.return_value = ('', '', 0) mocks['call'] = _call - _call_throws = mock.MagicMock(return_value=0) - monkeypatch.setattr( - 'cephadmlib.container_types.call_throws', _call_throws - ) + _call_throws = funkypatch.patch('cephadmlib.container_types.call_throws') + _call_throws.return_value = ('', '', 0) mocks['call_throws'] = _call_throws - _firewalld = mock.MagicMock() + _firewalld = funkypatch.patch('cephadm.Firewalld') _firewalld().external_ports.get.return_value = [] - monkeypatch.setattr('cephadm.Firewalld', _firewalld) mocks['Firewalld'] = _firewalld - _extract_uid_gid = mock.MagicMock() + _extract_uid_gid = funkypatch.patch('cephadm.extract_uid_gid', force=True) _extract_uid_gid.return_value = (8765, 8765) - 
monkeypatch.setattr('cephadm.extract_uid_gid', _extract_uid_gid) mocks['extract_uid_gid'] = _extract_uid_gid - _install_sysctl = mock.MagicMock() - monkeypatch.setattr('cephadm.install_sysctl', _install_sysctl) + _install_sysctl = funkypatch.patch('cephadm.install_sysctl') mocks['install_sysctl'] = _install_sysctl return mocks From d5ce0b7a8da55f52a473ef4169622357f655fe0d Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Thu, 9 Nov 2023 09:57:24 -0500 Subject: [PATCH 07/29] cephadm: update tests to import dict_get* functions from proper module Update the test that import dict_get and dict_get_join to use the context_getters module - the module that actually defines the functions. Signed-off-by: John Mulligan --- src/cephadm/tests/test_cephadm.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py index c5d8d19f26d68..c8fd3abf7d3ea 100644 --- a/src/cephadm/tests/test_cephadm.py +++ b/src/cephadm/tests/test_cephadm.py @@ -558,25 +558,31 @@ def test_get_image_info_from_inspect(self): def test_dict_get(self): - result = _cephadm.dict_get({'a': 1}, 'a', require=True) + from cephadmlib.data_utils import dict_get + + result = dict_get({'a': 1}, 'a', require=True) assert result == 1 - result = _cephadm.dict_get({'a': 1}, 'b') + result = dict_get({'a': 1}, 'b') assert result is None - result = _cephadm.dict_get({'a': 1}, 'b', default=2) + result = dict_get({'a': 1}, 'b', default=2) assert result == 2 def test_dict_get_error(self): + from cephadmlib.data_utils import dict_get + with pytest.raises(_cephadm.Error): - _cephadm.dict_get({'a': 1}, 'b', require=True) + dict_get({'a': 1}, 'b', require=True) def test_dict_get_join(self): - result = _cephadm.dict_get_join({'foo': ['a', 'b']}, 'foo') + from cephadmlib.data_utils import dict_get_join + + result = dict_get_join({'foo': ['a', 'b']}, 'foo') assert result == 'a\nb' - result = _cephadm.dict_get_join({'foo': [1, 2]}, 
'foo') + result = dict_get_join({'foo': [1, 2]}, 'foo') assert result == '1\n2' - result = _cephadm.dict_get_join({'bar': 'a'}, 'bar') + result = dict_get_join({'bar': 'a'}, 'bar') assert result == 'a' - result = _cephadm.dict_get_join({'a': 1}, 'a') + result = dict_get_join({'a': 1}, 'a') assert result == 1 @mock.patch('os.listdir', return_value=[]) From 6276f74ff2c5e5364a3cd6e7e38e9dbda9f0b3a7 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Thu, 9 Nov 2023 13:46:04 -0500 Subject: [PATCH 08/29] cephamd: update tests to use should_log_to_journald from context_getters Update tests to import should_log_to_journald from context_getters - the module that actually defines that function. This makes later refactoring easier. Signed-off-by: John Mulligan --- src/cephadm/tests/test_cephadm.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py index c8fd3abf7d3ea..899272cb4143a 100644 --- a/src/cephadm/tests/test_cephadm.py +++ b/src/cephadm/tests/test_cephadm.py @@ -784,24 +784,26 @@ def test_get_container_info(self, _logger, daemon_filter, by_name, daemon_list, assert _cephadm.get_container_info(ctx, daemon_filter, by_name) == output def test_should_log_to_journald(self): + from cephadmlib import context_getters + ctx = _cephadm.CephadmContext() # explicit ctx.log_to_journald = True - assert _cephadm.should_log_to_journald(ctx) + assert context_getters.should_log_to_journald(ctx) ctx.log_to_journald = None # enable if podman support --cgroup=split ctx.container_engine = mock_podman() ctx.container_engine.version = (2, 1, 0) - assert _cephadm.should_log_to_journald(ctx) + assert context_getters.should_log_to_journald(ctx) # disable on old podman ctx.container_engine.version = (2, 0, 0) - assert not _cephadm.should_log_to_journald(ctx) + assert not context_getters.should_log_to_journald(ctx) # disable on docker ctx.container_engine = mock_docker() - assert not 
_cephadm.should_log_to_journald(ctx) + assert not context_getters.should_log_to_journald(ctx) def test_normalize_image_digest(self): s = 'myhostname:5000/ceph/ceph@sha256:753886ad9049004395ae990fbb9b096923b5a518b819283141ee8716ddf55ad1' From 5a45aca014571d6a6251cdbd67f3e11fcbf19d4a Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 15:45:13 -0500 Subject: [PATCH 09/29] cephadm: start a cephadmlib.daemons package Signed-off-by: John Mulligan --- src/cephadm/cephadmlib/daemons/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/__init__.py diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d From 17714889f56bed9e09ba1cdfb2a89a321fb87978 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 15:58:36 -0500 Subject: [PATCH 10/29] cephadm: move custom container class to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 194 +------------------ src/cephadm/cephadmlib/daemons/__init__.py | 3 + src/cephadm/cephadmlib/daemons/custom.py | 210 +++++++++++++++++++++ 3 files changed, 214 insertions(+), 193 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/custom.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index 14b064a95428b..fa8e48244f045 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -177,6 +177,7 @@ from cephadmlib.firewalld import Firewalld, update_firewalld from cephadmlib import templating from cephadmlib.deployment_utils import to_deployment_container +from cephadmlib.daemons import CustomContainer FuncT = TypeVar('FuncT', bound=Callable) @@ -2005,199 +2006,6 @@ def default_entrypoint(self) -> str: ################################## -@register_daemon_form -class CustomContainer(ContainerDaemonForm): - """Defines a custom container""" - daemon_type = 'container' - - @classmethod - def for_daemon_type(cls, 
daemon_type: str) -> bool: - return cls.daemon_type == daemon_type - - def __init__(self, - fsid: str, daemon_id: Union[int, str], - config_json: Dict, image: str) -> None: - self.fsid = fsid - self.daemon_id = daemon_id - self.image = image - - # config-json options - self.entrypoint = dict_get(config_json, 'entrypoint') - self.uid = dict_get(config_json, 'uid', 65534) # nobody - self.gid = dict_get(config_json, 'gid', 65534) # nobody - self.volume_mounts = dict_get(config_json, 'volume_mounts', {}) - self.args = dict_get(config_json, 'args', []) - self.envs = dict_get(config_json, 'envs', []) - self.privileged = dict_get(config_json, 'privileged', False) - self.bind_mounts = dict_get(config_json, 'bind_mounts', []) - self.ports = dict_get(config_json, 'ports', []) - self.dirs = dict_get(config_json, 'dirs', []) - self.files = dict_get(config_json, 'files', {}) - - @classmethod - def init(cls, ctx: CephadmContext, - fsid: str, daemon_id: Union[int, str]) -> 'CustomContainer': - return cls(fsid, daemon_id, - fetch_configs(ctx), ctx.image) - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CustomContainer': - return cls.init(ctx, ident.fsid, ident.daemon_id) - - @property - def identity(self) -> DaemonIdentity: - return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) - - def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: - """ - Create dirs/files below the container data directory. 
- """ - logger.info('Creating custom container configuration ' - 'dirs/files in {} ...'.format(data_dir)) - - if not os.path.isdir(data_dir): - raise OSError('data_dir is not a directory: %s' % data_dir) - - for dir_path in self.dirs: - logger.info('Creating directory: {}'.format(dir_path)) - dir_path = os.path.join(data_dir, dir_path.strip('/')) - makedirs(dir_path, uid, gid, 0o755) - - for file_path in self.files: - logger.info('Creating file: {}'.format(file_path)) - content = dict_get_join(self.files, file_path) - file_path = os.path.join(data_dir, file_path.strip('/')) - with write_new(file_path, owner=(uid, gid), encoding='utf-8') as f: - f.write(content) - - def get_daemon_args(self) -> List[str]: - return [] - - def get_container_args(self) -> List[str]: - return self.args - - def get_container_envs(self) -> List[str]: - return self.envs - - def _get_container_mounts(self, data_dir: str) -> Dict[str, str]: - """ - Get the volume mounts. Relative source paths will be located below - `/var/lib/ceph//`. - - Example: - { - /foo/conf: /conf - foo/conf: /conf - } - becomes - { - /foo/conf: /conf - /var/lib/ceph///foo/conf: /conf - } - """ - mounts = {} - for source, destination in self.volume_mounts.items(): - source = os.path.join(data_dir, source) - mounts[source] = destination - return mounts - - def customize_container_mounts( - self, ctx: CephadmContext, mounts: Dict[str, str] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - mounts.update(self._get_container_mounts(data_dir)) - - def _get_container_binds(self, data_dir: str) -> List[List[str]]: - """ - Get the bind mounts. Relative `source=...` paths will be located below - `/var/lib/ceph//`. - - Example: - [ - 'type=bind', - 'source=lib/modules', - 'destination=/lib/modules', - 'ro=true' - ] - becomes - [ - ... - 'source=/var/lib/ceph///lib/modules', - ... 
- ] - """ - binds = self.bind_mounts.copy() - for bind in binds: - for index, value in enumerate(bind): - match = re.match(r'^source=(.+)$', value) - if match: - bind[index] = 'source={}'.format(os.path.join( - data_dir, match.group(1))) - return binds - - def customize_container_binds( - self, ctx: CephadmContext, binds: List[List[str]] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - binds.extend(self._get_container_binds(data_dir)) - - # Cache the container so we don't need to rebuild it again when calling - # into init_containers - _container: Optional[CephContainer] = None - - def container(self, ctx: CephadmContext) -> CephContainer: - if self._container is None: - ctr = daemon_to_container( - ctx, - self, - host_network=False, - privileged=self.privileged, - ptrace=ctx.allow_ptrace, - ) - self._container = to_deployment_container(ctx, ctr) - return self._container - - def init_containers(self, ctx: CephadmContext) -> List[InitContainer]: - primary = self.container(ctx) - init_containers: List[Dict[str, Any]] = getattr( - ctx, 'init_containers', [] - ) - return [ - InitContainer.from_primary_and_opts(ctx, primary, ic_opts) - for ic_opts in init_containers - ] - - def customize_container_endpoints( - self, endpoints: List[EndPoint], deployment_type: DeploymentType - ) -> None: - if deployment_type == DeploymentType.DEFAULT: - endpoints.extend([EndPoint('0.0.0.0', p) for p in self.ports]) - - def customize_container_envs( - self, ctx: CephadmContext, envs: List[str] - ) -> None: - envs.extend(self.get_container_envs()) - - def customize_container_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.extend(self.get_container_args()) - - def customize_process_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.extend(self.get_daemon_args()) - - def default_entrypoint(self) -> str: - return self.entrypoint or '' - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - return self.uid, self.gid - - 
-################################## - - def get_supported_daemons(): # type: () -> List[str] supported_daemons = ceph_daemons() diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py index e69de29bb2d1d..d979ce19a936f 100644 --- a/src/cephadm/cephadmlib/daemons/__init__.py +++ b/src/cephadm/cephadmlib/daemons/__init__.py @@ -0,0 +1,3 @@ +from .custom import CustomContainer + +__all__ = ['CustomContainer'] diff --git a/src/cephadm/cephadmlib/daemons/custom.py b/src/cephadm/cephadmlib/daemons/custom.py new file mode 100644 index 0000000000000..8e0d59e6f3a9e --- /dev/null +++ b/src/cephadm/cephadmlib/daemons/custom.py @@ -0,0 +1,210 @@ +import logging +import os +import re + +from typing import Any, Dict, List, Optional, Tuple, Union + +from ..container_daemon_form import ContainerDaemonForm, daemon_to_container +from ..container_types import CephContainer, InitContainer +from ..context import CephadmContext +from ..context_getters import fetch_configs +from ..daemon_form import register as register_daemon_form +from ..daemon_identity import DaemonIdentity +from ..data_utils import dict_get, dict_get_join +from ..deploy import DeploymentType +from ..deployment_utils import to_deployment_container +from ..file_utils import write_new, makedirs +from ..net_utils import EndPoint + + +logger = logging.getLogger() + + +@register_daemon_form +class CustomContainer(ContainerDaemonForm): + """Defines a custom container""" + daemon_type = 'container' + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return cls.daemon_type == daemon_type + + def __init__(self, + fsid: str, daemon_id: Union[int, str], + config_json: Dict, image: str) -> None: + self.fsid = fsid + self.daemon_id = daemon_id + self.image = image + + # config-json options + self.entrypoint = dict_get(config_json, 'entrypoint') + self.uid = dict_get(config_json, 'uid', 65534) # nobody + self.gid = dict_get(config_json, 'gid', 65534) # nobody + 
self.volume_mounts = dict_get(config_json, 'volume_mounts', {}) + self.args = dict_get(config_json, 'args', []) + self.envs = dict_get(config_json, 'envs', []) + self.privileged = dict_get(config_json, 'privileged', False) + self.bind_mounts = dict_get(config_json, 'bind_mounts', []) + self.ports = dict_get(config_json, 'ports', []) + self.dirs = dict_get(config_json, 'dirs', []) + self.files = dict_get(config_json, 'files', {}) + + @classmethod + def init(cls, ctx: CephadmContext, + fsid: str, daemon_id: Union[int, str]) -> 'CustomContainer': + return cls(fsid, daemon_id, + fetch_configs(ctx), ctx.image) + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CustomContainer': + return cls.init(ctx, ident.fsid, ident.daemon_id) + + @property + def identity(self) -> DaemonIdentity: + return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) + + def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: + """ + Create dirs/files below the container data directory. + """ + logger.info('Creating custom container configuration ' + 'dirs/files in {} ...'.format(data_dir)) + + if not os.path.isdir(data_dir): + raise OSError('data_dir is not a directory: %s' % data_dir) + + for dir_path in self.dirs: + logger.info('Creating directory: {}'.format(dir_path)) + dir_path = os.path.join(data_dir, dir_path.strip('/')) + makedirs(dir_path, uid, gid, 0o755) + + for file_path in self.files: + logger.info('Creating file: {}'.format(file_path)) + content = dict_get_join(self.files, file_path) + file_path = os.path.join(data_dir, file_path.strip('/')) + with write_new(file_path, owner=(uid, gid), encoding='utf-8') as f: + f.write(content) + + def get_daemon_args(self) -> List[str]: + return [] + + def get_container_args(self) -> List[str]: + return self.args + + def get_container_envs(self) -> List[str]: + return self.envs + + def _get_container_mounts(self, data_dir: str) -> Dict[str, str]: + """ + Get the volume mounts. 
Relative source paths will be located below + `/var/lib/ceph//`. + + Example: + { + /foo/conf: /conf + foo/conf: /conf + } + becomes + { + /foo/conf: /conf + /var/lib/ceph///foo/conf: /conf + } + """ + mounts = {} + for source, destination in self.volume_mounts.items(): + source = os.path.join(data_dir, source) + mounts[source] = destination + return mounts + + def customize_container_mounts( + self, ctx: CephadmContext, mounts: Dict[str, str] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + mounts.update(self._get_container_mounts(data_dir)) + + def _get_container_binds(self, data_dir: str) -> List[List[str]]: + """ + Get the bind mounts. Relative `source=...` paths will be located below + `/var/lib/ceph//`. + + Example: + [ + 'type=bind', + 'source=lib/modules', + 'destination=/lib/modules', + 'ro=true' + ] + becomes + [ + ... + 'source=/var/lib/ceph///lib/modules', + ... + ] + """ + binds = self.bind_mounts.copy() + for bind in binds: + for index, value in enumerate(bind): + match = re.match(r'^source=(.+)$', value) + if match: + bind[index] = 'source={}'.format(os.path.join( + data_dir, match.group(1))) + return binds + + def customize_container_binds( + self, ctx: CephadmContext, binds: List[List[str]] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + binds.extend(self._get_container_binds(data_dir)) + + # Cache the container so we don't need to rebuild it again when calling + # into init_containers + _container: Optional[CephContainer] = None + + def container(self, ctx: CephadmContext) -> CephContainer: + if self._container is None: + ctr = daemon_to_container( + ctx, + self, + host_network=False, + privileged=self.privileged, + ptrace=ctx.allow_ptrace, + ) + self._container = to_deployment_container(ctx, ctr) + return self._container + + def init_containers(self, ctx: CephadmContext) -> List[InitContainer]: + primary = self.container(ctx) + init_containers: List[Dict[str, Any]] = getattr( + ctx, 'init_containers', [] + ) + 
return [ + InitContainer.from_primary_and_opts(ctx, primary, ic_opts) + for ic_opts in init_containers + ] + + def customize_container_endpoints( + self, endpoints: List[EndPoint], deployment_type: DeploymentType + ) -> None: + if deployment_type == DeploymentType.DEFAULT: + endpoints.extend([EndPoint('0.0.0.0', p) for p in self.ports]) + + def customize_container_envs( + self, ctx: CephadmContext, envs: List[str] + ) -> None: + envs.extend(self.get_container_envs()) + + def customize_container_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.extend(self.get_container_args()) + + def customize_process_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.extend(self.get_daemon_args()) + + def default_entrypoint(self) -> str: + return self.entrypoint or '' + + def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: + return self.uid, self.gid From 8db86d29d646a74f992ea5b05ce7efe63bd9ea6b Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 16:04:15 -0500 Subject: [PATCH 11/29] cephadm: move tracing class to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 104 +------------------ src/cephadm/cephadmlib/daemons/__init__.py | 3 +- src/cephadm/cephadmlib/daemons/tracing.py | 115 +++++++++++++++++++++ 3 files changed, 118 insertions(+), 104 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/tracing.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index fa8e48244f045..3e46fdba18448 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -35,15 +35,11 @@ from cephadmlib.constants import ( # default images DEFAULT_ALERT_MANAGER_IMAGE, - DEFAULT_ELASTICSEARCH_IMAGE, DEFAULT_GRAFANA_IMAGE, DEFAULT_HAPROXY_IMAGE, DEFAULT_IMAGE, DEFAULT_IMAGE_IS_MAIN, DEFAULT_IMAGE_RELEASE, - DEFAULT_JAEGER_AGENT_IMAGE, - DEFAULT_JAEGER_COLLECTOR_IMAGE, - DEFAULT_JAEGER_QUERY_IMAGE, DEFAULT_KEEPALIVED_IMAGE, DEFAULT_LOKI_IMAGE, DEFAULT_NODE_EXPORTER_IMAGE, @@ -177,7 +173,7 @@ from 
cephadmlib.firewalld import Firewalld, update_firewalld from cephadmlib import templating from cephadmlib.deployment_utils import to_deployment_container -from cephadmlib.daemons import CustomContainer +from cephadmlib.daemons import CustomContainer, Tracing FuncT = TypeVar('FuncT', bound=Callable) @@ -1908,104 +1904,6 @@ def customize_container_args( ################################## -@register_daemon_form -class Tracing(ContainerDaemonForm): - """Define the configs for the jaeger tracing containers""" - - components: Dict[str, Dict[str, Any]] = { - 'elasticsearch': { - 'image': DEFAULT_ELASTICSEARCH_IMAGE, - 'envs': ['discovery.type=single-node'] - }, - 'jaeger-agent': { - 'image': DEFAULT_JAEGER_AGENT_IMAGE, - }, - 'jaeger-collector': { - 'image': DEFAULT_JAEGER_COLLECTOR_IMAGE, - }, - 'jaeger-query': { - 'image': DEFAULT_JAEGER_QUERY_IMAGE, - }, - } # type: ignore - - @classmethod - def for_daemon_type(cls, daemon_type: str) -> bool: - return daemon_type in cls.components - - @staticmethod - def set_configuration(config: Dict[str, str], daemon_type: str) -> None: - if daemon_type in ['jaeger-collector', 'jaeger-query']: - assert 'elasticsearch_nodes' in config - Tracing.components[daemon_type]['envs'] = [ - 'SPAN_STORAGE_TYPE=elasticsearch', - f'ES_SERVER_URLS={config["elasticsearch_nodes"]}'] - if daemon_type == 'jaeger-agent': - assert 'collector_nodes' in config - Tracing.components[daemon_type]['daemon_args'] = [ - f'--reporter.grpc.host-port={config["collector_nodes"]}', - '--processor.jaeger-compact.server-host-port=6799' - ] - - def __init__(self, ident: DaemonIdentity) -> None: - self._identity = ident - self._configured = False - - def _configure(self, ctx: CephadmContext) -> None: - if self._configured: - return - config = fetch_configs(ctx) - # Currently, this method side-effects the class attribute, and that - # is unpleasant. 
In the future it would be nice to move all of - # set_configuration into _confiure and only modify each classes data - # independently - self.set_configuration(config, self.identity.daemon_type) - self._configured = True - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Tracing': - return cls(ident) - - @property - def identity(self) -> DaemonIdentity: - return self._identity - - def container(self, ctx: CephadmContext) -> CephContainer: - ctr = daemon_to_container(ctx, self) - return to_deployment_container(ctx, ctr) - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - return 65534, 65534 - - def get_daemon_args(self) -> List[str]: - return self.components[self.identity.daemon_type].get( - 'daemon_args', [] - ) - - def customize_process_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - self._configure(ctx) - # earlier code did an explicit check if the daemon type was jaeger-agent - # and would only call get_daemon_args if that was true. However, since - # the function only returns a non-empty list in the case of jaeger-agent - # that check is unnecessary and is not brought over. 
- args.extend(self.get_daemon_args()) - - def customize_container_envs( - self, ctx: CephadmContext, envs: List[str] - ) -> None: - self._configure(ctx) - envs.extend( - self.components[self.identity.daemon_type].get('envs', []) - ) - - def default_entrypoint(self) -> str: - return '' - - -################################## - - def get_supported_daemons(): # type: () -> List[str] supported_daemons = ceph_daemons() diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py index d979ce19a936f..dec9151050526 100644 --- a/src/cephadm/cephadmlib/daemons/__init__.py +++ b/src/cephadm/cephadmlib/daemons/__init__.py @@ -1,3 +1,4 @@ from .custom import CustomContainer +from .tracing import Tracing -__all__ = ['CustomContainer'] +__all__ = ['CustomContainer', 'Tracing'] diff --git a/src/cephadm/cephadmlib/daemons/tracing.py b/src/cephadm/cephadmlib/daemons/tracing.py new file mode 100644 index 0000000000000..f178bd664086f --- /dev/null +++ b/src/cephadm/cephadmlib/daemons/tracing.py @@ -0,0 +1,115 @@ +import logging + +from typing import Any, Dict, List, Tuple + +from ..constants import ( + DEFAULT_ELASTICSEARCH_IMAGE, + DEFAULT_JAEGER_AGENT_IMAGE, + DEFAULT_JAEGER_COLLECTOR_IMAGE, + DEFAULT_JAEGER_QUERY_IMAGE, +) +from ..container_daemon_form import ContainerDaemonForm, daemon_to_container +from ..container_types import CephContainer +from ..context import CephadmContext +from ..context_getters import fetch_configs +from ..daemon_form import register as register_daemon_form +from ..daemon_identity import DaemonIdentity +from ..deployment_utils import to_deployment_container + + +logger = logging.getLogger() + + +@register_daemon_form +class Tracing(ContainerDaemonForm): + """Define the configs for the jaeger tracing containers""" + + components: Dict[str, Dict[str, Any]] = { + 'elasticsearch': { + 'image': DEFAULT_ELASTICSEARCH_IMAGE, + 'envs': ['discovery.type=single-node'] + }, + 'jaeger-agent': { + 'image': 
DEFAULT_JAEGER_AGENT_IMAGE, + }, + 'jaeger-collector': { + 'image': DEFAULT_JAEGER_COLLECTOR_IMAGE, + }, + 'jaeger-query': { + 'image': DEFAULT_JAEGER_QUERY_IMAGE, + }, + } # type: ignore + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return daemon_type in cls.components + + @staticmethod + def set_configuration(config: Dict[str, str], daemon_type: str) -> None: + if daemon_type in ['jaeger-collector', 'jaeger-query']: + assert 'elasticsearch_nodes' in config + Tracing.components[daemon_type]['envs'] = [ + 'SPAN_STORAGE_TYPE=elasticsearch', + f'ES_SERVER_URLS={config["elasticsearch_nodes"]}'] + if daemon_type == 'jaeger-agent': + assert 'collector_nodes' in config + Tracing.components[daemon_type]['daemon_args'] = [ + f'--reporter.grpc.host-port={config["collector_nodes"]}', + '--processor.jaeger-compact.server-host-port=6799' + ] + + def __init__(self, ident: DaemonIdentity) -> None: + self._identity = ident + self._configured = False + + def _configure(self, ctx: CephadmContext) -> None: + if self._configured: + return + config = fetch_configs(ctx) + # Currently, this method side-effects the class attribute, and that + # is unpleasant. 
In the future it would be nice to move all of + # set_configuration into _confiure and only modify each classes data + # independently + self.set_configuration(config, self.identity.daemon_type) + self._configured = True + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Tracing': + return cls(ident) + + @property + def identity(self) -> DaemonIdentity: + return self._identity + + def container(self, ctx: CephadmContext) -> CephContainer: + ctr = daemon_to_container(ctx, self) + return to_deployment_container(ctx, ctr) + + def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: + return 65534, 65534 + + def get_daemon_args(self) -> List[str]: + return self.components[self.identity.daemon_type].get( + 'daemon_args', [] + ) + + def customize_process_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + self._configure(ctx) + # earlier code did an explicit check if the daemon type was jaeger-agent + # and would only call get_daemon_args if that was true. However, since + # the function only returns a non-empty list in the case of jaeger-agent + # that check is unnecessary and is not brought over. 
+ args.extend(self.get_daemon_args()) + + def customize_container_envs( + self, ctx: CephadmContext, envs: List[str] + ) -> None: + self._configure(ctx) + envs.extend( + self.components[self.identity.daemon_type].get('envs', []) + ) + + def default_entrypoint(self) -> str: + return '' From cb960823ba55c1797243384befe48217c54e326a Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 19:23:20 -0500 Subject: [PATCH 12/29] cephadm: move haproxy and keepalived classes to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 259 +------------------- src/cephadm/cephadmlib/daemons/__init__.py | 3 +- src/cephadm/cephadmlib/daemons/ingress.py | 268 +++++++++++++++++++++ 3 files changed, 271 insertions(+), 259 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/ingress.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index 3e46fdba18448..c588d661c2110 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -36,11 +36,9 @@ # default images DEFAULT_ALERT_MANAGER_IMAGE, DEFAULT_GRAFANA_IMAGE, - DEFAULT_HAPROXY_IMAGE, DEFAULT_IMAGE, DEFAULT_IMAGE_IS_MAIN, DEFAULT_IMAGE_RELEASE, - DEFAULT_KEEPALIVED_IMAGE, DEFAULT_LOKI_IMAGE, DEFAULT_NODE_EXPORTER_IMAGE, DEFAULT_NVMEOF_IMAGE, @@ -173,7 +171,7 @@ from cephadmlib.firewalld import Firewalld, update_firewalld from cephadmlib import templating from cephadmlib.deployment_utils import to_deployment_container -from cephadmlib.daemons import CustomContainer, Tracing +from cephadmlib.daemons import CustomContainer, Tracing, HAproxy, Keepalived FuncT = TypeVar('FuncT', bound=Callable) @@ -1646,261 +1644,6 @@ def customize_container_envs( def default_entrypoint(self) -> str: return self.entrypoint - -################################## - - -@register_daemon_form -class HAproxy(ContainerDaemonForm): - """Defines an HAproxy container""" - daemon_type = 'haproxy' - required_files = ['haproxy.cfg'] - default_image = DEFAULT_HAPROXY_IMAGE - - @classmethod - def for_daemon_type(cls, 
daemon_type: str) -> bool: - return cls.daemon_type == daemon_type - - def __init__(self, - ctx: CephadmContext, - fsid: str, daemon_id: Union[int, str], - config_json: Dict, image: str) -> None: - self.ctx = ctx - self.fsid = fsid - self.daemon_id = daemon_id - self.image = image - - # config-json options - self.files = dict_get(config_json, 'files', {}) - - self.validate() - - @classmethod - def init(cls, ctx: CephadmContext, - fsid: str, daemon_id: Union[int, str]) -> 'HAproxy': - return cls(ctx, fsid, daemon_id, fetch_configs(ctx), - ctx.image) - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'HAproxy': - return cls.init(ctx, ident.fsid, ident.daemon_id) - - @property - def identity(self) -> DaemonIdentity: - return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) - - def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: - """Create files under the container data dir""" - if not os.path.isdir(data_dir): - raise OSError('data_dir is not a directory: %s' % (data_dir)) - - # create additional directories in data dir for HAproxy to use - if not os.path.isdir(os.path.join(data_dir, 'haproxy')): - makedirs(os.path.join(data_dir, 'haproxy'), uid, gid, DATA_DIR_MODE) - - data_dir = os.path.join(data_dir, 'haproxy') - populate_files(data_dir, self.files, uid, gid) - - def get_daemon_args(self) -> List[str]: - return ['haproxy', '-f', '/var/lib/haproxy/haproxy.cfg'] - - def validate(self): - # type: () -> None - if not is_fsid(self.fsid): - raise Error('not an fsid: %s' % self.fsid) - if not self.daemon_id: - raise Error('invalid daemon_id: %s' % self.daemon_id) - if not self.image: - raise Error('invalid image: %s' % self.image) - - # check for the required files - if self.required_files: - for fname in self.required_files: - if fname not in self.files: - raise Error('required file missing from config-json: %s' % fname) - - def get_daemon_name(self): - # type: () -> str - return '%s.%s' % (self.daemon_type, 
self.daemon_id) - - def get_container_name(self, desc=None): - # type: (Optional[str]) -> str - cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) - if desc: - cname = '%s-%s' % (cname, desc) - return cname - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - # better directory for this? - return extract_uid_gid(self.ctx, file_path='/var/lib') - - @staticmethod - def _get_container_mounts(data_dir: str) -> Dict[str, str]: - mounts = dict() - mounts[os.path.join(data_dir, 'haproxy')] = '/var/lib/haproxy' - return mounts - - def customize_container_mounts( - self, ctx: CephadmContext, mounts: Dict[str, str] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - mounts.update(self._get_container_mounts(data_dir)) - - @staticmethod - def get_sysctl_settings() -> List[str]: - return [ - '# IP forwarding and non-local bind', - 'net.ipv4.ip_forward = 1', - 'net.ipv4.ip_nonlocal_bind = 1', - ] - - def container(self, ctx: CephadmContext) -> CephContainer: - ctr = daemon_to_container(ctx, self) - return to_deployment_container(ctx, ctr) - - def customize_container_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.extend( - ['--user=root'] - ) # haproxy 2.4 defaults to a different user - - def customize_process_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.extend(self.get_daemon_args()) - - -################################## - - -@register_daemon_form -class Keepalived(ContainerDaemonForm): - """Defines an Keepalived container""" - daemon_type = 'keepalived' - required_files = ['keepalived.conf'] - default_image = DEFAULT_KEEPALIVED_IMAGE - - @classmethod - def for_daemon_type(cls, daemon_type: str) -> bool: - return cls.daemon_type == daemon_type - - def __init__(self, - ctx: CephadmContext, - fsid: str, daemon_id: Union[int, str], - config_json: Dict, image: str) -> None: - self.ctx = ctx - self.fsid = fsid - self.daemon_id = daemon_id - self.image = image - - # config-json options - self.files = 
dict_get(config_json, 'files', {}) - - self.validate() - - @classmethod - def init(cls, ctx: CephadmContext, fsid: str, - daemon_id: Union[int, str]) -> 'Keepalived': - return cls(ctx, fsid, daemon_id, - fetch_configs(ctx), ctx.image) - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Keepalived': - return cls.init(ctx, ident.fsid, ident.daemon_id) - - @property - def identity(self) -> DaemonIdentity: - return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) - - def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: - """Create files under the container data dir""" - if not os.path.isdir(data_dir): - raise OSError('data_dir is not a directory: %s' % (data_dir)) - - # create additional directories in data dir for keepalived to use - if not os.path.isdir(os.path.join(data_dir, 'keepalived')): - makedirs(os.path.join(data_dir, 'keepalived'), uid, gid, DATA_DIR_MODE) - - # populate files from the config-json - populate_files(data_dir, self.files, uid, gid) - - def validate(self): - # type: () -> None - if not is_fsid(self.fsid): - raise Error('not an fsid: %s' % self.fsid) - if not self.daemon_id: - raise Error('invalid daemon_id: %s' % self.daemon_id) - if not self.image: - raise Error('invalid image: %s' % self.image) - - # check for the required files - if self.required_files: - for fname in self.required_files: - if fname not in self.files: - raise Error('required file missing from config-json: %s' % fname) - - def get_daemon_name(self): - # type: () -> str - return '%s.%s' % (self.daemon_type, self.daemon_id) - - def get_container_name(self, desc=None): - # type: (Optional[str]) -> str - cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) - if desc: - cname = '%s-%s' % (cname, desc) - return cname - - @staticmethod - def get_container_envs(): - # type: () -> List[str] - envs = [ - 'KEEPALIVED_AUTOCONF=false', - 'KEEPALIVED_CONF=/etc/keepalived/keepalived.conf', - 
'KEEPALIVED_CMD=/usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf', - 'KEEPALIVED_DEBUG=false' - ] - return envs - - @staticmethod - def get_sysctl_settings() -> List[str]: - return [ - '# IP forwarding and non-local bind', - 'net.ipv4.ip_forward = 1', - 'net.ipv4.ip_nonlocal_bind = 1', - ] - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - # better directory for this? - return extract_uid_gid(self.ctx, file_path='/var/lib') - - @staticmethod - def _get_container_mounts(data_dir: str) -> Dict[str, str]: - mounts = dict() - mounts[os.path.join(data_dir, 'keepalived.conf')] = '/etc/keepalived/keepalived.conf' - return mounts - - def customize_container_mounts( - self, ctx: CephadmContext, mounts: Dict[str, str] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - mounts.update(self._get_container_mounts(data_dir)) - - def container(self, ctx: CephadmContext) -> CephContainer: - ctr = daemon_to_container(ctx, self) - return to_deployment_container(ctx, ctr) - - def customize_container_envs( - self, ctx: CephadmContext, envs: List[str] - ) -> None: - envs.extend(self.get_container_envs()) - - def customize_container_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.extend(['--cap-add=NET_ADMIN', '--cap-add=NET_RAW']) - - ################################## diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py index dec9151050526..ec94fc577127a 100644 --- a/src/cephadm/cephadmlib/daemons/__init__.py +++ b/src/cephadm/cephadmlib/daemons/__init__.py @@ -1,4 +1,5 @@ from .custom import CustomContainer from .tracing import Tracing +from .ingress import HAproxy, Keepalived -__all__ = ['CustomContainer', 'Tracing'] +__all__ = ['CustomContainer', 'Tracing', 'HAproxy', 'Keepalived'] diff --git a/src/cephadm/cephadmlib/daemons/ingress.py b/src/cephadm/cephadmlib/daemons/ingress.py new file mode 100644 index 0000000000000..94ee34505ed58 --- /dev/null +++ 
b/src/cephadm/cephadmlib/daemons/ingress.py @@ -0,0 +1,267 @@ +import os + +from typing import Dict, List, Optional, Tuple, Union + +from ..constants import ( + DEFAULT_HAPROXY_IMAGE, + DEFAULT_KEEPALIVED_IMAGE, + DATA_DIR_MODE, +) +from ..container_daemon_form import ContainerDaemonForm, daemon_to_container +from ..container_types import CephContainer, extract_uid_gid +from ..context import CephadmContext +from ..context_getters import fetch_configs +from ..daemon_form import register as register_daemon_form +from ..daemon_identity import DaemonIdentity +from ..data_utils import dict_get, is_fsid +from ..deployment_utils import to_deployment_container +from ..exceptions import Error +from ..file_utils import makedirs, populate_files + + +@register_daemon_form +class HAproxy(ContainerDaemonForm): + """Defines an HAproxy container""" + daemon_type = 'haproxy' + required_files = ['haproxy.cfg'] + default_image = DEFAULT_HAPROXY_IMAGE + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return cls.daemon_type == daemon_type + + def __init__(self, + ctx: CephadmContext, + fsid: str, daemon_id: Union[int, str], + config_json: Dict, image: str) -> None: + self.ctx = ctx + self.fsid = fsid + self.daemon_id = daemon_id + self.image = image + + # config-json options + self.files = dict_get(config_json, 'files', {}) + + self.validate() + + @classmethod + def init(cls, ctx: CephadmContext, + fsid: str, daemon_id: Union[int, str]) -> 'HAproxy': + return cls(ctx, fsid, daemon_id, fetch_configs(ctx), + ctx.image) + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'HAproxy': + return cls.init(ctx, ident.fsid, ident.daemon_id) + + @property + def identity(self) -> DaemonIdentity: + return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) + + def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: + """Create files under the container data dir""" + if not os.path.isdir(data_dir): + raise OSError('data_dir
is not a directory: %s' % (data_dir)) + + # create additional directories in data dir for HAproxy to use + if not os.path.isdir(os.path.join(data_dir, 'haproxy')): + makedirs(os.path.join(data_dir, 'haproxy'), uid, gid, DATA_DIR_MODE) + + data_dir = os.path.join(data_dir, 'haproxy') + populate_files(data_dir, self.files, uid, gid) + + def get_daemon_args(self) -> List[str]: + return ['haproxy', '-f', '/var/lib/haproxy/haproxy.cfg'] + + def validate(self): + # type: () -> None + if not is_fsid(self.fsid): + raise Error('not an fsid: %s' % self.fsid) + if not self.daemon_id: + raise Error('invalid daemon_id: %s' % self.daemon_id) + if not self.image: + raise Error('invalid image: %s' % self.image) + + # check for the required files + if self.required_files: + for fname in self.required_files: + if fname not in self.files: + raise Error('required file missing from config-json: %s' % fname) + + def get_daemon_name(self): + # type: () -> str + return '%s.%s' % (self.daemon_type, self.daemon_id) + + def get_container_name(self, desc=None): + # type: (Optional[str]) -> str + cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) + if desc: + cname = '%s-%s' % (cname, desc) + return cname + + def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: + # better directory for this?
+ return extract_uid_gid(self.ctx, file_path='/var/lib') + + @staticmethod + def _get_container_mounts(data_dir: str) -> Dict[str, str]: + mounts = dict() + mounts[os.path.join(data_dir, 'haproxy')] = '/var/lib/haproxy' + return mounts + + def customize_container_mounts( + self, ctx: CephadmContext, mounts: Dict[str, str] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + mounts.update(self._get_container_mounts(data_dir)) + + @staticmethod + def get_sysctl_settings() -> List[str]: + return [ + '# IP forwarding and non-local bind', + 'net.ipv4.ip_forward = 1', + 'net.ipv4.ip_nonlocal_bind = 1', + ] + + def container(self, ctx: CephadmContext) -> CephContainer: + ctr = daemon_to_container(ctx, self) + return to_deployment_container(ctx, ctr) + + def customize_container_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.extend( + ['--user=root'] + ) # haproxy 2.4 defaults to a different user + + def customize_process_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.extend(self.get_daemon_args()) + + +@register_daemon_form +class Keepalived(ContainerDaemonForm): + """Defines an Keepalived container""" + daemon_type = 'keepalived' + required_files = ['keepalived.conf'] + default_image = DEFAULT_KEEPALIVED_IMAGE + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return cls.daemon_type == daemon_type + + def __init__(self, + ctx: CephadmContext, + fsid: str, daemon_id: Union[int, str], + config_json: Dict, image: str) -> None: + self.ctx = ctx + self.fsid = fsid + self.daemon_id = daemon_id + self.image = image + + # config-json options + self.files = dict_get(config_json, 'files', {}) + + self.validate() + + @classmethod + def init(cls, ctx: CephadmContext, fsid: str, + daemon_id: Union[int, str]) -> 'Keepalived': + return cls(ctx, fsid, daemon_id, + fetch_configs(ctx), ctx.image) + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity)
-> 'Keepalived': + return cls.init(ctx, ident.fsid, ident.daemon_id) + + @property + def identity(self) -> DaemonIdentity: + return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) + + def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: + """Create files under the container data dir""" + if not os.path.isdir(data_dir): + raise OSError('data_dir is not a directory: %s' % (data_dir)) + + # create additional directories in data dir for keepalived to use + if not os.path.isdir(os.path.join(data_dir, 'keepalived')): + makedirs(os.path.join(data_dir, 'keepalived'), uid, gid, DATA_DIR_MODE) + + # populate files from the config-json + populate_files(data_dir, self.files, uid, gid) + + def validate(self): + # type: () -> None + if not is_fsid(self.fsid): + raise Error('not an fsid: %s' % self.fsid) + if not self.daemon_id: + raise Error('invalid daemon_id: %s' % self.daemon_id) + if not self.image: + raise Error('invalid image: %s' % self.image) + + # check for the required files + if self.required_files: + for fname in self.required_files: + if fname not in self.files: + raise Error('required file missing from config-json: %s' % fname) + + def get_daemon_name(self): + # type: () -> str + return '%s.%s' % (self.daemon_type, self.daemon_id) + + def get_container_name(self, desc=None): + # type: (Optional[str]) -> str + cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) + if desc: + cname = '%s-%s' % (cname, desc) + return cname + + @staticmethod + def get_container_envs(): + # type: () -> List[str] + envs = [ + 'KEEPALIVED_AUTOCONF=false', + 'KEEPALIVED_CONF=/etc/keepalived/keepalived.conf', + 'KEEPALIVED_CMD=/usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf', + 'KEEPALIVED_DEBUG=false' + ] + return envs + + @staticmethod + def get_sysctl_settings() -> List[str]: + return [ + '# IP forwarding and non-local bind', + 'net.ipv4.ip_forward = 1', + 'net.ipv4.ip_nonlocal_bind = 1', + ] + + def uid_gid(self, ctx: CephadmContext) 
-> Tuple[int, int]: + # better directory for this? + return extract_uid_gid(self.ctx, file_path='/var/lib') + + @staticmethod + def _get_container_mounts(data_dir: str) -> Dict[str, str]: + mounts = dict() + mounts[os.path.join(data_dir, 'keepalived.conf')] = '/etc/keepalived/keepalived.conf' + return mounts + + def customize_container_mounts( + self, ctx: CephadmContext, mounts: Dict[str, str] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + mounts.update(self._get_container_mounts(data_dir)) + + def container(self, ctx: CephadmContext) -> CephContainer: + ctr = daemon_to_container(ctx, self) + return to_deployment_container(ctx, ctr) + + def customize_container_envs( + self, ctx: CephadmContext, envs: List[str] + ) -> None: + envs.extend(self.get_container_envs()) + + def customize_container_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.extend(['--cap-add=NET_ADMIN', '--cap-add=NET_RAW']) From 3b752dde73732a37991ae12e57d6fd8d6ebec9e2 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 19:34:58 -0500 Subject: [PATCH 13/29] cephadm: move nvmeof class to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 173 +------------------- src/cephadm/cephadmlib/daemons/__init__.py | 9 +- src/cephadm/cephadmlib/daemons/nvmeof.py | 181 +++++++++++++++++++++ 3 files changed, 196 insertions(+), 167 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/nvmeof.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index c588d661c2110..8eac20593b08c 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -41,7 +41,6 @@ DEFAULT_IMAGE_RELEASE, DEFAULT_LOKI_IMAGE, DEFAULT_NODE_EXPORTER_IMAGE, - DEFAULT_NVMEOF_IMAGE, DEFAULT_PROMETHEUS_IMAGE, DEFAULT_PROMTAIL_IMAGE, DEFAULT_SNMP_GATEWAY_IMAGE, @@ -171,7 +170,13 @@ from cephadmlib.firewalld import Firewalld, update_firewalld from cephadmlib import templating from cephadmlib.deployment_utils import to_deployment_container -from 
cephadmlib.daemons import CustomContainer, Tracing, HAproxy, Keepalived +from cephadmlib.daemons import ( + CephNvmeof, + CustomContainer, + HAproxy, + Keepalived, + Tracing, +) FuncT = TypeVar('FuncT', bound=Callable) @@ -1379,170 +1384,6 @@ def customize_container_args( ################################## -@register_daemon_form -class CephNvmeof(ContainerDaemonForm): - """Defines a Ceph-Nvmeof container""" - - daemon_type = 'nvmeof' - required_files = ['ceph-nvmeof.conf'] - default_image = DEFAULT_NVMEOF_IMAGE - - @classmethod - def for_daemon_type(cls, daemon_type: str) -> bool: - return cls.daemon_type == daemon_type - - def __init__(self, - ctx, - fsid, - daemon_id, - config_json, - image=DEFAULT_NVMEOF_IMAGE): - # type: (CephadmContext, str, Union[int, str], Dict, str) -> None - self.ctx = ctx - self.fsid = fsid - self.daemon_id = daemon_id - self.image = image - - # config-json options - self.files = dict_get(config_json, 'files', {}) - - # validate the supplied args - self.validate() - - @classmethod - def init(cls, ctx, fsid, daemon_id): - # type: (CephadmContext, str, Union[int, str]) -> CephNvmeof - return cls(ctx, fsid, daemon_id, - fetch_configs(ctx), ctx.image) - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephNvmeof': - return cls.init(ctx, ident.fsid, ident.daemon_id) - - @property - def identity(self) -> DaemonIdentity: - return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) - - @staticmethod - def _get_container_mounts(data_dir: str) -> Dict[str, str]: - mounts = dict() - mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' - mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' - mounts[os.path.join(data_dir, 'ceph-nvmeof.conf')] = '/src/ceph-nvmeof.conf:z' - mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' - mounts['/dev/hugepages'] = '/dev/hugepages' - mounts['/dev/vfio/vfio'] = '/dev/vfio/vfio' - return mounts - - def customize_container_mounts( - self, 
ctx: CephadmContext, mounts: Dict[str, str] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - mounts.update(self._get_container_mounts(data_dir)) - - def customize_container_binds( - self, ctx: CephadmContext, binds: List[List[str]] - ) -> None: - lib_modules = [ - 'type=bind', - 'source=/lib/modules', - 'destination=/lib/modules', - 'ro=true', - ] - binds.append(lib_modules) - - @staticmethod - def get_version(ctx: CephadmContext, container_id: str) -> Optional[str]: - out, err, ret = call(ctx, - [ctx.container_engine.path, 'inspect', - '--format', '{{index .Config.Labels "io.ceph.version"}}', - ctx.image]) - version = None - if ret == 0: - version = out.strip() - return version - - def validate(self): - # type: () -> None - if not is_fsid(self.fsid): - raise Error('not an fsid: %s' % self.fsid) - if not self.daemon_id: - raise Error('invalid daemon_id: %s' % self.daemon_id) - if not self.image: - raise Error('invalid image: %s' % self.image) - - # check for the required files - if self.required_files: - for fname in self.required_files: - if fname not in self.files: - raise Error('required file missing from config-json: %s' % fname) - - def get_daemon_name(self): - # type: () -> str - return '%s.%s' % (self.daemon_type, self.daemon_id) - - def get_container_name(self, desc=None): - # type: (Optional[str]) -> str - cname = '%s-%s' % (self.fsid, self.get_daemon_name()) - if desc: - cname = '%s-%s' % (cname, desc) - return cname - - def create_daemon_dirs(self, data_dir, uid, gid): - # type: (str, int, int) -> None - """Create files under the container data dir""" - if not os.path.isdir(data_dir): - raise OSError('data_dir is not a directory: %s' % (data_dir)) - - logger.info('Creating ceph-nvmeof config...') - configfs_dir = os.path.join(data_dir, 'configfs') - makedirs(configfs_dir, uid, gid, 0o755) - - # populate files from the config-json - populate_files(data_dir, self.files, uid, gid) - - @staticmethod - def configfs_mount_umount(data_dir, 
mount=True): - # type: (str, bool) -> List[str] - mount_path = os.path.join(data_dir, 'configfs') - if mount: - cmd = 'if ! grep -qs {0} /proc/mounts; then ' \ - 'mount -t configfs none {0}; fi'.format(mount_path) - else: - cmd = 'if grep -qs {0} /proc/mounts; then ' \ - 'umount {0}; fi'.format(mount_path) - return cmd.split() - - @staticmethod - def get_sysctl_settings() -> List[str]: - return [ - 'vm.nr_hugepages = 4096', - ] - - def container(self, ctx: CephadmContext) -> CephContainer: - ctr = daemon_to_container(ctx, self) - return to_deployment_container(ctx, ctr) - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - return 167, 167 # TODO: need to get properly the uid/gid - - def config_and_keyring( - self, ctx: CephadmContext - ) -> Tuple[Optional[str], Optional[str]]: - return get_config_and_keyring(ctx) - - def customize_container_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.append(ctx.container_engine.unlimited_pids_option) - args.extend(['--ulimit', 'memlock=-1:-1']) - args.extend(['--ulimit', 'nofile=10240']) - args.extend(['--cap-add=SYS_ADMIN', '--cap-add=CAP_SYS_NICE']) - - -################################## - - @register_daemon_form class CephExporter(ContainerDaemonForm): """Defines a Ceph exporter container""" diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py index ec94fc577127a..4a48f4da7caca 100644 --- a/src/cephadm/cephadmlib/daemons/__init__.py +++ b/src/cephadm/cephadmlib/daemons/__init__.py @@ -1,5 +1,12 @@ from .custom import CustomContainer from .tracing import Tracing from .ingress import HAproxy, Keepalived +from .nvmeof import CephNvmeof -__all__ = ['CustomContainer', 'Tracing', 'HAproxy', 'Keepalived'] +__all__ = [ + 'CephNvmeof', + 'CustomContainer', + 'HAproxy', + 'Keepalived', + 'Tracing', +] diff --git a/src/cephadm/cephadmlib/daemons/nvmeof.py b/src/cephadm/cephadmlib/daemons/nvmeof.py new file mode 100644 index 0000000000000..a1a18f5a18baa 
--- /dev/null +++ b/src/cephadm/cephadmlib/daemons/nvmeof.py @@ -0,0 +1,181 @@ +import logging +import os + +from typing import Dict, List, Optional, Tuple, Union + +from ..container_daemon_form import ContainerDaemonForm, daemon_to_container +from ..container_types import CephContainer +from ..context_getters import fetch_configs, get_config_and_keyring +from ..daemon_form import register as register_daemon_form +from ..daemon_identity import DaemonIdentity +from ..constants import DEFAULT_NVMEOF_IMAGE +from ..context import CephadmContext +from ..data_utils import dict_get, is_fsid +from ..deployment_utils import to_deployment_container +from ..exceptions import Error +from ..file_utils import makedirs, populate_files +from ..call_wrappers import call + + +logger = logging.getLogger() + + +@register_daemon_form +class CephNvmeof(ContainerDaemonForm): + """Defines a Ceph-Nvmeof container""" + + daemon_type = 'nvmeof' + required_files = ['ceph-nvmeof.conf'] + default_image = DEFAULT_NVMEOF_IMAGE + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return cls.daemon_type == daemon_type + + def __init__(self, + ctx, + fsid, + daemon_id, + config_json, + image=DEFAULT_NVMEOF_IMAGE): + # type: (CephadmContext, str, Union[int, str], Dict, str) -> None + self.ctx = ctx + self.fsid = fsid + self.daemon_id = daemon_id + self.image = image + + # config-json options + self.files = dict_get(config_json, 'files', {}) + + # validate the supplied args + self.validate() + + @classmethod + def init(cls, ctx, fsid, daemon_id): + # type: (CephadmContext, str, Union[int, str]) -> CephNvmeof + return cls(ctx, fsid, daemon_id, + fetch_configs(ctx), ctx.image) + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephNvmeof': + return cls.init(ctx, ident.fsid, ident.daemon_id) + + @property + def identity(self) -> DaemonIdentity: + return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) + + @staticmethod + def 
_get_container_mounts(data_dir: str) -> Dict[str, str]: + mounts = dict() + mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' + mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' + mounts[os.path.join(data_dir, 'ceph-nvmeof.conf')] = '/src/ceph-nvmeof.conf:z' + mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' + mounts['/dev/hugepages'] = '/dev/hugepages' + mounts['/dev/vfio/vfio'] = '/dev/vfio/vfio' + return mounts + + def customize_container_mounts( + self, ctx: CephadmContext, mounts: Dict[str, str] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + mounts.update(self._get_container_mounts(data_dir)) + + def customize_container_binds( + self, ctx: CephadmContext, binds: List[List[str]] + ) -> None: + lib_modules = [ + 'type=bind', + 'source=/lib/modules', + 'destination=/lib/modules', + 'ro=true', + ] + binds.append(lib_modules) + + @staticmethod + def get_version(ctx: CephadmContext, container_id: str) -> Optional[str]: + out, err, ret = call(ctx, + [ctx.container_engine.path, 'inspect', + '--format', '{{index .Config.Labels "io.ceph.version"}}', + ctx.image]) + version = None + if ret == 0: + version = out.strip() + return version + + def validate(self): + # type: () -> None + if not is_fsid(self.fsid): + raise Error('not an fsid: %s' % self.fsid) + if not self.daemon_id: + raise Error('invalid daemon_id: %s' % self.daemon_id) + if not self.image: + raise Error('invalid image: %s' % self.image) + + # check for the required files + if self.required_files: + for fname in self.required_files: + if fname not in self.files: + raise Error('required file missing from config-json: %s' % fname) + + def get_daemon_name(self): + # type: () -> str + return '%s.%s' % (self.daemon_type, self.daemon_id) + + def get_container_name(self, desc=None): + # type: (Optional[str]) -> str + cname = '%s-%s' % (self.fsid, self.get_daemon_name()) + if desc: + cname = '%s-%s' % (cname, desc) + return cname + + def 
create_daemon_dirs(self, data_dir, uid, gid): + # type: (str, int, int) -> None + """Create files under the container data dir""" + if not os.path.isdir(data_dir): + raise OSError('data_dir is not a directory: %s' % (data_dir)) + + logger.info('Creating ceph-nvmeof config...') + configfs_dir = os.path.join(data_dir, 'configfs') + makedirs(configfs_dir, uid, gid, 0o755) + + # populate files from the config-json + populate_files(data_dir, self.files, uid, gid) + + @staticmethod + def configfs_mount_umount(data_dir, mount=True): + # type: (str, bool) -> List[str] + mount_path = os.path.join(data_dir, 'configfs') + if mount: + cmd = 'if ! grep -qs {0} /proc/mounts; then ' \ + 'mount -t configfs none {0}; fi'.format(mount_path) + else: + cmd = 'if grep -qs {0} /proc/mounts; then ' \ + 'umount {0}; fi'.format(mount_path) + return cmd.split() + + @staticmethod + def get_sysctl_settings() -> List[str]: + return [ + 'vm.nr_hugepages = 4096', + ] + + def container(self, ctx: CephadmContext) -> CephContainer: + ctr = daemon_to_container(ctx, self) + return to_deployment_container(ctx, ctr) + + def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: + return 167, 167 # TODO: need to get properly the uid/gid + + def config_and_keyring( + self, ctx: CephadmContext + ) -> Tuple[Optional[str], Optional[str]]: + return get_config_and_keyring(ctx) + + def customize_container_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.append(ctx.container_engine.unlimited_pids_option) + args.extend(['--ulimit', 'memlock=-1:-1']) + args.extend(['--ulimit', 'nofile=10240']) + args.extend(['--cap-add=SYS_ADMIN', '--cap-add=CAP_SYS_NICE']) From c093a1991d352e636429223f75b599bc4c311dd7 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 8 Nov 2023 19:42:27 -0500 Subject: [PATCH 14/29] cephadm: move iscsi class to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 241 +------------------ src/cephadm/cephadmlib/daemons/__init__.py | 2 + 
src/cephadm/cephadmlib/daemons/iscsi.py | 257 +++++++++++++++++++++ 3 files changed, 261 insertions(+), 239 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/iscsi.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index 8eac20593b08c..4ef7957effc08 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -138,7 +138,7 @@ wrap_ipv6, ) from cephadmlib.locking import FileLock -from cephadmlib.daemon_identity import DaemonIdentity, DaemonSubIdentity +from cephadmlib.daemon_identity import DaemonIdentity from cephadmlib.packagers import create_packager, Packager from cephadmlib.logging import cephadm_init_logging, Highlight, LogDestination from cephadmlib.systemd import check_unit, check_units @@ -171,6 +171,7 @@ from cephadmlib import templating from cephadmlib.deployment_utils import to_deployment_container from cephadmlib.daemons import ( + CephIscsi, CephNvmeof, CustomContainer, HAproxy, @@ -1146,244 +1147,6 @@ def default_entrypoint(self) -> str: ################################## -@register_daemon_form -class CephIscsi(ContainerDaemonForm): - """Defines a Ceph-Iscsi container""" - - daemon_type = 'iscsi' - entrypoint = '/usr/bin/rbd-target-api' - - required_files = ['iscsi-gateway.cfg'] - - @classmethod - def for_daemon_type(cls, daemon_type: str) -> bool: - return cls.daemon_type == daemon_type - - def __init__(self, - ctx: CephadmContext, - ident: DaemonIdentity, - config_json: Dict, - image: str = DEFAULT_IMAGE): - self.ctx = ctx - self._identity = ident - self.image = image - - # config-json options - self.files = dict_get(config_json, 'files', {}) - - # validate the supplied args - self.validate() - - @classmethod - def init(cls, ctx: CephadmContext, fsid: str, daemon_id: str) -> 'CephIscsi': - return cls.create(ctx, DaemonIdentity(fsid, cls.daemon_type, daemon_id)) - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephIscsi': - return cls(ctx, ident, fetch_configs(ctx), ctx.image) - - 
@property - def identity(self) -> DaemonIdentity: - return self._identity - - @property - def fsid(self) -> str: - return self._identity.fsid - - @property - def daemon_id(self) -> str: - return self._identity.daemon_id - - @staticmethod - def _get_container_mounts(data_dir, log_dir): - # type: (str, str) -> Dict[str, str] - mounts = dict() - mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' - mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' - mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z' - mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' - mounts[os.path.join(data_dir, 'tcmu-runner-entrypoint.sh')] = '/usr/local/scripts/tcmu-runner-entrypoint.sh' - mounts[log_dir] = '/var/log:z' - mounts['/dev'] = '/dev' - return mounts - - def customize_container_mounts( - self, ctx: CephadmContext, mounts: Dict[str, str] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - # Removes ending ".tcmu" from data_dir a tcmu-runner uses the same - # data_dir as rbd-runner-api - if data_dir.endswith('.tcmu'): - data_dir = re.sub(r'\.tcmu$', '', data_dir) - log_dir = get_log_dir(self.identity.fsid, ctx.log_dir) - mounts.update(CephIscsi._get_container_mounts(data_dir, log_dir)) - - def customize_container_binds( - self, ctx: CephadmContext, binds: List[List[str]] - ) -> None: - lib_modules = [ - 'type=bind', - 'source=/lib/modules', - 'destination=/lib/modules', - 'ro=true', - ] - binds.append(lib_modules) - - @staticmethod - def get_version(ctx, container_id): - # type: (CephadmContext, str) -> Optional[str] - version = None - out, err, code = call(ctx, - [ctx.container_engine.path, 'exec', container_id, - '/usr/bin/python3', '-c', - "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], - verbosity=CallVerbosity.QUIET) - if code == 0: - version = out.strip() - return version - - def validate(self): - # type: () -> None - if not is_fsid(self.fsid): - raise 
Error('not an fsid: %s' % self.fsid) - if not self.daemon_id: - raise Error('invalid daemon_id: %s' % self.daemon_id) - if not self.image: - raise Error('invalid image: %s' % self.image) - - # check for the required files - if self.required_files: - for fname in self.required_files: - if fname not in self.files: - raise Error('required file missing from config-json: %s' % fname) - - def get_daemon_name(self): - # type: () -> str - return '%s.%s' % (self.daemon_type, self.daemon_id) - - def get_container_name(self, desc=None): - # type: (Optional[str]) -> str - cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) - if desc: - cname = '%s-%s' % (cname, desc) - return cname - - def create_daemon_dirs(self, data_dir, uid, gid): - # type: (str, int, int) -> None - """Create files under the container data dir""" - if not os.path.isdir(data_dir): - raise OSError('data_dir is not a directory: %s' % (data_dir)) - - logger.info('Creating ceph-iscsi config...') - configfs_dir = os.path.join(data_dir, 'configfs') - makedirs(configfs_dir, uid, gid, 0o755) - - # set up the tcmu-runner entrypoint script - # to be mounted into the container. For more info - # on why we need this script, see the - # tcmu_runner_entrypoint_script function - self.files['tcmu-runner-entrypoint.sh'] = self.tcmu_runner_entrypoint_script() - - # populate files from the config-json - populate_files(data_dir, self.files, uid, gid) - - # we want the tcmu runner entrypoint script to be executable - # populate_files will give it 0o600 by default - os.chmod(os.path.join(data_dir, 'tcmu-runner-entrypoint.sh'), 0o700) - - @staticmethod - def configfs_mount_umount(data_dir, mount=True): - # type: (str, bool) -> List[str] - mount_path = os.path.join(data_dir, 'configfs') - if mount: - cmd = 'if ! 
grep -qs {0} /proc/mounts; then ' \ - 'mount -t configfs none {0}; fi'.format(mount_path) - else: - cmd = 'if grep -qs {0} /proc/mounts; then ' \ - 'umount {0}; fi'.format(mount_path) - return cmd.split() - - @staticmethod - def tcmu_runner_entrypoint_script() -> str: - # since we are having tcmu-runner be a background - # process in its systemd unit (rbd-target-api being - # the main process) systemd will not restart it when - # it fails. in order to try and get around that for now - # we can have a script mounted in the container that - # that attempts to do the restarting for us. This script - # can then become the entrypoint for the tcmu-runner - # container - - # This is intended to be dropped for a better solution - # for at least the squid release onward - return """#!/bin/bash -RUN_DIR=/var/run/tcmu-runner - -if [ ! -d "${RUN_DIR}" ] ; then - mkdir -p "${RUN_DIR}" -fi - -rm -rf "${RUN_DIR}"/* - -while true -do - touch "${RUN_DIR}"/start-up-$(date -Ins) - /usr/bin/tcmu-runner - - # If we got around 3 kills/segfaults in the last minute, - # don't start anymore - if [ $(find "${RUN_DIR}" -type f -cmin -1 | wc -l) -ge 3 ] ; then - exit 0 - fi - - sleep 1 -done -""" - - def get_tcmu_runner_container(self): - # type: () -> CephContainer - # daemon_id, is used to generated the cid and pid files used by podman but as both tcmu-runner - # and rbd-target-api have the same daemon_id, it conflits and prevent the second container from - # starting. .tcmu runner is appended to the daemon_id to fix that. - subident = DaemonSubIdentity( - self.fsid, self.daemon_type, self.daemon_id, 'tcmu' - ) - tcmu_dmn = self.create(self.ctx, subident) - tcmu_container = to_deployment_container( - self.ctx, daemon_to_container(self.ctx, tcmu_dmn, privileged=True) - ) - # TODO: Eventually we don't want to run tcmu-runner through this script. 
- # This is intended to be a workaround backported to older releases - # and should eventually be removed in at least squid onward - tcmu_container.entrypoint = '/usr/local/scripts/tcmu-runner-entrypoint.sh' - tcmu_container.cname = self.get_container_name(desc='tcmu') - return tcmu_container - - def container(self, ctx: CephadmContext) -> CephContainer: - # So the container can modprobe iscsi_target_mod and have write perms - # to configfs we need to make this a privileged container. - ctr = daemon_to_container(ctx, self, privileged=True) - return to_deployment_container(ctx, ctr) - - def config_and_keyring( - self, ctx: CephadmContext - ) -> Tuple[Optional[str], Optional[str]]: - return get_config_and_keyring(ctx) - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - return extract_uid_gid(ctx) - - def default_entrypoint(self) -> str: - return self.entrypoint - - def customize_container_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.append(ctx.container_engine.unlimited_pids_option) - -################################## - - @register_daemon_form class CephExporter(ContainerDaemonForm): """Defines a Ceph exporter container""" diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py index 4a48f4da7caca..dbe01783e4c3c 100644 --- a/src/cephadm/cephadmlib/daemons/__init__.py +++ b/src/cephadm/cephadmlib/daemons/__init__.py @@ -2,8 +2,10 @@ from .tracing import Tracing from .ingress import HAproxy, Keepalived from .nvmeof import CephNvmeof +from .iscsi import CephIscsi __all__ = [ + 'CephIscsi', 'CephNvmeof', 'CustomContainer', 'HAproxy', diff --git a/src/cephadm/cephadmlib/daemons/iscsi.py b/src/cephadm/cephadmlib/daemons/iscsi.py new file mode 100644 index 0000000000000..504db6885583d --- /dev/null +++ b/src/cephadm/cephadmlib/daemons/iscsi.py @@ -0,0 +1,257 @@ +import logging +import os +import re + +from typing import Dict, List, Optional, Tuple + +from ..container_daemon_form import 
ContainerDaemonForm, daemon_to_container +from ..container_types import CephContainer, extract_uid_gid +from ..context_getters import fetch_configs, get_config_and_keyring +from ..daemon_form import register as register_daemon_form +from ..daemon_identity import DaemonIdentity, DaemonSubIdentity +from ..constants import DEFAULT_IMAGE +from ..context import CephadmContext +from ..data_utils import dict_get, is_fsid +from ..deployment_utils import to_deployment_container +from ..exceptions import Error +from ..file_utils import makedirs, populate_files +from ..call_wrappers import call, CallVerbosity + + +logger = logging.getLogger() + + +@register_daemon_form +class CephIscsi(ContainerDaemonForm): + """Defines a Ceph-Iscsi container""" + + daemon_type = 'iscsi' + entrypoint = '/usr/bin/rbd-target-api' + + required_files = ['iscsi-gateway.cfg'] + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return cls.daemon_type == daemon_type + + def __init__(self, + ctx: CephadmContext, + ident: DaemonIdentity, + config_json: Dict, + image: str = DEFAULT_IMAGE): + self.ctx = ctx + self._identity = ident + self.image = image + + # config-json options + self.files = dict_get(config_json, 'files', {}) + + # validate the supplied args + self.validate() + + @classmethod + def init(cls, ctx: CephadmContext, fsid: str, daemon_id: str) -> 'CephIscsi': + return cls.create(ctx, DaemonIdentity(fsid, cls.daemon_type, daemon_id)) + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephIscsi': + return cls(ctx, ident, fetch_configs(ctx), ctx.image) + + @property + def identity(self) -> DaemonIdentity: + return self._identity + + @property + def fsid(self) -> str: + return self._identity.fsid + + @property + def daemon_id(self) -> str: + return self._identity.daemon_id + + @staticmethod + def _get_container_mounts(data_dir, log_dir): + # type: (str, str) -> Dict[str, str] + mounts = dict() + mounts[os.path.join(data_dir, 'config')] = 
'/etc/ceph/ceph.conf:z' + mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' + mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z' + mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' + mounts[os.path.join(data_dir, 'tcmu-runner-entrypoint.sh')] = '/usr/local/scripts/tcmu-runner-entrypoint.sh' + mounts[log_dir] = '/var/log:z' + mounts['/dev'] = '/dev' + return mounts + + def customize_container_mounts( + self, ctx: CephadmContext, mounts: Dict[str, str] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + # Removes ending ".tcmu" from data_dir as tcmu-runner uses the same + # data_dir as rbd-target-api + if data_dir.endswith('.tcmu'): + data_dir = re.sub(r'\.tcmu$', '', data_dir) + log_dir = os.path.join(ctx.log_dir, self.identity.fsid) + mounts.update(CephIscsi._get_container_mounts(data_dir, log_dir)) + + def customize_container_binds( + self, ctx: CephadmContext, binds: List[List[str]] + ) -> None: + lib_modules = [ + 'type=bind', + 'source=/lib/modules', + 'destination=/lib/modules', + 'ro=true', + ] + binds.append(lib_modules) + + @staticmethod + def get_version(ctx, container_id): + # type: (CephadmContext, str) -> Optional[str] + version = None + out, err, code = call(ctx, + [ctx.container_engine.path, 'exec', container_id, + '/usr/bin/python3', '-c', + "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], + verbosity=CallVerbosity.QUIET) + if code == 0: + version = out.strip() + return version + + def validate(self): + # type: () -> None + if not is_fsid(self.fsid): + raise Error('not an fsid: %s' % self.fsid) + if not self.daemon_id: + raise Error('invalid daemon_id: %s' % self.daemon_id) + if not self.image: + raise Error('invalid image: %s' % self.image) + + # check for the required files + if self.required_files: + for fname in self.required_files: + if fname not in self.files: + raise Error('required file missing from config-json: %s' % fname) + + def 
get_daemon_name(self): + # type: () -> str + return '%s.%s' % (self.daemon_type, self.daemon_id) + + def get_container_name(self, desc=None): + # type: (Optional[str]) -> str + cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) + if desc: + cname = '%s-%s' % (cname, desc) + return cname + + def create_daemon_dirs(self, data_dir, uid, gid): + # type: (str, int, int) -> None + """Create files under the container data dir""" + if not os.path.isdir(data_dir): + raise OSError('data_dir is not a directory: %s' % (data_dir)) + + logger.info('Creating ceph-iscsi config...') + configfs_dir = os.path.join(data_dir, 'configfs') + makedirs(configfs_dir, uid, gid, 0o755) + + # set up the tcmu-runner entrypoint script + # to be mounted into the container. For more info + # on why we need this script, see the + # tcmu_runner_entrypoint_script function + self.files['tcmu-runner-entrypoint.sh'] = self.tcmu_runner_entrypoint_script() + + # populate files from the config-json + populate_files(data_dir, self.files, uid, gid) + + # we want the tcmu runner entrypoint script to be executable + # populate_files will give it 0o600 by default + os.chmod(os.path.join(data_dir, 'tcmu-runner-entrypoint.sh'), 0o700) + + @staticmethod + def configfs_mount_umount(data_dir, mount=True): + # type: (str, bool) -> List[str] + mount_path = os.path.join(data_dir, 'configfs') + if mount: + cmd = 'if ! grep -qs {0} /proc/mounts; then ' \ + 'mount -t configfs none {0}; fi'.format(mount_path) + else: + cmd = 'if grep -qs {0} /proc/mounts; then ' \ + 'umount {0}; fi'.format(mount_path) + return cmd.split() + + @staticmethod + def tcmu_runner_entrypoint_script() -> str: + # since we are having tcmu-runner be a background + # process in its systemd unit (rbd-target-api being + # the main process) systemd will not restart it when + # it fails. in order to try and get around that for now + # we can have a script mounted in the container + # that attempts to do the restarting for us. 
This script + # can then become the entrypoint for the tcmu-runner + # container + + # This is intended to be dropped for a better solution + # for at least the squid release onward + return """#!/bin/bash +RUN_DIR=/var/run/tcmu-runner + +if [ ! -d "${RUN_DIR}" ] ; then + mkdir -p "${RUN_DIR}" +fi + +rm -rf "${RUN_DIR}"/* + +while true +do + touch "${RUN_DIR}"/start-up-$(date -Ins) + /usr/bin/tcmu-runner + + # If we got around 3 kills/segfaults in the last minute, + # don't start anymore + if [ $(find "${RUN_DIR}" -type f -cmin -1 | wc -l) -ge 3 ] ; then + exit 0 + fi + + sleep 1 +done +""" + + def get_tcmu_runner_container(self): + # type: () -> CephContainer + # daemon_id is used to generate the cid and pid files used by podman but as both tcmu-runner + # and rbd-target-api have the same daemon_id, it conflicts and prevents the second container from + # starting. '.tcmu' is appended to the daemon_id to fix that. + subident = DaemonSubIdentity( + self.fsid, self.daemon_type, self.daemon_id, 'tcmu' + ) + tcmu_dmn = self.create(self.ctx, subident) + tcmu_container = to_deployment_container( + self.ctx, daemon_to_container(self.ctx, tcmu_dmn, privileged=True) + ) + # TODO: Eventually we don't want to run tcmu-runner through this script. + # This is intended to be a workaround backported to older releases + # and should eventually be removed in at least squid onward + tcmu_container.entrypoint = '/usr/local/scripts/tcmu-runner-entrypoint.sh' + tcmu_container.cname = self.get_container_name(desc='tcmu') + return tcmu_container + + def container(self, ctx: CephadmContext) -> CephContainer: + # So the container can modprobe iscsi_target_mod and have write perms + # to configfs we need to make this a privileged container. 
+ ctr = daemon_to_container(ctx, self, privileged=True) + return to_deployment_container(ctx, ctr) + + def config_and_keyring( + self, ctx: CephadmContext + ) -> Tuple[Optional[str], Optional[str]]: + return get_config_and_keyring(ctx) + + def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: + return extract_uid_gid(ctx) + + def default_entrypoint(self) -> str: + return self.entrypoint + + def customize_container_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.append(ctx.container_engine.unlimited_pids_option) From 0a6a1dfa966a560237387629a8be49bf4d61ebf9 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Thu, 9 Nov 2023 09:57:56 -0500 Subject: [PATCH 15/29] cephadm: move nfs class to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 200 +------------------ src/cephadm/cephadmlib/daemons/__init__.py | 2 + src/cephadm/cephadmlib/daemons/nfs.py | 218 +++++++++++++++++++++ src/cephadm/tests/test_nfs.py | 6 +- 4 files changed, 225 insertions(+), 201 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/nfs.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index 4ef7957effc08..75fe0104af582 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -100,7 +100,6 @@ registry_login, ) from cephadmlib.data_utils import ( - dict_get, dict_get_join, get_legacy_config_fsid, is_fsid, @@ -113,7 +112,6 @@ get_file_timestamp, makedirs, pathify, - populate_files, read_file, recursive_chown, touch, @@ -176,6 +174,7 @@ CustomContainer, HAproxy, Keepalived, + NFSGanesha, Tracing, ) @@ -950,203 +949,6 @@ def default_entrypoint(self) -> str: ################################## -@register_daemon_form -class NFSGanesha(ContainerDaemonForm): - """Defines a NFS-Ganesha container""" - - daemon_type = 'nfs' - entrypoint = '/usr/bin/ganesha.nfsd' - daemon_args = ['-F', '-L', 'STDERR'] - - required_files = ['ganesha.conf'] - - port_map = { - 'nfs': 2049, - } - - @classmethod - def for_daemon_type(cls, daemon_type: 
str) -> bool: - return cls.daemon_type == daemon_type - - def __init__(self, - ctx, - fsid, - daemon_id, - config_json, - image=DEFAULT_IMAGE): - # type: (CephadmContext, str, Union[int, str], Dict, str) -> None - self.ctx = ctx - self.fsid = fsid - self.daemon_id = daemon_id - self.image = image - - # config-json options - self.pool = dict_get(config_json, 'pool', require=True) - self.namespace = dict_get(config_json, 'namespace') - self.userid = dict_get(config_json, 'userid') - self.extra_args = dict_get(config_json, 'extra_args', []) - self.files = dict_get(config_json, 'files', {}) - self.rgw = dict_get(config_json, 'rgw', {}) - - # validate the supplied args - self.validate() - - @classmethod - def init(cls, ctx, fsid, daemon_id): - # type: (CephadmContext, str, Union[int, str]) -> NFSGanesha - return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'NFSGanesha': - return cls.init(ctx, ident.fsid, ident.daemon_id) - - @property - def identity(self) -> DaemonIdentity: - return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) - - def _get_container_mounts(self, data_dir): - # type: (str) -> Dict[str, str] - mounts = dict() - mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' - mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' - mounts[os.path.join(data_dir, 'etc/ganesha')] = '/etc/ganesha:z' - if self.rgw: - cluster = self.rgw.get('cluster', 'ceph') - rgw_user = self.rgw.get('user', 'admin') - mounts[os.path.join(data_dir, 'keyring.rgw')] = \ - '/var/lib/ceph/radosgw/%s-%s/keyring:z' % (cluster, rgw_user) - return mounts - - def customize_container_mounts( - self, ctx: CephadmContext, mounts: Dict[str, str] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - mounts.update(self._get_container_mounts(data_dir)) - - @staticmethod - def get_container_envs(): - # type: () -> List[str] - envs = [ - 'CEPH_CONF=%s' % 
(CEPH_DEFAULT_CONF) - ] - return envs - - @staticmethod - def get_version(ctx, container_id): - # type: (CephadmContext, str) -> Optional[str] - version = None - out, err, code = call(ctx, - [ctx.container_engine.path, 'exec', container_id, - NFSGanesha.entrypoint, '-v'], - verbosity=CallVerbosity.QUIET) - if code == 0: - match = re.search(r'NFS-Ganesha Release\s*=\s*[V]*([\d.]+)', out) - if match: - version = match.group(1) - return version - - def validate(self): - # type: () -> None - if not is_fsid(self.fsid): - raise Error('not an fsid: %s' % self.fsid) - if not self.daemon_id: - raise Error('invalid daemon_id: %s' % self.daemon_id) - if not self.image: - raise Error('invalid image: %s' % self.image) - - # check for the required files - if self.required_files: - for fname in self.required_files: - if fname not in self.files: - raise Error('required file missing from config-json: %s' % fname) - - # check for an RGW config - if self.rgw: - if not self.rgw.get('keyring'): - raise Error('RGW keyring is missing') - if not self.rgw.get('user'): - raise Error('RGW user is missing') - - def get_daemon_name(self): - # type: () -> str - return '%s.%s' % (self.daemon_type, self.daemon_id) - - def get_container_name(self, desc=None): - # type: (Optional[str]) -> str - cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) - if desc: - cname = '%s-%s' % (cname, desc) - return cname - - def get_daemon_args(self): - # type: () -> List[str] - return self.daemon_args + self.extra_args - - def create_daemon_dirs(self, data_dir, uid, gid): - # type: (str, int, int) -> None - """Create files under the container data dir""" - if not os.path.isdir(data_dir): - raise OSError('data_dir is not a directory: %s' % (data_dir)) - - logger.info('Creating ganesha config...') - - # create the ganesha conf dir - config_dir = os.path.join(data_dir, 'etc/ganesha') - makedirs(config_dir, uid, gid, 0o755) - - # populate files from the config-json - populate_files(config_dir, self.files, uid, 
gid) - - # write the RGW keyring - if self.rgw: - keyring_path = os.path.join(data_dir, 'keyring.rgw') - with write_new(keyring_path, owner=(uid, gid)) as f: - f.write(self.rgw.get('keyring', '')) - - def firewall_service_name(self) -> str: - return 'nfs' - - def container(self, ctx: CephadmContext) -> CephContainer: - ctr = daemon_to_container(ctx, self) - return to_deployment_container(ctx, ctr) - - def customize_container_endpoints( - self, endpoints: List[EndPoint], deployment_type: DeploymentType - ) -> None: - if deployment_type == DeploymentType.DEFAULT and not endpoints: - nfs_ports = list(NFSGanesha.port_map.values()) - endpoints.extend([EndPoint('0.0.0.0', p) for p in nfs_ports]) - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - # TODO: extract ganesha uid/gid (997, 994) ? - return extract_uid_gid(ctx) - - def config_and_keyring( - self, ctx: CephadmContext - ) -> Tuple[Optional[str], Optional[str]]: - return get_config_and_keyring(ctx) - - def customize_container_envs( - self, ctx: CephadmContext, envs: List[str] - ) -> None: - envs.extend(self.get_container_envs()) - - def customize_process_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.extend(self.get_daemon_args()) - - def customize_container_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.append(ctx.container_engine.unlimited_pids_option) - - def default_entrypoint(self) -> str: - return self.entrypoint - -################################## - - @register_daemon_form class CephExporter(ContainerDaemonForm): """Defines a Ceph exporter container""" diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py index dbe01783e4c3c..96d337b0c660d 100644 --- a/src/cephadm/cephadmlib/daemons/__init__.py +++ b/src/cephadm/cephadmlib/daemons/__init__.py @@ -3,6 +3,7 @@ from .ingress import HAproxy, Keepalived from .nvmeof import CephNvmeof from .iscsi import CephIscsi +from .nfs import NFSGanesha __all__ = [ 
'CephIscsi', @@ -10,5 +11,6 @@ 'CustomContainer', 'HAproxy', 'Keepalived', + 'NFSGanesha', 'Tracing', ] diff --git a/src/cephadm/cephadmlib/daemons/nfs.py b/src/cephadm/cephadmlib/daemons/nfs.py new file mode 100644 index 0000000000000..48653b775fb08 --- /dev/null +++ b/src/cephadm/cephadmlib/daemons/nfs.py @@ -0,0 +1,218 @@ +import logging +import os +import re + +from typing import Dict, List, Optional, Tuple, Union + +from ..call_wrappers import call, CallVerbosity +from ..constants import DEFAULT_IMAGE, CEPH_DEFAULT_CONF +from ..container_daemon_form import ContainerDaemonForm, daemon_to_container +from ..container_types import CephContainer, extract_uid_gid +from ..context import CephadmContext +from ..context_getters import fetch_configs, get_config_and_keyring +from ..daemon_form import register as register_daemon_form +from ..daemon_identity import DaemonIdentity +from ..data_utils import dict_get, is_fsid +from ..deploy import DeploymentType +from ..deployment_utils import to_deployment_container +from ..exceptions import Error +from ..file_utils import makedirs, populate_files, write_new +from ..net_utils import EndPoint + + +logger = logging.getLogger() + + +@register_daemon_form +class NFSGanesha(ContainerDaemonForm): + """Defines a NFS-Ganesha container""" + + daemon_type = 'nfs' + entrypoint = '/usr/bin/ganesha.nfsd' + daemon_args = ['-F', '-L', 'STDERR'] + + required_files = ['ganesha.conf'] + + port_map = { + 'nfs': 2049, + } + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return cls.daemon_type == daemon_type + + def __init__(self, + ctx, + fsid, + daemon_id, + config_json, + image=DEFAULT_IMAGE): + # type: (CephadmContext, str, Union[int, str], Dict, str) -> None + self.ctx = ctx + self.fsid = fsid + self.daemon_id = daemon_id + self.image = image + + # config-json options + self.pool = dict_get(config_json, 'pool', require=True) + self.namespace = dict_get(config_json, 'namespace') + self.userid = dict_get(config_json, 
'userid') + self.extra_args = dict_get(config_json, 'extra_args', []) + self.files = dict_get(config_json, 'files', {}) + self.rgw = dict_get(config_json, 'rgw', {}) + + # validate the supplied args + self.validate() + + @classmethod + def init(cls, ctx, fsid, daemon_id): + # type: (CephadmContext, str, Union[int, str]) -> NFSGanesha + return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'NFSGanesha': + return cls.init(ctx, ident.fsid, ident.daemon_id) + + @property + def identity(self) -> DaemonIdentity: + return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) + + def _get_container_mounts(self, data_dir): + # type: (str) -> Dict[str, str] + mounts = dict() + mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' + mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' + mounts[os.path.join(data_dir, 'etc/ganesha')] = '/etc/ganesha:z' + if self.rgw: + cluster = self.rgw.get('cluster', 'ceph') + rgw_user = self.rgw.get('user', 'admin') + mounts[os.path.join(data_dir, 'keyring.rgw')] = \ + '/var/lib/ceph/radosgw/%s-%s/keyring:z' % (cluster, rgw_user) + return mounts + + def customize_container_mounts( + self, ctx: CephadmContext, mounts: Dict[str, str] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + mounts.update(self._get_container_mounts(data_dir)) + + @staticmethod + def get_container_envs(): + # type: () -> List[str] + envs = [ + 'CEPH_CONF=%s' % (CEPH_DEFAULT_CONF) + ] + return envs + + @staticmethod + def get_version(ctx, container_id): + # type: (CephadmContext, str) -> Optional[str] + version = None + out, err, code = call(ctx, + [ctx.container_engine.path, 'exec', container_id, + NFSGanesha.entrypoint, '-v'], + verbosity=CallVerbosity.QUIET) + if code == 0: + match = re.search(r'NFS-Ganesha Release\s*=\s*[V]*([\d.]+)', out) + if match: + version = match.group(1) + return version + + def validate(self): + # type: () 
-> None + if not is_fsid(self.fsid): + raise Error('not an fsid: %s' % self.fsid) + if not self.daemon_id: + raise Error('invalid daemon_id: %s' % self.daemon_id) + if not self.image: + raise Error('invalid image: %s' % self.image) + + # check for the required files + if self.required_files: + for fname in self.required_files: + if fname not in self.files: + raise Error('required file missing from config-json: %s' % fname) + + # check for an RGW config + if self.rgw: + if not self.rgw.get('keyring'): + raise Error('RGW keyring is missing') + if not self.rgw.get('user'): + raise Error('RGW user is missing') + + def get_daemon_name(self): + # type: () -> str + return '%s.%s' % (self.daemon_type, self.daemon_id) + + def get_container_name(self, desc=None): + # type: (Optional[str]) -> str + cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) + if desc: + cname = '%s-%s' % (cname, desc) + return cname + + def get_daemon_args(self): + # type: () -> List[str] + return self.daemon_args + self.extra_args + + def create_daemon_dirs(self, data_dir, uid, gid): + # type: (str, int, int) -> None + """Create files under the container data dir""" + if not os.path.isdir(data_dir): + raise OSError('data_dir is not a directory: %s' % (data_dir)) + + logger.info('Creating ganesha config...') + + # create the ganesha conf dir + config_dir = os.path.join(data_dir, 'etc/ganesha') + makedirs(config_dir, uid, gid, 0o755) + + # populate files from the config-json + populate_files(config_dir, self.files, uid, gid) + + # write the RGW keyring + if self.rgw: + keyring_path = os.path.join(data_dir, 'keyring.rgw') + with write_new(keyring_path, owner=(uid, gid)) as f: + f.write(self.rgw.get('keyring', '')) + + def firewall_service_name(self) -> str: + return 'nfs' + + def container(self, ctx: CephadmContext) -> CephContainer: + ctr = daemon_to_container(ctx, self) + return to_deployment_container(ctx, ctr) + + def customize_container_endpoints( + self, endpoints: List[EndPoint], 
deployment_type: DeploymentType + ) -> None: + if deployment_type == DeploymentType.DEFAULT and not endpoints: + nfs_ports = list(NFSGanesha.port_map.values()) + endpoints.extend([EndPoint('0.0.0.0', p) for p in nfs_ports]) + + def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: + # TODO: extract ganesha uid/gid (997, 994) ? + return extract_uid_gid(ctx) + + def config_and_keyring( + self, ctx: CephadmContext + ) -> Tuple[Optional[str], Optional[str]]: + return get_config_and_keyring(ctx) + + def customize_container_envs( + self, ctx: CephadmContext, envs: List[str] + ) -> None: + envs.extend(self.get_container_envs()) + + def customize_process_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.extend(self.get_daemon_args()) + + def customize_container_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.append(ctx.container_engine.unlimited_pids_option) + + def default_entrypoint(self) -> str: + return self.entrypoint diff --git a/src/cephadm/tests/test_nfs.py b/src/cephadm/tests/test_nfs.py index 94ab6afcfdf92..aae8113382dc3 100644 --- a/src/cephadm/tests/test_nfs.py +++ b/src/cephadm/tests/test_nfs.py @@ -155,15 +155,17 @@ def test_nfsganesha_container_envs(): def test_nfsganesha_get_version(): + from cephadmlib.daemons import nfs + with with_cephadm_ctx([]) as ctx: - nfsg = _cephadm.NFSGanesha( + nfsg = nfs.NFSGanesha( ctx, SAMPLE_UUID, "fred", good_nfs_json(), ) - with mock.patch("cephadm.call") as _call: + with mock.patch("cephadmlib.daemons.nfs.call") as _call: _call.return_value = ("NFS-Ganesha Release = V100", "", 0) ver = nfsg.get_version(ctx, "fake_version") _call.assert_called() From 643dd5d0f63985715d4b843218990ccca6187bb9 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Thu, 9 Nov 2023 13:02:20 -0500 Subject: [PATCH 16/29] cephadm: move monitoring class to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 336 +----------------- src/cephadm/cephadmlib/daemons/__init__.py | 2 + 
src/cephadm/cephadmlib/daemons/monitoring.py | 350 +++++++++++++++++++ src/cephadm/tests/test_cephadm.py | 18 +- src/cephadm/tests/test_deploy.py | 9 +- 5 files changed, 370 insertions(+), 345 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/monitoring.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index 75fe0104af582..ee8182afd1444 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -34,15 +34,9 @@ from cephadmlib.constants import ( # default images - DEFAULT_ALERT_MANAGER_IMAGE, - DEFAULT_GRAFANA_IMAGE, DEFAULT_IMAGE, DEFAULT_IMAGE_IS_MAIN, DEFAULT_IMAGE_RELEASE, - DEFAULT_LOKI_IMAGE, - DEFAULT_NODE_EXPORTER_IMAGE, - DEFAULT_PROMETHEUS_IMAGE, - DEFAULT_PROMTAIL_IMAGE, DEFAULT_SNMP_GATEWAY_IMAGE, # other constant values CEPH_CONF, @@ -174,6 +168,7 @@ CustomContainer, HAproxy, Keepalived, + Monitoring, NFSGanesha, Tracing, ) @@ -617,335 +612,6 @@ def customize_process_args( ) -> None: args.extend(self.get_daemon_args()) - -################################## -@register_daemon_form -class Monitoring(ContainerDaemonForm): - """Define the configs for the monitoring containers""" - - port_map = { - 'prometheus': [9095], # Avoid default 9090, due to conflict with cockpit UI - 'node-exporter': [9100], - 'grafana': [3000], - 'alertmanager': [9093, 9094], - 'loki': [3100], - 'promtail': [9080] - } - - components = { - 'prometheus': { - 'image': DEFAULT_PROMETHEUS_IMAGE, - 'cpus': '2', - 'memory': '4GB', - 'args': [ - '--config.file=/etc/prometheus/prometheus.yml', - '--storage.tsdb.path=/prometheus', - ], - 'config-json-files': [ - 'prometheus.yml', - ], - }, - 'loki': { - 'image': DEFAULT_LOKI_IMAGE, - 'cpus': '1', - 'memory': '1GB', - 'args': [ - '--config.file=/etc/loki/loki.yml', - ], - 'config-json-files': [ - 'loki.yml' - ], - }, - 'promtail': { - 'image': DEFAULT_PROMTAIL_IMAGE, - 'cpus': '1', - 'memory': '1GB', - 'args': [ - '--config.file=/etc/promtail/promtail.yml', - ], - 'config-json-files': [ - 'promtail.yml', - ], 
- }, - 'node-exporter': { - 'image': DEFAULT_NODE_EXPORTER_IMAGE, - 'cpus': '1', - 'memory': '1GB', - 'args': [ - '--no-collector.timex' - ], - }, - 'grafana': { - 'image': DEFAULT_GRAFANA_IMAGE, - 'cpus': '2', - 'memory': '4GB', - 'args': [], - 'config-json-files': [ - 'grafana.ini', - 'provisioning/datasources/ceph-dashboard.yml', - 'certs/cert_file', - 'certs/cert_key', - ], - }, - 'alertmanager': { - 'image': DEFAULT_ALERT_MANAGER_IMAGE, - 'cpus': '2', - 'memory': '2GB', - 'args': [ - '--cluster.listen-address=:{}'.format(port_map['alertmanager'][1]), - ], - 'config-json-files': [ - 'alertmanager.yml', - ], - 'config-json-args': [ - 'peers', - ], - }, - } # type: ignore - - @classmethod - def for_daemon_type(cls, daemon_type: str) -> bool: - return daemon_type in cls.components - - @staticmethod - def get_version(ctx, container_id, daemon_type): - # type: (CephadmContext, str, str) -> str - """ - :param: daemon_type Either "prometheus", "alertmanager", "loki", "promtail" or "node-exporter" - """ - assert daemon_type in ('prometheus', 'alertmanager', 'node-exporter', 'loki', 'promtail') - cmd = daemon_type.replace('-', '_') - code = -1 - err = '' - out = '' - version = '' - if daemon_type == 'alertmanager': - for cmd in ['alertmanager', 'prometheus-alertmanager']: - out, err, code = call(ctx, [ - ctx.container_engine.path, 'exec', container_id, cmd, - '--version' - ], verbosity=CallVerbosity.QUIET) - if code == 0: - break - cmd = 'alertmanager' # reset cmd for version extraction - else: - out, err, code = call(ctx, [ - ctx.container_engine.path, 'exec', container_id, cmd, '--version' - ], verbosity=CallVerbosity.QUIET) - if code == 0: - if err.startswith('%s, version ' % cmd): - version = err.split(' ')[2] - elif out.startswith('%s, version ' % cmd): - version = out.split(' ')[2] - return version - - @staticmethod - def extract_uid_gid( - ctx: CephadmContext, daemon_type: str - ) -> Tuple[int, int]: - if daemon_type == 'prometheus': - uid, gid = 
extract_uid_gid(ctx, file_path='/etc/prometheus') - elif daemon_type == 'node-exporter': - uid, gid = 65534, 65534 - elif daemon_type == 'grafana': - uid, gid = extract_uid_gid(ctx, file_path='/var/lib/grafana') - elif daemon_type == 'loki': - uid, gid = extract_uid_gid(ctx, file_path='/etc/loki') - elif daemon_type == 'promtail': - uid, gid = extract_uid_gid(ctx, file_path='/etc/promtail') - elif daemon_type == 'alertmanager': - uid, gid = extract_uid_gid( - ctx, file_path=['/etc/alertmanager', '/etc/prometheus'] - ) - else: - raise Error('{} not implemented yet'.format(daemon_type)) - return uid, gid - - def __init__(self, ctx: CephadmContext, ident: DaemonIdentity) -> None: - self.ctx = ctx - self._identity = ident - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Monitoring': - return cls(ctx, ident) - - @property - def identity(self) -> DaemonIdentity: - return self._identity - - def container(self, ctx: CephadmContext) -> CephContainer: - self._prevalidate(ctx) - ctr = daemon_to_container(ctx, self) - return to_deployment_container(ctx, ctr) - - def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: - return self.extract_uid_gid(ctx, self.identity.daemon_type) - - def _prevalidate(self, ctx: CephadmContext) -> None: - # before being refactored into a ContainerDaemonForm these checks were - # done inside the deploy function. 
This was the only "family" of daemons - # that performed these checks in that location - daemon_type = self.identity.daemon_type - config = fetch_configs(ctx) # type: ignore - required_files = self.components[daemon_type].get( - 'config-json-files', list() - ) - required_args = self.components[daemon_type].get( - 'config-json-args', list() - ) - if required_files: - if not config or not all(c in config.get('files', {}).keys() for c in required_files): # type: ignore - raise Error( - '{} deployment requires config-json which must ' - 'contain file content for {}'.format( - daemon_type.capitalize(), ', '.join(required_files) - ) - ) - if required_args: - if not config or not all(c in config.keys() for c in required_args): # type: ignore - raise Error( - '{} deployment requires config-json which must ' - 'contain arg for {}'.format( - daemon_type.capitalize(), ', '.join(required_args) - ) - ) - - def get_daemon_args(self) -> List[str]: - ctx = self.ctx - daemon_type = self.identity.daemon_type - metadata = self.components[daemon_type] - r = list(metadata.get('args', [])) - # set ip and port to bind to for nodeexporter,alertmanager,prometheus - if daemon_type not in ['grafana', 'loki', 'promtail']: - ip = '' - port = self.port_map[daemon_type][0] - meta = fetch_meta(ctx) - if meta: - if 'ip' in meta and meta['ip']: - ip = meta['ip'] - if 'ports' in meta and meta['ports']: - port = meta['ports'][0] - r += [f'--web.listen-address={ip}:{port}'] - if daemon_type == 'prometheus': - config = fetch_configs(ctx) - retention_time = config.get('retention_time', '15d') - retention_size = config.get('retention_size', '0') # default to disabled - r += [f'--storage.tsdb.retention.time={retention_time}'] - r += [f'--storage.tsdb.retention.size={retention_size}'] - scheme = 'http' - host = get_fqdn() - # in case host is not an fqdn then we use the IP to - # avoid producing a broken web.external-url link - if '.' 
not in host: - ipv4_addrs, ipv6_addrs = get_ip_addresses(get_hostname()) - # use the first ipv4 (if any) otherwise use the first ipv6 - addr = next(iter(ipv4_addrs or ipv6_addrs), None) - host = wrap_ipv6(addr) if addr else host - r += [f'--web.external-url={scheme}://{host}:{port}'] - if daemon_type == 'alertmanager': - config = fetch_configs(ctx) - peers = config.get('peers', list()) # type: ignore - for peer in peers: - r += ['--cluster.peer={}'.format(peer)] - try: - r += [f'--web.config.file={config["web_config"]}'] - except KeyError: - pass - # some alertmanager, by default, look elsewhere for a config - r += ['--config.file=/etc/alertmanager/alertmanager.yml'] - if daemon_type == 'promtail': - r += ['--config.expand-env'] - if daemon_type == 'prometheus': - config = fetch_configs(ctx) - try: - r += [f'--web.config.file={config["web_config"]}'] - except KeyError: - pass - if daemon_type == 'node-exporter': - config = fetch_configs(ctx) - try: - r += [f'--web.config.file={config["web_config"]}'] - except KeyError: - pass - r += ['--path.procfs=/host/proc', - '--path.sysfs=/host/sys', - '--path.rootfs=/rootfs'] - return r - - def _get_container_mounts(self, data_dir: str) -> Dict[str, str]: - ctx = self.ctx - daemon_type = self.identity.daemon_type - mounts: Dict[str, str] = {} - log_dir = get_log_dir(self.identity.fsid, ctx.log_dir) - if daemon_type == 'prometheus': - mounts[ - os.path.join(data_dir, 'etc/prometheus') - ] = '/etc/prometheus:Z' - mounts[os.path.join(data_dir, 'data')] = '/prometheus:Z' - elif daemon_type == 'loki': - mounts[os.path.join(data_dir, 'etc/loki')] = '/etc/loki:Z' - mounts[os.path.join(data_dir, 'data')] = '/loki:Z' - elif daemon_type == 'promtail': - mounts[os.path.join(data_dir, 'etc/promtail')] = '/etc/promtail:Z' - mounts[log_dir] = '/var/log/ceph:z' - mounts[os.path.join(data_dir, 'data')] = '/promtail:Z' - elif daemon_type == 'node-exporter': - mounts[ - os.path.join(data_dir, 'etc/node-exporter') - ] = '/etc/node-exporter:Z' - 
mounts['/proc'] = '/host/proc:ro' - mounts['/sys'] = '/host/sys:ro' - mounts['/'] = '/rootfs:ro' - elif daemon_type == 'grafana': - mounts[ - os.path.join(data_dir, 'etc/grafana/grafana.ini') - ] = '/etc/grafana/grafana.ini:Z' - mounts[ - os.path.join(data_dir, 'etc/grafana/provisioning/datasources') - ] = '/etc/grafana/provisioning/datasources:Z' - mounts[ - os.path.join(data_dir, 'etc/grafana/certs') - ] = '/etc/grafana/certs:Z' - mounts[ - os.path.join(data_dir, 'data/grafana.db') - ] = '/var/lib/grafana/grafana.db:Z' - elif daemon_type == 'alertmanager': - mounts[ - os.path.join(data_dir, 'etc/alertmanager') - ] = '/etc/alertmanager:Z' - return mounts - - def customize_container_mounts( - self, ctx: CephadmContext, mounts: Dict[str, str] - ) -> None: - data_dir = self.identity.data_dir(ctx.data_dir) - mounts.update(self._get_container_mounts(data_dir)) - - def customize_container_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - uid, _ = self.uid_gid(ctx) - monitoring_args = [ - '--user', - str(uid), - # FIXME: disable cpu/memory limits for the time being (not supported - # by ubuntu 18.04 kernel!) 
- ] - args.extend(monitoring_args) - if self.identity.daemon_type == 'node-exporter': - # in order to support setting '--path.procfs=/host/proc','--path.sysfs=/host/sys', - # '--path.rootfs=/rootfs' for node-exporter we need to disable selinux separation - # between the node-exporter container and the host to avoid selinux denials - args.extend(['--security-opt', 'label=disable']) - - def customize_process_args( - self, ctx: CephadmContext, args: List[str] - ) -> None: - args.extend(self.get_daemon_args()) - - def default_entrypoint(self) -> str: - return '' - ################################## diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py index 96d337b0c660d..a163e4a382939 100644 --- a/src/cephadm/cephadmlib/daemons/__init__.py +++ b/src/cephadm/cephadmlib/daemons/__init__.py @@ -4,6 +4,7 @@ from .nvmeof import CephNvmeof from .iscsi import CephIscsi from .nfs import NFSGanesha +from .monitoring import Monitoring __all__ = [ 'CephIscsi', @@ -11,6 +12,7 @@ 'CustomContainer', 'HAproxy', 'Keepalived', + 'Monitoring', 'NFSGanesha', 'Tracing', ] diff --git a/src/cephadm/cephadmlib/daemons/monitoring.py b/src/cephadm/cephadmlib/daemons/monitoring.py new file mode 100644 index 0000000000000..405dafc6dfccd --- /dev/null +++ b/src/cephadm/cephadmlib/daemons/monitoring.py @@ -0,0 +1,350 @@ +import os + +from typing import Dict, List, Tuple + +from ..call_wrappers import call, CallVerbosity +from ..constants import ( + DEFAULT_ALERT_MANAGER_IMAGE, + DEFAULT_GRAFANA_IMAGE, + DEFAULT_LOKI_IMAGE, + DEFAULT_NODE_EXPORTER_IMAGE, + DEFAULT_PROMETHEUS_IMAGE, + DEFAULT_PROMTAIL_IMAGE, +) +from ..container_daemon_form import ContainerDaemonForm, daemon_to_container +from ..container_types import CephContainer, extract_uid_gid +from ..context import CephadmContext +from ..context_getters import fetch_configs, fetch_meta +from ..daemon_form import register as register_daemon_form +from ..daemon_identity import DaemonIdentity +from 
..deployment_utils import to_deployment_container +from ..exceptions import Error +from ..net_utils import get_fqdn, get_hostname, get_ip_addresses, wrap_ipv6 + + +@register_daemon_form +class Monitoring(ContainerDaemonForm): + """Define the configs for the monitoring containers""" + + port_map = { + 'prometheus': [9095], # Avoid default 9090, due to conflict with cockpit UI + 'node-exporter': [9100], + 'grafana': [3000], + 'alertmanager': [9093, 9094], + 'loki': [3100], + 'promtail': [9080] + } + + components = { + 'prometheus': { + 'image': DEFAULT_PROMETHEUS_IMAGE, + 'cpus': '2', + 'memory': '4GB', + 'args': [ + '--config.file=/etc/prometheus/prometheus.yml', + '--storage.tsdb.path=/prometheus', + ], + 'config-json-files': [ + 'prometheus.yml', + ], + }, + 'loki': { + 'image': DEFAULT_LOKI_IMAGE, + 'cpus': '1', + 'memory': '1GB', + 'args': [ + '--config.file=/etc/loki/loki.yml', + ], + 'config-json-files': [ + 'loki.yml' + ], + }, + 'promtail': { + 'image': DEFAULT_PROMTAIL_IMAGE, + 'cpus': '1', + 'memory': '1GB', + 'args': [ + '--config.file=/etc/promtail/promtail.yml', + ], + 'config-json-files': [ + 'promtail.yml', + ], + }, + 'node-exporter': { + 'image': DEFAULT_NODE_EXPORTER_IMAGE, + 'cpus': '1', + 'memory': '1GB', + 'args': [ + '--no-collector.timex' + ], + }, + 'grafana': { + 'image': DEFAULT_GRAFANA_IMAGE, + 'cpus': '2', + 'memory': '4GB', + 'args': [], + 'config-json-files': [ + 'grafana.ini', + 'provisioning/datasources/ceph-dashboard.yml', + 'certs/cert_file', + 'certs/cert_key', + ], + }, + 'alertmanager': { + 'image': DEFAULT_ALERT_MANAGER_IMAGE, + 'cpus': '2', + 'memory': '2GB', + 'args': [ + '--cluster.listen-address=:{}'.format(port_map['alertmanager'][1]), + ], + 'config-json-files': [ + 'alertmanager.yml', + ], + 'config-json-args': [ + 'peers', + ], + }, + } # type: ignore + + @classmethod + def for_daemon_type(cls, daemon_type: str) -> bool: + return daemon_type in cls.components + + @staticmethod + def get_version(ctx, container_id, 
daemon_type): + # type: (CephadmContext, str, str) -> str + """ + :param: daemon_type Either "prometheus", "alertmanager", "loki", "promtail" or "node-exporter" + """ + assert daemon_type in ('prometheus', 'alertmanager', 'node-exporter', 'loki', 'promtail') + cmd = daemon_type.replace('-', '_') + code = -1 + err = '' + out = '' + version = '' + if daemon_type == 'alertmanager': + for cmd in ['alertmanager', 'prometheus-alertmanager']: + out, err, code = call(ctx, [ + ctx.container_engine.path, 'exec', container_id, cmd, + '--version' + ], verbosity=CallVerbosity.QUIET) + if code == 0: + break + cmd = 'alertmanager' # reset cmd for version extraction + else: + out, err, code = call(ctx, [ + ctx.container_engine.path, 'exec', container_id, cmd, '--version' + ], verbosity=CallVerbosity.QUIET) + if code == 0: + if err.startswith('%s, version ' % cmd): + version = err.split(' ')[2] + elif out.startswith('%s, version ' % cmd): + version = out.split(' ')[2] + return version + + @staticmethod + def extract_uid_gid( + ctx: CephadmContext, daemon_type: str + ) -> Tuple[int, int]: + if daemon_type == 'prometheus': + uid, gid = extract_uid_gid(ctx, file_path='/etc/prometheus') + elif daemon_type == 'node-exporter': + uid, gid = 65534, 65534 + elif daemon_type == 'grafana': + uid, gid = extract_uid_gid(ctx, file_path='/var/lib/grafana') + elif daemon_type == 'loki': + uid, gid = extract_uid_gid(ctx, file_path='/etc/loki') + elif daemon_type == 'promtail': + uid, gid = extract_uid_gid(ctx, file_path='/etc/promtail') + elif daemon_type == 'alertmanager': + uid, gid = extract_uid_gid( + ctx, file_path=['/etc/alertmanager', '/etc/prometheus'] + ) + else: + raise Error('{} not implemented yet'.format(daemon_type)) + return uid, gid + + def __init__(self, ctx: CephadmContext, ident: DaemonIdentity) -> None: + self.ctx = ctx + self._identity = ident + + @classmethod + def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Monitoring': + return cls(ctx, ident) + + @property + 
def identity(self) -> DaemonIdentity: + return self._identity + + def container(self, ctx: CephadmContext) -> CephContainer: + self._prevalidate(ctx) + ctr = daemon_to_container(ctx, self) + return to_deployment_container(ctx, ctr) + + def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]: + return self.extract_uid_gid(ctx, self.identity.daemon_type) + + def _prevalidate(self, ctx: CephadmContext) -> None: + # before being refactored into a ContainerDaemonForm these checks were + # done inside the deploy function. This was the only "family" of daemons + # that performed these checks in that location + daemon_type = self.identity.daemon_type + config = fetch_configs(ctx) # type: ignore + required_files = self.components[daemon_type].get( + 'config-json-files', list() + ) + required_args = self.components[daemon_type].get( + 'config-json-args', list() + ) + if required_files: + if not config or not all(c in config.get('files', {}).keys() for c in required_files): # type: ignore + raise Error( + '{} deployment requires config-json which must ' + 'contain file content for {}'.format( + daemon_type.capitalize(), ', '.join(required_files) + ) + ) + if required_args: + if not config or not all(c in config.keys() for c in required_args): # type: ignore + raise Error( + '{} deployment requires config-json which must ' + 'contain arg for {}'.format( + daemon_type.capitalize(), ', '.join(required_args) + ) + ) + + def get_daemon_args(self) -> List[str]: + ctx = self.ctx + daemon_type = self.identity.daemon_type + metadata = self.components[daemon_type] + r = list(metadata.get('args', [])) + # set ip and port to bind to for nodeexporter,alertmanager,prometheus + if daemon_type not in ['grafana', 'loki', 'promtail']: + ip = '' + port = self.port_map[daemon_type][0] + meta = fetch_meta(ctx) + if meta: + if 'ip' in meta and meta['ip']: + ip = meta['ip'] + if 'ports' in meta and meta['ports']: + port = meta['ports'][0] + r += [f'--web.listen-address={ip}:{port}'] + if 
daemon_type == 'prometheus': + config = fetch_configs(ctx) + retention_time = config.get('retention_time', '15d') + retention_size = config.get('retention_size', '0') # default to disabled + r += [f'--storage.tsdb.retention.time={retention_time}'] + r += [f'--storage.tsdb.retention.size={retention_size}'] + scheme = 'http' + host = get_fqdn() + # in case host is not an fqdn then we use the IP to + # avoid producing a broken web.external-url link + if '.' not in host: + ipv4_addrs, ipv6_addrs = get_ip_addresses(get_hostname()) + # use the first ipv4 (if any) otherwise use the first ipv6 + addr = next(iter(ipv4_addrs or ipv6_addrs), None) + host = wrap_ipv6(addr) if addr else host + r += [f'--web.external-url={scheme}://{host}:{port}'] + if daemon_type == 'alertmanager': + config = fetch_configs(ctx) + peers = config.get('peers', list()) # type: ignore + for peer in peers: + r += ['--cluster.peer={}'.format(peer)] + try: + r += [f'--web.config.file={config["web_config"]}'] + except KeyError: + pass + # some alertmanager, by default, look elsewhere for a config + r += ['--config.file=/etc/alertmanager/alertmanager.yml'] + if daemon_type == 'promtail': + r += ['--config.expand-env'] + if daemon_type == 'prometheus': + config = fetch_configs(ctx) + try: + r += [f'--web.config.file={config["web_config"]}'] + except KeyError: + pass + if daemon_type == 'node-exporter': + config = fetch_configs(ctx) + try: + r += [f'--web.config.file={config["web_config"]}'] + except KeyError: + pass + r += ['--path.procfs=/host/proc', + '--path.sysfs=/host/sys', + '--path.rootfs=/rootfs'] + return r + + def _get_container_mounts(self, data_dir: str) -> Dict[str, str]: + ctx = self.ctx + daemon_type = self.identity.daemon_type + mounts: Dict[str, str] = {} + log_dir = os.path.join(ctx.log_dir, self.identity.fsid) + if daemon_type == 'prometheus': + mounts[ + os.path.join(data_dir, 'etc/prometheus') + ] = '/etc/prometheus:Z' + mounts[os.path.join(data_dir, 'data')] = '/prometheus:Z' + elif 
daemon_type == 'loki': + mounts[os.path.join(data_dir, 'etc/loki')] = '/etc/loki:Z' + mounts[os.path.join(data_dir, 'data')] = '/loki:Z' + elif daemon_type == 'promtail': + mounts[os.path.join(data_dir, 'etc/promtail')] = '/etc/promtail:Z' + mounts[log_dir] = '/var/log/ceph:z' + mounts[os.path.join(data_dir, 'data')] = '/promtail:Z' + elif daemon_type == 'node-exporter': + mounts[ + os.path.join(data_dir, 'etc/node-exporter') + ] = '/etc/node-exporter:Z' + mounts['/proc'] = '/host/proc:ro' + mounts['/sys'] = '/host/sys:ro' + mounts['/'] = '/rootfs:ro' + elif daemon_type == 'grafana': + mounts[ + os.path.join(data_dir, 'etc/grafana/grafana.ini') + ] = '/etc/grafana/grafana.ini:Z' + mounts[ + os.path.join(data_dir, 'etc/grafana/provisioning/datasources') + ] = '/etc/grafana/provisioning/datasources:Z' + mounts[ + os.path.join(data_dir, 'etc/grafana/certs') + ] = '/etc/grafana/certs:Z' + mounts[ + os.path.join(data_dir, 'data/grafana.db') + ] = '/var/lib/grafana/grafana.db:Z' + elif daemon_type == 'alertmanager': + mounts[ + os.path.join(data_dir, 'etc/alertmanager') + ] = '/etc/alertmanager:Z' + return mounts + + def customize_container_mounts( + self, ctx: CephadmContext, mounts: Dict[str, str] + ) -> None: + data_dir = self.identity.data_dir(ctx.data_dir) + mounts.update(self._get_container_mounts(data_dir)) + + def customize_container_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + uid, _ = self.uid_gid(ctx) + monitoring_args = [ + '--user', + str(uid), + # FIXME: disable cpu/memory limits for the time being (not supported + # by ubuntu 18.04 kernel!) 
+ ] + args.extend(monitoring_args) + if self.identity.daemon_type == 'node-exporter': + # in order to support setting '--path.procfs=/host/proc','--path.sysfs=/host/sys', + # '--path.rootfs=/rootfs' for node-exporter we need to disable selinux separation + # between the node-exporter container and the host to avoid selinux denials + args.extend(['--security-opt', 'label=disable']) + + def customize_process_args( + self, ctx: CephadmContext, args: List[str] + ) -> None: + args.extend(self.get_daemon_args()) + + def default_entrypoint(self) -> str: + return '' diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py index 899272cb4143a..b2e395fab64ed 100644 --- a/src/cephadm/tests/test_cephadm.py +++ b/src/cephadm/tests/test_cephadm.py @@ -1211,15 +1211,17 @@ def test_exit_failure_2(self, _target_exists, _target_state, _logger, _call, _li class TestMonitoring(object): - @mock.patch('cephadm.call') + @mock.patch('cephadmlib.daemons.monitoring.call') def test_get_version_alertmanager(self, _call): + from cephadmlib.daemons import monitoring + ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() daemon_type = 'alertmanager' # binary `prometheus` _call.return_value = '', '{}, version 0.16.1'.format(daemon_type), 0 - version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) + version = monitoring.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' # binary `prometheus-alertmanager` @@ -1230,13 +1232,15 @@ def test_get_version_alertmanager(self, _call): version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' - @mock.patch('cephadm.call') + @mock.patch('cephadmlib.daemons.monitoring.call') def test_get_version_prometheus(self, _call): + from cephadmlib.daemons import monitoring + ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() daemon_type = 'prometheus' _call.return_value = '', '{}, version 
0.16.1'.format(daemon_type), 0 - version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) + version = monitoring.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' def test_prometheus_external_url(self): @@ -1250,13 +1254,15 @@ def test_prometheus_external_url(self): ).get_daemon_args() assert any([x.startswith('--web.external-url=http://') for x in args]) - @mock.patch('cephadm.call') + @mock.patch('cephadmlib.daemons.monitoring.call') def test_get_version_node_exporter(self, _call): + from cephadmlib.daemons import monitoring + ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() daemon_type = 'node-exporter' _call.return_value = '', '{}, version 0.16.1'.format(daemon_type.replace('-', '_')), 0 - version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) + version = monitoring.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' def test_create_daemon_dirs_prometheus(self, cephadm_fs): diff --git a/src/cephadm/tests/test_deploy.py b/src/cephadm/tests/test_deploy.py index 9d82b2055b918..5d5b46ad6954c 100644 --- a/src/cephadm/tests/test_deploy.py +++ b/src/cephadm/tests/test_deploy.py @@ -9,6 +9,7 @@ mock_podman, with_cephadm_ctx, FunkyPatcher, + funkypatch, ) @@ -290,11 +291,11 @@ def test_deploy_nvmeof_container(cephadm_fs, monkeypatch): assert (si.st_uid, si.st_gid) == (167, 167) -def test_deploy_a_monitoring_container(cephadm_fs, monkeypatch): - mocks = _common_mp(monkeypatch) +def test_deploy_a_monitoring_container(cephadm_fs, funkypatch): + mocks = _common_patches(funkypatch) _firewalld = mocks['Firewalld'] - _get_ip_addresses = mock.MagicMock(return_value=(['10.10.10.10'], [])) - monkeypatch.setattr('cephadm.get_ip_addresses', _get_ip_addresses) + _get_ip_addresses = funkypatch.patch('cephadm.get_ip_addresses') + _get_ip_addresses.return_value = (['10.10.10.10'], []) fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7' with with_cephadm_ctx([]) as 
ctx: ctx.container_engine = mock_podman() From 63a14afb5c9010bfa29847ac2c560b9551a5b3a2 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Thu, 9 Nov 2023 13:11:21 -0500 Subject: [PATCH 17/29] cephadm: move snmp class to a new file Signed-off-by: John Mulligan --- src/cephadm/cephadm.py | 177 +------------------ src/cephadm/cephadmlib/daemons/__init__.py | 2 + src/cephadm/cephadmlib/daemons/snmp.py | 189 +++++++++++++++++++++ 3 files changed, 192 insertions(+), 176 deletions(-) create mode 100644 src/cephadm/cephadmlib/daemons/snmp.py diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py index ee8182afd1444..61d6633a0a3de 100755 --- a/src/cephadm/cephadm.py +++ b/src/cephadm/cephadm.py @@ -28,7 +28,6 @@ from glob import glob from io import StringIO from threading import Thread, Event -from urllib.error import HTTPError, URLError from urllib.request import urlopen, Request from pathlib import Path @@ -37,7 +36,6 @@ DEFAULT_IMAGE, DEFAULT_IMAGE_IS_MAIN, DEFAULT_IMAGE_RELEASE, - DEFAULT_SNMP_GATEWAY_IMAGE, # other constant values CEPH_CONF, CEPH_CONF_DIR, @@ -170,6 +168,7 @@ Keepalived, Monitoring, NFSGanesha, + SNMPGateway, Tracing, ) @@ -438,180 +437,6 @@ def firewall_service_name(self) -> str: def osd_fsid(self) -> Optional[str]: return self._osd_fsid - -################################## - - -@register_daemon_form -class SNMPGateway(ContainerDaemonForm): - """Defines an SNMP gateway between Prometheus and SNMP monitoring Frameworks""" - daemon_type = 'snmp-gateway' - SUPPORTED_VERSIONS = ['V2c', 'V3'] - default_image = DEFAULT_SNMP_GATEWAY_IMAGE - DEFAULT_PORT = 9464 - env_filename = 'snmp-gateway.conf' - - @classmethod - def for_daemon_type(cls, daemon_type: str) -> bool: - return cls.daemon_type == daemon_type - - def __init__(self, - ctx: CephadmContext, - fsid: str, - daemon_id: Union[int, str], - config_json: Dict[str, Any], - image: Optional[str] = None) -> None: - self.ctx = ctx - self.fsid = fsid - self.daemon_id = daemon_id - self.image = image or 
SNMPGateway.default_image - - self.uid = config_json.get('uid', 0) - self.gid = config_json.get('gid', 0) - - self.destination = config_json.get('destination', '') - self.snmp_version = config_json.get('snmp_version', 'V2c') - self.snmp_community = config_json.get('snmp_community', 'public') - self.log_level = config_json.get('log_level', 'info') - self.snmp_v3_auth_username = config_json.get('snmp_v3_auth_username', '') - self.snmp_v3_auth_password = config_json.get('snmp_v3_auth_password', '') - self.snmp_v3_auth_protocol = config_json.get('snmp_v3_auth_protocol', '') - self.snmp_v3_priv_protocol = config_json.get('snmp_v3_priv_protocol', '') - self.snmp_v3_priv_password = config_json.get('snmp_v3_priv_password', '') - self.snmp_v3_engine_id = config_json.get('snmp_v3_engine_id', '') - - self.validate() - - @classmethod - def init(cls, ctx: CephadmContext, fsid: str, - daemon_id: Union[int, str]) -> 'SNMPGateway': - cfgs = fetch_configs(ctx) - assert cfgs # assert some config data was found - return cls(ctx, fsid, daemon_id, cfgs, ctx.image) - - @classmethod - def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'SNMPGateway': - return cls.init(ctx, ident.fsid, ident.daemon_id) - - @property - def identity(self) -> DaemonIdentity: - return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id) - - @staticmethod - def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]: - """Return the version of the notifier from it's http endpoint""" - path = os.path.join(ctx.data_dir, fsid, f'snmp-gateway.{daemon_id}', 'unit.meta') - try: - with open(path, 'r') as env: - metadata = json.loads(env.read()) - except (OSError, json.JSONDecodeError): - return None - - ports = metadata.get('ports', []) - if not ports: - return None - - try: - with urlopen(f'http://127.0.0.1:{ports[0]}/') as r: - html = r.read().decode('utf-8').split('\n') - except (HTTPError, URLError): - return None - - for h in html: - stripped = h.strip() - if 
stripped.startswith(('<pre>', '<p>')) and \
-               stripped.endswith(('</pre>', '</p>')):
-                # <pre>(version=1.2.1, branch=HEAD, revision=7...
-                return stripped.split(',')[0].split('version=')[1]
-
-        return None
-
-    @property
-    def port(self) -> int:
-        endpoints = fetch_endpoints(self.ctx)
-        if not endpoints:
-            return self.DEFAULT_PORT
-        return endpoints[0].port
-
-    def get_daemon_args(self) -> List[str]:
-        v3_args = []
-        base_args = [
-            f'--web.listen-address=:{self.port}',
-            f'--snmp.destination={self.destination}',
-            f'--snmp.version={self.snmp_version}',
-            f'--log.level={self.log_level}',
-            '--snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl'
-        ]
-
-        if self.snmp_version == 'V3':
-            # common auth settings
-            v3_args.extend([
-                '--snmp.authentication-enabled',
-                f'--snmp.authentication-protocol={self.snmp_v3_auth_protocol}',
-                f'--snmp.security-engine-id={self.snmp_v3_engine_id}'
-            ])
-            # authPriv setting is applied if we have a privacy protocol setting
-            if self.snmp_v3_priv_protocol:
-                v3_args.extend([
-                    '--snmp.private-enabled',
-                    f'--snmp.private-protocol={self.snmp_v3_priv_protocol}'
-                ])
-
-        return base_args + v3_args
-
-    @property
-    def data_dir(self) -> str:
-        return os.path.join(self.ctx.data_dir, self.ctx.fsid, f'{self.daemon_type}.{self.daemon_id}')
-
-    @property
-    def conf_file_path(self) -> str:
-        return os.path.join(self.data_dir, self.env_filename)
-
-    def create_daemon_conf(self) -> None:
-        """Creates the environment file holding 'secrets' passed to the snmp-notifier daemon"""
-        with write_new(self.conf_file_path) as f:
-            if self.snmp_version == 'V2c':
-                f.write(f'SNMP_NOTIFIER_COMMUNITY={self.snmp_community}\n')
-            else:
-                f.write(f'SNMP_NOTIFIER_AUTH_USERNAME={self.snmp_v3_auth_username}\n')
-                f.write(f'SNMP_NOTIFIER_AUTH_PASSWORD={self.snmp_v3_auth_password}\n')
-                if self.snmp_v3_priv_password:
-                    f.write(f'SNMP_NOTIFIER_PRIV_PASSWORD={self.snmp_v3_priv_password}\n')
-
-    def validate(self) -> None:
-        """Validate the settings
-
-        Raises:
-            Error: if the fsid doesn't look like an fsid
-            Error: if the snmp version is not supported
-            Error: destination IP and port address missing
-        """
-        if not is_fsid(self.fsid):
-            raise Error(f'not a valid fsid: {self.fsid}')
-
-        if self.snmp_version not in SNMPGateway.SUPPORTED_VERSIONS:
-            raise Error(f'not a valid snmp version: {self.snmp_version}')
-
-        if not self.destination:
-            raise Error('config is missing destination attribute(:) of the target SNMP listener')
-
-    def container(self, ctx: CephadmContext) -> CephContainer:
-        ctr = daemon_to_container(ctx, self)
-        return to_deployment_container(ctx, ctr)
-
-    def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
-        return self.uid, self.gid
-
-    def customize_container_args(
-        self, ctx: CephadmContext, args: List[str]
-    ) -> None:
-        args.append(f'--env-file={self.conf_file_path}')
-
-    def customize_process_args(
-        self, ctx: CephadmContext, args: List[str]
-    ) -> None:
-        args.extend(self.get_daemon_args())
-
 ##################################
 
 
diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py
index a163e4a382939..d2c818e927d52 100644
--- a/src/cephadm/cephadmlib/daemons/__init__.py
+++ b/src/cephadm/cephadmlib/daemons/__init__.py
@@ -5,6 +5,7 @@
 from .iscsi import CephIscsi
 from .nfs import NFSGanesha
 from .monitoring import Monitoring
+from .snmp import SNMPGateway
 
 __all__ = [
     'CephIscsi',
@@ -14,5 +15,6 @@
     'Keepalived',
     'Monitoring',
     'NFSGanesha',
+    'SNMPGateway',
     'Tracing',
 ]
diff --git a/src/cephadm/cephadmlib/daemons/snmp.py b/src/cephadm/cephadmlib/daemons/snmp.py
new file mode 100644
index 0000000000000..dc952aa4cb9d5
--- /dev/null
+++ b/src/cephadm/cephadmlib/daemons/snmp.py
@@ -0,0 +1,189 @@
+import json
+import os
+
+from typing import Any, Dict, List, Optional, Tuple, Union
+from urllib.error import HTTPError, URLError
+from urllib.request import urlopen
+
+from ..constants import DEFAULT_SNMP_GATEWAY_IMAGE
+from ..container_daemon_form import ContainerDaemonForm, daemon_to_container
+from ..container_types import CephContainer
+from ..context import CephadmContext
+from ..context_getters import fetch_configs, fetch_endpoints
+from ..daemon_form import register as register_daemon_form
+from ..daemon_identity import DaemonIdentity
+from ..data_utils import is_fsid
+from ..deployment_utils import to_deployment_container
+from ..exceptions import Error
+from ..file_utils import write_new
+
+
+@register_daemon_form
+class SNMPGateway(ContainerDaemonForm):
+    """Defines an SNMP gateway between Prometheus and SNMP monitoring Frameworks"""
+    daemon_type = 'snmp-gateway'
+    SUPPORTED_VERSIONS = ['V2c', 'V3']
+    default_image = DEFAULT_SNMP_GATEWAY_IMAGE
+    DEFAULT_PORT = 9464
+    env_filename = 'snmp-gateway.conf'
+
+    @classmethod
+    def for_daemon_type(cls, daemon_type: str) -> bool:
+        return cls.daemon_type == daemon_type
+
+    def __init__(self,
+                 ctx: CephadmContext,
+                 fsid: str,
+                 daemon_id: Union[int, str],
+                 config_json: Dict[str, Any],
+                 image: Optional[str] = None) -> None:
+        self.ctx = ctx
+        self.fsid = fsid
+        self.daemon_id = daemon_id
+        self.image = image or SNMPGateway.default_image
+
+        self.uid = config_json.get('uid', 0)
+        self.gid = config_json.get('gid', 0)
+
+        self.destination = config_json.get('destination', '')
+        self.snmp_version = config_json.get('snmp_version', 'V2c')
+        self.snmp_community = config_json.get('snmp_community', 'public')
+        self.log_level = config_json.get('log_level', 'info')
+        self.snmp_v3_auth_username = config_json.get('snmp_v3_auth_username', '')
+        self.snmp_v3_auth_password = config_json.get('snmp_v3_auth_password', '')
+        self.snmp_v3_auth_protocol = config_json.get('snmp_v3_auth_protocol', '')
+        self.snmp_v3_priv_protocol = config_json.get('snmp_v3_priv_protocol', '')
+        self.snmp_v3_priv_password = config_json.get('snmp_v3_priv_password', '')
+        self.snmp_v3_engine_id = config_json.get('snmp_v3_engine_id', '')
+
+        self.validate()
+
+    @classmethod
+    def init(cls, ctx: CephadmContext, fsid: str,
+             daemon_id: Union[int, str]) -> 'SNMPGateway':
+        cfgs = fetch_configs(ctx)
+        assert cfgs  # assert some config data was found
+        return cls(ctx, fsid, daemon_id, cfgs, ctx.image)
+
+    @classmethod
+    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'SNMPGateway':
+        return cls.init(ctx, ident.fsid, ident.daemon_id)
+
+    @property
+    def identity(self) -> DaemonIdentity:
+        return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id)
+
+    @staticmethod
+    def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]:
+        """Return the version of the notifier from its http endpoint"""
+        path = os.path.join(ctx.data_dir, fsid, f'snmp-gateway.{daemon_id}', 'unit.meta')
+        try:
+            with open(path, 'r') as env:
+                metadata = json.loads(env.read())
+        except (OSError, json.JSONDecodeError):
+            return None
+
+        ports = metadata.get('ports', [])
+        if not ports:
+            return None
+
+        try:
+            with urlopen(f'http://127.0.0.1:{ports[0]}/') as r:
+                html = r.read().decode('utf-8').split('\n')
+        except (HTTPError, URLError):
+            return None
+
+        for h in html:
+            stripped = h.strip()
+            if stripped.startswith(('<pre>', '<PRE>')) and \
+               stripped.endswith(('</pre>', '</PRE>')):
+                # <pre>(version=1.2.1, branch=HEAD, revision=7...
+                return stripped.split(',')[0].split('version=')[1]
+
+        return None
+
+    @property
+    def port(self) -> int:
+        endpoints = fetch_endpoints(self.ctx)
+        if not endpoints:
+            return self.DEFAULT_PORT
+        return endpoints[0].port
+
+    def get_daemon_args(self) -> List[str]:
+        v3_args = []
+        base_args = [
+            f'--web.listen-address=:{self.port}',
+            f'--snmp.destination={self.destination}',
+            f'--snmp.version={self.snmp_version}',
+            f'--log.level={self.log_level}',
+            '--snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl'
+        ]
+
+        if self.snmp_version == 'V3':
+            # common auth settings
+            v3_args.extend([
+                '--snmp.authentication-enabled',
+                f'--snmp.authentication-protocol={self.snmp_v3_auth_protocol}',
+                f'--snmp.security-engine-id={self.snmp_v3_engine_id}'
+            ])
+            # authPriv setting is applied if we have a privacy protocol setting
+            if self.snmp_v3_priv_protocol:
+                v3_args.extend([
+                    '--snmp.private-enabled',
+                    f'--snmp.private-protocol={self.snmp_v3_priv_protocol}'
+                ])
+
+        return base_args + v3_args
+
+    @property
+    def data_dir(self) -> str:
+        return os.path.join(self.ctx.data_dir, self.ctx.fsid, f'{self.daemon_type}.{self.daemon_id}')
+
+    @property
+    def conf_file_path(self) -> str:
+        return os.path.join(self.data_dir, self.env_filename)
+
+    def create_daemon_conf(self) -> None:
+        """Creates the environment file holding 'secrets' passed to the snmp-notifier daemon"""
+        with write_new(self.conf_file_path) as f:
+            if self.snmp_version == 'V2c':
+                f.write(f'SNMP_NOTIFIER_COMMUNITY={self.snmp_community}\n')
+            else:
+                f.write(f'SNMP_NOTIFIER_AUTH_USERNAME={self.snmp_v3_auth_username}\n')
+                f.write(f'SNMP_NOTIFIER_AUTH_PASSWORD={self.snmp_v3_auth_password}\n')
+                if self.snmp_v3_priv_password:
+                    f.write(f'SNMP_NOTIFIER_PRIV_PASSWORD={self.snmp_v3_priv_password}\n')
+
+    def validate(self) -> None:
+        """Validate the settings
+
+        Raises:
+            Error: if the fsid doesn't look like an fsid
+            Error: if the snmp version is not supported
+            Error: destination IP and port address missing
+        """
+        if not is_fsid(self.fsid):
+            raise Error(f'not a valid fsid: {self.fsid}')
+
+        if self.snmp_version not in SNMPGateway.SUPPORTED_VERSIONS:
+            raise Error(f'not a valid snmp version: {self.snmp_version}')
+
+        if not self.destination:
+            raise Error('config is missing destination attribute(:) of the target SNMP listener')
+
+    def container(self, ctx: CephadmContext) -> CephContainer:
+        ctr = daemon_to_container(ctx, self)
+        return to_deployment_container(ctx, ctr)
+
+    def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
+        return self.uid, self.gid
+
+    def customize_container_args(
+        self, ctx: CephadmContext, args: List[str]
+    ) -> None:
+        args.append(f'--env-file={self.conf_file_path}')
+
+    def customize_process_args(
+        self, ctx: CephadmContext, args: List[str]
+    ) -> None:
+        args.extend(self.get_daemon_args())

From 31ba507cc7046f68e33683747a1a9704e8588588 Mon Sep 17 00:00:00 2001
From: John Mulligan <jmulligan@redhat.com>
Date: Thu, 9 Nov 2023 13:46:45 -0500
Subject: [PATCH 18/29] cephadm: move ceph classes to a new file

Move the ceph classes (Ceph, OSD, CephExporter) along with a few heavily
linked functions to a new ceph.py file under the daemons dir.

Signed-off-by: John Mulligan <jmulligan@redhat.com>
---
 src/cephadm/cephadm.py                     | 419 +-------------------
 src/cephadm/cephadmlib/daemons/__init__.py |   4 +
 src/cephadm/cephadmlib/daemons/ceph.py     | 431 +++++++++++++++++++++
 src/cephadm/tests/test_cephadm.py          |   3 +-
 src/cephadm/tests/test_daemon_form.py      |   3 +-
 src/cephadm/tests/test_deploy.py           |  39 +-
 6 files changed, 460 insertions(+), 439 deletions(-)
 create mode 100644 src/cephadm/cephadmlib/daemons/ceph.py

diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py
index 61d6633a0a3de..07155da5dcf7b 100755
--- a/src/cephadm/cephadm.py
+++ b/src/cephadm/cephadm.py
@@ -67,7 +67,6 @@
     get_config_and_keyring,
     get_parm,
     read_configuration_source,
-    should_log_to_journald,
 )
 from cephadmlib.exceptions import (
     ClusterAlreadyExists,
@@ -117,7 +116,6 @@
     check_subnet,
     get_fqdn,
     get_hostname,
-    get_ip_addresses,
     get_short_hostname,
     ip_in_subnets,
     is_ipv6,
@@ -159,8 +157,9 @@
 from cephadmlib.sysctl import install_sysctl, migrate_sysctl_dir
 from cephadmlib.firewalld import Firewalld, update_firewalld
 from cephadmlib import templating
-from cephadmlib.deployment_utils import to_deployment_container
+from cephadmlib.daemons.ceph import get_ceph_mounts_for_type, ceph_daemons
 from cephadmlib.daemons import (
+    Ceph,
     CephIscsi,
     CephNvmeof,
     CustomContainer,
@@ -206,344 +205,6 @@ def __eq__(self, other: Any) -> bool:
 ##################################
 
 
-@register_daemon_form
-class Ceph(ContainerDaemonForm):
-    _daemons = ('mon', 'mgr', 'osd', 'mds', 'rgw', 'rbd-mirror',
-                'crash', 'cephfs-mirror')
-
-    @classmethod
-    def for_daemon_type(cls, daemon_type: str) -> bool:
-        # TODO: figure out a way to un-special-case osd
-        return daemon_type in cls._daemons and daemon_type != 'osd'
-
-    def __init__(self, ctx: CephadmContext, ident: DaemonIdentity) -> None:
-        self.ctx = ctx
-        self._identity = ident
-        self.user_supplied_config = False
-
-    @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Ceph':
-        return cls(ctx, ident)
-
-    @property
-    def identity(self) -> DaemonIdentity:
-        return self._identity
-
-    def firewall_service_name(self) -> str:
-        if self.identity.daemon_type == 'mon':
-            return 'ceph-mon'
-        elif self.identity.daemon_type in ['mgr', 'mds']:
-            return 'ceph'
-        return ''
-
-    def container(self, ctx: CephadmContext) -> CephContainer:
-        # previous to being a ContainerDaemonForm, this make_var_run
-        # call was hard coded in the deploy path. Eventually, it would be
-        # good to move this somwhere cleaner and avoid needing to know the
-        # uid/gid here.
-        uid, gid = self.uid_gid(ctx)
-        make_var_run(ctx, ctx.fsid, uid, gid)
-
-        # mon and osd need privileged in order for libudev to query devices
-        privileged = self.identity.daemon_type in ['mon', 'osd']
-        ctr = daemon_to_container(ctx, self, privileged=privileged)
-        ctr = to_deployment_container(ctx, ctr)
-        config_json = fetch_configs(ctx)
-        if self.identity.daemon_type == 'mon' and config_json is not None:
-            if 'crush_location' in config_json:
-                c_loc = config_json['crush_location']
-                # was originally "c.args.extend(['--set-crush-location', c_loc])"
-                # but that doesn't seem to persist in the object after it's passed
-                # in further function calls
-                ctr.args = ctr.args + ['--set-crush-location', c_loc]
-        return ctr
-
-    _uid_gid: Optional[Tuple[int, int]] = None
-
-    def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
-        if self._uid_gid is None:
-            self._uid_gid = extract_uid_gid(ctx)
-        return self._uid_gid
-
-    def config_and_keyring(
-        self, ctx: CephadmContext
-    ) -> Tuple[Optional[str], Optional[str]]:
-        return get_config_and_keyring(ctx)
-
-    def get_daemon_args(self) -> List[str]:
-        if self.identity.daemon_type == 'crash':
-            return []
-        r = [
-            '--setuser', 'ceph',
-            '--setgroup', 'ceph',
-            '--default-log-to-file=false',
-        ]
-        log_to_journald = should_log_to_journald(self.ctx)
-        if log_to_journald:
-            r += [
-                '--default-log-to-journald=true',
-                '--default-log-to-stderr=false',
-            ]
-        else:
-            r += [
-                '--default-log-to-stderr=true',
-                '--default-log-stderr-prefix=debug ',
-            ]
-        if self.identity.daemon_type == 'mon':
-            r += [
-                '--default-mon-cluster-log-to-file=false',
-            ]
-            if log_to_journald:
-                r += [
-                    '--default-mon-cluster-log-to-journald=true',
-                    '--default-mon-cluster-log-to-stderr=false',
-                ]
-            else:
-                r += ['--default-mon-cluster-log-to-stderr=true']
-        return r
-
-    @staticmethod
-    def get_ceph_mounts(
-        ctx: CephadmContext,
-        ident: DaemonIdentity,
-        no_config: bool = False,
-    ) -> Dict[str, str]:
-        # Warning: This is a hack done for more expedient refactoring
-        mounts = _get_container_mounts_for_type(
-            ctx, ident.fsid, ident.daemon_type
-        )
-        data_dir = ident.data_dir(ctx.data_dir)
-        if ident.daemon_type == 'rgw':
-            cdata_dir = '/var/lib/ceph/radosgw/ceph-rgw.%s' % (
-                ident.daemon_id
-            )
-        else:
-            cdata_dir = '/var/lib/ceph/%s/ceph-%s' % (
-                ident.daemon_type,
-                ident.daemon_id,
-            )
-        if ident.daemon_type != 'crash':
-            mounts[data_dir] = cdata_dir + ':z'
-        if not no_config:
-            mounts[data_dir + '/config'] = '/etc/ceph/ceph.conf:z'
-        if ident.daemon_type in [
-            'rbd-mirror',
-            'cephfs-mirror',
-            'crash',
-            'ceph-exporter',
-        ]:
-            # these do not search for their keyrings in a data directory
-            mounts[
-                data_dir + '/keyring'
-            ] = '/etc/ceph/ceph.client.%s.%s.keyring' % (
-                ident.daemon_type,
-                ident.daemon_id,
-            )
-        return mounts
-
-    def customize_container_mounts(
-        self, ctx: CephadmContext, mounts: Dict[str, str]
-    ) -> None:
-        no_config = bool(
-            getattr(ctx, 'config', None) and self.user_supplied_config
-        )
-        cm = self.get_ceph_mounts(
-            ctx,
-            self.identity,
-            no_config=no_config,
-        )
-        mounts.update(cm)
-
-    def customize_container_args(
-        self, ctx: CephadmContext, args: List[str]
-    ) -> None:
-        args.append(ctx.container_engine.unlimited_pids_option)
-
-    def customize_process_args(
-        self, ctx: CephadmContext, args: List[str]
-    ) -> None:
-        ident = self.identity
-        if ident.daemon_type == 'rgw':
-            name = 'client.rgw.%s' % ident.daemon_id
-        elif ident.daemon_type == 'rbd-mirror':
-            name = 'client.rbd-mirror.%s' % ident.daemon_id
-        elif ident.daemon_type == 'cephfs-mirror':
-            name = 'client.cephfs-mirror.%s' % ident.daemon_id
-        elif ident.daemon_type == 'crash':
-            name = 'client.crash.%s' % ident.daemon_id
-        elif ident.daemon_type in ['mon', 'mgr', 'mds', 'osd']:
-            name = ident.daemon_name
-        else:
-            raise ValueError(ident)
-        args.extend(['-n', name])
-        if ident.daemon_type != 'crash':
-            args.append('-f')
-        args.extend(self.get_daemon_args())
-
-    def customize_container_envs(
-        self, ctx: CephadmContext, envs: List[str]
-    ) -> None:
-        envs.append('TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728')
-
-    def default_entrypoint(self) -> str:
-        ep = {
-            'rgw': '/usr/bin/radosgw',
-            'rbd-mirror': '/usr/bin/rbd-mirror',
-            'cephfs-mirror': '/usr/bin/cephfs-mirror',
-        }
-        daemon_type = self.identity.daemon_type
-        return ep.get(daemon_type) or f'/usr/bin/ceph-{daemon_type}'
-
-##################################
-
-
-@register_daemon_form
-class OSD(Ceph):
-    @classmethod
-    def for_daemon_type(cls, daemon_type: str) -> bool:
-        # TODO: figure out a way to un-special-case osd
-        return daemon_type == 'osd'
-
-    def __init__(
-        self,
-        ctx: CephadmContext,
-        ident: DaemonIdentity,
-        osd_fsid: Optional[str] = None,
-    ) -> None:
-        super().__init__(ctx, ident)
-        self._osd_fsid = osd_fsid
-
-    @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'OSD':
-        osd_fsid = getattr(ctx, 'osd_fsid', None)
-        if osd_fsid is None:
-            logger.info(
-                'Creating an OSD daemon form without an OSD FSID value'
-            )
-        return cls(ctx, ident, osd_fsid)
-
-    @staticmethod
-    def get_sysctl_settings() -> List[str]:
-        return [
-            '# allow a large number of OSDs',
-            'fs.aio-max-nr = 1048576',
-            'kernel.pid_max = 4194304',
-        ]
-
-    def firewall_service_name(self) -> str:
-        return 'ceph'
-
-    @property
-    def osd_fsid(self) -> Optional[str]:
-        return self._osd_fsid
-
-##################################
-
-
-@register_daemon_form
-class CephExporter(ContainerDaemonForm):
-    """Defines a Ceph exporter container"""
-
-    daemon_type = 'ceph-exporter'
-    entrypoint = '/usr/bin/ceph-exporter'
-    DEFAULT_PORT = 9926
-    port_map = {
-        'ceph-exporter': DEFAULT_PORT,
-    }
-
-    @classmethod
-    def for_daemon_type(cls, daemon_type: str) -> bool:
-        return cls.daemon_type == daemon_type
-
-    def __init__(self,
-                 ctx: CephadmContext,
-                 fsid: str, daemon_id: Union[int, str],
-                 config_json: Dict[str, Any],
-                 image: str = DEFAULT_IMAGE) -> None:
-        self.ctx = ctx
-        self.fsid = fsid
-        self.daemon_id = daemon_id
-        self.image = image
-
-        self.sock_dir = config_json.get('sock-dir', '/var/run/ceph/')
-        ipv4_addrs, _ = get_ip_addresses(get_hostname())
-        addrs = '0.0.0.0' if ipv4_addrs else '::'
-        self.addrs = config_json.get('addrs', addrs)
-        self.port = config_json.get('port', self.DEFAULT_PORT)
-        self.prio_limit = config_json.get('prio-limit', 5)
-        self.stats_period = config_json.get('stats-period', 5)
-
-        self.validate()
-
-    @classmethod
-    def init(cls, ctx: CephadmContext, fsid: str,
-             daemon_id: Union[int, str]) -> 'CephExporter':
-        return cls(ctx, fsid, daemon_id,
-                   fetch_configs(ctx), ctx.image)
-
-    @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephExporter':
-        return cls.init(ctx, ident.fsid, ident.daemon_id)
-
-    @property
-    def identity(self) -> DaemonIdentity:
-        return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id)
-
-    def get_daemon_args(self) -> List[str]:
-        args = [
-            f'--sock-dir={self.sock_dir}',
-            f'--addrs={self.addrs}',
-            f'--port={self.port}',
-            f'--prio-limit={self.prio_limit}',
-            f'--stats-period={self.stats_period}',
-        ]
-        return args
-
-    def validate(self) -> None:
-        if not os.path.isdir(self.sock_dir):
-            raise Error(f'Directory does not exist. Got: {self.sock_dir}')
-
-    def container(self, ctx: CephadmContext) -> CephContainer:
-        ctr = daemon_to_container(ctx, self)
-        return to_deployment_container(ctx, ctr)
-
-    def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
-        return extract_uid_gid(ctx)
-
-    def config_and_keyring(
-        self, ctx: CephadmContext
-    ) -> Tuple[Optional[str], Optional[str]]:
-        return get_config_and_keyring(ctx)
-
-    def customize_container_mounts(
-        self, ctx: CephadmContext, mounts: Dict[str, str]
-    ) -> None:
-        cm = Ceph.get_ceph_mounts(ctx, self.identity)
-        mounts.update(cm)
-
-    def customize_process_args(
-        self, ctx: CephadmContext, args: List[str]
-    ) -> None:
-        name = 'client.ceph-exporter.%s' % self.identity.daemon_id
-        args.extend(['-n', name, '-f'])
-        args.extend(self.get_daemon_args())
-
-    def customize_container_args(
-        self, ctx: CephadmContext, args: List[str]
-    ) -> None:
-        args.append(ctx.container_engine.unlimited_pids_option)
-
-    def customize_container_envs(
-        self, ctx: CephadmContext, envs: List[str]
-    ) -> None:
-        envs.append('TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728')
-
-    def default_entrypoint(self) -> str:
-        return self.entrypoint
-
-##################################
-
-
 def get_supported_daemons():
     # type: () -> List[str]
     supported_daemons = ceph_daemons()
@@ -560,15 +221,6 @@ def get_supported_daemons():
     assert len(supported_daemons) == len(set(supported_daemons))
     return supported_daemons
 
-
-def ceph_daemons() -> List[str]:
-    cds = list(Ceph._daemons)
-    cds.append(CephExporter.daemon_type)
-    return cds
-
-##################################
-
-
 ##################################
 
 
@@ -1191,76 +843,11 @@ def get_container_mounts_for_type(
     """Return a dictionary mapping container-external paths to container-internal
     paths given an fsid and daemon_type.
     """
-    mounts = _get_container_mounts_for_type(ctx, fsid, daemon_type)
+    mounts = get_ceph_mounts_for_type(ctx, fsid, daemon_type)
     _update_podman_mounts(ctx, mounts)
     return mounts
 
 
-def _get_container_mounts_for_type(
-    ctx: CephadmContext, fsid: str, daemon_type: str
-) -> Dict[str, str]:
-    """The main implementation of get_container_mounts_for_type minus the call
-    to _update_podman_mounts so that this can be called from
-    get_container_mounts.
-    """
-    mounts = dict()
-
-    if daemon_type in ceph_daemons():
-        if fsid:
-            run_path = os.path.join('/var/run/ceph', fsid)
-            if os.path.exists(run_path):
-                mounts[run_path] = '/var/run/ceph:z'
-            log_dir = get_log_dir(fsid, ctx.log_dir)
-            mounts[log_dir] = '/var/log/ceph:z'
-            crash_dir = '/var/lib/ceph/%s/crash' % fsid
-            if os.path.exists(crash_dir):
-                mounts[crash_dir] = '/var/lib/ceph/crash:z'
-            if daemon_type != 'crash' and should_log_to_journald(ctx):
-                journald_sock_dir = '/run/systemd/journal'
-                mounts[journald_sock_dir] = journald_sock_dir
-
-    if daemon_type in ['mon', 'osd', 'clusterless-ceph-volume']:
-        mounts['/dev'] = '/dev'  # FIXME: narrow this down?
-        mounts['/run/udev'] = '/run/udev'
-    if daemon_type in ['osd', 'clusterless-ceph-volume']:
-        mounts['/sys'] = '/sys'  # for numa.cc, pick_address, cgroups, ...
-        mounts['/run/lvm'] = '/run/lvm'
-        mounts['/run/lock/lvm'] = '/run/lock/lvm'
-    if daemon_type == 'osd':
-        # selinux-policy in the container may not match the host.
-        if HostFacts(ctx).selinux_enabled:
-            cluster_dir = f'{ctx.data_dir}/{fsid}'
-            selinux_folder = f'{cluster_dir}/selinux'
-            if os.path.exists(cluster_dir):
-                if not os.path.exists(selinux_folder):
-                    os.makedirs(selinux_folder, mode=0o755)
-                mounts[selinux_folder] = '/sys/fs/selinux:ro'
-            else:
-                logger.error(f'Cluster direcotry {cluster_dir} does not exist.')
-        mounts['/'] = '/rootfs'
-
-    try:
-        if ctx.shared_ceph_folder:  # make easy manager modules/ceph-volume development
-            ceph_folder = pathify(ctx.shared_ceph_folder)
-            if os.path.exists(ceph_folder):
-                cephadm_binary = ceph_folder + '/src/cephadm/cephadm'
-                if not os.path.exists(pathify(cephadm_binary)):
-                    raise Error("cephadm binary does not exist. Please run './build.sh cephadm' from ceph/src/cephadm/ directory.")
-                mounts[cephadm_binary] = '/usr/sbin/cephadm'
-                mounts[ceph_folder + '/src/ceph-volume/ceph_volume'] = '/usr/lib/python3.6/site-packages/ceph_volume'
-                mounts[ceph_folder + '/src/pybind/mgr'] = '/usr/share/ceph/mgr'
-                mounts[ceph_folder + '/src/python-common/ceph'] = '/usr/lib/python3.6/site-packages/ceph'
-                mounts[ceph_folder + '/monitoring/ceph-mixin/dashboards_out'] = '/etc/grafana/dashboards/ceph-dashboard'
-                mounts[ceph_folder + '/monitoring/ceph-mixin/prometheus_alerts.yml'] = '/etc/prometheus/ceph/ceph_default_alerts.yml'
-            else:
-                logger.error(
-                    'Ceph shared source folder does not exist.',
-                    extra=Highlight.FAILURE.extra())
-    except AttributeError:
-        pass
-    return mounts
-
-
 def get_container_mounts(
     ctx: CephadmContext, ident: 'DaemonIdentity', no_config: bool = False
 ) -> Dict[str, str]:
diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py
index d2c818e927d52..3931cdf14b614 100644
--- a/src/cephadm/cephadmlib/daemons/__init__.py
+++ b/src/cephadm/cephadmlib/daemons/__init__.py
@@ -6,8 +6,11 @@
 from .nfs import NFSGanesha
 from .monitoring import Monitoring
 from .snmp import SNMPGateway
+from .ceph import Ceph, OSD, CephExporter
 
 __all__ = [
+    'Ceph',
+    'CephExporter',
     'CephIscsi',
     'CephNvmeof',
     'CustomContainer',
@@ -15,6 +18,7 @@
     'Keepalived',
     'Monitoring',
     'NFSGanesha',
+    'OSD',
     'SNMPGateway',
     'Tracing',
 ]
diff --git a/src/cephadm/cephadmlib/daemons/ceph.py b/src/cephadm/cephadmlib/daemons/ceph.py
new file mode 100644
index 0000000000000..ba908ae5d2988
--- /dev/null
+++ b/src/cephadm/cephadmlib/daemons/ceph.py
@@ -0,0 +1,431 @@
+import logging
+import os
+
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+from ..container_daemon_form import ContainerDaemonForm, daemon_to_container
+from ..container_types import CephContainer, extract_uid_gid
+from ..context_getters import (
+    fetch_configs,
+    get_config_and_keyring,
+    should_log_to_journald,
+)
+from ..daemon_form import register as register_daemon_form
+from ..daemon_identity import DaemonIdentity
+from ..constants import DEFAULT_IMAGE
+from ..context import CephadmContext
+from ..deployment_utils import to_deployment_container
+from ..exceptions import Error
+from ..file_utils import make_run_dir, pathify
+from ..host_facts import HostFacts
+from ..logging import Highlight
+from ..net_utils import get_hostname, get_ip_addresses
+
+
+logger = logging.getLogger()
+
+
+@register_daemon_form
+class Ceph(ContainerDaemonForm):
+    _daemons = ('mon', 'mgr', 'osd', 'mds', 'rgw', 'rbd-mirror',
+                'crash', 'cephfs-mirror')
+
+    @classmethod
+    def for_daemon_type(cls, daemon_type: str) -> bool:
+        # TODO: figure out a way to un-special-case osd
+        return daemon_type in cls._daemons and daemon_type != 'osd'
+
+    def __init__(self, ctx: CephadmContext, ident: DaemonIdentity) -> None:
+        self.ctx = ctx
+        self._identity = ident
+        self.user_supplied_config = False
+
+    @classmethod
+    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Ceph':
+        return cls(ctx, ident)
+
+    @property
+    def identity(self) -> DaemonIdentity:
+        return self._identity
+
+    def firewall_service_name(self) -> str:
+        if self.identity.daemon_type == 'mon':
+            return 'ceph-mon'
+        elif self.identity.daemon_type in ['mgr', 'mds']:
+            return 'ceph'
+        return ''
+
+    def container(self, ctx: CephadmContext) -> CephContainer:
+        # previous to being a ContainerDaemonForm, this call to create the
+        # var-run directory was hard coded in the deploy path. Eventually, it
+        # would be good to move this somewhere cleaner and avoid needing to know
+        # the uid/gid here.
+        uid, gid = self.uid_gid(ctx)
+        make_run_dir(ctx.fsid, uid, gid)
+
+        # mon and osd need privileged in order for libudev to query devices
+        privileged = self.identity.daemon_type in ['mon', 'osd']
+        ctr = daemon_to_container(ctx, self, privileged=privileged)
+        ctr = to_deployment_container(ctx, ctr)
+        config_json = fetch_configs(ctx)
+        if self.identity.daemon_type == 'mon' and config_json is not None:
+            if 'crush_location' in config_json:
+                c_loc = config_json['crush_location']
+                # was originally "c.args.extend(['--set-crush-location', c_loc])"
+                # but that doesn't seem to persist in the object after it's passed
+                # in further function calls
+                ctr.args = ctr.args + ['--set-crush-location', c_loc]
+        return ctr
+
+    _uid_gid: Optional[Tuple[int, int]] = None
+
+    def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
+        if self._uid_gid is None:
+            self._uid_gid = extract_uid_gid(ctx)
+        return self._uid_gid
+
+    def config_and_keyring(
+        self, ctx: CephadmContext
+    ) -> Tuple[Optional[str], Optional[str]]:
+        return get_config_and_keyring(ctx)
+
+    def get_daemon_args(self) -> List[str]:
+        if self.identity.daemon_type == 'crash':
+            return []
+        r = [
+            '--setuser', 'ceph',
+            '--setgroup', 'ceph',
+            '--default-log-to-file=false',
+        ]
+        log_to_journald = should_log_to_journald(self.ctx)
+        if log_to_journald:
+            r += [
+                '--default-log-to-journald=true',
+                '--default-log-to-stderr=false',
+            ]
+        else:
+            r += [
+                '--default-log-to-stderr=true',
+                '--default-log-stderr-prefix=debug ',
+            ]
+        if self.identity.daemon_type == 'mon':
+            r += [
+                '--default-mon-cluster-log-to-file=false',
+            ]
+            if log_to_journald:
+                r += [
+                    '--default-mon-cluster-log-to-journald=true',
+                    '--default-mon-cluster-log-to-stderr=false',
+                ]
+            else:
+                r += ['--default-mon-cluster-log-to-stderr=true']
+        return r
+
+    @staticmethod
+    def get_ceph_mounts(
+        ctx: CephadmContext,
+        ident: DaemonIdentity,
+        no_config: bool = False,
+    ) -> Dict[str, str]:
+        # Warning: This is a hack done for more expedient refactoring
+        mounts = get_ceph_mounts_for_type(
+            ctx, ident.fsid, ident.daemon_type
+        )
+        data_dir = ident.data_dir(ctx.data_dir)
+        if ident.daemon_type == 'rgw':
+            cdata_dir = '/var/lib/ceph/radosgw/ceph-rgw.%s' % (
+                ident.daemon_id
+            )
+        else:
+            cdata_dir = '/var/lib/ceph/%s/ceph-%s' % (
+                ident.daemon_type,
+                ident.daemon_id,
+            )
+        if ident.daemon_type != 'crash':
+            mounts[data_dir] = cdata_dir + ':z'
+        if not no_config:
+            mounts[data_dir + '/config'] = '/etc/ceph/ceph.conf:z'
+        if ident.daemon_type in [
+            'rbd-mirror',
+            'cephfs-mirror',
+            'crash',
+            'ceph-exporter',
+        ]:
+            # these do not search for their keyrings in a data directory
+            mounts[
+                data_dir + '/keyring'
+            ] = '/etc/ceph/ceph.client.%s.%s.keyring' % (
+                ident.daemon_type,
+                ident.daemon_id,
+            )
+        return mounts
+
+    def customize_container_mounts(
+        self, ctx: CephadmContext, mounts: Dict[str, str]
+    ) -> None:
+        no_config = bool(
+            getattr(ctx, 'config', None) and self.user_supplied_config
+        )
+        cm = self.get_ceph_mounts(
+            ctx,
+            self.identity,
+            no_config=no_config,
+        )
+        mounts.update(cm)
+
+    def customize_container_args(
+        self, ctx: CephadmContext, args: List[str]
+    ) -> None:
+        args.append(ctx.container_engine.unlimited_pids_option)
+
+    def customize_process_args(
+        self, ctx: CephadmContext, args: List[str]
+    ) -> None:
+        ident = self.identity
+        if ident.daemon_type == 'rgw':
+            name = 'client.rgw.%s' % ident.daemon_id
+        elif ident.daemon_type == 'rbd-mirror':
+            name = 'client.rbd-mirror.%s' % ident.daemon_id
+        elif ident.daemon_type == 'cephfs-mirror':
+            name = 'client.cephfs-mirror.%s' % ident.daemon_id
+        elif ident.daemon_type == 'crash':
+            name = 'client.crash.%s' % ident.daemon_id
+        elif ident.daemon_type in ['mon', 'mgr', 'mds', 'osd']:
+            name = ident.daemon_name
+        else:
+            raise ValueError(ident)
+        args.extend(['-n', name])
+        if ident.daemon_type != 'crash':
+            args.append('-f')
+        args.extend(self.get_daemon_args())
+
+    def customize_container_envs(
+        self, ctx: CephadmContext, envs: List[str]
+    ) -> None:
+        envs.append('TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728')
+
+    def default_entrypoint(self) -> str:
+        ep = {
+            'rgw': '/usr/bin/radosgw',
+            'rbd-mirror': '/usr/bin/rbd-mirror',
+            'cephfs-mirror': '/usr/bin/cephfs-mirror',
+        }
+        daemon_type = self.identity.daemon_type
+        return ep.get(daemon_type) or f'/usr/bin/ceph-{daemon_type}'
+
+
+@register_daemon_form
+class OSD(Ceph):
+    @classmethod
+    def for_daemon_type(cls, daemon_type: str) -> bool:
+        # TODO: figure out a way to un-special-case osd
+        return daemon_type == 'osd'
+
+    def __init__(
+        self,
+        ctx: CephadmContext,
+        ident: DaemonIdentity,
+        osd_fsid: Optional[str] = None,
+    ) -> None:
+        super().__init__(ctx, ident)
+        self._osd_fsid = osd_fsid
+
+    @classmethod
+    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'OSD':
+        osd_fsid = getattr(ctx, 'osd_fsid', None)
+        if osd_fsid is None:
+            logger.info(
+                'Creating an OSD daemon form without an OSD FSID value'
+            )
+        return cls(ctx, ident, osd_fsid)
+
+    @staticmethod
+    def get_sysctl_settings() -> List[str]:
+        return [
+            '# allow a large number of OSDs',
+            'fs.aio-max-nr = 1048576',
+            'kernel.pid_max = 4194304',
+        ]
+
+    def firewall_service_name(self) -> str:
+        return 'ceph'
+
+    @property
+    def osd_fsid(self) -> Optional[str]:
+        return self._osd_fsid
+
+
+@register_daemon_form
+class CephExporter(ContainerDaemonForm):
+    """Defines a Ceph exporter container"""
+
+    daemon_type = 'ceph-exporter'
+    entrypoint = '/usr/bin/ceph-exporter'
+    DEFAULT_PORT = 9926
+    port_map = {
+        'ceph-exporter': DEFAULT_PORT,
+    }
+
+    @classmethod
+    def for_daemon_type(cls, daemon_type: str) -> bool:
+        return cls.daemon_type == daemon_type
+
+    def __init__(self,
+                 ctx: CephadmContext,
+                 fsid: str, daemon_id: Union[int, str],
+                 config_json: Dict[str, Any],
+                 image: str = DEFAULT_IMAGE) -> None:
+        self.ctx = ctx
+        self.fsid = fsid
+        self.daemon_id = daemon_id
+        self.image = image
+
+        self.sock_dir = config_json.get('sock-dir', '/var/run/ceph/')
+        ipv4_addrs, _ = get_ip_addresses(get_hostname())
+        addrs = '0.0.0.0' if ipv4_addrs else '::'
+        self.addrs = config_json.get('addrs', addrs)
+        self.port = config_json.get('port', self.DEFAULT_PORT)
+        self.prio_limit = config_json.get('prio-limit', 5)
+        self.stats_period = config_json.get('stats-period', 5)
+
+        self.validate()
+
+    @classmethod
+    def init(cls, ctx: CephadmContext, fsid: str,
+             daemon_id: Union[int, str]) -> 'CephExporter':
+        return cls(ctx, fsid, daemon_id,
+                   fetch_configs(ctx), ctx.image)
+
+    @classmethod
+    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephExporter':
+        return cls.init(ctx, ident.fsid, ident.daemon_id)
+
+    @property
+    def identity(self) -> DaemonIdentity:
+        return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id)
+
+    def get_daemon_args(self) -> List[str]:
+        args = [
+            f'--sock-dir={self.sock_dir}',
+            f'--addrs={self.addrs}',
+            f'--port={self.port}',
+            f'--prio-limit={self.prio_limit}',
+            f'--stats-period={self.stats_period}',
+        ]
+        return args
+
+    def validate(self) -> None:
+        if not os.path.isdir(self.sock_dir):
+            raise Error(f'Directory does not exist. Got: {self.sock_dir}')
+
+    def container(self, ctx: CephadmContext) -> CephContainer:
+        ctr = daemon_to_container(ctx, self)
+        return to_deployment_container(ctx, ctr)
+
+    def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
+        return extract_uid_gid(ctx)
+
+    def config_and_keyring(
+        self, ctx: CephadmContext
+    ) -> Tuple[Optional[str], Optional[str]]:
+        return get_config_and_keyring(ctx)
+
+    def customize_container_mounts(
+        self, ctx: CephadmContext, mounts: Dict[str, str]
+    ) -> None:
+        cm = Ceph.get_ceph_mounts(ctx, self.identity)
+        mounts.update(cm)
+
+    def customize_process_args(
+        self, ctx: CephadmContext, args: List[str]
+    ) -> None:
+        name = 'client.ceph-exporter.%s' % self.identity.daemon_id
+        args.extend(['-n', name, '-f'])
+        args.extend(self.get_daemon_args())
+
+    def customize_container_args(
+        self, ctx: CephadmContext, args: List[str]
+    ) -> None:
+        args.append(ctx.container_engine.unlimited_pids_option)
+
+    def customize_container_envs(
+        self, ctx: CephadmContext, envs: List[str]
+    ) -> None:
+        envs.append('TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728')
+
+    def default_entrypoint(self) -> str:
+        return self.entrypoint
+
+
+def get_ceph_mounts_for_type(
+    ctx: CephadmContext, fsid: str, daemon_type: str
+) -> Dict[str, str]:
+    """The main implementation of get_container_mounts_for_type minus the call
+    to _update_podman_mounts so that this can be called from
+    get_container_mounts.
+    """
+    mounts = dict()
+
+    if daemon_type in ceph_daemons():
+        if fsid:
+            run_path = os.path.join('/var/run/ceph', fsid)
+            if os.path.exists(run_path):
+                mounts[run_path] = '/var/run/ceph:z'
+            log_dir = os.path.join(ctx.log_dir, fsid)
+            mounts[log_dir] = '/var/log/ceph:z'
+            crash_dir = '/var/lib/ceph/%s/crash' % fsid
+            if os.path.exists(crash_dir):
+                mounts[crash_dir] = '/var/lib/ceph/crash:z'
+            if daemon_type != 'crash' and should_log_to_journald(ctx):
+                journald_sock_dir = '/run/systemd/journal'
+                mounts[journald_sock_dir] = journald_sock_dir
+
+    if daemon_type in ['mon', 'osd', 'clusterless-ceph-volume']:
+        mounts['/dev'] = '/dev'  # FIXME: narrow this down?
+        mounts['/run/udev'] = '/run/udev'
+    if daemon_type in ['osd', 'clusterless-ceph-volume']:
+        mounts['/sys'] = '/sys'  # for numa.cc, pick_address, cgroups, ...
+        mounts['/run/lvm'] = '/run/lvm'
+        mounts['/run/lock/lvm'] = '/run/lock/lvm'
+    if daemon_type == 'osd':
+        # selinux-policy in the container may not match the host.
+        if HostFacts(ctx).selinux_enabled:
+            cluster_dir = f'{ctx.data_dir}/{fsid}'
+            selinux_folder = f'{cluster_dir}/selinux'
+            if os.path.exists(cluster_dir):
+                if not os.path.exists(selinux_folder):
+                    os.makedirs(selinux_folder, mode=0o755)
+                mounts[selinux_folder] = '/sys/fs/selinux:ro'
+            else:
+                logger.error(f'Cluster direcotry {cluster_dir} does not exist.')
+        mounts['/'] = '/rootfs'
+
+    try:
+        if ctx.shared_ceph_folder:  # make easy manager modules/ceph-volume development
+            ceph_folder = pathify(ctx.shared_ceph_folder)
+            if os.path.exists(ceph_folder):
+                cephadm_binary = ceph_folder + '/src/cephadm/cephadm'
+                if not os.path.exists(pathify(cephadm_binary)):
+                    raise Error("cephadm binary does not exist. Please run './build.sh cephadm' from ceph/src/cephadm/ directory.")
+                mounts[cephadm_binary] = '/usr/sbin/cephadm'
+                mounts[ceph_folder + '/src/ceph-volume/ceph_volume'] = '/usr/lib/python3.6/site-packages/ceph_volume'
+                mounts[ceph_folder + '/src/pybind/mgr'] = '/usr/share/ceph/mgr'
+                mounts[ceph_folder + '/src/python-common/ceph'] = '/usr/lib/python3.6/site-packages/ceph'
+                mounts[ceph_folder + '/monitoring/ceph-mixin/dashboards_out'] = '/etc/grafana/dashboards/ceph-dashboard'
+                mounts[ceph_folder + '/monitoring/ceph-mixin/prometheus_alerts.yml'] = '/etc/prometheus/ceph/ceph_default_alerts.yml'
+            else:
+                logger.error(
+                    'Ceph shared source folder does not exist.',
+                    extra=Highlight.FAILURE.extra())
+    except AttributeError:
+        pass
+    return mounts
+
+
+def ceph_daemons() -> List[str]:
+    """A legacy method that returns a list of all daemon types considered ceph
+    daemons.
+    """
+    cds = list(Ceph._daemons)
+    cds.append(CephExporter.daemon_type)
+    return cds
diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py
index b2e395fab64ed..82850ab597d6b 100644
--- a/src/cephadm/tests/test_cephadm.py
+++ b/src/cephadm/tests/test_cephadm.py
@@ -376,7 +376,8 @@ def test_mon_crush_location(self, funkypatch):
         funkypatch.patch('cephadm.logger')
         funkypatch.patch('cephadm.FileLock')
         _deploy_daemon = funkypatch.patch('cephadm.deploy_daemon')
-        _make_var_run = funkypatch.patch('cephadm.make_var_run')
+        funkypatch.patch('cephadm.make_var_run')
+        funkypatch.patch('cephadmlib.file_utils.make_run_dir')
         _migrate_sysctl = funkypatch.patch('cephadm.migrate_sysctl_dir')
         funkypatch.patch(
             'cephadm.check_unit',
diff --git a/src/cephadm/tests/test_daemon_form.py b/src/cephadm/tests/test_daemon_form.py
index 07896cc585590..a2d1773f1c84a 100644
--- a/src/cephadm/tests/test_daemon_form.py
+++ b/src/cephadm/tests/test_daemon_form.py
@@ -6,6 +6,7 @@
 
 from cephadmlib import daemon_form
 from cephadmlib import daemon_identity
+from cephadmlib import daemons
 
 _cephadm = import_cephadm()
 
@@ -22,7 +23,7 @@
         ('mon', _cephadm.Ceph),
         ('nfs', _cephadm.NFSGanesha),
         ('nvmeof', _cephadm.CephNvmeof),
-        ('osd', _cephadm.OSD),
+        ('osd', daemons.OSD),
         ('prometheus', _cephadm.Monitoring),
         ('snmp-gateway', _cephadm.SNMPGateway),
     ],
diff --git a/src/cephadm/tests/test_deploy.py b/src/cephadm/tests/test_deploy.py
index 5d5b46ad6954c..94a292dc57b64 100644
--- a/src/cephadm/tests/test_deploy.py
+++ b/src/cephadm/tests/test_deploy.py
@@ -294,7 +294,7 @@ def test_deploy_nvmeof_container(cephadm_fs, monkeypatch):
 def test_deploy_a_monitoring_container(cephadm_fs, funkypatch):
     mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
-    _get_ip_addresses = funkypatch.patch('cephadm.get_ip_addresses')
+    _get_ip_addresses = funkypatch.patch('cephadmlib.net_utils.get_ip_addresses')
     _get_ip_addresses.return_value = (['10.10.10.10'], [])
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
@@ -362,11 +362,10 @@ def test_deploy_a_tracing_container(cephadm_fs, monkeypatch):
     assert not (basedir / 'keyring').exists()
 
 
-def test_deploy_ceph_mgr_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_ceph_mgr_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
-    _make_var_run = mock.MagicMock()
-    monkeypatch.setattr('cephadm.make_var_run', _make_var_run)
+    _make_run_dir = funkypatch.patch('cephadmlib.file_utils.make_run_dir')
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
         ctx.container_engine = mock_podman()
@@ -400,16 +399,15 @@ def test_deploy_ceph_mgr_container(cephadm_fs, monkeypatch):
         assert f.read() == 'XXXXXXX'
     with open(basedir / 'keyring') as f:
         assert f.read() == 'YYYYYY'
-    assert _make_var_run.call_count == 1
-    assert _make_var_run.call_args[0][2] == 8765
-    assert _make_var_run.call_args[0][3] == 8765
+    assert _make_run_dir.call_count == 1
+    assert _make_run_dir.call_args[0][1] == 8765
+    assert _make_run_dir.call_args[0][2] == 8765
 
 
-def test_deploy_ceph_osd_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_ceph_osd_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
-    _make_var_run = mock.MagicMock()
-    monkeypatch.setattr('cephadm.make_var_run', _make_var_run)
+    _make_run_dir = funkypatch.patch('cephadmlib.file_utils.make_run_dir')
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
         ctx.container_engine = mock_podman()
@@ -445,18 +443,17 @@ def test_deploy_ceph_osd_container(cephadm_fs, monkeypatch):
         assert f.read() == 'XXXXXXX'
     with open(basedir / 'keyring') as f:
         assert f.read() == 'YYYYYY'
-    assert _make_var_run.call_count == 1
-    assert _make_var_run.call_args[0][2] == 8765
-    assert _make_var_run.call_args[0][3] == 8765
+    assert _make_run_dir.call_count == 1
+    assert _make_run_dir.call_args[0][1] == 8765
+    assert _make_run_dir.call_args[0][2] == 8765
 
 
-def test_deploy_ceph_exporter_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_ceph_exporter_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
-    _get_ip_addresses = mock.MagicMock(return_value=(['10.10.10.10'], []))
-    monkeypatch.setattr('cephadm.get_ip_addresses', _get_ip_addresses)
-    _make_var_run = mock.MagicMock()
-    monkeypatch.setattr('cephadm.make_var_run', _make_var_run)
+    _get_ip_addresses = funkypatch.patch('cephadmlib.net_utils.get_ip_addresses')
+    _get_ip_addresses.return_value = (['10.10.10.10'], [])
+    _make_run_dir = funkypatch.patch('cephadmlib.file_utils.make_run_dir')
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
         ctx.container_engine = mock_podman()

From 13baad75f599c06f12dde335892713ef6a2fa57e Mon Sep 17 00:00:00 2001
From: John Mulligan 
Date: Thu, 9 Nov 2023 13:49:03 -0500
Subject: [PATCH 19/29] cephadm: sort imports in daemons/__init__.py

Signed-off-by: John Mulligan 
---
 src/cephadm/cephadmlib/daemons/__init__.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py
index 3931cdf14b614..cf572d487c9bd 100644
--- a/src/cephadm/cephadmlib/daemons/__init__.py
+++ b/src/cephadm/cephadmlib/daemons/__init__.py
@@ -1,12 +1,12 @@
+from .ceph import Ceph, OSD, CephExporter
 from .custom import CustomContainer
-from .tracing import Tracing
 from .ingress import HAproxy, Keepalived
-from .nvmeof import CephNvmeof
 from .iscsi import CephIscsi
-from .nfs import NFSGanesha
 from .monitoring import Monitoring
+from .nfs import NFSGanesha
+from .nvmeof import CephNvmeof
 from .snmp import SNMPGateway
-from .ceph import Ceph, OSD, CephExporter
+from .tracing import Tracing
 
 __all__ = [
     'Ceph',

From ddee9492b4c0f5bbbc84d39aaa56539c88e4c0c2 Mon Sep 17 00:00:00 2001
From: John Mulligan 
Date: Thu, 9 Nov 2023 14:26:35 -0500
Subject: [PATCH 20/29] cephadm: convert all deploy tests to use funkypatch
 fixture

During the refactor of various daemon type classes some of the tests had
been converted to funkypatch in order to deal with imports occurring over
multiple files. However, this conversion was done piece by piece in
order to make clear what was changing. This left the functions in this
file inconsistent. Change all the remaining functions to use funkypatch
for consistency.

Signed-off-by: John Mulligan 
---
 src/cephadm/tests/test_deploy.py | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/src/cephadm/tests/test_deploy.py b/src/cephadm/tests/test_deploy.py
index 94a292dc57b64..dadf3456fd5af 100644
--- a/src/cephadm/tests/test_deploy.py
+++ b/src/cephadm/tests/test_deploy.py
@@ -16,10 +16,6 @@
 _cephadm = import_cephadm()
 
 
-def _common_mp(monkeypatch):
-    return _common_patches(FunkyPatcher(monkeypatch))
-
-
 def _common_patches(funkypatch):
     mocks = {}
     _call = funkypatch.patch('cephadmlib.container_types.call')
@@ -39,8 +35,8 @@ def _common_patches(funkypatch):
     return mocks
 
 
-def test_deploy_nfs_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_nfs_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
@@ -76,8 +72,8 @@ def test_deploy_nfs_container(cephadm_fs, monkeypatch):
         assert f.read() == 'FAKE'
 
 
-def test_deploy_snmp_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_snmp_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
@@ -108,8 +104,8 @@ def test_deploy_snmp_container(cephadm_fs, monkeypatch):
     assert not (basedir / 'keyring').exists()
 
 
-def test_deploy_keepalived_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_keepalived_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
     _install_sysctl = mocks['install_sysctl']
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
@@ -156,8 +152,8 @@ def test_deploy_keepalived_container(cephadm_fs, monkeypatch):
     assert len(_install_sysctl.call_args[0][-1].get_sysctl_settings()) > 1
 
 
-def test_deploy_haproxy_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_haproxy_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
     _install_sysctl = mocks['install_sysctl']
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
@@ -201,8 +197,8 @@ def test_deploy_haproxy_container(cephadm_fs, monkeypatch):
     assert len(_install_sysctl.call_args[0][-1].get_sysctl_settings()) > 1
 
 
-def test_deploy_iscsi_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_iscsi_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
@@ -245,8 +241,8 @@ def test_deploy_iscsi_container(cephadm_fs, monkeypatch):
         assert (si.st_uid, si.st_gid) == (8765, 8765)
 
 
-def test_deploy_nvmeof_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_nvmeof_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:
@@ -331,8 +327,8 @@ def test_deploy_a_monitoring_container(cephadm_fs, funkypatch):
         assert (si.st_uid, si.st_gid) == (8765, 8765)
 
 
-def test_deploy_a_tracing_container(cephadm_fs, monkeypatch):
-    mocks = _common_mp(monkeypatch)
+def test_deploy_a_tracing_container(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
     fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
     with with_cephadm_ctx([]) as ctx:

From a7f5b0c20b70c0679019c90007c148b4c23f40ac Mon Sep 17 00:00:00 2001
From: John Mulligan 
Date: Thu, 9 Nov 2023 13:57:15 -0500
Subject: [PATCH 21/29] cephadm: black format daemons/ceph.py

Signed-off-by: John Mulligan 
---
 src/cephadm/cephadmlib/daemons/ceph.py | 83 ++++++++++++++++++--------
 1 file changed, 57 insertions(+), 26 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/ceph.py b/src/cephadm/cephadmlib/daemons/ceph.py
index ba908ae5d2988..0afb8f734af5f 100644
--- a/src/cephadm/cephadmlib/daemons/ceph.py
+++ b/src/cephadm/cephadmlib/daemons/ceph.py
@@ -27,8 +27,16 @@
 
 @register_daemon_form
 class Ceph(ContainerDaemonForm):
-    _daemons = ('mon', 'mgr', 'osd', 'mds', 'rgw', 'rbd-mirror',
-                'crash', 'cephfs-mirror')
+    _daemons = (
+        'mon',
+        'mgr',
+        'osd',
+        'mds',
+        'rgw',
+        'rbd-mirror',
+        'crash',
+        'cephfs-mirror',
+    )
 
     @classmethod
     def for_daemon_type(cls, daemon_type: str) -> bool:
@@ -93,8 +101,10 @@ def get_daemon_args(self) -> List[str]:
         if self.identity.daemon_type == 'crash':
             return []
         r = [
-            '--setuser', 'ceph',
-            '--setgroup', 'ceph',
+            '--setuser',
+            'ceph',
+            '--setgroup',
+            'ceph',
             '--default-log-to-file=false',
         ]
         log_to_journald = should_log_to_journald(self.ctx)
@@ -128,9 +138,7 @@ def get_ceph_mounts(
         no_config: bool = False,
     ) -> Dict[str, str]:
         # Warning: This is a hack done for more expedient refactoring
-        mounts = get_ceph_mounts_for_type(
-            ctx, ident.fsid, ident.daemon_type
-        )
+        mounts = get_ceph_mounts_for_type(ctx, ident.fsid, ident.daemon_type)
         data_dir = ident.data_dir(ctx.data_dir)
         if ident.daemon_type == 'rgw':
             cdata_dir = '/var/lib/ceph/radosgw/ceph-rgw.%s' % (
@@ -270,11 +278,14 @@ class CephExporter(ContainerDaemonForm):
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 ctx: CephadmContext,
-                 fsid: str, daemon_id: Union[int, str],
-                 config_json: Dict[str, Any],
-                 image: str = DEFAULT_IMAGE) -> None:
+    def __init__(
+        self,
+        ctx: CephadmContext,
+        fsid: str,
+        daemon_id: Union[int, str],
+        config_json: Dict[str, Any],
+        image: str = DEFAULT_IMAGE,
+    ) -> None:
         self.ctx = ctx
         self.fsid = fsid
         self.daemon_id = daemon_id
@@ -291,13 +302,15 @@ def __init__(self,
         self.validate()
 
     @classmethod
-    def init(cls, ctx: CephadmContext, fsid: str,
-             daemon_id: Union[int, str]) -> 'CephExporter':
-        return cls(ctx, fsid, daemon_id,
-                   fetch_configs(ctx), ctx.image)
+    def init(
+        cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]
+    ) -> 'CephExporter':
+        return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image)
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephExporter':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'CephExporter':
         return cls.init(ctx, ident.fsid, ident.daemon_id)
 
     @property
@@ -397,26 +410,44 @@ def get_ceph_mounts_for_type(
                     os.makedirs(selinux_folder, mode=0o755)
                 mounts[selinux_folder] = '/sys/fs/selinux:ro'
             else:
-                logger.error(f'Cluster direcotry {cluster_dir} does not exist.')
+                logger.error(
+                    f'Cluster direcotry {cluster_dir} does not exist.'
+                )
         mounts['/'] = '/rootfs'
 
     try:
-        if ctx.shared_ceph_folder:  # make easy manager modules/ceph-volume development
+        if (
+            ctx.shared_ceph_folder
+        ):  # make easy manager modules/ceph-volume development
             ceph_folder = pathify(ctx.shared_ceph_folder)
             if os.path.exists(ceph_folder):
                 cephadm_binary = ceph_folder + '/src/cephadm/cephadm'
                 if not os.path.exists(pathify(cephadm_binary)):
-                    raise Error("cephadm binary does not exist. Please run './build.sh cephadm' from ceph/src/cephadm/ directory.")
+                    raise Error(
+                        "cephadm binary does not exist. Please run './build.sh cephadm' from ceph/src/cephadm/ directory."
+                    )
                 mounts[cephadm_binary] = '/usr/sbin/cephadm'
-                mounts[ceph_folder + '/src/ceph-volume/ceph_volume'] = '/usr/lib/python3.6/site-packages/ceph_volume'
-                mounts[ceph_folder + '/src/pybind/mgr'] = '/usr/share/ceph/mgr'
-                mounts[ceph_folder + '/src/python-common/ceph'] = '/usr/lib/python3.6/site-packages/ceph'
-                mounts[ceph_folder + '/monitoring/ceph-mixin/dashboards_out'] = '/etc/grafana/dashboards/ceph-dashboard'
-                mounts[ceph_folder + '/monitoring/ceph-mixin/prometheus_alerts.yml'] = '/etc/prometheus/ceph/ceph_default_alerts.yml'
+                mounts[
+                    ceph_folder + '/src/ceph-volume/ceph_volume'
+                ] = '/usr/lib/python3.6/site-packages/ceph_volume'
+                mounts[
+                    ceph_folder + '/src/pybind/mgr'
+                ] = '/usr/share/ceph/mgr'
+                mounts[
+                    ceph_folder + '/src/python-common/ceph'
+                ] = '/usr/lib/python3.6/site-packages/ceph'
+                mounts[
+                    ceph_folder + '/monitoring/ceph-mixin/dashboards_out'
+                ] = '/etc/grafana/dashboards/ceph-dashboard'
+                mounts[
+                    ceph_folder
+                    + '/monitoring/ceph-mixin/prometheus_alerts.yml'
+                ] = '/etc/prometheus/ceph/ceph_default_alerts.yml'
             else:
                 logger.error(
                     'Ceph shared source folder does not exist.',
-                    extra=Highlight.FAILURE.extra())
+                    extra=Highlight.FAILURE.extra(),
+                )
     except AttributeError:
         pass
     return mounts

From c7919151647553f39709cc713b06349f315be06d Mon Sep 17 00:00:00 2001
From: John Mulligan 
Date: Thu, 9 Nov 2023 13:57:34 -0500
Subject: [PATCH 22/29] cephadm: black format daemons/custom.py

Signed-off-by: John Mulligan 
---
 src/cephadm/cephadmlib/daemons/custom.py | 38 ++++++++++++++++--------
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/custom.py b/src/cephadm/cephadmlib/daemons/custom.py
index 8e0d59e6f3a9e..e833c80c9a5d7 100644
--- a/src/cephadm/cephadmlib/daemons/custom.py
+++ b/src/cephadm/cephadmlib/daemons/custom.py
@@ -23,15 +23,20 @@
 @register_daemon_form
 class CustomContainer(ContainerDaemonForm):
     """Defines a custom container"""
+
     daemon_type = 'container'
 
     @classmethod
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 fsid: str, daemon_id: Union[int, str],
-                 config_json: Dict, image: str) -> None:
+    def __init__(
+        self,
+        fsid: str,
+        daemon_id: Union[int, str],
+        config_json: Dict,
+        image: str,
+    ) -> None:
         self.fsid = fsid
         self.daemon_id = daemon_id
         self.image = image
@@ -50,13 +55,15 @@ def __init__(self,
         self.files = dict_get(config_json, 'files', {})
 
     @classmethod
-    def init(cls, ctx: CephadmContext,
-             fsid: str, daemon_id: Union[int, str]) -> 'CustomContainer':
-        return cls(fsid, daemon_id,
-                   fetch_configs(ctx), ctx.image)
+    def init(
+        cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]
+    ) -> 'CustomContainer':
+        return cls(fsid, daemon_id, fetch_configs(ctx), ctx.image)
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CustomContainer':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'CustomContainer':
         return cls.init(ctx, ident.fsid, ident.daemon_id)
 
     @property
@@ -67,8 +74,10 @@ def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None:
         """
         Create dirs/files below the container data directory.
         """
-        logger.info('Creating custom container configuration '
-                    'dirs/files in {} ...'.format(data_dir))
+        logger.info(
+            'Creating custom container configuration '
+            'dirs/files in {} ...'.format(data_dir)
+        )
 
         if not os.path.isdir(data_dir):
             raise OSError('data_dir is not a directory: %s' % data_dir)
@@ -82,7 +91,9 @@ def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None:
             logger.info('Creating file: {}'.format(file_path))
             content = dict_get_join(self.files, file_path)
             file_path = os.path.join(data_dir, file_path.strip('/'))
-            with write_new(file_path, owner=(uid, gid), encoding='utf-8') as f:
+            with write_new(
+                file_path, owner=(uid, gid), encoding='utf-8'
+            ) as f:
                 f.write(content)
 
     def get_daemon_args(self) -> List[str]:
@@ -146,8 +157,9 @@ def _get_container_binds(self, data_dir: str) -> List[List[str]]:
             for index, value in enumerate(bind):
                 match = re.match(r'^source=(.+)$', value)
                 if match:
-                    bind[index] = 'source={}'.format(os.path.join(
-                        data_dir, match.group(1)))
+                    bind[index] = 'source={}'.format(
+                        os.path.join(data_dir, match.group(1))
+                    )
         return binds
 
     def customize_container_binds(

From 505624de9fcf46d162914acd858404a4cd02b075 Mon Sep 17 00:00:00 2001
From: John Mulligan 
Date: Thu, 9 Nov 2023 13:57:49 -0500
Subject: [PATCH 23/29] cephadm: black format daemons/ingress.py

Signed-off-by: John Mulligan 
---
 src/cephadm/cephadmlib/daemons/ingress.py | 68 +++++++++++++++--------
 1 file changed, 45 insertions(+), 23 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/ingress.py b/src/cephadm/cephadmlib/daemons/ingress.py
index 94ee34505ed58..6064cf538fb05 100644
--- a/src/cephadm/cephadmlib/daemons/ingress.py
+++ b/src/cephadm/cephadmlib/daemons/ingress.py
@@ -22,6 +22,7 @@
 @register_daemon_form
 class HAproxy(ContainerDaemonForm):
     """Defines an HAproxy container"""
+
     daemon_type = 'haproxy'
     required_files = ['haproxy.cfg']
     default_image = DEFAULT_HAPROXY_IMAGE
@@ -30,10 +31,14 @@ class HAproxy(ContainerDaemonForm):
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 ctx: CephadmContext,
-                 fsid: str, daemon_id: Union[int, str],
-                 config_json: Dict, image: str) -> None:
+    def __init__(
+        self,
+        ctx: CephadmContext,
+        fsid: str,
+        daemon_id: Union[int, str],
+        config_json: Dict,
+        image: str,
+    ) -> None:
         self.ctx = ctx
         self.fsid = fsid
         self.daemon_id = daemon_id
@@ -45,10 +50,10 @@ def __init__(self,
         self.validate()
 
     @classmethod
-    def init(cls, ctx: CephadmContext,
-             fsid: str, daemon_id: Union[int, str]) -> 'HAproxy':
-        return cls(ctx, fsid, daemon_id, fetch_configs(ctx),
-                   ctx.image)
+    def init(
+        cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]
+    ) -> 'HAproxy':
+        return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image)
 
     @classmethod
     def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'HAproxy':
@@ -65,7 +70,9 @@ def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None:
 
         # create additional directories in data dir for HAproxy to use
         if not os.path.isdir(os.path.join(data_dir, 'haproxy')):
-            makedirs(os.path.join(data_dir, 'haproxy'), uid, gid, DATA_DIR_MODE)
+            makedirs(
+                os.path.join(data_dir, 'haproxy'), uid, gid, DATA_DIR_MODE
+            )
 
         data_dir = os.path.join(data_dir, 'haproxy')
         populate_files(data_dir, self.files, uid, gid)
@@ -86,7 +93,9 @@ def validate(self):
         if self.required_files:
             for fname in self.required_files:
                 if fname not in self.files:
-                    raise Error('required file missing from config-json: %s' % fname)
+                    raise Error(
+                        'required file missing from config-json: %s' % fname
+                    )
 
     def get_daemon_name(self):
         # type: () -> str
@@ -144,6 +153,7 @@ def customize_process_args(
 @register_daemon_form
 class Keepalived(ContainerDaemonForm):
     """Defines an Keepalived container"""
+
     daemon_type = 'keepalived'
     required_files = ['keepalived.conf']
     default_image = DEFAULT_KEEPALIVED_IMAGE
@@ -152,10 +162,14 @@ class Keepalived(ContainerDaemonForm):
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 ctx: CephadmContext,
-                 fsid: str, daemon_id: Union[int, str],
-                 config_json: Dict, image: str) -> None:
+    def __init__(
+        self,
+        ctx: CephadmContext,
+        fsid: str,
+        daemon_id: Union[int, str],
+        config_json: Dict,
+        image: str,
+    ) -> None:
         self.ctx = ctx
         self.fsid = fsid
         self.daemon_id = daemon_id
@@ -167,13 +181,15 @@ def __init__(self,
         self.validate()
 
     @classmethod
-    def init(cls, ctx: CephadmContext, fsid: str,
-             daemon_id: Union[int, str]) -> 'Keepalived':
-        return cls(ctx, fsid, daemon_id,
-                   fetch_configs(ctx), ctx.image)
+    def init(
+        cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]
+    ) -> 'Keepalived':
+        return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image)
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Keepalived':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'Keepalived':
         return cls.init(ctx, ident.fsid, ident.daemon_id)
 
     @property
@@ -187,7 +203,9 @@ def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None:
 
         # create additional directories in data dir for keepalived to use
         if not os.path.isdir(os.path.join(data_dir, 'keepalived')):
-            makedirs(os.path.join(data_dir, 'keepalived'), uid, gid, DATA_DIR_MODE)
+            makedirs(
+                os.path.join(data_dir, 'keepalived'), uid, gid, DATA_DIR_MODE
+            )
 
         # populate files from the config-json
         populate_files(data_dir, self.files, uid, gid)
@@ -205,7 +223,9 @@ def validate(self):
         if self.required_files:
             for fname in self.required_files:
                 if fname not in self.files:
-                    raise Error('required file missing from config-json: %s' % fname)
+                    raise Error(
+                        'required file missing from config-json: %s' % fname
+                    )
 
     def get_daemon_name(self):
         # type: () -> str
@@ -225,7 +245,7 @@ def get_container_envs():
             'KEEPALIVED_AUTOCONF=false',
             'KEEPALIVED_CONF=/etc/keepalived/keepalived.conf',
             'KEEPALIVED_CMD=/usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf',
-            'KEEPALIVED_DEBUG=false'
+            'KEEPALIVED_DEBUG=false',
         ]
         return envs
 
@@ -244,7 +264,9 @@ def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
     @staticmethod
     def _get_container_mounts(data_dir: str) -> Dict[str, str]:
         mounts = dict()
-        mounts[os.path.join(data_dir, 'keepalived.conf')] = '/etc/keepalived/keepalived.conf'
+        mounts[
+            os.path.join(data_dir, 'keepalived.conf')
+        ] = '/etc/keepalived/keepalived.conf'
         return mounts
 
     def customize_container_mounts(

From 196ad1855e218177a54b00bf4a1f54f2202675bf Mon Sep 17 00:00:00 2001
From: John Mulligan <jmulligan@redhat.com>
Date: Thu, 9 Nov 2023 13:58:03 -0500
Subject: [PATCH 24/29] cephadm: black format daemons/iscsi.py

Signed-off-by: John Mulligan <jmulligan@redhat.com>
---
 src/cephadm/cephadmlib/daemons/iscsi.py | 73 +++++++++++++++++--------
 1 file changed, 51 insertions(+), 22 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/iscsi.py b/src/cephadm/cephadmlib/daemons/iscsi.py
index 504db6885583d..1845a37bf4efb 100644
--- a/src/cephadm/cephadmlib/daemons/iscsi.py
+++ b/src/cephadm/cephadmlib/daemons/iscsi.py
@@ -34,11 +34,13 @@ class CephIscsi(ContainerDaemonForm):
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 ctx: CephadmContext,
-                 ident: DaemonIdentity,
-                 config_json: Dict,
-                 image: str = DEFAULT_IMAGE):
+    def __init__(
+        self,
+        ctx: CephadmContext,
+        ident: DaemonIdentity,
+        config_json: Dict,
+        image: str = DEFAULT_IMAGE,
+    ):
         self.ctx = ctx
         self._identity = ident
         self.image = image
@@ -50,11 +52,17 @@ def __init__(self,
         self.validate()
 
     @classmethod
-    def init(cls, ctx: CephadmContext, fsid: str, daemon_id: str) -> 'CephIscsi':
-        return cls.create(ctx, DaemonIdentity(fsid, cls.daemon_type, daemon_id))
+    def init(
+        cls, ctx: CephadmContext, fsid: str, daemon_id: str
+    ) -> 'CephIscsi':
+        return cls.create(
+            ctx, DaemonIdentity(fsid, cls.daemon_type, daemon_id)
+        )
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephIscsi':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'CephIscsi':
         return cls(ctx, ident, fetch_configs(ctx), ctx.image)
 
     @property
@@ -75,9 +83,13 @@ def _get_container_mounts(data_dir, log_dir):
         mounts = dict()
         mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z'
         mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z'
-        mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z'
+        mounts[
+            os.path.join(data_dir, 'iscsi-gateway.cfg')
+        ] = '/etc/ceph/iscsi-gateway.cfg:z'
         mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config'
-        mounts[os.path.join(data_dir, 'tcmu-runner-entrypoint.sh')] = '/usr/local/scripts/tcmu-runner-entrypoint.sh'
+        mounts[
+            os.path.join(data_dir, 'tcmu-runner-entrypoint.sh')
+        ] = '/usr/local/scripts/tcmu-runner-entrypoint.sh'
         mounts[log_dir] = '/var/log:z'
         mounts['/dev'] = '/dev'
         return mounts
@@ -108,11 +120,18 @@ def customize_container_binds(
     def get_version(ctx, container_id):
         # type: (CephadmContext, str) -> Optional[str]
         version = None
-        out, err, code = call(ctx,
-                              [ctx.container_engine.path, 'exec', container_id,
-                               '/usr/bin/python3', '-c',
-                               "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"],
-                              verbosity=CallVerbosity.QUIET)
+        out, err, code = call(
+            ctx,
+            [
+                ctx.container_engine.path,
+                'exec',
+                container_id,
+                '/usr/bin/python3',
+                '-c',
+                "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)",
+            ],
+            verbosity=CallVerbosity.QUIET,
+        )
         if code == 0:
             version = out.strip()
         return version
@@ -130,7 +149,9 @@ def validate(self):
         if self.required_files:
             for fname in self.required_files:
                 if fname not in self.files:
-                    raise Error('required file missing from config-json: %s' % fname)
+                    raise Error(
+                        'required file missing from config-json: %s' % fname
+                    )
 
     def get_daemon_name(self):
         # type: () -> str
@@ -157,7 +178,9 @@ def create_daemon_dirs(self, data_dir, uid, gid):
         # to be mounted into the container. For more info
         # on why we need this script, see the
         # tcmu_runner_entrypoint_script function
-        self.files['tcmu-runner-entrypoint.sh'] = self.tcmu_runner_entrypoint_script()
+        self.files[
+            'tcmu-runner-entrypoint.sh'
+        ] = self.tcmu_runner_entrypoint_script()
 
         # populate files from the config-json
         populate_files(data_dir, self.files, uid, gid)
@@ -171,11 +194,15 @@ def configfs_mount_umount(data_dir, mount=True):
         # type: (str, bool) -> List[str]
         mount_path = os.path.join(data_dir, 'configfs')
         if mount:
-            cmd = 'if ! grep -qs {0} /proc/mounts; then ' \
-                  'mount -t configfs none {0}; fi'.format(mount_path)
+            cmd = (
+                'if ! grep -qs {0} /proc/mounts; then '
+                'mount -t configfs none {0}; fi'.format(mount_path)
+            )
         else:
-            cmd = 'if grep -qs {0} /proc/mounts; then ' \
-                  'umount {0}; fi'.format(mount_path)
+            cmd = (
+                'if grep -qs {0} /proc/mounts; then '
+                'umount {0}; fi'.format(mount_path)
+            )
         return cmd.split()
 
     @staticmethod
@@ -230,7 +257,9 @@ def get_tcmu_runner_container(self):
         # TODO: Eventually we don't want to run tcmu-runner through this script.
         # This is intended to be a workaround backported to older releases
         # and should eventually be removed in at least squid onward
-        tcmu_container.entrypoint = '/usr/local/scripts/tcmu-runner-entrypoint.sh'
+        tcmu_container.entrypoint = (
+            '/usr/local/scripts/tcmu-runner-entrypoint.sh'
+        )
         tcmu_container.cname = self.get_container_name(desc='tcmu')
         return tcmu_container
 

From 8535d57b3b5bf778ec8323b31d4b969d8e7dff27 Mon Sep 17 00:00:00 2001
From: John Mulligan <jmulligan@redhat.com>
Date: Thu, 9 Nov 2023 13:58:17 -0500
Subject: [PATCH 25/29] cephadm: black format daemons/monitoring.py

Signed-off-by: John Mulligan <jmulligan@redhat.com>
---
 src/cephadm/cephadmlib/daemons/monitoring.py | 71 ++++++++++++++------
 1 file changed, 49 insertions(+), 22 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/monitoring.py b/src/cephadm/cephadmlib/daemons/monitoring.py
index 405dafc6dfccd..e0667853dd7b5 100644
--- a/src/cephadm/cephadmlib/daemons/monitoring.py
+++ b/src/cephadm/cephadmlib/daemons/monitoring.py
@@ -27,12 +27,14 @@ class Monitoring(ContainerDaemonForm):
     """Define the configs for the monitoring containers"""
 
     port_map = {
-        'prometheus': [9095],  # Avoid default 9090, due to conflict with cockpit UI
+        'prometheus': [
+            9095
+        ],  # Avoid default 9090, due to conflict with cockpit UI
         'node-exporter': [9100],
         'grafana': [3000],
         'alertmanager': [9093, 9094],
         'loki': [3100],
-        'promtail': [9080]
+        'promtail': [9080],
     }
 
     components = {
@@ -55,9 +57,7 @@ class Monitoring(ContainerDaemonForm):
             'args': [
                 '--config.file=/etc/loki/loki.yml',
             ],
-            'config-json-files': [
-                'loki.yml'
-            ],
+            'config-json-files': ['loki.yml'],
         },
         'promtail': {
             'image': DEFAULT_PROMTAIL_IMAGE,
@@ -74,9 +74,7 @@ class Monitoring(ContainerDaemonForm):
             'image': DEFAULT_NODE_EXPORTER_IMAGE,
             'cpus': '1',
             'memory': '1GB',
-            'args': [
-                '--no-collector.timex'
-            ],
+            'args': ['--no-collector.timex'],
         },
         'grafana': {
             'image': DEFAULT_GRAFANA_IMAGE,
@@ -95,7 +93,9 @@ class Monitoring(ContainerDaemonForm):
             'cpus': '2',
             'memory': '2GB',
             'args': [
-                '--cluster.listen-address=:{}'.format(port_map['alertmanager'][1]),
+                '--cluster.listen-address=:{}'.format(
+                    port_map['alertmanager'][1]
+                ),
             ],
             'config-json-files': [
                 'alertmanager.yml',
@@ -116,7 +116,13 @@ def get_version(ctx, container_id, daemon_type):
         """
         :param: daemon_type Either "prometheus", "alertmanager", "loki", "promtail" or "node-exporter"
         """
-        assert daemon_type in ('prometheus', 'alertmanager', 'node-exporter', 'loki', 'promtail')
+        assert daemon_type in (
+            'prometheus',
+            'alertmanager',
+            'node-exporter',
+            'loki',
+            'promtail',
+        )
         cmd = daemon_type.replace('-', '_')
         code = -1
         err = ''
@@ -124,17 +130,32 @@ def get_version(ctx, container_id, daemon_type):
         version = ''
         if daemon_type == 'alertmanager':
             for cmd in ['alertmanager', 'prometheus-alertmanager']:
-                out, err, code = call(ctx, [
-                    ctx.container_engine.path, 'exec', container_id, cmd,
-                    '--version'
-                ], verbosity=CallVerbosity.QUIET)
+                out, err, code = call(
+                    ctx,
+                    [
+                        ctx.container_engine.path,
+                        'exec',
+                        container_id,
+                        cmd,
+                        '--version',
+                    ],
+                    verbosity=CallVerbosity.QUIET,
+                )
                 if code == 0:
                     break
             cmd = 'alertmanager'  # reset cmd for version extraction
         else:
-            out, err, code = call(ctx, [
-                ctx.container_engine.path, 'exec', container_id, cmd, '--version'
-            ], verbosity=CallVerbosity.QUIET)
+            out, err, code = call(
+                ctx,
+                [
+                    ctx.container_engine.path,
+                    'exec',
+                    container_id,
+                    cmd,
+                    '--version',
+                ],
+                verbosity=CallVerbosity.QUIET,
+            )
         if code == 0:
             if err.startswith('%s, version ' % cmd):
                 version = err.split(' ')[2]
@@ -169,7 +190,9 @@ def __init__(self, ctx: CephadmContext, ident: DaemonIdentity) -> None:
         self._identity = ident
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'Monitoring':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'Monitoring':
         return cls(ctx, ident)
 
     @property
@@ -232,7 +255,9 @@ def get_daemon_args(self) -> List[str]:
             if daemon_type == 'prometheus':
                 config = fetch_configs(ctx)
                 retention_time = config.get('retention_time', '15d')
-                retention_size = config.get('retention_size', '0')  # default to disabled
+                retention_size = config.get(
+                    'retention_size', '0'
+                )  # default to disabled
                 r += [f'--storage.tsdb.retention.time={retention_time}']
                 r += [f'--storage.tsdb.retention.size={retention_size}']
                 scheme = 'http'
@@ -270,9 +295,11 @@ def get_daemon_args(self) -> List[str]:
                 r += [f'--web.config.file={config["web_config"]}']
             except KeyError:
                 pass
-            r += ['--path.procfs=/host/proc',
-                  '--path.sysfs=/host/sys',
-                  '--path.rootfs=/rootfs']
+            r += [
+                '--path.procfs=/host/proc',
+                '--path.sysfs=/host/sys',
+                '--path.rootfs=/rootfs',
+            ]
         return r
 
     def _get_container_mounts(self, data_dir: str) -> Dict[str, str]:

From 192ff2f6ef34e3ba858bcc93384123cf19293ee1 Mon Sep 17 00:00:00 2001
From: John Mulligan <jmulligan@redhat.com>
Date: Thu, 9 Nov 2023 13:58:30 -0500
Subject: [PATCH 26/29] cephadm: black format daemons/nfs.py

Signed-off-by: John Mulligan <jmulligan@redhat.com>
---
 src/cephadm/cephadmlib/daemons/nfs.py | 41 ++++++++++++++++-----------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/nfs.py b/src/cephadm/cephadmlib/daemons/nfs.py
index 48653b775fb08..6e2f2a945cae5 100644
--- a/src/cephadm/cephadmlib/daemons/nfs.py
+++ b/src/cephadm/cephadmlib/daemons/nfs.py
@@ -41,12 +41,9 @@ class NFSGanesha(ContainerDaemonForm):
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 ctx,
-                 fsid,
-                 daemon_id,
-                 config_json,
-                 image=DEFAULT_IMAGE):
+    def __init__(
+        self, ctx, fsid, daemon_id, config_json, image=DEFAULT_IMAGE
+    ):
         # type: (CephadmContext, str, Union[int, str], Dict, str) -> None
         self.ctx = ctx
         self.fsid = fsid
@@ -70,7 +67,9 @@ def init(cls, ctx, fsid, daemon_id):
         return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image)
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'NFSGanesha':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'NFSGanesha':
         return cls.init(ctx, ident.fsid, ident.daemon_id)
 
     @property
@@ -86,8 +85,9 @@ def _get_container_mounts(self, data_dir):
         if self.rgw:
             cluster = self.rgw.get('cluster', 'ceph')
             rgw_user = self.rgw.get('user', 'admin')
-            mounts[os.path.join(data_dir, 'keyring.rgw')] = \
-                '/var/lib/ceph/radosgw/%s-%s/keyring:z' % (cluster, rgw_user)
+            mounts[
+                os.path.join(data_dir, 'keyring.rgw')
+            ] = '/var/lib/ceph/radosgw/%s-%s/keyring:z' % (cluster, rgw_user)
         return mounts
 
     def customize_container_mounts(
@@ -99,19 +99,24 @@ def customize_container_mounts(
     @staticmethod
     def get_container_envs():
         # type: () -> List[str]
-        envs = [
-            'CEPH_CONF=%s' % (CEPH_DEFAULT_CONF)
-        ]
+        envs = ['CEPH_CONF=%s' % (CEPH_DEFAULT_CONF)]
         return envs
 
     @staticmethod
     def get_version(ctx, container_id):
         # type: (CephadmContext, str) -> Optional[str]
         version = None
-        out, err, code = call(ctx,
-                              [ctx.container_engine.path, 'exec', container_id,
-                               NFSGanesha.entrypoint, '-v'],
-                              verbosity=CallVerbosity.QUIET)
+        out, err, code = call(
+            ctx,
+            [
+                ctx.container_engine.path,
+                'exec',
+                container_id,
+                NFSGanesha.entrypoint,
+                '-v',
+            ],
+            verbosity=CallVerbosity.QUIET,
+        )
         if code == 0:
             match = re.search(r'NFS-Ganesha Release\s*=\s*[V]*([\d.]+)', out)
             if match:
@@ -131,7 +136,9 @@ def validate(self):
         if self.required_files:
             for fname in self.required_files:
                 if fname not in self.files:
-                    raise Error('required file missing from config-json: %s' % fname)
+                    raise Error(
+                        'required file missing from config-json: %s' % fname
+                    )
 
         # check for an RGW config
         if self.rgw:

From bc0c9d5422141339b651b6a96cf39f04820e7cc8 Mon Sep 17 00:00:00 2001
From: John Mulligan <jmulligan@redhat.com>
Date: Thu, 9 Nov 2023 13:58:41 -0500
Subject: [PATCH 27/29] cephadm: black format daemons/nvmeof.py

Signed-off-by: John Mulligan <jmulligan@redhat.com>
---
 src/cephadm/cephadmlib/daemons/nvmeof.py | 50 +++++++++++++++---------
 1 file changed, 31 insertions(+), 19 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/nvmeof.py b/src/cephadm/cephadmlib/daemons/nvmeof.py
index a1a18f5a18baa..39488406bc8e6 100644
--- a/src/cephadm/cephadmlib/daemons/nvmeof.py
+++ b/src/cephadm/cephadmlib/daemons/nvmeof.py
@@ -32,12 +32,9 @@ class CephNvmeof(ContainerDaemonForm):
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 ctx,
-                 fsid,
-                 daemon_id,
-                 config_json,
-                 image=DEFAULT_NVMEOF_IMAGE):
+    def __init__(
+        self, ctx, fsid, daemon_id, config_json, image=DEFAULT_NVMEOF_IMAGE
+    ):
         # type: (CephadmContext, str, Union[int, str], Dict, str) -> None
         self.ctx = ctx
         self.fsid = fsid
@@ -53,11 +50,12 @@ def __init__(self,
     @classmethod
     def init(cls, ctx, fsid, daemon_id):
         # type: (CephadmContext, str, Union[int, str]) -> CephNvmeof
-        return cls(ctx, fsid, daemon_id,
-                   fetch_configs(ctx), ctx.image)
+        return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image)
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'CephNvmeof':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'CephNvmeof':
         return cls.init(ctx, ident.fsid, ident.daemon_id)
 
     @property
@@ -69,7 +67,9 @@ def _get_container_mounts(data_dir: str) -> Dict[str, str]:
         mounts = dict()
         mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z'
         mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z'
-        mounts[os.path.join(data_dir, 'ceph-nvmeof.conf')] = '/src/ceph-nvmeof.conf:z'
+        mounts[
+            os.path.join(data_dir, 'ceph-nvmeof.conf')
+        ] = '/src/ceph-nvmeof.conf:z'
         mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config'
         mounts['/dev/hugepages'] = '/dev/hugepages'
         mounts['/dev/vfio/vfio'] = '/dev/vfio/vfio'
@@ -94,10 +94,16 @@ def customize_container_binds(
 
     @staticmethod
     def get_version(ctx: CephadmContext, container_id: str) -> Optional[str]:
-        out, err, ret = call(ctx,
-                             [ctx.container_engine.path, 'inspect',
-                              '--format', '{{index .Config.Labels "io.ceph.version"}}',
-                              ctx.image])
+        out, err, ret = call(
+            ctx,
+            [
+                ctx.container_engine.path,
+                'inspect',
+                '--format',
+                '{{index .Config.Labels "io.ceph.version"}}',
+                ctx.image,
+            ],
+        )
         version = None
         if ret == 0:
             version = out.strip()
@@ -116,7 +122,9 @@ def validate(self):
         if self.required_files:
             for fname in self.required_files:
                 if fname not in self.files:
-                    raise Error('required file missing from config-json: %s' % fname)
+                    raise Error(
+                        'required file missing from config-json: %s' % fname
+                    )
 
     def get_daemon_name(self):
         # type: () -> str
@@ -147,11 +155,15 @@ def configfs_mount_umount(data_dir, mount=True):
         # type: (str, bool) -> List[str]
         mount_path = os.path.join(data_dir, 'configfs')
         if mount:
-            cmd = 'if ! grep -qs {0} /proc/mounts; then ' \
-                  'mount -t configfs none {0}; fi'.format(mount_path)
+            cmd = (
+                'if ! grep -qs {0} /proc/mounts; then '
+                'mount -t configfs none {0}; fi'.format(mount_path)
+            )
         else:
-            cmd = 'if grep -qs {0} /proc/mounts; then ' \
-                  'umount {0}; fi'.format(mount_path)
+            cmd = (
+                'if grep -qs {0} /proc/mounts; then '
+                'umount {0}; fi'.format(mount_path)
+            )
         return cmd.split()
 
     @staticmethod

From b0f1c7fff3d52e4867c0d74a6124be87865f6953 Mon Sep 17 00:00:00 2001
From: John Mulligan <jmulligan@redhat.com>
Date: Thu, 9 Nov 2023 13:58:53 -0500
Subject: [PATCH 28/29] cephadm: black format daemons/snmp.py

Signed-off-by: John Mulligan <jmulligan@redhat.com>
---
 src/cephadm/cephadmlib/daemons/snmp.py | 103 +++++++++++++++++--------
 1 file changed, 70 insertions(+), 33 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/snmp.py b/src/cephadm/cephadmlib/daemons/snmp.py
index dc952aa4cb9d5..f334e5f765221 100644
--- a/src/cephadm/cephadmlib/daemons/snmp.py
+++ b/src/cephadm/cephadmlib/daemons/snmp.py
@@ -21,6 +21,7 @@
 @register_daemon_form
 class SNMPGateway(ContainerDaemonForm):
     """Defines an SNMP gateway between Prometheus and SNMP monitoring Frameworks"""
+
     daemon_type = 'snmp-gateway'
     SUPPORTED_VERSIONS = ['V2c', 'V3']
     default_image = DEFAULT_SNMP_GATEWAY_IMAGE
@@ -31,12 +32,14 @@ class SNMPGateway(ContainerDaemonForm):
     def for_daemon_type(cls, daemon_type: str) -> bool:
         return cls.daemon_type == daemon_type
 
-    def __init__(self,
-                 ctx: CephadmContext,
-                 fsid: str,
-                 daemon_id: Union[int, str],
-                 config_json: Dict[str, Any],
-                 image: Optional[str] = None) -> None:
+    def __init__(
+        self,
+        ctx: CephadmContext,
+        fsid: str,
+        daemon_id: Union[int, str],
+        config_json: Dict[str, Any],
+        image: Optional[str] = None,
+    ) -> None:
         self.ctx = ctx
         self.fsid = fsid
         self.daemon_id = daemon_id
@@ -49,24 +52,37 @@ def __init__(self,
         self.snmp_version = config_json.get('snmp_version', 'V2c')
         self.snmp_community = config_json.get('snmp_community', 'public')
         self.log_level = config_json.get('log_level', 'info')
-        self.snmp_v3_auth_username = config_json.get('snmp_v3_auth_username', '')
-        self.snmp_v3_auth_password = config_json.get('snmp_v3_auth_password', '')
-        self.snmp_v3_auth_protocol = config_json.get('snmp_v3_auth_protocol', '')
-        self.snmp_v3_priv_protocol = config_json.get('snmp_v3_priv_protocol', '')
-        self.snmp_v3_priv_password = config_json.get('snmp_v3_priv_password', '')
+        self.snmp_v3_auth_username = config_json.get(
+            'snmp_v3_auth_username', ''
+        )
+        self.snmp_v3_auth_password = config_json.get(
+            'snmp_v3_auth_password', ''
+        )
+        self.snmp_v3_auth_protocol = config_json.get(
+            'snmp_v3_auth_protocol', ''
+        )
+        self.snmp_v3_priv_protocol = config_json.get(
+            'snmp_v3_priv_protocol', ''
+        )
+        self.snmp_v3_priv_password = config_json.get(
+            'snmp_v3_priv_password', ''
+        )
         self.snmp_v3_engine_id = config_json.get('snmp_v3_engine_id', '')
 
         self.validate()
 
     @classmethod
-    def init(cls, ctx: CephadmContext, fsid: str,
-             daemon_id: Union[int, str]) -> 'SNMPGateway':
+    def init(
+        cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]
+    ) -> 'SNMPGateway':
         cfgs = fetch_configs(ctx)
         assert cfgs  # assert some config data was found
         return cls(ctx, fsid, daemon_id, cfgs, ctx.image)
 
     @classmethod
-    def create(cls, ctx: CephadmContext, ident: DaemonIdentity) -> 'SNMPGateway':
+    def create(
+        cls, ctx: CephadmContext, ident: DaemonIdentity
+    ) -> 'SNMPGateway':
         return cls.init(ctx, ident.fsid, ident.daemon_id)
 
     @property
@@ -74,9 +90,13 @@ def identity(self) -> DaemonIdentity:
         return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id)
 
     @staticmethod
-    def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]:
+    def get_version(
+        ctx: CephadmContext, fsid: str, daemon_id: str
+    ) -> Optional[str]:
         """Return the version of the notifier from it's http endpoint"""
-        path = os.path.join(ctx.data_dir, fsid, f'snmp-gateway.{daemon_id}', 'unit.meta')
+        path = os.path.join(
+            ctx.data_dir, fsid, f'snmp-gateway.{daemon_id}', 'unit.meta'
+        )
         try:
             with open(path, 'r') as env:
                 metadata = json.loads(env.read())
@@ -95,8 +115,9 @@ def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]
 
         for h in html:
             stripped = h.strip()
-            if stripped.startswith(('<pre>', '<PRE>')) and \
-               stripped.endswith(('</pre>', '</PRE>')):
+            if stripped.startswith(('<pre>', '<PRE>')) and stripped.endswith(
+                ('</pre>', '</PRE>')
+            ):
                 # <pre>(version=1.2.1, branch=HEAD, revision=7...
                 return stripped.split(',')[0].split('version=')[1]
 
@@ -116,28 +137,36 @@ def get_daemon_args(self) -> List[str]:
             f'--snmp.destination={self.destination}',
             f'--snmp.version={self.snmp_version}',
             f'--log.level={self.log_level}',
-            '--snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl'
+            '--snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl',
         ]
 
         if self.snmp_version == 'V3':
             # common auth settings
-            v3_args.extend([
-                '--snmp.authentication-enabled',
-                f'--snmp.authentication-protocol={self.snmp_v3_auth_protocol}',
-                f'--snmp.security-engine-id={self.snmp_v3_engine_id}'
-            ])
+            v3_args.extend(
+                [
+                    '--snmp.authentication-enabled',
+                    f'--snmp.authentication-protocol={self.snmp_v3_auth_protocol}',
+                    f'--snmp.security-engine-id={self.snmp_v3_engine_id}',
+                ]
+            )
             # authPriv setting is applied if we have a privacy protocol setting
             if self.snmp_v3_priv_protocol:
-                v3_args.extend([
-                    '--snmp.private-enabled',
-                    f'--snmp.private-protocol={self.snmp_v3_priv_protocol}'
-                ])
+                v3_args.extend(
+                    [
+                        '--snmp.private-enabled',
+                        f'--snmp.private-protocol={self.snmp_v3_priv_protocol}',
+                    ]
+                )
 
         return base_args + v3_args
 
     @property
     def data_dir(self) -> str:
-        return os.path.join(self.ctx.data_dir, self.ctx.fsid, f'{self.daemon_type}.{self.daemon_id}')
+        return os.path.join(
+            self.ctx.data_dir,
+            self.ctx.fsid,
+            f'{self.daemon_type}.{self.daemon_id}',
+        )
 
     @property
     def conf_file_path(self) -> str:
@@ -149,10 +178,16 @@ def create_daemon_conf(self) -> None:
             if self.snmp_version == 'V2c':
                 f.write(f'SNMP_NOTIFIER_COMMUNITY={self.snmp_community}\n')
             else:
-                f.write(f'SNMP_NOTIFIER_AUTH_USERNAME={self.snmp_v3_auth_username}\n')
-                f.write(f'SNMP_NOTIFIER_AUTH_PASSWORD={self.snmp_v3_auth_password}\n')
+                f.write(
+                    f'SNMP_NOTIFIER_AUTH_USERNAME={self.snmp_v3_auth_username}\n'
+                )
+                f.write(
+                    f'SNMP_NOTIFIER_AUTH_PASSWORD={self.snmp_v3_auth_password}\n'
+                )
                 if self.snmp_v3_priv_password:
-                    f.write(f'SNMP_NOTIFIER_PRIV_PASSWORD={self.snmp_v3_priv_password}\n')
+                    f.write(
+                        f'SNMP_NOTIFIER_PRIV_PASSWORD={self.snmp_v3_priv_password}\n'
+                    )
 
     def validate(self) -> None:
         """Validate the settings
@@ -169,7 +204,9 @@ def validate(self) -> None:
             raise Error(f'not a valid snmp version: {self.snmp_version}')
 
         if not self.destination:
-            raise Error('config is missing destination attribute(:) of the target SNMP listener')
+            raise Error(
+                'config is missing destination attribute(:) of the target SNMP listener'
+            )
 
     def container(self, ctx: CephadmContext) -> CephContainer:
         ctr = daemon_to_container(ctx, self)

From ed6f171ba028cb1bdd049300ad9fab007272214e Mon Sep 17 00:00:00 2001
From: John Mulligan 
Date: Thu, 9 Nov 2023 13:59:07 -0500
Subject: [PATCH 29/29] cephadm: black format daemons/tracing.py

Signed-off-by: John Mulligan 
---
 src/cephadm/cephadmlib/daemons/tracing.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/cephadm/cephadmlib/daemons/tracing.py b/src/cephadm/cephadmlib/daemons/tracing.py
index f178bd664086f..4d4fecacbb0f9 100644
--- a/src/cephadm/cephadmlib/daemons/tracing.py
+++ b/src/cephadm/cephadmlib/daemons/tracing.py
@@ -27,7 +27,7 @@ class Tracing(ContainerDaemonForm):
     components: Dict[str, Dict[str, Any]] = {
         'elasticsearch': {
             'image': DEFAULT_ELASTICSEARCH_IMAGE,
-            'envs': ['discovery.type=single-node']
+            'envs': ['discovery.type=single-node'],
         },
         'jaeger-agent': {
             'image': DEFAULT_JAEGER_AGENT_IMAGE,
@@ -50,12 +50,13 @@ def set_configuration(config: Dict[str, str], daemon_type: str) -> None:
             assert 'elasticsearch_nodes' in config
             Tracing.components[daemon_type]['envs'] = [
                 'SPAN_STORAGE_TYPE=elasticsearch',
-                f'ES_SERVER_URLS={config["elasticsearch_nodes"]}']
+                f'ES_SERVER_URLS={config["elasticsearch_nodes"]}',
+            ]
         if daemon_type == 'jaeger-agent':
             assert 'collector_nodes' in config
             Tracing.components[daemon_type]['daemon_args'] = [
                 f'--reporter.grpc.host-port={config["collector_nodes"]}',
-                '--processor.jaeger-compact.server-host-port=6799'
+                '--processor.jaeger-compact.server-host-port=6799',
             ]
 
     def __init__(self, ident: DaemonIdentity) -> None: