diff --git a/plugins/module_utils/ca_common.py b/plugins/module_utils/ca_common.py
new file mode 100644
index 0000000..cfbf55a
--- /dev/null
+++ b/plugins/module_utils/ca_common.py
@@ -0,0 +1,151 @@
+import os
+import datetime
+from typing import List
+from ansible.module_utils.basic import AnsibleModule
+
+
+def generate_cmd(cmd='ceph',
+                 sub_cmd=None,
+                 args=None,
+                 user_key=None,
+                 cluster='ceph',
+                 user='client.admin',
+                 container_image=None,
+                 interactive=False):
+    '''
+    Generate 'ceph' command line to execute
+    '''
+
+    if user_key is None:
+        user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user)
+
+    cmd = pre_generate_cmd(cmd, container_image=container_image, interactive=interactive)  # noqa: E501
+
+    base_cmd = [
+        '-n',
+        user,
+        '-k',
+        user_key,
+        '--cluster',
+        cluster
+    ]
+
+    if sub_cmd is not None:
+        base_cmd.extend(sub_cmd)
+
+    cmd.extend(base_cmd if args is None else base_cmd + args)
+
+    return cmd
+
+
+def container_exec(binary, container_image, interactive=False):
+    '''
+    Build the docker CLI to run a command inside a container
+    '''
+
+    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+    command_exec = [container_binary, 'run']
+
+    if interactive:
+        command_exec.extend(['--interactive'])
+
+    command_exec.extend(['--rm',
+                         '--net=host',
+                         '-v', '/etc/ceph:/etc/ceph:z',
+                         '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                         '-v', '/var/log/ceph/:/var/log/ceph/:z',
+                         '--entrypoint=' + binary, container_image])
+    return command_exec
+
+
+def is_containerized():
+    '''
+    Return the container image name if we are running on a containerized
+    cluster (CEPH_CONTAINER_IMAGE is set), otherwise None
+    '''
+
+    if 'CEPH_CONTAINER_IMAGE' in os.environ:
+        container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+    else:
+        container_image = None
+
+    return container_image
+
+
+def pre_generate_cmd(cmd, container_image=None, interactive=False):
+    '''
+    Generate ceph prefix command
+    '''
+    if container_image:
+        cmd = container_exec(cmd, container_image, interactive=interactive)
+    else:
+        cmd = [cmd]
+
+    return cmd
+
+
+def exec_command(module, cmd, stdin=None, check_rc=False):
+    '''
+    Execute command(s)
+    '''
+
+    binary_data = False
+    if stdin:
+        binary_data = True
+    rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data, check_rc=check_rc)  # noqa: E501
+
+    return rc, cmd, out, err
+
+
+def build_base_cmd(module: "AnsibleModule") -> List[str]:
+    cmd = ['cephadm']
+    docker = module.params.get('docker')
+    image = module.params.get('image')
+    fsid = module.params.get('fsid')
+
+    if docker:
+        cmd.append('--docker')
+    if image:
+        cmd.extend(['--image', image])
+
+    cmd.append('shell')
+
+    if fsid:
+        cmd.extend(['--fsid', fsid])
+
+    return cmd
+
+
+def build_base_cmd_orch(module: "AnsibleModule") -> List[str]:
+    cmd = build_base_cmd(module)
+    cmd.extend(['ceph', 'orch'])
+
+    return cmd
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False, diff=dict(before="", after="")):  # noqa: E501
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        rc=rc,
+        stdout=out.rstrip("\r\n"),
+        stderr=err.rstrip("\r\n"),
+        changed=changed,
+        diff=diff
+    )
+    module.exit_json(**result)
+
+
+def fatal(message, module):
+    '''
+    Report a fatal error and exit
+    '''
+
+    if module:
+        module.fail_json(msg=message, rc=1)
+    else:
+        raise Exception(message)
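A minimal usage sketch of the helpers above (illustrative only; in a real module `module` is an AnsibleModule instance, and the sub-command shown here is an assumption):

    from ansible.module_utils.ca_common import generate_cmd, exec_command, is_containerized

    # Returns the image name when CEPH_CONTAINER_IMAGE is set, else None
    container_image = is_containerized()

    # When not containerized, builds e.g.:
    #   ['ceph', '-n', 'client.admin', '-k',
    #    '/etc/ceph/ceph.client.admin.keyring', '--cluster', 'ceph',
    #    'osd', 'pool', 'ls']
    cmd = generate_cmd(sub_cmd=['osd', 'pool'], args=['ls'],
                       container_image=container_image)

    # exec_command() returns (rc, cmd, out, err):
    # rc, cmd, out, err = exec_command(module, cmd)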
diff --git a/plugins/modules/ceph_add_users_buckets.py b/plugins/modules/ceph_add_users_buckets.py
new file mode 100644
index 0000000..d00b162
--- /dev/null
+++ b/plugins/modules/ceph_add_users_buckets.py
@@ -0,0 +1,571 @@
+#!/usr/bin/python
+
+# Copyright 2018 Daniel Pivonka
+# Copyright 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+
+
+from ansible.module_utils.basic import AnsibleModule
+from socket import error as socket_error
+import boto
+import radosgw
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_add_users_buckets
+short_description: bulk create users and buckets
+description:
+    - Bulk create Ceph Object Storage users and buckets
+
+options:
+    rgw_host:
+        description:
+            - a radosgw host in the ceph cluster
+        required: true
+    port:
+        description:
+            - tcp port of the radosgw host
+        required: true
+    is_secure:
+        description:
+            - boolean indicating whether the instance is running over https
+        required: false
+        default: false
+    admin_access_key:
+        description:
+            - radosgw admin user's access key
+        required: true
+    admin_secret_key:
+        description:
+            - radosgw admin user's secret key
+        required: true
+    users:
+        description:
+            - list of users to be created containing sub options
+        required: false
+        sub_options:
+            username:
+                description:
+                    - username for new user
+                required: true
+            fullname:
+                description:
+                    - full name for new user
+                required: true
+            email:
+                description:
+                    - email for new user
+                required: false
+            maxbucket:
+                description:
+                    - maximum number of buckets for new user
+                required: false
+                default: 1000
+            suspend:
+                description:
+                    - suspend a new user upon creation
+                required: false
+                default: false
+            autogenkey:
+                description:
+                    - auto generate keys for new user
+                required: false
+                default: true
+            accesskey:
+                description:
+                    - access key for new user
+                required: false
+            secretkey:
+                description:
+                    - secret key for new user
+                required: false
+            userquota:
+                description:
+                    - enable/disable user quota for new user
+                required: false
+                default: false
+            usermaxsize:
+                description:
+                    - with user quota enabled specify quota size in kb
+                required: false
+                default: unlimited
+            usermaxobjects:
+                description:
+                    - with user quota enabled specify maximum number of objects
+                required: false
+                default: unlimited
+            bucketquota:
+                description:
+                    - enable/disable bucket quota for new user
+                required: false
+                default: false
+            bucketmaxsize:
+                description:
+                    - with bucket quota enabled specify bucket size in kb
+                required: false
+                default: unlimited
+            bucketmaxobjects:
+                description:
+                    - with bucket quota enabled specify maximum number of objects  # noqa: E501
+                required: false
+                default: unlimited
+    buckets:
+        description:
+            - list of buckets to be created containing sub options
+        required: false
+        sub_options:
+            bucket:
+                description:
+                    - name for new bucket
+                required: true
+            user:
+                description:
+                    - user the new bucket will be linked to
+                required: true
+
+
+requirements: ['radosgw', 'boto']
+
+author:
+    - 'Daniel Pivonka'
+
+'''
+
+EXAMPLES = '''
+# single basic user
+- name: single basic user
+  ceph_add_users_buckets:
+    rgw_host: '172.16.0.12'
+    port: 8080
+    admin_access_key: 'N61I8625V4XTWGDTLBLL'
+    admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+    users:
+      - username: 'test1'
+        fullname: 'tester'
+
+
+# single complex user
+- name: single complex user
+  ceph_add_users_buckets:
+    rgw_host: '172.16.0.12'
+    port: 8080
+    admin_access_key: 'N61I8625V4XTWGDTLBLL'
+    admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+    users:
+      - username: 'test1'
+        fullname: 'tester'
+        email: 'dan@email.com'
+        maxbucket: 666
+        suspend: true
+        autogenkey: true
+        accesskey: 'B3AR4Q33L59YV56A9A2F'
+        secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+        userquota: true
+        usermaxsize: '1000'
+        usermaxobjects: 3
+        bucketquota: true
+        bucketmaxsize: '1000'
+        bucketmaxobjects: 3
+
+# multi user
+- name: multi user
+  ceph_add_users_buckets:
+    rgw_host: '172.16.0.12'
+    port: 8080
+    admin_access_key: 'N61I8625V4XTWGDTLBLL'
+    admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+    users:
+      - username: 'test1'
+        fullname: 'tester'
+        email: 'dan@email.com'
+        maxbucket: 666
+        suspend: true
+        autogenkey: true
+        accesskey: 'B3AR4Q33L59YV56A9A2F'
+        secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+        userquota: true
+        usermaxsize: '1000K'
+        usermaxobjects: 3
+        bucketquota: true
+        bucketmaxsize: '1000K'
+        bucketmaxobjects: 3
+      - username: 'test2'
+        fullname: 'tester'
+
+# single bucket
+- name: single bucket
+  ceph_add_users_buckets:
+    rgw_host: '172.16.0.12'
+    port: 8080
+    admin_access_key: 'N61I8625V4XTWGDTLBLL'
+    admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+    buckets:
+      - bucket: 'heyimabucket1'
+        user: 'test1'
+
+# multi bucket
+- name: multi bucket
+  ceph_add_users_buckets:
+    rgw_host: '172.16.0.12'
+    port: 8080
+    admin_access_key: 'N61I8625V4XTWGDTLBLL'
+    admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+    buckets:
+      - bucket: 'heyimabucket1'
+        user: 'test1'
+      - bucket: 'heyimabucket2'
+        user: 'test2'
+      - bucket: 'heyimabucket3'
+        user: 'test2'
+
+# buckets and users
+- name: buckets and users
+  ceph_add_users_buckets:
+    rgw_host: '172.16.0.12'
+    port: 8080
+    admin_access_key: 'N61I8625V4XTWGDTLBLL'
+    admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+    users:
+      - username: 'test1'
+        fullname: 'tester'
+        email: 'dan@email.com'
+        maxbucket: 666
+      - username: 'test2'
+        fullname: 'tester'
+        email: 'dan1@email.com'
+        accesskey: 'B3AR4Q33L59YV56A9A2F'
+        secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+        userquota: true
+        usermaxsize: '1000'
+        usermaxobjects: 3
+        bucketquota: true
+        bucketmaxsize: '1000'
+        bucketmaxobjects: 3
+    buckets:
+      - bucket: 'heyimabucket1'
+        user: 'test1'
+      - bucket: 'heyimabucket2'
+        user: 'test2'
+      - bucket: 'heyimabucket3'
+        user: 'test2'
+
+'''
+
+RETURN = '''
+error_messages:
+    description: error for failed user or bucket.
+    returned: always
+    type: list
+    sample: [
+        "test2: could not modify user: unable to modify user, cannot add duplicate email\n"  # noqa: E501
+        ]
+
+failed_users:
+    description: users that were not created.
+    returned: always
+    type: str
+    sample: "test2"
+
+added_users:
+    description: users that were created.
+    returned: always
+    type: str
+    sample: "test1"
+
+failed_buckets:
+    description: buckets that were not created.
+    returned: always
+    type: str
+    sample: "heyimabucket3"
+
+added_buckets:
+    description: buckets that were created.
+ returned: always + type: str + sample: "heyimabucket1, heyimabucket2" + +''' + + +def create_users(rgw, users, result): + + added_users = [] + failed_users = [] + + for user in users: + + # get info + username = user['username'] + fullname = user['fullname'] + email = user['email'] + maxbucket = user['maxbucket'] + suspend = user['suspend'] + autogenkey = user['autogenkey'] + accesskey = user['accesskey'] + secretkey = user['secretkey'] + userquota = user['userquota'] + usermaxsize = user['usermaxsize'] + usermaxobjects = user['usermaxobjects'] + bucketquota = user['bucketquota'] + bucketmaxsize = user['bucketmaxsize'] + bucketmaxobjects = user['bucketmaxobjects'] + + fail_flag = False + + # check if user exists + try: + user_info = rgw.get_user(uid=username) + except radosgw.exception.RadosGWAdminError: + # it doesnt exist + user_info = None + + # user exists can not create + if user_info: + result['error_messages'].append(username + ' UserExists') + failed_users.append(username) + else: + # user doesnt exist create it + if email: + if autogenkey: + try: + rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501 + generate_key=autogenkey, + max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + else: + try: + rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501 + access_key=accesskey, secret_key=secretkey, # noqa: E501 + max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + else: + if autogenkey: + try: + rgw.create_user(username, fullname, key_type='s3', + generate_key=autogenkey, + max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + else: + try: + rgw.create_user(username, fullname, key_type='s3', + access_key=accesskey, secret_key=secretkey, # noqa: E501 + max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + + if not fail_flag and userquota: + try: + rgw.set_quota(username, 'user', max_objects=usermaxobjects, + max_size_kb=usermaxsize, enabled=True) + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + + if not fail_flag and bucketquota: + try: + rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects, # noqa: E501 + max_size_kb=bucketmaxsize, enabled=True) + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + + if fail_flag: + try: + rgw.delete_user(username) + except radosgw.exception.RadosGWAdminError: + pass + failed_users.append(username) + else: + added_users.append(username) + + result['added_users'] = ", ".join(added_users) + result['failed_users'] = ", ".join(failed_users) + + +def create_buckets(rgw, buckets, result): + + added_buckets = [] + failed_buckets = [] + + for bucket_info in buckets: + bucket = bucket_info['bucket'] + user = bucket_info['user'] + + # check if bucket exists + try: + bucket_info = rgw.get_bucket(bucket_name=bucket) + 
except TypeError:
+            # it doesn't exist
+            bucket_info = None
+
+        # if it exists add to failed list
+        if bucket_info:
+            failed_buckets.append(bucket)
+            result['error_messages'].append(bucket + ' BucketExists')
+        else:
+            # bucket doesn't exist, so we need to create it
+            bucket_info = create_bucket(rgw, bucket)
+            if bucket_info:
+                # bucket created ok, link to user
+
+                # check if user exists
+                try:
+                    user_info = rgw.get_user(uid=user)
+                except radosgw.exception.RadosGWAdminError:
+                    # it doesn't exist
+                    user_info = None
+
+                # user exists, link
+                if user_info:
+                    try:
+                        rgw.link_bucket(bucket_name=bucket,
+                                        bucket_id=bucket_info.id,
+                                        uid=user)
+                        added_buckets.append(bucket)
+                    except radosgw.exception.RadosGWAdminError as e:
+                        result['error_messages'].append(bucket + ' ' + e.get_code())  # noqa: E501
+                        try:
+                            rgw.delete_bucket(bucket, purge_objects=True)
+                        except radosgw.exception.RadosGWAdminError:
+                            pass
+                        failed_buckets.append(bucket)
+
+                else:
+                    # user doesn't exist, can't link; delete the bucket
+                    try:
+                        rgw.delete_bucket(bucket, purge_objects=True)
+                    except radosgw.exception.RadosGWAdminError:
+                        pass
+                    failed_buckets.append(bucket)
+                    result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user)  # noqa: E501
+
+            else:
+                # something went wrong
+                failed_buckets.append(bucket)
+                result['error_messages'].append(bucket + ' could not be created')  # noqa: E501
+
+    result['added_buckets'] = ", ".join(added_buckets)
+    result['failed_buckets'] = ", ".join(failed_buckets)
+
+
+def create_bucket(rgw, bucket):
+    conn = boto.connect_s3(aws_access_key_id=rgw.provider._access_key,
+                           aws_secret_access_key=rgw.provider._secret_key,
+                           host=rgw._connection[0],
+                           port=rgw.port,
+                           is_secure=rgw.is_secure,
+                           calling_format=boto.s3.connection.OrdinaryCallingFormat(),  # noqa: E501
+                           )
+
+    try:
+        conn.create_bucket(bucket_name=bucket)
+        bucket_info = rgw.get_bucket(bucket_name=bucket)
+    except boto.exception.S3ResponseError:
+        return None
+    else:
+        return bucket_info
+
+
+def main():
+    # arguments/parameters that a user can pass to the module
+    fields = dict(rgw_host=dict(type='str', required=True),
+                  port=dict(type='int', required=True),
+                  is_secure=dict(type='bool',
+                                 required=False,
+                                 default=False),
+                  admin_access_key=dict(type='str', required=True),
+                  admin_secret_key=dict(type='str', required=True),
+                  buckets=dict(type='list', required=False, elements='dict',
+                               options=dict(bucket=dict(type='str', required=True),  # noqa: E501
+                                            user=dict(type='str', required=True))),  # noqa: E501
+                  users=dict(type='list', required=False, elements='dict',
+                             options=dict(username=dict(type='str', required=True),  # noqa: E501
+                                          fullname=dict(type='str', required=True),  # noqa: E501
+                                          email=dict(type='str', required=False),  # noqa: E501
+                                          maxbucket=dict(type='int', required=False, default=1000),  # noqa: E501
+                                          suspend=dict(type='bool', required=False, default=False),  # noqa: E501
+                                          autogenkey=dict(type='bool', required=False, default=True),  # noqa: E501
+                                          accesskey=dict(type='str', required=False),  # noqa: E501
+                                          secretkey=dict(type='str', required=False),  # noqa: E501
+                                          userquota=dict(type='bool', required=False, default=False),  # noqa: E501
+                                          usermaxsize=dict(type='str', required=False, default='-1'),  # noqa: E501
+                                          usermaxobjects=dict(type='int', required=False, default=-1),  # noqa: E501
+                                          bucketquota=dict(type='bool', required=False, default=False),  # noqa: E501
+                                          bucketmaxsize=dict(type='str', required=False, default='-1'),  # noqa: E501
+                                          bucketmaxobjects=dict(type='int', required=False, default=-1))))  # noqa: E501
+
+    # the
AnsibleModule object
+    module = AnsibleModule(argument_spec=fields,
+                           supports_check_mode=False)
+
+    # get vars
+    rgw_host = module.params.get('rgw_host')
+    port = module.params.get('port')
+    is_secure = module.params.get('is_secure')
+    admin_access_key = module.params.get('admin_access_key')
+    admin_secret_key = module.params.get('admin_secret_key')
+    users = module.params['users']
+    buckets = module.params.get('buckets')
+
+    # seed the result dict in the object
+    result = dict(
+        changed=False,
+        error_messages=[],
+        added_users='',
+        failed_users='',
+        added_buckets='',
+        failed_buckets='',
+    )
+
+    # radosgw connection
+    rgw = radosgw.connection.RadosGWAdminConnection(host=rgw_host,
+                                                    port=port,
+                                                    access_key=admin_access_key,  # noqa: E501
+                                                    secret_key=admin_secret_key,  # noqa: E501
+                                                    aws_signature='AWS4',
+                                                    is_secure=is_secure)
+
+    # test connection
+    connected = True
+    try:
+        rgw.get_usage()
+    except radosgw.exception.RadosGWAdminError as e:
+        connected = False
+        result['error_messages'] = e.get_code()
+    except socket_error as e:
+        connected = False
+        result['error_messages'] = str(e)
+
+    if connected and users:
+        create_users(rgw, users, result)
+
+    if connected and buckets:
+        create_buckets(rgw, buckets, result)
+
+    if result['added_users'] != '' or result['added_buckets'] != '':
+        result['changed'] = True
+
+    # conditional state caused a failure
+    if result['added_users'] == '' and result['added_buckets'] == '':
+        module.fail_json(msg='No users or buckets were added successfully',
+                         **result)
+
+    # EXIT
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
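Before the next module, a quick sketch of the command line that ceph_authtool's build_cmd() (below) assembles; the parameter values are assumptions for illustration, and the non-containerized path is shown:

    # Hypothetical module params:
    #   name='client.admin', create_keyring=True, gen_key=True,
    #   caps={'mon': 'allow *', 'osd': 'allow *'},
    #   path='/etc/ceph/ceph.client.admin.keyring'
    # build_cmd(**params, container_image=None) then returns roughly:
    ['ceph-authtool', '-n', 'client.admin', '-C', '-g',
     '--cap', 'mon', 'allow *', '--cap', 'osd', 'allow *',
     '/etc/ceph/ceph.client.admin.keyring']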
diff --git a/plugins/modules/ceph_authtool.py b/plugins/modules/ceph_authtool.py
new file mode 100644
index 0000000..e469bcb
--- /dev/null
+++ b/plugins/modules/ceph_authtool.py
@@ -0,0 +1,131 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible.module_utils.ca_common import container_exec, \
+        is_containerized
+except ImportError:
+    from module_utils.ca_common import container_exec, \
+        is_containerized
+import datetime
+import os
+
+
+class KeyringExists(Exception):
+    pass
+
+
+def build_cmd(create_keyring=False,
+              gen_key=False,
+              add_key=False,
+              import_keyring=None,
+              caps={},
+              name=None,
+              path=None,
+              container_image=None,
+              **kwargs):
+
+    auth_tool_binary: str = 'ceph-authtool'
+
+    if container_image:
+        cmd = container_exec(auth_tool_binary,
+                             container_image)
+    else:
+        cmd = [auth_tool_binary]
+
+    if name:
+        cmd.extend(['-n', name])
+    if create_keyring:
+        if os.path.exists(path):
+            raise KeyringExists
+        cmd.append('-C')
+    if gen_key:
+        cmd.append('-g')
+    if caps:
+        for k, v in caps.items():
+            cmd.extend(['--cap', k, v])
+
+    cmd.append(path)
+
+    if import_keyring:
+        cmd.extend(['--import-keyring', import_keyring])
+
+    return cmd
+
+
+def run_module():
+    module_args = dict(
+        name=dict(type='str', required=False),
+        create_keyring=dict(type='bool', required=False, default=False),
+        gen_key=dict(type='bool', required=False, default=False),
+        add_key=dict(type='str', required=False, default=None),
+        import_keyring=dict(type='str', required=False, default=None),
+        caps=dict(type='dict', required=False, default=None),
+        path=dict(type='str', required=True)
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+        add_file_common_args=True,
+    )
+
+    cmd = []
+    changed = False
+
+    result = dict(
+        changed=changed,
+        stdout='',
+        stderr='',
+        rc=0,
+        start='',
+        end='',
+        delta='',
+    )
+
+    if module.check_mode:
+        module.exit_json(**result)
+
+    startd = datetime.datetime.now()
+
+    # will return either the image name or None
+    container_image = is_containerized()
+    try:
+        cmd = build_cmd(**module.params, container_image=container_image)
+    except KeyringExists:
+        rc = 0
+        out = f"{module.params['path']} already exists. Skipping"
+        err = ""
+    else:
+        rc, out, err = module.run_command(cmd)
+        if rc == 0:
+            changed = True
+
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        rc=rc,
+        stdout=out.rstrip("\r\n"),
+        stderr=err.rstrip("\r\n"),
+        changed=changed,
+    )
+    if rc != 0:
+        module.fail_json(msg='non-zero return code', **result)
+
+    # file_args = module.load_file_common_arguments(module.params)
+    # module.set_fs_attributes_if_different(file_args, False)
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/ceph_crush.py b/plugins/modules/ceph_crush.py
new file mode 100644
index 0000000..8b2e2a9
--- /dev/null
+++ b/plugins/modules/ceph_crush.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible.module_utils.ca_common import fatal
+except ImportError:
+    from module_utils.ca_common import fatal
+import datetime
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_crush
+
+author: Sebastien Han
+
+short_description: Create Ceph CRUSH hierarchy
+
+version_added: "2.6"
+
+description:
+    - By using the hostvar variable 'osd_crush_location',
+      ceph_crush creates buckets and places them in the right CRUSH hierarchy
+
+options:
+    cluster:
+        description:
+            - The ceph cluster name.
+        required: false
+        default: ceph
+    location:
+        description:
+            - osd_crush_location dict from the inventory file. It contains
+              the placement of each host in the CRUSH map.
+        required: true
+    containerized:
+        description:
+            - Whether or not this is a containerized cluster. The value is
+              assigned or not depending on how the playbook runs.
+ required: false + default: None +''' + +EXAMPLES = ''' +- name: configure crush hierarchy + ceph_crush: + cluster: "{{ cluster }}" + location: "{{ hostvars[item]['osd_crush_location'] }}" + containerized: "{{ container_exec_cmd }}" + with_items: "{{ groups[osd_group_name] }}" + when: crush_rule_config | bool +''' + +RETURN = '''# ''' + + +def generate_cmd(cluster, subcommand, bucket, bucket_type, containerized=None): + ''' + Generate command line to execute + ''' + cmd = [ + 'ceph', + '--cluster', + cluster, + 'osd', + 'crush', + subcommand, + bucket, + bucket_type, + ] + if containerized: + cmd = containerized.split() + cmd + return cmd + + +def sort_osd_crush_location(location, module): + ''' + Sort location tuple + ''' + if len(location) < 2: + fatal("You must specify at least 2 buckets.", module) + + if not any(item for item in location if item[0] == "host"): + fatal("You must specify a 'host' bucket.", module) + + try: + crush_bucket_types = [ + "host", + "chassis", + "rack", + "row", + "pdu", + "pod", + "room", + "datacenter", + "region", + "root", + ] + return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0])) # noqa: E501 + except ValueError as error: + fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module) # noqa: E501 + + +def get_crush_tree(module, cluster, containerized=None): + ''' + Get the CRUSH map + ''' + cmd = [ + 'ceph', + '--cluster', + cluster, + 'osd', + 'crush', + 'tree', + '--format', + 'json', + ] + if containerized: + cmd = containerized.split() + cmd + + rc, out, err = module.run_command(cmd) + return rc, cmd, out, err + + +def create_and_move_buckets_list(cluster, location, crush_map, containerized=None): # noqa: E501 + ''' + Creates Ceph CRUSH buckets and arrange the hierarchy + ''' + def bucket_exists(bucket_name, bucket_type): + for item in crush_map['nodes']: + if item['name'] == bucket_name and item['type'] == bucket_type: + return True + return False + + def bucket_in_place(bucket_name, target_bucket_name, target_bucket_type): # noqa: E501 + bucket_id = None + target_bucket = None + for item in crush_map['nodes']: + if item['name'] == bucket_name: + bucket_id = item['id'] + if item['name'] == target_bucket_name and item['type'] == target_bucket_type: # noqa: E501 + target_bucket = item + + if not bucket_id or not target_bucket: + return False + + return bucket_id in target_bucket['children'] + + previous_bucket = None + cmd_list = [] + for item in location: + bucket_type, bucket_name = item + # ceph osd crush add-bucket maroot root + if not bucket_exists(bucket_name, bucket_type): + cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized)) # noqa: E501 + if previous_bucket: + # ceph osd crush move monrack root=maroot + if not bucket_in_place(previous_bucket, bucket_name, bucket_type): # noqa: E501 + cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized)) # noqa: E501 + previous_bucket = item[1] + return cmd_list + + +def exec_commands(module, cmd_list): + ''' + Creates Ceph commands + ''' + for cmd in cmd_list: + rc, out, err = module.run_command(cmd) + return rc, cmd, out, err + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cluster=dict(type='str', required=False, default='ceph'), + location=dict(type='dict', required=True), + containerized=dict(type='str', required=False, default=None), + ), + supports_check_mode=True, + ) + + cluster = 
module.params['cluster']
+    location_dict = module.params['location']
+    location = sort_osd_crush_location(tuple(location_dict.items()), module)
+    containerized = module.params['containerized']
+
+    diff = dict(before="", after="")
+    startd = datetime.datetime.now()
+
+    # get the CRUSH map
+    rc, cmd, out, err = get_crush_tree(module, cluster, containerized)
+    if rc != 0 and not module.check_mode:
+        module.fail_json(msg='non-zero return code', rc=rc, stdout=out, stderr=err)  # noqa: E501
+
+    # parse the JSON output
+    if rc == 0:
+        crush_map = module.from_json(out)
+    else:
+        crush_map = {"nodes": []}
+
+    # run the Ceph command to add buckets
+    cmd_list = create_and_move_buckets_list(cluster, location, crush_map, containerized)  # noqa: E501
+
+    changed = len(cmd_list) > 0
+    if changed:
+        diff['after'] = module.jsonify(cmd_list)
+        if not module.check_mode:
+            rc, cmd, out, err = exec_commands(module, cmd_list)  # noqa: E501
+
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        rc=rc,
+        stdout=out.rstrip("\r\n"),
+        stderr=err.rstrip("\r\n"),
+        changed=changed,
+        diff=diff
+    )
+
+    if rc != 0:
+        module.fail_json(msg='non-zero return code', **result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
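A small illustration of how sort_osd_crush_location() above orders a location tuple (the bucket names are made up; the function sorts by position in crush_bucket_types, host first, root last):

    location = (('rack', 'rack1'), ('root', 'default'), ('host', 'ceph-osd0'))
    sort_osd_crush_location(location, None)
    # -> [('host', 'ceph-osd0'), ('rack', 'rack1'), ('root', 'default')]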
diff --git a/plugins/modules/ceph_crush_rule.py b/plugins/modules/ceph_crush_rule.py
new file mode 100644
index 0000000..a84d7ca
--- /dev/null
+++ b/plugins/modules/ceph_crush_rule.py
@@ -0,0 +1,258 @@
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible.module_utils.ca_common import exit_module, \
+        generate_cmd, \
+        is_containerized, \
+        exec_command
+except ImportError:
+    from module_utils.ca_common import exit_module, \
+        generate_cmd, \
+        is_containerized, \
+        exec_command
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_crush_rule
+short_description: Manage Ceph Crush Replicated/Erasure Rule
+version_added: "2.8"
+description:
+    - Manage Ceph Crush rule(s) creation, deletion and updates.
+options:
+    name:
+        description:
+            - name of the Ceph Crush rule. If state is 'info' - empty string
+              can be provided as a value to get all crush rules
+        required: true
+    cluster:
+        description:
+            - The ceph cluster name.
+        required: false
+        default: ceph
+    state:
+        description:
+            If 'present' is used, the module creates a rule if it doesn't
+            exist or updates it if it already exists.
+            If 'absent' is used, the module will simply delete the rule.
+            If 'info' is used, the module will return all details about the
+            existing rule (json formatted).
+        required: false
+        choices: ['present', 'absent', 'info']
+        default: present
+    rule_type:
+        description:
+            - The ceph CRUSH rule type.
+        required: false
+        choices: ['replicated', 'erasure']
+    bucket_root:
+        description:
+            - The ceph bucket root for replicated rule.
+        required: false
+    bucket_type:
+        description:
+            - The ceph bucket type for replicated rule.
+        required: false
+        choices: ['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod',
+                  'room', 'datacenter', 'zone', 'region', 'root']
+    device_class:
+        description:
+            - The ceph device class for replicated rule.
+        required: false
+    profile:
+        description:
+            - The ceph erasure profile for erasure rule.
+        required: false
+author:
+    - Dimitri Savineau
+'''
+
+EXAMPLES = '''
+- name: create a Ceph Crush replicated rule
+  ceph_crush_rule:
+    name: foo
+    bucket_root: default
+    bucket_type: host
+    device_class: ssd
+    rule_type: replicated
+
+- name: create a Ceph Crush erasure rule
+  ceph_crush_rule:
+    name: foo
+    profile: bar
+    rule_type: erasure
+
+- name: get a Ceph Crush rule information
+  ceph_crush_rule:
+    name: foo
+    state: info
+
+- name: delete a Ceph Crush rule
+  ceph_crush_rule:
+    name: foo
+    state: absent
+'''
+
+RETURN = '''# '''
+
+
+def create_rule(module, container_image=None):
+    '''
+    Create a new crush replicated/erasure rule
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    rule_type = module.params.get('rule_type')
+    bucket_root = module.params.get('bucket_root')
+    bucket_type = module.params.get('bucket_type')
+    device_class = module.params.get('device_class')
+    profile = module.params.get('profile')
+
+    if rule_type == 'replicated':
+        args = ['create-replicated', name, bucket_root, bucket_type]
+        if device_class:
+            args.append(device_class)
+    else:
+        args = ['create-erasure', name]
+        if profile:
+            args.append(profile)
+
+    cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'],
+                       args=args,
+                       cluster=cluster,
+                       container_image=container_image)
+
+    return cmd
+
+
+def get_rule(module, container_image=None):
+    '''
+    Get existing crush rule
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+
+    args = ['dump', name, '--format=json']
+
+    cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'],
+                       args=args,
+                       cluster=cluster,
+                       container_image=container_image)
+
+    return cmd
+
+
+def remove_rule(module, container_image=None):
+    '''
+    Remove a crush rule
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+
+    args = ['rm', name]
+
+    cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'],
+                       args=args,
+                       cluster=cluster,
+                       container_image=container_image)
+
+    return cmd
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=False),
+            cluster=dict(type='str', required=False, default='ceph'),
+            state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'),  # noqa: E501
+            rule_type=dict(type='str', required=False, choices=['replicated', 'erasure']),  # noqa: E501
+            bucket_root=dict(type='str', required=False),
+            bucket_type=dict(type='str', required=False, choices=['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod',  # noqa: E501
+                                                                  'room', 'datacenter', 'zone', 'region', 'root']),  # noqa: E501
+            device_class=dict(type='str', required=False),
+            profile=dict(type='str', required=False)
+        ),
+        supports_check_mode=True,
+        required_if=[
+            ('state', 'present', ['rule_type']),
+            ('state', 'present', ['name']),
+            ('state', 'absent', ['name']),
+            ('rule_type', 'replicated', ['bucket_root', 'bucket_type']),
+            ('rule_type', 'erasure', ['profile'])
+        ]
+    )
+
+    # Gather module parameters in
variables + name = module.params.get('name') + state = module.params.get('state') + rule_type = module.params.get('rule_type') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + rc, cmd, out, err = exec_command(module, get_rule(module, container_image=container_image)) # noqa: E501 + if state == "present": + if rc != 0: + rc, cmd, out, err = exec_command(module, create_rule(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rule = json.loads(out) + if (rule['type'] == 1 and rule_type == 'erasure') or (rule['type'] == 3 and rule_type == 'replicated'): # noqa: E501 + module.fail_json(msg="Can not convert crush rule {} to {}".format(name, rule_type), changed=False, rc=1) # noqa: E501 + elif state == "absent": + if rc == 0: + rc, cmd, out, err = exec_command(module, remove_rule(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Crush Rule {} doesn't exist".format(name) + else: + pass + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_dashboard_user.py b/plugins/modules/ceph_dashboard_user.py new file mode 100644 index 0000000..89fdaff --- /dev/null +++ b/plugins/modules/ceph_dashboard_user.py @@ -0,0 +1,289 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_cmd, \ + is_containerized, \ + exec_command, \ + exit_module, \ + fatal +except ImportError: + from module_utils.ca_common import generate_cmd, is_containerized, exec_command, exit_module, fatal # noqa: E501 + +import datetime +import json + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_dashboard_user + +short_description: Manage Ceph Dashboard User + +version_added: "2.8" + +description: + - Manage Ceph Dashboard user(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the Ceph Dashboard user. + required: true + state: + description: + If 'present' is used, the module creates a user if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the user. + If 'info' is used, the module will return all details about the + existing user (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + password: + description: + - password of the Ceph Dashboard user. 
+ required: false + roles: + description: + - roles of the Ceph Dashboard user. + required: false + default: [] + +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a Ceph Dashboard user + ceph_dashboard_user: + name: foo + password: bar + +- name: create a read-only/block-manager Ceph Dashboard user + ceph_dashboard_user: + name: foo + password: bar + roles: + - 'read-only' + - 'block-manager' + +- name: create a Ceph Dashboard admin user + ceph_dashboard_user: + name: foo + password: bar + roles: ['administrator'] + +- name: get a Ceph Dashboard user information + ceph_dashboard_user: + name: foo + state: info + +- name: delete a Ceph Dashboard user + ceph_dashboard_user: + name: foo + state: absent +''' + +RETURN = '''# ''' + + +def create_user(module, container_image=None): + ''' + Create a new user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-create', '-i', '-', name] + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image, + interactive=True) + + return cmd + + +def set_roles(module, container_image=None): + ''' + Set user roles + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + roles = module.params.get('roles') + + args = ['ac-user-set-roles', name] + + args.extend(roles) + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def set_password(module, container_image=None): + ''' + Set user password + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-set-password', '-i', '-', name] + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image, + interactive=True) + + return cmd + + +def get_user(module, container_image=None): + ''' + Get existing user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-show', name, '--format=json'] + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def remove_user(module, container_image=None): + ''' + Remove a user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-delete', name] + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501 + password=dict(type='str', required=False, no_log=True), + roles=dict(type='list', + required=False, + choices=['administrator', 'read-only', 'block-manager', 'rgw-manager', 'cluster-manager', 'pool-manager', 'cephfs-manager'], # noqa: E501 + default=[]), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[['state', 'present', ['password']]] + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + roles = module.params.get('roles') + password = module.params.get('password') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + 
changed = False
+
+    # will return either the image name or None
+    container_image = is_containerized()
+
+    if state == "present":
+        rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image))  # noqa: E501
+        if rc == 0:
+            user = json.loads(out)
+            user['roles'].sort()
+            roles.sort()
+            if user['roles'] != roles:
+                rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image))  # noqa: E501
+                changed = True
+            rc, cmd, out, err = exec_command(module, set_password(module, container_image=container_image), stdin=password)  # noqa: E501
+        else:
+            rc, cmd, out, err = exec_command(module, create_user(module, container_image=container_image), stdin=password)  # noqa: E501
+            if rc != 0:
+                fatal(err, module)
+            rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image))  # noqa: E501
+            changed = True
+
+    elif state == "absent":
+        rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image))  # noqa: E501
+        if rc == 0:
+            rc, cmd, out, err = exec_command(module, remove_user(module, container_image=container_image))  # noqa: E501
+            changed = True
+        else:
+            rc = 0
+            out = "Dashboard User {} doesn't exist".format(name)
+
+    elif state == "info":
+        rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image))  # noqa: E501
+
+    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)  # noqa: E501
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
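One more illustration before the next module: ceph_ec_profile's parse_user_profile() (below) maps underscore-style Ansible parameter names onto the dashed keys Ceph expects; the parameter values here are assumptions for the example:

    # With module params like:
    #   {'plugin': 'jerasure', 'k': '4', 'm': '2',
    #    'crush_device_class': 'ssd', 'crush_root': None, ...}
    # parse_user_profile(module) returns:
    {'plugin': 'jerasure', 'k': '4', 'm': '2', 'crush-device-class': 'ssd'}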
diff --git a/plugins/modules/ceph_ec_profile.py b/plugins/modules/ceph_ec_profile.py
new file mode 100644
index 0000000..778a7a7
--- /dev/null
+++ b/plugins/modules/ceph_ec_profile.py
@@ -0,0 +1,256 @@
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible.module_utils.ca_common import is_containerized, \
+        generate_cmd, \
+        exec_command, \
+        exit_module
+except ImportError:
+    from module_utils.ca_common import is_containerized, \
+        generate_cmd, \
+        exec_command, \
+        exit_module
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_ec_profile
+
+short_description: Manage Ceph Erasure Code profile
+
+version_added: "2.8"
+
+description:
+    - Manage Ceph Erasure Code profile
+options:
+    cluster:
+        description:
+            - The ceph cluster name.
+        required: false
+        default: ceph
+    name:
+        description:
+            - name of the profile.
+        required: true
+    state:
+        description:
+            If 'present' is used, the module creates a profile.
+            If 'absent' is used, the module will delete the profile.
+        required: false
+        choices: ['present', 'absent']
+        default: present
+    stripe_unit:
+        description:
+            - The amount of data in a data chunk, per stripe.
+        required: false
+    k:
+        description:
+            - Number of data chunks the object will be split into.
+        required: true
+    m:
+        description:
+            - Compute coding chunks for each object and store them on
+              different OSDs.
+        required: true
+    crush_device_class:
+        description:
+            - Restrict placement to devices of a specific class (hdd/ssd)
+        required: false
+
+author:
+    - Guillaume Abrioux
+'''
+
+EXAMPLES = '''
+- name: create an erasure code profile
+  ceph_ec_profile:
+    name: foo
+    k: 4
+    m: 2
+
+- name: delete an erasure code profile
+  ceph_ec_profile:
+    name: foo
+    state: absent
+'''
+
+RETURN = '''# '''
+
+
+def get_profile(name, cluster='ceph', container_image=None):
+    '''
+    Get existing profile
+    '''
+
+    args = ['get', name, '--format=json']
+
+    cmd = generate_cmd(sub_cmd=['osd', 'erasure-code-profile'],
+                       args=args,
+                       cluster=cluster,
+                       container_image=container_image)
+
+    return cmd
+
+
+def create_profile(name, user_profile, force, cluster='ceph', container_image=None):  # noqa: E501
+    '''
+    Create a profile
+    '''
+
+    args = ['set', name]
+    for key, value in user_profile.items():
+        args.append('{}={}'.format(key, value))
+    if force:
+        args.append('--force')
+
+    cmd = generate_cmd(sub_cmd=['osd', 'erasure-code-profile'],
+                       args=args,
+                       cluster=cluster,
+                       container_image=container_image)
+
+    return cmd
+
+
+def delete_profile(name, cluster='ceph', container_image=None):
+    '''
+    Delete a profile
+    '''
+
+    args = ['rm', name]
+
+    cmd = generate_cmd(sub_cmd=['osd', 'erasure-code-profile'],
+                       args=args,
+                       cluster=cluster,
+                       container_image=container_image)
+
+    return cmd
+
+
+def parse_user_profile(module):
+    profile_keys = ['plugin',
+                    'k', 'm', 'd', 'l', 'c',
+                    'stripe_unit', 'scalar_mds', 'technique',
+                    'crush-root', 'crush-device-class', 'crush-failure-domain']
+
+    profile = {}
+    for key in profile_keys:
+        ansible_lookup_key = key.replace('-', '_')
+        value = module.params.get(ansible_lookup_key)
+        if value:
+            profile[key] = value
+
+    return profile
+
+
+def run_module():
+    module_args = dict(
+        cluster=dict(type='str', required=False, default='ceph'),
+        name=dict(type='str', required=True),
+        state=dict(type='str', required=False,
+                   choices=['present', 'absent'], default='present'),
+        stripe_unit=dict(type='str', required=False),
+        plugin=dict(type='str', required=False, default='jerasure'),
+        k=dict(type='str', required=False),
+        m=dict(type='str', required=False),
+        d=dict(type='str', required=False),
+        l=dict(type='str', required=False),
+        c=dict(type='str', required=False),
+        scalar_mds=dict(type='str', required=False),
+        technique=dict(type='str', required=False),
+        crush_root=dict(type='str', required=False),
+        crush_failure_domain=dict(type='str', required=False),
+        crush_device_class=dict(type='str', required=False),
+        force=dict(type='bool', required=False, default=False),
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+        required_if=[['state', 'present', ['k', 'm']]],
+    )
+
+    # Gather module parameters in variables
+    name = module.params.get('name')
+    cluster = module.params.get('cluster')
+    state = module.params.get('state')
+    force = module.params.get('force')
+    user_profile = parse_user_profile(module)
+
+    if module.check_mode:
+        module.exit_json(
+            changed=False,
+            stdout='',
+            stderr='',
+            rc=0,
+            start='',
+            end='',
+            delta='',
+        )
+
+    startd = datetime.datetime.now()
+    diff = dict(before="", after="")
+    changed = False
+
+    # will return either the image name or None
+    container_image = is_containerized()
+
+    if state == "present":
+        rc, cmd,
out, err = exec_command(module, get_profile(name, cluster, container_image=container_image)) # noqa: E501 + current_profile = {} + if rc == 0: + current_profile = json.loads(out) + + changed = current_profile != user_profile + if changed: + diff['before'] = json.dumps(current_profile) + diff['after'] = json.dumps(user_profile) + rc, cmd, out, err = exec_command(module, + create_profile(name, + user_profile, + force, + cluster, + container_image=container_image), # noqa: E501 + check_rc=True) + + elif state == "absent": + rc, cmd, out, err = exec_command(module, delete_profile(name, cluster, container_image=container_image)) # noqa: E501 + if not err: + out = 'Profile {} removed.'.format(name) + changed = True + else: + rc = 0 + out = "Skipping, the profile {} doesn't exist".format(name) + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed, diff=diff) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_fs.py b/plugins/modules/ceph_fs.py new file mode 100644 index 0000000..10db2b1 --- /dev/null +++ b/plugins/modules/ceph_fs.py @@ -0,0 +1,278 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import is_containerized, \ + exec_command, \ + generate_cmd, \ + exit_module +except ImportError: + from module_utils.ca_common import is_containerized, \ + exec_command, \ + generate_cmd, \ + exit_module + +import datetime +import json + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_fs + +short_description: Manage Ceph File System + +version_added: "2.8" + +description: + - Manage Ceph File System(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the Ceph File System. + required: true + state: + description: + If 'present' is used, the module creates a filesystem if it + doesn't exist or update it if it already exists. + If 'absent' is used, the module will simply delete the filesystem. + If 'info' is used, the module will return all details about the + existing filesystem (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + data: + description: + - name of the data pool. + required: false + metadata: + description: + - name of the metadata pool. + required: false + max_mds: + description: + - name of the max_mds attribute. 
+ required: false + + +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a Ceph File System + ceph_fs: + name: foo + data: bar_data + metadata: bar_metadata + max_mds: 2 + +- name: get a Ceph File System information + ceph_fs: + name: foo + state: info + +- name: delete a Ceph File System + ceph_fs: + name: foo + state: absent +''' + +RETURN = '''# ''' + + +def create_fs(module, container_image=None): + ''' + Create a new fs + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + data = module.params.get('data') + metadata = module.params.get('metadata') + + args = ['new', name, metadata, data] + + cmd = generate_cmd(sub_cmd=['fs'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def get_fs(module, container_image=None): + ''' + Get existing fs + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['get', name, '--format=json'] + + cmd = generate_cmd(sub_cmd=['fs'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def remove_fs(module, container_image=None): + ''' + Remove a fs + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['rm', name, '--yes-i-really-mean-it'] + + cmd = generate_cmd(sub_cmd=['fs'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def fail_fs(module, container_image=None): + ''' + Fail a fs + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['fail', name] + + cmd = generate_cmd(sub_cmd=['fs'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def set_fs(module, container_image=None): + ''' + Set parameter to a fs + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + max_mds = module.params.get('max_mds') + + args = ['set', name, 'max_mds', str(max_mds)] + + cmd = generate_cmd(sub_cmd=['fs'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501 + data=dict(type='str', required=False), + metadata=dict(type='str', required=False), + max_mds=dict(type='int', required=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[['state', 'present', ['data', 'metadata']]], + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + max_mds = module.params.get('max_mds') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + if state == "present": + rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) # noqa: E501 + if rc == 0: + fs = json.loads(out) + if max_mds and fs["mdsmap"]["max_mds"] != max_mds: + rc, cmd, out, err = exec_command(module, set_fs(module, container_image=container_image)) # noqa: E501 + if rc == 0: + changed = True + else: + rc, cmd, out, err = exec_command(module, create_fs(module, container_image=container_image)) # noqa: E501 + if max_mds and 
max_mds > 1: + exec_command(module, set_fs(module, container_image=container_image)) # noqa: E501 + if rc == 0: + changed = True + + elif state == "absent": + rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) # noqa: E501 + if rc == 0: + exec_command(module, fail_fs(module, container_image=container_image)) # noqa: E501 + rc, cmd, out, err = exec_command(module, remove_fs(module, container_image=container_image)) # noqa: E501 + if rc == 0: + changed = True + else: + rc = 0 + out = "Ceph File System {} doesn't exist".format(name) + + elif state == "info": + rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) # noqa: E501 + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_key.py b/plugins/modules/ceph_key.py new file mode 100644 index 0000000..02bed85 --- /dev/null +++ b/plugins/modules/ceph_key.py @@ -0,0 +1,709 @@ +# Copyright 2018, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_cmd, \ + is_containerized, \ + container_exec, \ + fatal +except ImportError: + from module_utils.ca_common import generate_cmd, \ + is_containerized, \ + container_exec, \ + fatal +import datetime +import json +import os +import struct +import time +import base64 +import socket + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_key + +author: Sebastien Han + +short_description: Manage Cephx key(s) + +version_added: "2.6" + +description: + - Manage CephX creation, deletion and updates. + It can also list and get information about keyring(s). +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the CephX key + required: true + user: + description: + - entity used to perform operation. + It corresponds to the -n option (--name) + required: false + user_key: + description: + - the path to the keyring corresponding to the + user being used. + It corresponds to the -k option (--keyring) + state: + description: + - If 'present' is used, the module creates a keyring + with the associated capabilities. + If 'present' is used and a secret is provided the module + will always add the key. Which means it will update + the keyring if the secret changes, the same goes for + the capabilities. + If 'absent' is used, the module will simply delete the keyring. + If 'list' is used, the module will list all the keys and will + return a json output. + If 'info' is used, the module will return in a json format the + description of a given keyring. 
+ If 'generate_secret' is used, the module will simply output a cephx keyring. + required: false + choices: ['present', 'update', 'absent', 'list', 'info', 'fetch_initial_keys', 'generate_secret'] + default: present + caps: + description: + - CephX key capabilities + default: None + required: false + secret: + description: + - keyring's secret value + required: false + default: None + import_key: + description: + - Whether or not to import the created keyring into Ceph. + This can be useful for someone who only wants to generate keyrings + but not add them into Ceph. + required: false + default: True + dest: + description: + - Destination to write the keyring, can be a file or a directory + required: false + default: /etc/ceph/ + fetch_initial_keys: + description: + - Fetch client.admin and bootstrap keys. + This is only needed for Nautilus and above. + Writes down to the filesystem the initial keys generated by the monitor. # noqa: E501 + This command can ONLY run from a monitor node. + required: false + default: false + output_format: + description: + - The key output format when retrieving the information of an + entity. + required: false + default: json +''' + +EXAMPLES = ''' + +keys_to_create: + - { name: client.key, key: "AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==", caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" } # noqa: E501 + - { name: client.cle, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" } # noqa: E501 + +caps: + mon: "allow rwx" + mds: "allow *" + +- name: create ceph admin key + ceph_key: + name: client.admin + state: present + secret: AQAin8tU2DsKFBAAFIAzVTzkL3+gtAjjpQiomw== + caps: + mon: allow * + osd: allow * + mgr: allow * + mds: allow + mode: 0400 + import_key: False + +- name: create monitor initial keyring + ceph_key: + name: mon.
+ state: present + secret: AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q== + caps: + mon: allow * + dest: "/var/lib/ceph/tmp/" + import_key: False + +- name: create cephx key + ceph_key: + name: "{{ keys_to_create }}" + user: client.bootstrap-rgw + user_key: /var/lib/ceph/bootstrap-rgw/ceph.keyring + state: present + caps: "{{ caps }}" + +- name: create cephx key but don't import it in Ceph + ceph_key: + name: "{{ keys_to_create }}" + state: present + caps: "{{ caps }}" + import_key: False + +- name: delete cephx key + ceph_key: + name: "my_key" + state: absent + +- name: info cephx key + ceph_key: + name: "my_key" + state: info + +- name: info cephx admin key (plain) + ceph_key: + name: client.admin + output_format: plain + state: info + register: client_admin_key + +- name: list cephx keys + ceph_key: + state: list + +- name: fetch cephx keys + ceph_key: + state: fetch_initial_keys +''' + +RETURN = '''# ''' + + +CEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa: E501 + 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa: E501 + + +def str_to_bool(val): + try: + val = val.lower() + except AttributeError: + val = str(val).lower() + if val == 'true': + return True + elif val == 'false': + return False + else: + raise ValueError("Invalid input value: %s" % val) + + +def generate_secret(): + ''' + Generate a CephX secret + ''' + + key = os.urandom(16) + header = struct.pack(' +''' + +EXAMPLES = ''' +- name: enable dashboard mgr module + ceph_mgr_module: + name: dashboard + state: enable + +- name: disable multiple mgr modules + ceph_mgr_module: + name: '{{ item }}' + state: disable + loop: + - 'dashboard' + - 'prometheus' +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + cluster=dict(type='str', required=False, default='ceph'), + state=dict(type='str', required=False, default='enable', choices=['enable', 'disable']), # noqa: E501 + ), + supports_check_mode=True, + ) + + name = module.params.get('name') + cluster = module.params.get('cluster') + state = module.params.get('state') + + startd = datetime.datetime.now() + + container_image = is_containerized() + + cmd = generate_cmd(sub_cmd=['mgr', 'module'], + args=[state, name], + cluster=cluster, + container_image=container_image) + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + if 'is already enabled' in err: + changed = False + else: + changed = True + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=changed + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_osd.py b/plugins/modules/ceph_osd.py new file mode 100644 index 0000000..0eadc93 --- /dev/null +++ b/plugins/modules/ceph_osd.py @@ -0,0 +1,146 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module, generate_cmd, is_containerized # noqa: E501 +except ImportError: + from module_utils.ca_common import exit_module, generate_cmd, is_containerized # noqa: E501 +import datetime + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_osd +short_description: Manage Ceph OSD state +version_added: "2.8" +description: + - Manage Ceph OSD state +options: + ids: + description: + - The ceph OSD id(s). + required: true + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + state: + description: + - The ceph OSD state. + required: true + choices: ['destroy', 'down', 'in', 'out', 'purge', 'rm'] +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: destroy OSD 42 + ceph_osd: + ids: 42 + state: destroy + +- name: set multiple OSDs down + ceph_osd: + ids: [0, 1, 3] + state: down + +- name: set OSD 42 in + ceph_osd: + ids: 42 + state: in + +- name: set OSD 42 out + ceph_osd: + ids: 42 + state: out + +- name: purge OSD 42 + ceph_osd: + ids: 42 + state: purge + +- name: rm OSD 42 + ceph_osd: + ids: 42 + state: rm +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ids=dict(type='list', required=True), + cluster=dict(type='str', required=False, default='ceph'), + state=dict(type='str', required=True, choices=['destroy', 'down', 'in', 'out', 'purge', 'rm']), # noqa: E501 + ), + supports_check_mode=True, + ) + + ids = module.params.get('ids') + cluster = module.params.get('cluster') + state = module.params.get('state') + + if state in ['destroy', 'purge'] and len(ids) > 1: + module.fail_json(msg='destroy and purge only support one OSD at a time', rc=1) # noqa: E501 + + startd = datetime.datetime.now() + + container_image = is_containerized() + + cmd = generate_cmd(sub_cmd=['osd', state], args=ids, cluster=cluster, container_image=container_image) # noqa: E501 + + if state in ['destroy', 'purge']: + cmd.append('--yes-i-really-mean-it') + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + changed = True + if state in ['down', 'in', 'out'] and 'marked' not in err: + changed = False + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=changed + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_osd_flag.py b/plugins/modules/ceph_osd_flag.py new file mode 100644 index 0000000..ef21eaf --- /dev/null +++ b/plugins/modules/ceph_osd_flag.py @@ -0,0 +1,130 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module, \ + generate_cmd, \ + is_containerized +except ImportError: + from module_utils.ca_common import exit_module, \ + generate_cmd, \ + is_containerized +import datetime + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_osd_flag +short_description: Manage Ceph OSD flag +version_added: "2.8" +description: + - Manage Ceph OSD flag +options: + name: + description: + - name of the ceph OSD flag. + required: true + choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', + 'norecover', 'noscrub', 'nodeep-scrub'] + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + state: + description: + - If 'present' is used, the module sets the OSD flag. + If 'absent' is used, the module will unset the OSD flag. + required: false + choices: ['present', 'absent'] + default: present +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: set noup OSD flag + ceph_osd_flag: + name: noup + +- name: unset multiple OSD flags + ceph_osd_flag: + name: '{{ item }}' + state: absent + loop: + - 'noup' + - 'norebalance' +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']), # noqa: E501 + cluster=dict(type='str', required=False, default='ceph'), + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), # noqa: E501 + ), + supports_check_mode=True, + ) + + name = module.params.get('name') + cluster = module.params.get('cluster') + state = module.params.get('state') + + startd = datetime.datetime.now() + + container_image = is_containerized() + + if state == 'present': + cmd = generate_cmd(sub_cmd=['osd', 'set'], args=[name], cluster=cluster, container_image=container_image) # noqa: E501 + else: + cmd = generate_cmd(sub_cmd=['osd', 'unset'], args=[name], cluster=cluster, container_image=container_image) # noqa: E501 + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_pool.py b/plugins/modules/ceph_pool.py new file mode 100644 index 0000000..96e66dd --- /dev/null +++ b/plugins/modules/ceph_pool.py @@ -0,0 +1,698 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
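The modules above (and ceph_pool below) all assemble their command line through generate_cmd() from ca_common. As a quick sanity check for reviewers, a minimal sketch of what that call should produce with the collection defaults, assuming a bare-metal cluster (no CEPH_CONTAINER_IMAGE exported); the assert reflects my reading of the patch, not a test that ships with it:

    from plugins.module_utils.ca_common import generate_cmd

    # What ceph_osd_flag builds for 'state: present' with name 'noout',
    # using the default cluster, user and keyring path.
    cmd = generate_cmd(sub_cmd=['osd', 'set'], args=['noout'])
    assert cmd == ['ceph',
                   '-n', 'client.admin',
                   '-k', '/etc/ceph/ceph.client.admin.keyring',
                   '--cluster', 'ceph',
                   'osd', 'set', 'noout']

With CEPH_CONTAINER_IMAGE set, is_containerized() returns the image name and the same arguments are prefixed with a podman/docker 'run' wrapper instead of the bare 'ceph' binary.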
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_cmd, \ + pre_generate_cmd, \ + is_containerized, \ + exec_command, \ + exit_module +except ImportError: + from module_utils.ca_common import generate_cmd, \ + pre_generate_cmd, \ + is_containerized, \ + exec_command, \ + exit_module + + +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_pool + +author: Guillaume Abrioux + +short_description: Manage Ceph Pools + +version_added: "2.8" + +description: + - Manage Ceph pool(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the Ceph pool + required: true + state: + description: + If 'present' is used, the module creates a pool if it doesn't exist + or update it if it already exists. + If 'absent' is used, the module will simply delete the pool. + If 'list' is used, the module will return all details about the + existing pools. (json formatted). + required: false + choices: ['present', 'absent', 'list'] + default: present + size: + description: + - set the replica size of the pool. + required: false + default: 3 + min_size: + description: + - set the min_size parameter of the pool. + required: false + default: default to `osd_pool_default_min_size` (ceph) + pg_num: + description: + - set the pg_num of the pool. + required: false + default: default to `osd_pool_default_pg_num` (ceph) + pgp_num: + description: + - set the pgp_num of the pool. + required: false + default: default to `osd_pool_default_pgp_num` (ceph) + pg_autoscale_mode: + description: + - set the pg autoscaler on the pool. + required: false + default: 'on' + target_size_ratio: + description: + - set the target_size_ratio on the pool + required: false + default: None + pool_type: + description: + - set the pool type, either 'replicated' or 'erasure' + required: false + default: 'replicated' + erasure_profile: + description: + - When pool_type = 'erasure', set the erasure profile of the pool + required: false + default: 'default' + rule_name: + description: + - Set the crush rule name assigned to the pool + required: false + default: 'replicated_rule' when pool_type is 'erasure' else None + expected_num_objects: + description: + - Set the expected_num_objects parameter of the pool. + required: false + default: '0' + application: + description: + - Set the pool application on the pool. 
+ required: false + default: None +''' + +EXAMPLES = ''' + +pools: + - { name: foo, size: 3, application: rbd, pool_type: 'replicated', + pg_autoscale_mode: 'on' } + +- hosts: all + become: true + tasks: + - name: create a pool + ceph_pool: + name: "{{ item.name }}" + state: present + size: "{{ item.size }}" + application: "{{ item.application }}" + pool_type: "{{ item.pool_type }}" + pg_autoscale_mode: "{{ item.pg_autoscale_mode }}" + with_items: "{{ pools }}" +''' + +RETURN = '''# ''' + + +def check_pool_exist(cluster, + name, + user, + user_key, + output_format='json', + container_image=None): + ''' + Check if a given pool exists + ''' + + args = ['stats', name, '-f', output_format] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def generate_get_config_cmd(param, + cluster, + user, + user_key, + container_image=None): + _cmd = pre_generate_cmd('ceph', container_image=container_image) + args = [ + '-n', + user, + '-k', + user_key, + '--cluster', + cluster, + 'config', + 'get', + 'mon.*', + param + ] + cmd = _cmd + args + return cmd + + +def get_application_pool(cluster, + name, + user, + user_key, + output_format='json', + container_image=None): + ''' + Get application type enabled on a given pool + ''' + + args = ['application', 'get', name, '-f', output_format] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def enable_application_pool(cluster, + name, + application, + user, + user_key, + container_image=None): + ''' + Enable application on a given pool + ''' + + args = ['application', 'enable', name, application] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def init_rbd_pool(cluster, + name, + user, + user_key, + container_image=None): + ''' + Initialize an rbd pool + ''' + + args = [name] + + cmd = generate_cmd(cmd='rbd', + sub_cmd=['pool', 'init'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def disable_application_pool(cluster, + name, + application, + user, + user_key, + container_image=None): + ''' + Disable application on a given pool + ''' + + args = ['application', 'disable', name, + application, '--yes-i-really-mean-it'] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def get_pool_details(module, + cluster, + name, + user, + user_key, + output_format='json', + container_image=None): + ''' + Get details about a given pool + ''' + + args = ['ls', 'detail', '-f', output_format] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + rc, cmd, out, err = exec_command(module, cmd) + + if rc == 0: + out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0] + + _rc, _cmd, application_pool, _err = exec_command(module, + get_application_pool(cluster, # noqa: E501 + name, # noqa: E501 + user, # noqa: E501 + user_key, # noqa: E501 + container_image=container_image)) # noqa: E501 + + # This is a trick because "target_size_ratio" isn't present at the same + # level in the dict + # ie: + # { + # 'pg_num': 8, + # 'pgp_num': 8, +
# 'pg_autoscale_mode': 'on', + # 'options': { + # 'target_size_ratio': 0.1 + # } + # } + # If 'target_size_ratio' is present in 'options', we set it, this way we + # end up with a dict containing all needed keys at the same level. + if 'target_size_ratio' in out['options'].keys(): + out['target_size_ratio'] = out['options']['target_size_ratio'] + else: + out['target_size_ratio'] = None + + application = list(json.loads(application_pool.strip()).keys()) + + if len(application) == 0: + out['application'] = '' + else: + out['application'] = application[0] + + return rc, cmd, out, err + + +def compare_pool_config(user_pool_config, running_pool_details): + ''' + Compare user input config pool details with current running pool details + ''' + + delta = {} + filter_keys = ['pg_num', 'pg_placement_num', 'size', + 'pg_autoscale_mode', 'target_size_ratio'] + for key in filter_keys: + if (str(running_pool_details[key]) != user_pool_config[key]['value'] and # noqa: E501 + user_pool_config[key]['value']): + delta[key] = user_pool_config[key] + + if (running_pool_details['application'] != + user_pool_config['application']['value'] and + user_pool_config['application']['value']): + delta['application'] = {} + delta['application']['new_application'] = user_pool_config['application']['value'] # noqa: E501 + # to be improved (for update_pools()...) + delta['application']['value'] = delta['application']['new_application'] + delta['application']['old_application'] = running_pool_details['application'] # noqa: E501 + + return delta + + +def list_pools(cluster, + user, + user_key, + details, + output_format='json', + container_image=None): + ''' + List existing pools + ''' + + args = ['ls'] + + if details: + args.append('detail') + + args.extend(['-f', output_format]) + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def create_pool(cluster, + user, + user_key, + user_pool_config, + container_image=None): + ''' + Create a new pool + ''' + + args = ['create', user_pool_config['pool_name']['value'], + user_pool_config['type']['value']] + + if user_pool_config['pg_autoscale_mode']['value'] == 'off': + args.extend(['--pg_num', + user_pool_config['pg_num']['value'], + '--pgp_num', + user_pool_config['pgp_num']['value'] or + user_pool_config['pg_num']['value']]) + elif user_pool_config['target_size_ratio']['value']: + args.extend(['--target_size_ratio', + user_pool_config['target_size_ratio']['value']]) + + if user_pool_config['type']['value'] == 'replicated': + args.extend([user_pool_config['crush_rule']['value'], + '--expected_num_objects', + user_pool_config['expected_num_objects']['value'], + '--autoscale-mode', + user_pool_config['pg_autoscale_mode']['value']]) + + if (user_pool_config['size']['value'] and + user_pool_config['type']['value'] == "replicated"): + args.extend(['--size', user_pool_config['size']['value']]) + + elif user_pool_config['type']['value'] == 'erasure': + args.extend([user_pool_config['erasure_profile']['value']]) + + if user_pool_config['crush_rule']['value']: + args.extend([user_pool_config['crush_rule']['value']]) + + args.extend(['--expected_num_objects', + user_pool_config['expected_num_objects']['value'], + '--autoscale-mode', + user_pool_config['pg_autoscale_mode']['value']]) + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def remove_pool(cluster, 
name, user, user_key, container_image=None): + ''' + Remove a pool + ''' + + args = ['rm', name, name, '--yes-i-really-really-mean-it'] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def update_pool(module, cluster, name, + user, user_key, delta, container_image=None): + ''' + Update an existing pool + ''' + + report = "" + + for key in delta.keys(): + if key != 'application': + args = ['set', + name, + delta[key]['cli_set_opt'], + delta[key]['value']] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + rc, cmd, out, err = exec_command(module, cmd) + if rc != 0: + return rc, cmd, out, err + + else: + rc, cmd, out, err = exec_command(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501 + if rc != 0: + return rc, cmd, out, err + + rc, cmd, out, err = exec_command(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501 + if rc != 0: + return rc, cmd, out, err + + report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value']) # noqa: E501 + + out = report + return rc, cmd, out, err + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, default='present', + choices=['present', 'absent', 'list']), + details=dict(type='bool', required=False, default=False), + size=dict(type='str', required=False), + min_size=dict(type='str', required=False), + pg_num=dict(type='str', required=False), + pgp_num=dict(type='str', required=False), + pg_autoscale_mode=dict(type='str', required=False, default='on'), + target_size_ratio=dict(type='str', required=False, default=None), + pool_type=dict(type='str', required=False, default='replicated', + choices=['replicated', 'erasure', '1', '3']), + erasure_profile=dict(type='str', required=False, default='default'), + rule_name=dict(type='str', required=False, default=None), + expected_num_objects=dict(type='str', required=False, default="0"), + application=dict(type='str', required=False, default=None), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + # Gather module parameters in variables + cluster = module.params.get('cluster') + name = module.params.get('name') + state = module.params.get('state') + details = module.params.get('details') + size = module.params.get('size') + min_size = module.params.get('min_size') + pg_num = module.params.get('pg_num') + pgp_num = module.params.get('pgp_num') + pg_autoscale_mode = module.params.get('pg_autoscale_mode') + target_size_ratio = module.params.get('target_size_ratio') + application = module.params.get('application') + + if (module.params.get('pg_autoscale_mode').lower() in + ['true', 'on', 'yes']): + pg_autoscale_mode = 'on' + elif (module.params.get('pg_autoscale_mode').lower() in + ['false', 'off', 'no']): + pg_autoscale_mode = 'off' + else: + pg_autoscale_mode = 'warn' + + if module.params.get('pool_type') == '1': + pool_type = 'replicated' + elif module.params.get('pool_type') == '3': + pool_type = 'erasure' + else: + pool_type = module.params.get('pool_type') + + if not 
module.params.get('rule_name'): + rule_name = 'replicated_rule' if pool_type == 'replicated' else None + else: + rule_name = module.params.get('rule_name') + + erasure_profile = module.params.get('erasure_profile') + expected_num_objects = module.params.get('expected_num_objects') + user_pool_config = { + 'pool_name': {'value': name}, + 'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'}, + 'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'}, + 'pg_autoscale_mode': {'value': pg_autoscale_mode, + 'cli_set_opt': 'pg_autoscale_mode'}, + 'target_size_ratio': {'value': target_size_ratio, + 'cli_set_opt': 'target_size_ratio'}, + 'application': {'value': application}, + 'type': {'value': pool_type}, + 'erasure_profile': {'value': erasure_profile}, + 'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'}, + 'expected_num_objects': {'value': expected_num_objects}, + 'size': {'value': size, 'cli_set_opt': 'size'}, + 'min_size': {'value': min_size} + } + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + user = "client.admin" + keyring_filename = cluster + '.' + user + '.keyring' + user_key = os.path.join("/etc/ceph/", keyring_filename) + + if state == "present": + rc, cmd, out, err = exec_command(module, + check_pool_exist(cluster, + name, + user, + user_key, + container_image=container_image)) # noqa: E501 + changed = rc != 0 + if not changed: + running_pool_details = get_pool_details(module, + cluster, + name, + user, + user_key, + container_image=container_image) # noqa: E501 + user_pool_config['pg_placement_num'] = {'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num'} # noqa: E501 + delta = compare_pool_config(user_pool_config, + running_pool_details[2]) + if len(delta) > 0: + keys = list(delta.keys()) + details = running_pool_details[2] + if details['erasure_code_profile'] and 'size' in keys: + del delta['size'] + if details['pg_autoscale_mode'] == 'on': + delta.pop('pg_num', None) + delta.pop('pgp_num', None) + + changed = len(delta) > 0 + if changed and not module.check_mode: + rc, cmd, out, err = update_pool(module, + cluster, + name, + user, + user_key, + delta, + container_image=container_image) # noqa: E501 + elif not module.check_mode: + rc, cmd, out, err = exec_command(module, + create_pool(cluster, + user, + user_key, + user_pool_config=user_pool_config, # noqa: E501 + container_image=container_image)) # noqa: E501 + if user_pool_config['application']['value']: + rc, _, _, _ = exec_command(module, + enable_application_pool(cluster, + name, + user_pool_config['application']['value'], # noqa: E501 + user, + user_key, + container_image=container_image)) # noqa: E501 + if rc == 0 and user_pool_config['application']['value'] == 'rbd': # noqa: E501 + rc, cmd, out, err = exec_command(module, + init_rbd_pool(cluster, + user_pool_config['pool_name']['value'], # noqa: E501 + user, + user_key, + container_image=container_image)) # noqa: E501 + if user_pool_config['min_size']['value']: + # not implemented yet + pass + + elif state == "list": + rc, cmd, out, err = exec_command(module, + list_pools(cluster, + user, + user_key, + details, + container_image=container_image)) # noqa: E501 + if rc != 0: + out = "Couldn't list pool(s) present on the cluster" + + elif state == "absent": + rc, cmd, out, err = exec_command(module, + check_pool_exist(cluster, + name, user, + user_key, + container_image=container_image)) # noqa: E501 + changed = rc == 0 + if changed
and not module.check_mode: + rc, cmd, out, err = exec_command(module, + remove_pool(cluster, + name, + user, + user_key, + container_image=container_image)) # noqa: E501 + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, + changed=changed) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_volume.py b/plugins/modules/ceph_volume.py new file mode 100644 index 0000000..ade0d55 --- /dev/null +++ b/plugins/modules/ceph_volume.py @@ -0,0 +1,732 @@ +#!/usr/bin/python + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exec_command, \ + is_containerized, \ + fatal +except ImportError: + from module_utils.ca_common import exec_command, \ + is_containerized, \ + fatal +import datetime +import copy +import json +import os +import re + +ANSIBLE_METADATA = { + 'metadata_version': '1.0', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume + +short_description: Create ceph OSDs with ceph-volume + +description: + - Using the ceph-volume utility available in Ceph this module + can be used to create ceph OSDs that are backed by logical volumes. + - Only available in ceph versions luminous or greater. + +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + objectstore: + description: + - The objectstore of the OSD. + required: false + choices: ['bluestore'] + default: bluestore + action: + description: + - The action to take. Creating OSDs and zapping or querying devices. + required: true + choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list', 'inventory'] + default: create + data: + description: + - The logical volume name or device to use for the OSD data. + required: true + data_vg: + description: + - If data is a lv, this must be the name of the volume group it belongs to. + required: false + osd_fsid: + description: + - The OSD FSID + required: false + osd_id: + description: + - The OSD ID + required: false + db: + description: + - A partition or logical volume name to use for block.db. + required: false + db_vg: + description: + - If db is a lv, this must be the name of the volume group it belongs to. # noqa: E501 + required: false + wal: + description: + - A partition or logical volume name to use for block.wal. + required: false + wal_vg: + description: + - If wal is a lv, this must be the name of the volume group it belongs to. # noqa: E501 + required: false + crush_device_class: + description: + - Will set the crush device class for the OSD. + required: false + dmcrypt: + description: + - If set to True the OSD will be encrypted with dmcrypt. + required: false + batch_devices: + description: + - A list of devices to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + required: false + osds_per_device: + description: + - The number of OSDs to create per device. + - Only applicable if action is 'batch'. + required: false + default: 1 + block_db_size: + description: + - The size in bytes of bluestore block db lvs. + - The default of -1 means to create them as big as possible. + - Only applicable if action is 'batch'. + required: false + default: -1 + block_db_devices: + description: + - A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. 
+ required: false + wal_devices: + description: + - A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + required: false + report: + description: + - If provided the --report flag will be passed to 'ceph-volume lvm batch'. + - No OSDs will be created. + - Results will be returned in json format. + - Only applicable if action is 'batch'. + required: false + list: + description: + - List potential Ceph LVM metadata on a device + required: false + inventory: + description: + - List storage device inventory. + required: false + +author: + - Andrew Schoen (@andrewschoen) + - Sebastien Han +''' + +EXAMPLES = ''' +- name: set up a bluestore osd with a raw device for data + ceph_volume: + objectstore: bluestore + data: /dev/sdc + action: create + + +- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa: E501 + ceph_volume: + objectstore: bluestore + data: data-lv + data_vg: data-vg + db: /dev/sdc1 + wal: /dev/sdc2 + action: create +''' + + +def container_exec(binary, container_image, mounts=None): + ''' + Build the docker CLI to run a command inside a container + ''' + _mounts = {} + _mounts['/run/lock/lvm'] = '/run/lock/lvm:z' + _mounts['/var/run/udev'] = '/var/run/udev:z' + _mounts['/dev'] = '/dev' + _mounts['/etc/ceph'] = '/etc/ceph:z' + _mounts['/run/lvm'] = '/run/lvm' + _mounts['/var/lib/ceph'] = '/var/lib/ceph:z' + _mounts['/var/log/ceph'] = '/var/log/ceph:z' + if mounts is None: + mounts = _mounts + else: + _mounts.update(mounts) + + volumes = sum( + [['-v', '{}:{}'.format(src_dir, dst_dir)] + for src_dir, dst_dir in _mounts.items()], []) + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, 'run', + '--rm', + '--privileged', + '--net=host', + '--ipc=host'] + volumes + \ + ['--entrypoint=' + binary, container_image] + return command_exec + + +def build_cmd(action, container_image, + cluster='ceph', + binary='ceph-volume', mounts=None): + ''' + Build the ceph-volume command + ''' + + _binary = binary + + if container_image: + cmd = container_exec( + binary, container_image, mounts=mounts) + else: + binary = [binary] + cmd = binary + + if _binary == 'ceph-volume': + cmd.extend(['--cluster', cluster]) + + cmd.extend(action) + + return cmd + + +def get_data(data, data_vg): + if data_vg: + data = '{0}/{1}'.format(data_vg, data) + return data + + +def get_journal(journal, journal_vg): + if journal_vg: + journal = '{0}/{1}'.format(journal_vg, journal) + return journal + + +def get_db(db, db_vg): + if db_vg: + db = '{0}/{1}'.format(db_vg, db) + return db + + +def get_wal(wal, wal_vg): + if wal_vg: + wal = '{0}/{1}'.format(wal_vg, wal) + return wal + + +def batch(module, container_image, report=None): + ''' + Batch prepare OSD devices + ''' + + # get module variables + cluster = module.params['cluster'] + objectstore = module.params['objectstore'] + batch_devices = module.params.get('batch_devices', None) + crush_device_class = module.params.get('crush_device_class', None) + block_db_size = module.params.get('block_db_size', None) + block_db_devices = module.params.get('block_db_devices', None) + wal_devices = module.params.get('wal_devices', None) + dmcrypt = module.params.get('dmcrypt', None) + osds_per_device = module.params.get('osds_per_device', 1) + + if not osds_per_device: + fatal('osds_per_device must be provided if action is "batch"', module) + + if osds_per_device < 1: + fatal('osds_per_device must be greater than 0 if 
action is "batch"', module) # noqa: E501 + + if not batch_devices: + fatal('batch_devices must be provided if action is "batch"', module) + + # Build the CLI + action = ['lvm', 'batch'] + cmd = build_cmd(action, container_image, cluster) + cmd.extend(['--%s' % objectstore]) + if not report: + cmd.append('--yes') + + if container_image: + cmd.append('--prepare') + + if crush_device_class: + cmd.extend(['--crush-device-class', crush_device_class]) + + if dmcrypt: + cmd.append('--dmcrypt') + + if osds_per_device > 1: + cmd.extend(['--osds-per-device', str(osds_per_device)]) + + if objectstore == 'bluestore' and block_db_size != '-1': + cmd.extend(['--block-db-size', block_db_size]) + + cmd.extend(batch_devices) + + if block_db_devices and objectstore == 'bluestore': + cmd.append('--db-devices') + cmd.extend(block_db_devices) + + if wal_devices and objectstore == 'bluestore': + cmd.append('--wal-devices') + cmd.extend(wal_devices) + + return cmd + + +def ceph_volume_cmd(subcommand, container_image, cluster=None): + ''' + Build ceph-volume initial command + ''' + + if container_image: + binary = 'ceph-volume' + cmd = container_exec( + binary, container_image) + else: + binary = ['ceph-volume'] + cmd = binary + + if cluster: + cmd.extend(['--cluster', cluster]) + + cmd.append('lvm') + cmd.append(subcommand) + + return cmd + + +def prepare_or_create_osd(module, action, container_image): + ''' + Prepare or create OSD devices + ''' + + # get module variables + cluster = module.params['cluster'] + objectstore = module.params['objectstore'] + data = module.params['data'] + data_vg = module.params.get('data_vg', None) + data = get_data(data, data_vg) + db = module.params.get('db', None) + db_vg = module.params.get('db_vg', None) + wal = module.params.get('wal', None) + wal_vg = module.params.get('wal_vg', None) + crush_device_class = module.params.get('crush_device_class', None) + dmcrypt = module.params.get('dmcrypt', None) + + # Build the CLI + action = ['lvm', action] + cmd = build_cmd(action, container_image, cluster) + cmd.extend(['--%s' % objectstore]) + cmd.append('--data') + cmd.append(data) + + if db and objectstore == 'bluestore': + db = get_db(db, db_vg) + cmd.extend(['--block.db', db]) + + if wal and objectstore == 'bluestore': + wal = get_wal(wal, wal_vg) + cmd.extend(['--block.wal', wal]) + + if crush_device_class: + cmd.extend(['--crush-device-class', crush_device_class]) + + if dmcrypt: + cmd.append('--dmcrypt') + + return cmd + + +def list_osd(module, container_image): + ''' + List will detect wether or not a device has Ceph LVM Metadata + ''' + + # get module variables + cluster = module.params['cluster'] + data = module.params.get('data', None) + data_vg = module.params.get('data_vg', None) + data = get_data(data, data_vg) + + # Build the CLI + action = ['lvm', 'list'] + cmd = build_cmd(action, + container_image, + cluster, + mounts={'/var/lib/ceph': '/var/lib/ceph:ro'}) + if data: + cmd.append(data) + cmd.append('--format=json') + + return cmd + + +def list_storage_inventory(module, container_image): + ''' + List storage inventory. 
+ ''' + + action = ['inventory'] + cmd = build_cmd(action, container_image) + cmd.append('--format=json') + + return cmd + + +def activate_osd(): + ''' + Activate all the OSDs on a machine + ''' + + # build the CLI + action = ['lvm', 'activate'] + container_image = None + cmd = build_cmd(action, container_image) + cmd.append('--all') + + return cmd + + +def is_lv(module, vg, lv, container_image): + ''' + Check if an LV exists + ''' + + args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa: E501 + + cmd = build_cmd(args, container_image, binary='lvs') + + rc, cmd, out, err = exec_command(module, cmd) + + if rc == 0: + result = json.loads(out)['report'][0]['lv'] + if len(result) > 0: + return True + + return False + + +def zap_devices(module, container_image): + ''' + Will run 'ceph-volume lvm zap' on all devices, lvs and partitions + used to create the OSD. The --destroy flag is always passed so that + if an OSD was originally created with a raw device or partition for + 'data' then any lvs that were created by ceph-volume are removed. + ''' + + # get module variables + data = module.params.get('data', None) + data_vg = module.params.get('data_vg', None) + db = module.params.get('db', None) + db_vg = module.params.get('db_vg', None) + wal = module.params.get('wal', None) + wal_vg = module.params.get('wal_vg', None) + osd_fsid = module.params.get('osd_fsid', None) + osd_id = module.params.get('osd_id', None) + destroy = module.params.get('destroy', True) + + # build the CLI + action = ['lvm', 'zap'] + cmd = build_cmd(action, container_image) + if destroy: + cmd.append('--destroy') + + if osd_fsid: + cmd.extend(['--osd-fsid', osd_fsid]) + + if osd_id: + cmd.extend(['--osd-id', osd_id]) + + if data: + data = get_data(data, data_vg) + cmd.append(data) + + if db: + db = get_db(db, db_vg) + cmd.extend([db]) + + if wal: + wal = get_wal(wal, wal_vg) + cmd.extend([wal]) + + return cmd + + +def allowed_in_check_mode(module): + ''' + Check if the action is allowed in check mode + ''' + + action = module.params['action'] + report = module.params.get('report', False) + + # batch is allowed in check mode if report is set + if action == 'batch' and report: + return True + + allowed_actions = ['list', 'inventory'] + + return action in allowed_actions + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + objectstore=dict(type='str', required=False, choices=[ + 'bluestore'], default='bluestore'), + action=dict(type='str', required=False, choices=[ + 'create', 'zap', 'batch', 'prepare', 'activate', 'list', + 'inventory'], default='create'), # noqa: 4502 + data=dict(type='str', required=False), + data_vg=dict(type='str', required=False), + db=dict(type='str', required=False), + db_vg=dict(type='str', required=False), + wal=dict(type='str', required=False), + wal_vg=dict(type='str', required=False), + crush_device_class=dict(type='str', required=False), + dmcrypt=dict(type='bool', required=False, default=False), + batch_devices=dict(type='list', required=False, default=[]), + osds_per_device=dict(type='int', required=False, default=1), + block_db_size=dict(type='str', required=False, default='-1'), + block_db_devices=dict(type='list', required=False, default=[]), + wal_devices=dict(type='list', required=False, default=[]), + report=dict(type='bool', required=False, default=False), + osd_fsid=dict(type='str', required=False), + osd_id=dict(type='str', required=False), + destroy=dict(type='bool', 
required=False, default=True), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + mutually_exclusive=[ + ('data', 'osd_fsid', 'osd_id'), + ], + required_if=[ + ('action', 'zap', ('data', 'osd_fsid', 'osd_id'), True) + ] + ) + + result = dict( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + if module.check_mode and not allowed_in_check_mode(module): + module.exit_json(**result) + + # start execution + startd = datetime.datetime.now() + + # get the desired action + action = module.params['action'] + + # will return either the image name or None + container_image = is_containerized() + + # Assume the task's status will be 'changed' + changed = True + + if action == 'create' or action == 'prepare': + # First test if the device has Ceph LVM Metadata + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + # list_osd returns a dict, if the dict is empty this means + # we can not check the return code since it's not consistent + # with the plain output + # see: http://tracker.ceph.com/issues/36329 + # FIXME: it's probably less confusing to check for rc + + # convert out to json, ansible returns a string... + try: + out_dict = json.loads(out) + except ValueError: + fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa: E501 + + if out_dict: + data = module.params['data'] + result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa: E501 + result['rc'] = 0 + module.exit_json(**result) + + # Prepare or create the OSD + rc, cmd, out, err = exec_command( + module, prepare_or_create_osd(module, action, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) + + elif action == 'activate': + if container_image: + fatal( + "This is not how container's activation happens, nothing to activate", module) # noqa: E501 + + # Activate the OSD + rc, cmd, out, err = exec_command( + module, activate_osd()) + + elif action == 'zap': + # Zap the OSD + skip = [] + for device_type in ['journal', 'data', 'db', 'wal']: + # 1/ if we passed vg/lv + if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 + # 2/ check this is an actual lv/vg + ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa: E501 + skip.append(ret) + # 3/ This isn't a lv/vg device + if not ret: + module.params['{}_vg'.format(device_type)] = False + module.params[device_type] = False + # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa: E501 + elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 + skip.append(True) + + cmd = zap_devices(module, container_image) + + if any(skip) or module.params.get('osd_fsid', None) \ + or module.params.get('osd_id', None): + rc, cmd, out, err = exec_command( + module, cmd) + for scan_cmd in ['vgscan', 'lvscan']: + module.run_command([scan_cmd, '--cache']) + else: + out = 'Skipped, nothing to zap' + err = '' + changed = False + rc = 0 + + elif action == 'list': + # List Ceph LVM Metadata on a device + changed = False + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + elif action == 'inventory': + # List storage device inventory. 
+ changed = False + rc, cmd, out, err = exec_command( + module, list_storage_inventory(module, container_image)) + + elif action == 'batch': + # Batch prepare AND activate OSDs + report = module.params.get('report', None) + + # Add --report flag for the idempotency test + report_flags = [ + '--report', + '--format=json', + ] + + cmd = batch(module, container_image, report=True) + batch_report_cmd = copy.copy(cmd) + batch_report_cmd.extend(report_flags) + + # Run batch --report to see what's going to happen + # Do not run the batch command if there is nothing to do + rc, cmd, out, err = exec_command( + module, batch_report_cmd) + try: + if not out: + out = '{}' + report_result = json.loads(out) + except ValueError: + strategy_changed_in_out = "strategy changed" in out + strategy_changed_in_err = "strategy changed" in err + strategy_changed = strategy_changed_in_out or \ + strategy_changed_in_err + if strategy_changed: + if strategy_changed_in_out: + out = json.dumps({"changed": False, + "stdout": out.rstrip("\r\n")}) + elif strategy_changed_in_err: + out = json.dumps({"changed": False, + "stderr": err.rstrip("\r\n")}) + rc = 0 + changed = False + else: + out = out.rstrip("\r\n") + result = dict( + cmd=cmd, + stdout=out.rstrip('\r\n'), + stderr=err.rstrip('\r\n'), + rc=rc, + changed=changed, + ) + if strategy_changed: + module.exit_json(**result) + module.fail_json(msg='non-zero return code', **result) + + if not report: + if 'changed' in report_result: + # we have the old batch implementation + # if not asking for a report, let's just run the batch command + changed = report_result['changed'] + if changed: + # Batch prepare the OSD + rc, cmd, out, err = exec_command( + module, batch(module, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) + else: + # we have the refactored batch, its idempotent so lets just + # run it + rc, cmd, out, err = exec_command( + module, batch(module, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) + else: + cmd = batch_report_cmd + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip('\r\n'), + stderr=err.rstrip('\r\n'), + changed=changed, + ) + + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_volume_simple_activate.py b/plugins/modules/ceph_volume_simple_activate.py new file mode 100644 index 0000000..4825f0c --- /dev/null +++ b/plugins/modules/ceph_volume_simple_activate.py @@ -0,0 +1,190 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
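A note on the batch flow that precedes this file: idempotency comes from running the same 'lvm batch' command with --report --format=json first, and only creating OSDs when the report says there is work to do. A rough standalone sketch of that logic, outside the module plumbing; the device path and cluster name are illustrative, and the report shapes are assumptions based on the two code paths handled above:

    import json
    import subprocess

    # Dry-run first: ask ceph-volume what the batch would do.
    report_cmd = ['ceph-volume', '--cluster', 'ceph', 'lvm', 'batch',
                  '--bluestore', '--report', '--format=json', '/dev/sdb']
    out = subprocess.run(report_cmd, capture_output=True, text=True).stdout

    # Older ceph-volume releases return {"changed": false, ...} when there
    # is nothing to do; newer releases return the list of planned OSDs.
    report = json.loads(out or '{}')
    nothing_to_do = not report or report.get('changed') is False

    if not nothing_to_do:
        # Only now create the OSDs for real.
        subprocess.run(['ceph-volume', '--cluster', 'ceph', 'lvm', 'batch',
                        '--bluestore', '--yes', '/dev/sdb'], check=True)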
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module +except ImportError: + from module_utils.ca_common import exit_module +import datetime +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume_simple_activate +short_description: Activate legacy OSD with ceph-volume +version_added: "2.8" +description: + - Activate legacy OSD with ceph-volume by providing the JSON file from + the scan operation or by passing the OSD ID and OSD FSID. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + path: + description: + - The OSD metadata as a JSON file in the /etc/ceph/osd directory; it + must exist. + required: false + osd_id: + description: + - The legacy OSD ID. + required: false + osd_fsid: + description: + - The legacy OSD FSID. + required: false + osd_all: + description: + - Activate all legacy OSDs. + required: false + systemd: + description: + - Use the systemd unit during the OSD activation. + required: false + default: true +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: activate all legacy OSDs + ceph_volume_simple_activate: + cluster: ceph + osd_all: true + +- name: activate a legacy OSD via OSD ID and OSD FSID + ceph_volume_simple_activate: + cluster: ceph + osd_id: 3 + osd_fsid: 0c4a7eca-0c2a-4c12-beff-08a80f064c52 + +- name: activate a legacy OSD via the JSON file + ceph_volume_simple_activate: + cluster: ceph + path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json + +- name: activate a legacy OSD via the JSON file without systemd + ceph_volume_simple_activate: + cluster: ceph + path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json + systemd: false +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cluster=dict(type='str', required=False, default='ceph'), + path=dict(type='path', required=False), + systemd=dict(type='bool', required=False, default=True), + osd_id=dict(type='str', required=False), + osd_fsid=dict(type='str', required=False), + osd_all=dict(type='bool', required=False), + ), + supports_check_mode=True, + mutually_exclusive=[ + ('osd_all', 'osd_id'), + ('osd_all', 'osd_fsid'), + ('path', 'osd_id'), + ('path', 'osd_fsid'), + ], + required_together=[ + ('osd_id', 'osd_fsid') + ], + required_one_of=[ + ('path', 'osd_id', 'osd_all'), + ('path', 'osd_fsid', 'osd_all'), + ], + ) + + path = module.params.get('path') + cluster = module.params.get('cluster') + systemd = module.params.get('systemd') + osd_id = module.params.get('osd_id') + osd_fsid = module.params.get('osd_fsid') + osd_all = module.params.get('osd_all') + + if path and not os.path.exists(path): + module.fail_json(msg='{} does not exist'.format(path), rc=1) + + startd = datetime.datetime.now() + + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + if container_binary and container_image: + cmd = [container_binary, + 'run', '--rm', '--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', container_image] + else: + cmd = ['ceph-volume'] + + cmd.extend(['--cluster', cluster,
'simple', 'activate']) + + if osd_all: + cmd.append('--all') + else: + if path: + cmd.extend(['--file', path]) + else: + cmd.extend([osd_id, osd_fsid]) + + if not systemd: + cmd.append('--no-systemd') + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ceph_volume_simple_scan.py b/plugins/modules/ceph_volume_simple_scan.py new file mode 100644 index 0000000..e76f279 --- /dev/null +++ b/plugins/modules/ceph_volume_simple_scan.py @@ -0,0 +1,163 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module +except ImportError: + from module_utils.ca_common import exit_module +import datetime +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume_simple_scan +short_description: Scan legacy OSD with ceph-volume +version_added: "2.8" +description: + - Scan legacy OSD with ceph-volume and store the output as JSON file + in /etc/ceph/osd directory with {OSD_ID}-{OSD_FSID}.json format. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + path: + description: + - The OSD directory or metadata partition. The directory or + partition must exist. + required: false + force: + description: + - Force re-scanning an OSD and overwriting the JSON content. + required: false + default: false + stdout: + description: + - Do not store the output to JSON file but stdout instead. 
+ required: false + default: false +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: scan all running OSDs + ceph_volume_simple_scan: + cluster: ceph + +- name: scan an OSD with the directory + ceph_volume_simple_scan: + cluster: ceph + path: /var/lib/ceph/osd/ceph-3 + +- name: scan an OSD with the partition + ceph_volume_simple_scan: + cluster: ceph + path: /dev/sdb1 + +- name: rescan an OSD and print the result on stdout + ceph_volume_simple_scan: + cluster: ceph + path: /dev/nvme0n1p1 + force: true + stdout: true +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cluster=dict(type='str', required=False, default='ceph'), + path=dict(type='path', required=False), + force=dict(type='bool', required=False, default=False), + stdout=dict(type='bool', required=False, default=False), + ), + supports_check_mode=True, + ) + + path = module.params.get('path') + cluster = module.params.get('cluster') + force = module.params.get('force') + stdout = module.params.get('stdout') + + if path and not os.path.exists(path): + module.fail_json(msg='{} does not exist'.format(path), rc=1) + + startd = datetime.datetime.now() + + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + if container_binary and container_image: + cmd = [container_binary, + 'run', '--rm', '--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', container_image] + else: + cmd = ['ceph-volume'] + + cmd.extend(['--cluster', cluster, 'simple', 'scan']) + + if force: + cmd.append('--force') + + if stdout: + cmd.append('--stdout') + + if path: + cmd.append(path) + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cephadm_adopt.py b/plugins/modules/cephadm_adopt.py new file mode 100644 index 0000000..723e0a4 --- /dev/null +++ b/plugins/modules/cephadm_adopt.py @@ -0,0 +1,184 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
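Both ceph_volume_simple_scan above and ceph_volume_simple_activate before it build the same container wrapper when CEPH_CONTAINER_BINARY and CEPH_CONTAINER_IMAGE are exported. A condensed sketch of the resulting scan invocation; the podman binary and image tag are illustrative fallbacks, not values from the patch:

    import os

    binary = os.getenv('CEPH_CONTAINER_BINARY', 'podman')
    image = os.getenv('CEPH_CONTAINER_IMAGE', 'quay.io/ceph/ceph:latest')

    # Mirrors the bind mounts used by ceph_volume_simple_scan above.
    cmd = [binary, 'run', '--rm', '--privileged',
           '--ipc=host', '--net=host',
           '-v', '/etc/ceph:/etc/ceph:z',
           '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
           '-v', '/var/log/ceph/:/var/log/ceph/:z',
           '-v', '/run/lvm/:/run/lvm/',
           '-v', '/run/lock/lvm/:/run/lock/lvm/',
           '--entrypoint=ceph-volume', image,
           '--cluster', 'ceph', 'simple', 'scan', '--stdout']
    print(' '.join(cmd))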
diff --git a/plugins/modules/cephadm_adopt.py b/plugins/modules/cephadm_adopt.py
new file mode 100644
index 0000000..723e0a4
--- /dev/null
+++ b/plugins/modules/cephadm_adopt.py
@@ -0,0 +1,184 @@
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible.module_utils.ca_common import exit_module
+except ImportError:
+    from module_utils.ca_common import exit_module
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: cephadm_adopt
+short_description: Adopt a Ceph cluster with cephadm
+version_added: "2.8"
+description:
+    - Adopt a Ceph cluster with cephadm
+options:
+    name:
+        description:
+            - The ceph daemon name.
+        required: true
+    cluster:
+        description:
+            - The ceph cluster name.
+        required: false
+        default: ceph
+    style:
+        description:
+            - Ceph deployment style.
+        required: false
+        default: legacy
+    image:
+        description:
+            - Ceph container image.
+        required: false
+    docker:
+        description:
+            - Use docker instead of podman.
+        required: false
+    pull:
+        description:
+            - Pull the Ceph container image.
+        required: false
+        default: true
+    firewalld:
+        description:
+            - Manage firewall rules with firewalld.
+        required: false
+        default: true
+author:
+    - Dimitri Savineau
+'''
+
+EXAMPLES = '''
+- name: adopt a ceph monitor with cephadm (default values)
+  cephadm_adopt:
+    name: mon.foo
+    style: legacy
+
+- name: adopt a ceph monitor with cephadm (with custom values)
+  cephadm_adopt:
+    name: mon.foo
+    style: legacy
+    image: quay.io/ceph/daemon-base:latest-main-devel
+    pull: false
+    firewalld: false
+
+- name: adopt a ceph monitor with cephadm with custom image via env var
+  cephadm_adopt:
+    name: mon.foo
+    style: legacy
+  environment:
+    CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel
+'''
+
+RETURN = '''# '''
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            cluster=dict(type='str', required=False, default='ceph'),
+            style=dict(type='str', required=False, default='legacy'),
+            image=dict(type='str', required=False),
+            docker=dict(type='bool', required=False, default=False),
+            pull=dict(type='bool', required=False, default=True),
+            firewalld=dict(type='bool', required=False, default=True),
+        ),
+        supports_check_mode=True,
+    )
+
+    name = module.params.get('name')
+    cluster = module.params.get('cluster')
+    style = module.params.get('style')
+    docker = module.params.get('docker')
+    image = module.params.get('image')
+    pull = module.params.get('pull')
+    firewalld = module.params.get('firewalld')
+
+    startd = datetime.datetime.now()
+
+    cmd = ['cephadm', 'ls', '--no-detail']
+
+    if module.check_mode:
+        exit_module(
+            module=module,
+            out='',
+            rc=0,
+            cmd=cmd,
+            err='',
+            startd=startd,
+            changed=False
+        )
+    else:
+        rc, out, err = module.run_command(cmd)
+
+        if rc == 0:
+            if name in [x["name"] for x in json.loads(out) if x["style"] == "cephadm:v1"]:  # noqa: E501
+                exit_module(
+                    module=module,
+                    out='{} is already adopted'.format(name),
+                    rc=0,
+                    cmd=cmd,
+                    err='',
+                    startd=startd,
+                    changed=False
+                )
+        else:
+            module.fail_json(msg=err, rc=rc)
+
+        cmd = ['cephadm']
+
+        if docker:
+            cmd.append('--docker')
+
+        if image:
+            cmd.extend(['--image', image])
+
+        cmd.extend(['adopt', '--cluster', cluster, '--name', name, '--style', style])  # noqa: E501
+
+        if not pull:
+            cmd.append('--skip-pull')
+
+        if not firewalld:
+            cmd.append('--skip-firewalld')
+
+        rc, out, err = module.run_command(cmd)
+        exit_module(
+            module=module,
+            out=out,
+            rc=rc,
+            cmd=cmd,
+            err=err,
+            startd=startd,
+            changed=True
+        )
+
+
+if __name__ == '__main__':
+    main()
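Editor's note: the idempotency check in cephadm_adopt parses 'cephadm ls --no-detail', which prints a JSON array of daemons. A self-contained sketch of that logic with static sample output (the osd.0 entry is invented for illustration; mon.foo matches the EXAMPLES section):

    # Editor's sketch, not part of the patch; sample 'cephadm ls' output.
    import json

    out = '[{"name": "mon.foo", "style": "cephadm:v1"}, {"name": "osd.0", "style": "legacy"}]'
    adopted = [x['name'] for x in json.loads(out) if x['style'] == 'cephadm:v1']
    assert 'mon.foo' in adopted      # already adopted: module exits unchanged
    assert 'osd.0' not in adopted    # legacy daemon: module would run 'cephadm adopt'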
diff --git a/plugins/modules/radosgw_caps.py b/plugins/modules/radosgw_caps.py
new file mode 100644
index 0000000..1d5f6b4
--- /dev/null
+++ b/plugins/modules/radosgw_caps.py
@@ -0,0 +1,378 @@
+# Copyright 2022, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    from ansible.module_utils.ca_common import (
+        exit_module,
+        exec_command,
+        is_containerized,
+        container_exec,
+    )
+except ImportError:
+    from module_utils.ca_common import (
+        exit_module,
+        exec_command,
+        is_containerized,
+        container_exec,
+    )
+import datetime
+import json
+import re
+from enum import IntFlag
+
+
+ANSIBLE_METADATA = {
+    "metadata_version": "1.1",
+    "status": ["preview"],
+    "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: radosgw_caps
+
+short_description: Manage RADOS Gateway Admin capabilities
+
+version_added: "2.10"
+
+description:
+    - Manage RADOS Gateway capabilities addition and deletion.
+options:
+    cluster:
+        description:
+            - The ceph cluster name.
+        required: false
+        default: ceph
+        type: str
+    name:
+        description:
+            - name of the RADOS Gateway user (uid).
+        required: true
+        type: str
+    state:
+        description:
+          If 'present' is used, the module will assign capabilities
+          defined in `caps`.
+          If 'absent' is used, the module will remove the capabilities.
+        required: false
+        choices: ['present', 'absent']
+        default: present
+        type: str
+    caps:
+        description:
+            - The set of capabilities to assign or remove.
+        required: true
+        type: list
+        elements: str
+
+author:
+    - Mathias Chapelain
+"""
+
+EXAMPLES = """
+- name: add users read capabilities to a user
+  radosgw_caps:
+    name: foo
+    state: present
+    caps:
+      - users=read
+
+- name: add users read write and all buckets capabilities
+  radosgw_caps:
+    name: foo
+    state: present
+    caps:
+      - users=read,write
+      - buckets=*
+
+- name: remove usage write capabilities
+  radosgw_caps:
+    name: foo
+    state: absent
+    caps:
+      - usage=write
+"""
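Editor's note: once add_caps and generate_radosgw_cmd (defined below) are combined, the second example above boils down to the following argv on a bare-metal host; multiple caps are joined with semicolons:

    # Editor's sketch, not part of the patch.
    ['radosgw-admin', '--cluster', 'ceph', 'caps', 'add',
     '--uid=foo', '--caps=users=read,write;buckets=*']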
+
+RETURN = """
+---
+cmd:
+  description: The radosgw-admin command being run by the module to apply caps settings.
+  returned: always
+  type: str
+start:
+  description: Timestamp of module execution start.
+  returned: always
+  type: str
+end:
+  description: Timestamp of module execution end.
+  returned: always
+  type: str
+delta:
+  description: Time of module execution between start and end.
+  returned: always
+  type: str
+diff:
+  description: Dict containing the user capabilities before and after modifications.
+  returned: always
+  type: dict
+  contains:
+    before:
+      description: Contains user capabilities, json-formatted, as returned by `radosgw-admin user info`.
+      returned: always
+      type: str
+    after:
+      description: Contains user capabilities, json-formatted, as returned by `radosgw-admin caps add/rm`.
+      returned: success
+      type: str
+rc:
+  description: Return code of the module command executed, see `cmd` return value.
+  returned: always
+  type: int
+stdout:
+  description: Output of the executed command.
+  returned: always
+  type: str
+stderr:
+  description: Error output of the executed command.
+  returned: always
+  type: str
+changed:
+  description: Whether the user capabilities were changed during module execution.
+  returned: always
+  type: bool
+"""
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+    """
+    Generate radosgw-admin prefix command
+    """
+    if container_image:
+        cmd = container_exec("radosgw-admin", container_image)
+    else:
+        cmd = ["radosgw-admin"]
+
+    return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+    """
+    Generate 'radosgw-admin' command line to execute
+    """
+
+    cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+    base_cmd = ["--cluster", cluster, "caps"]
+
+    cmd.extend(base_cmd + args)
+
+    return cmd
+
+
+def add_caps(module, container_image=None):
+    """
+    Add capabilities
+    """
+
+    cluster = module.params.get("cluster")
+    name = module.params.get("name")
+    caps = module.params.get("caps")
+
+    args = ["add", "--uid=" + name, "--caps=" + ";".join(caps)]
+
+    cmd = generate_radosgw_cmd(
+        cluster=cluster, args=args, container_image=container_image
+    )
+
+    return cmd
+
+
+def remove_caps(module, container_image=None):
+    """
+    Remove capabilities
+    """
+
+    cluster = module.params.get("cluster")
+    name = module.params.get("name")
+    caps = module.params.get("caps")
+
+    args = ["rm", "--uid=" + name, "--caps=" + ";".join(caps)]
+
+    cmd = generate_radosgw_cmd(
+        cluster=cluster, args=args, container_image=container_image
+    )
+
+    return cmd
+
+
+def get_user(module, container_image=None):
+    """
+    Get existing user
+    """
+
+    cluster = module.params.get("cluster")
+    name = module.params.get("name")
+
+    args = ["info", "--uid=" + name, "--format=json"]
+
+    cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+    base_cmd = ["--cluster", cluster, "user"]
+
+    cmd.extend(base_cmd + args)
+
+    return cmd
+
+
+class RGWUserCaps(IntFlag):
+    INVALID = 0x0
+    READ = 0x1
+    WRITE = 0x2
+    ALL = READ | WRITE
+
+
+def perm_string_to_flag(perm):
+    splitted = re.split(",|=| |\t", perm)
+    if ("read" in splitted and "write" in splitted) or "*" in splitted:
+        return RGWUserCaps.ALL
+    elif "read" in splitted:
+        return RGWUserCaps.READ
+    elif "write" in splitted:
+        return RGWUserCaps.WRITE
+    return RGWUserCaps.INVALID
+
+
+def perm_flag_to_string(perm):
+    if perm == RGWUserCaps.ALL:
+        return "*"
+    elif perm == RGWUserCaps.READ:
+        return "read"
+    elif perm == RGWUserCaps.WRITE:
+        return "write"
+    else:
+        return "invalid"
+
+
+def params_to_caps_output(current_caps, params, deletion=False):
+    out_caps = current_caps
+    for param in params:
+        splitted = param.split("=", maxsplit=1)
+        cap = splitted[0]
+
+        new_perm = perm_string_to_flag(splitted[1])
+        current = next((item for item in out_caps if item["type"] == cap), None)
+
+        if not current:
+            if not deletion:
+                out_caps.append(dict(type=cap, perm=perm_flag_to_string(new_perm)))
+            continue
+
+        current_perm = perm_string_to_flag(current["perm"])
+
+        new_perm = current_perm & ~new_perm if deletion else new_perm | current_perm
+
+        # Nothing left for this cap type: drop the entry entirely instead of
+        # updating a dict that is no longer part of the list.
+        if new_perm == 0x0:
+            out_caps.remove(current)
+            continue
+
+        current["perm"] = perm_flag_to_string(new_perm)
+
+    return out_caps
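Editor's note: the IntFlag encoding above turns cap arithmetic into plain bit operations, with addition as a bitwise OR and deletion as an AND with the complement. A self-contained sketch of the same flags:

    # Editor's sketch, not part of the patch; mirrors the RGWUserCaps values.
    from enum import IntFlag

    class Perm(IntFlag):
        READ = 0x1
        WRITE = 0x2
        ALL = READ | WRITE

    assert Perm.READ | Perm.WRITE == Perm.ALL   # state=present merges permissions
    assert Perm.ALL & ~Perm.WRITE == Perm.READ  # state=absent strips permissions
    assert Perm.READ & ~Perm.READ == 0          # nothing left: the cap is dropped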
+
+
+def run_module():
+    module_args = dict(
+        cluster=dict(type="str", required=False, default="ceph"),
+        name=dict(type="str", required=True),
+        state=dict(
+            type="str", required=False, choices=["present", "absent"], default="present"
+        ),
+        caps=dict(type="list", elements="str", required=True),
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+    )
+
+    # Gather module parameters in variables
+    name = module.params.get("name")
+    state = module.params.get("state")
+    caps = module.params.get("caps")
+
+    startd = datetime.datetime.now()
+    changed = False
+
+    # will return either the image name or None
+    container_image = is_containerized()
+
+    diff = dict(before="", after="")
+
+    # get user infos for diff
+    rc, cmd, out, err = exec_command(
+        module, get_user(module, container_image=container_image)
+    )
+
+    if rc == 0:
+        before_user = json.loads(out)
+        before_caps = sorted(before_user["caps"], key=lambda d: d["type"])
+        diff["before"] = json.dumps(before_caps, indent=4)
+
+    out = ""
+    err = ""
+
+    if state == "present":
+        cmd = add_caps(module, container_image=container_image)
+    elif state == "absent":
+        cmd = remove_caps(module, container_image=container_image)
+
+    if not module.check_mode:
+        rc, cmd, out, err = exec_command(module, cmd)
+    elif rc == 0:
+        # In check mode, predict the resulting caps from the current ones
+        # instead of running radosgw-admin. Guarding on rc avoids a
+        # NameError on before_user when the user does not exist.
+        out_caps = params_to_caps_output(
+            before_user["caps"], caps, deletion=(state == "absent")
+        )
+        out = json.dumps(dict(caps=out_caps))
+
+    if rc == 0:
+        after_user = json.loads(out)["caps"]
+        after_user = sorted(after_user, key=lambda d: d["type"])
+        diff["after"] = json.dumps(after_user, indent=4)
+        changed = diff["before"] != diff["after"]
+    else:
+        out = "User {} doesn't exist".format(name)
+
+    exit_module(
+        module=module,
+        out=out,
+        rc=rc,
+        cmd=cmd,
+        err=err,
+        startd=startd,
+        changed=changed,
+        diff=diff,
+    )
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
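Editor's note: in check mode the module predicts the post-change capabilities with params_to_caps_output instead of calling radosgw-admin. A worked example using the function defined earlier in this module (the cap values are sample data):

    # Editor's sketch, not part of the patch.
    before = [{'type': 'users', 'perm': 'read'}]
    after = params_to_caps_output(before, ['users=write', 'buckets=*'])
    assert after == [{'type': 'users', 'perm': '*'}, {'type': 'buckets', 'perm': '*'}]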
diff --git a/plugins/modules/radosgw_realm.py b/plugins/modules/radosgw_realm.py
new file mode 100644
index 0000000..8a93f86
--- /dev/null
+++ b/plugins/modules/radosgw_realm.py
@@ -0,0 +1,336 @@
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+import datetime
+import os
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: radosgw_realm
+
+short_description: Manage RADOS Gateway Realm
+
+version_added: "2.8"
+
+description:
+    - Manage RADOS Gateway realm(s) creation, deletion and updates.
+options:
+    cluster:
+        description:
+            - The ceph cluster name.
+        required: false
+        default: ceph
+    name:
+        description:
+            - name of the RADOS Gateway realm.
+        required: true
+    state:
+        description:
+          If 'present' is used, the module creates a realm if it doesn't
+          exist or updates it if it already exists.
+          If 'absent' is used, the module will simply delete the realm.
+          If 'info' is used, the module will return all details about the
+          existing realm (json formatted).
+          If 'pull' is used, the module will pull the realm from the master
+          RADOS Gateway zone defined by url, access_key and secret_key.
+        required: false
+        choices: ['present', 'absent', 'info', 'pull']
+        default: present
+    default:
+        description:
+            - set the default flag on the realm.
+        required: false
+        default: false
+    url:
+        description:
+            - URL to the master RADOS Gateway zone.
+        required: false
+    access_key:
+        description:
+            - S3 access key of the master RADOS Gateway zone.
+        required: false
+    secret_key:
+        description:
+            - S3 secret key of the master RADOS Gateway zone.
+        required: false
+
+author:
+    - Dimitri Savineau
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway default realm
+  radosgw_realm:
+    name: foo
+    default: true
+
+- name: get a RADOS Gateway realm information
+  radosgw_realm:
+    name: foo
+    state: info
+
+- name: delete a RADOS Gateway realm
+  radosgw_realm:
+    name: foo
+    state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image):
+    '''
+    Build the docker CLI to run a command inside a container
+    '''
+
+    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+    command_exec = [container_binary,
+                    'run',
+                    '--rm',
+                    '--net=host',
+                    '-v', '/etc/ceph:/etc/ceph:z',
+                    '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                    '-v', '/var/log/ceph/:/var/log/ceph/:z',
+                    '--entrypoint=' + binary, container_image]
+    return command_exec
+
+
+def is_containerized():
+    '''
+    Check if we are running on a containerized cluster
+    '''
+
+    if 'CEPH_CONTAINER_IMAGE' in os.environ:
+        container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+    else:
+        container_image = None
+
+    return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+    '''
+    Generate radosgw-admin prefix command
+    '''
+    if container_image:
+        cmd = container_exec('radosgw-admin', container_image)
+    else:
+        cmd = ['radosgw-admin']
+
+    return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+    '''
+    Generate 'radosgw-admin' command line to execute
+    '''
+
+    cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+    base_cmd = [
+        '--cluster',
+        cluster,
+        'realm'
+    ]
+
+    cmd.extend(base_cmd + args)
+
+    return cmd
+
+
+def exec_commands(module, cmd):
+    '''
+    Execute command(s)
+    '''
+
+    rc, out, err = module.run_command(cmd)
+
+    return rc, cmd, out, err
+
+
+def create_realm(module, container_image=None):
+    '''
+    Create a new realm
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    default = module.params.get('default', False)
+
+    args = ['create', '--rgw-realm=' + name]
+
+    if default:
+        args.append('--default')
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
+
+
+def get_realm(module, container_image=None):
+    '''
+    Get existing realm
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+
+    args = ['get', '--rgw-realm=' + name, '--format=json']
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
+
+
+def remove_realm(module, container_image=None):
+    '''
+    Remove a realm
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+
+    args = ['delete', '--rgw-realm=' + name]
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
+
+
+def pull_realm(module, container_image=None):
+    '''
+    Pull a realm
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    url = module.params.get('url')
+    access_key = module.params.get('access_key')
+    secret_key = module.params.get('secret_key')
+
+    args = [
+        'pull',
+        '--rgw-realm=' +
name, + '--url=' + url, + '--access-key=' + access_key, + '--secret=' + secret_key + ] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info', 'pull'], default='present'), # noqa: E501 + default=dict(type='bool', required=False, default=False), + url=dict(type='str', required=False), + access_key=dict(type='str', required=False), + secret_key=dict(type='str', required=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[['state', 'pull', ['url', 'access_key', 'secret_key']]], + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + if state == "present": + rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + if rc != 0: + rc, cmd, out, err = exec_commands(module, create_realm(module, container_image=container_image)) # noqa: E501 + changed = True + + elif state == "absent": + rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + if rc == 0: + rc, cmd, out, err = exec_commands(module, remove_realm(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Realm {} doesn't exist".format(name) + + elif state == "info": + rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + + elif state == "pull": + rc, cmd, out, err = exec_commands(module, pull_realm(module, container_image=container_image)) # noqa: E501 + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/radosgw_user.py b/plugins/modules/radosgw_user.py new file mode 100644 index 0000000..78addd0 --- /dev/null +++ b/plugins/modules/radosgw_user.py @@ -0,0 +1,481 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
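Editor's note: before the user module starts, one remark on the realm module above: with state=pull, the required_if rule guarantees url, access_key and secret_key are supplied, and pull_realm assembles an argv like the following (values are placeholders; note the flag is --secret, not --secret-key):

    # Editor's sketch, not part of the patch; realm/url/keys are examples.
    ['radosgw-admin', '--cluster', 'ceph', 'realm', 'pull',
     '--rgw-realm=foo', '--url=http://192.168.1.10:8080',
     '--access-key=ACCESSKEY', '--secret=SECRETKEY']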
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: radosgw_user + +short_description: Manage RADOS Gateway User + +version_added: "2.8" + +description: + - Manage RADOS Gateway user(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the RADOS Gateway user (uid). + required: true + state: + description: + If 'present' is used, the module creates a user if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the user. + If 'info' is used, the module will return all details about the + existing user (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + display_name: + description: + - set the display name of the user. + required: false + default: None + email: + description: + - set the email of the user. + required: false + default: None + access_key: + description: + - set the S3 access key of the user. + required: false + default: None + secret_key: + description: + - set the S3 secret key of the user. + required: false + default: None + realm: + description: + - set the realm of the user. + required: false + default: None + zonegroup: + description: + - set the zonegroup of the user. + required: false + default: None + zone: + description: + - set the zone of the user. + required: false + default: None + system: + description: + - set the system flag on the user. + required: false + default: false + admin: + description: + - set the admin flag on the user. 
+        required: false
+        default: false
+
+author:
+    - Dimitri Savineau
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway system user
+  radosgw_user:
+    name: foo
+    system: true
+
+- name: modify a RADOS Gateway user
+  radosgw_user:
+    name: foo
+    email: foo@bar.io
+    access_key: LbwDPp2BBo2Sdlts89Um
+    secret_key: FavL6ueQWcWuWn0YXyQ3TnJ3mT3Uj5SGVHCUXC5K
+    state: present
+
+- name: get a RADOS Gateway user information
+  radosgw_user:
+    name: foo
+    state: info
+
+- name: delete a RADOS Gateway user
+  radosgw_user:
+    name: foo
+    state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image):
+    '''
+    Build the docker CLI to run a command inside a container
+    '''
+
+    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+    command_exec = [container_binary,
+                    'run',
+                    '--rm',
+                    '--net=host',
+                    '-v', '/etc/ceph:/etc/ceph:z',
+                    '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                    '-v', '/var/log/ceph/:/var/log/ceph/:z',
+                    '--entrypoint=' + binary, container_image]
+    return command_exec
+
+
+def is_containerized():
+    '''
+    Check if we are running on a containerized cluster
+    '''
+
+    if 'CEPH_CONTAINER_IMAGE' in os.environ:
+        container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+    else:
+        container_image = None
+
+    return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+    '''
+    Generate radosgw-admin prefix command
+    '''
+    if container_image:
+        cmd = container_exec('radosgw-admin', container_image)
+    else:
+        cmd = ['radosgw-admin']
+
+    return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+    '''
+    Generate 'radosgw-admin' command line to execute
+    '''
+
+    cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+    base_cmd = [
+        '--cluster',
+        cluster,
+        'user'
+    ]
+
+    cmd.extend(base_cmd + args)
+
+    return cmd
+
+
+def exec_commands(module, cmd):
+    '''
+    Execute command(s)
+    '''
+
+    rc, out, err = module.run_command(cmd)
+
+    return rc, cmd, out, err
+
+
+def create_user(module, container_image=None):
+    '''
+    Create a new user
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    display_name = module.params.get('display_name')
+    if not display_name:
+        display_name = name
+    email = module.params.get('email', None)
+    access_key = module.params.get('access_key', None)
+    secret_key = module.params.get('secret_key', None)
+    realm = module.params.get('realm', None)
+    zonegroup = module.params.get('zonegroup', None)
+    zone = module.params.get('zone', None)
+    system = module.params.get('system', False)
+    admin = module.params.get('admin', False)
+
+    args = ['create', '--uid=' + name, '--display_name=' + display_name]
+
+    if email:
+        args.extend(['--email=' + email])
+
+    if access_key:
+        args.extend(['--access-key=' + access_key])
+
+    if secret_key:
+        args.extend(['--secret-key=' + secret_key])
+
+    if realm:
+        args.extend(['--rgw-realm=' + realm])
+
+    if zonegroup:
+        args.extend(['--rgw-zonegroup=' + zonegroup])
+
+    if zone:
+        args.extend(['--rgw-zone=' + zone])
+
+    if system:
+        args.append('--system')
+
+    if admin:
+        args.append('--admin')
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
+
+
+def modify_user(module, container_image=None):
+    '''
+    Modify an existing user
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    display_name = module.params.get('display_name')
+    email = module.params.get('email', None)
+    access_key = module.params.get('access_key', None)
+    secret_key = module.params.get('secret_key', None)
+
realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + system = module.params.get('system', False) + admin = module.params.get('admin', False) + + args = ['modify', '--uid=' + name] + + if display_name: + args.extend(['--display_name=' + display_name]) + + if email: + args.extend(['--email=' + email]) + + if access_key: + args.extend(['--access-key=' + access_key]) + + if secret_key: + args.extend(['--secret-key=' + secret_key]) + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + if system: + args.append('--system') + + if admin: + args.append('--admin') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_user(module, container_image=None): + ''' + Get existing user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + + args = ['info', '--uid=' + name, '--format=json'] + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def remove_user(module, container_image=None): + ''' + Remove a user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + + args = ['rm', '--uid=' + name] + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501 + display_name=dict(type='str', required=False), + email=dict(type='str', required=False), + access_key=dict(type='str', required=False, no_log=True), + secret_key=dict(type='str', required=False, no_log=True), + realm=dict(type='str', required=False), + zonegroup=dict(type='str', required=False), + zone=dict(type='str', required=False), + system=dict(type='bool', required=False, default=False), + admin=dict(type='bool', required=False, default=False) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + display_name = module.params.get('display_name') + if not display_name: + display_name = name + email = module.params.get('email') + access_key = module.params.get('access_key') 
+ secret_key = module.params.get('secret_key') + system = module.params.get('system') + admin = module.params.get('admin') + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image)) # noqa: E501 + if state == "present": + if rc == 0: + user = json.loads(out) + current = { + 'display_name': user['display_name'], + 'system': user.get('system', False), + 'admin': user.get('admin', False) + } + asked = { + 'display_name': display_name, + 'system': system, + 'admin': admin + } + if email: + current['email'] = user['email'] + asked['email'] = email + + if access_key and secret_key: + asked['access_key'] = access_key + asked['secret_key'] = secret_key + for key in user['keys']: + if key['access_key'] == access_key and key['secret_key'] == secret_key: # noqa: E501 + del asked['access_key'] + del asked['secret_key'] + break + + changed = current != asked + if changed and not module.check_mode: + rc, cmd, out, err = exec_commands(module, modify_user(module, container_image=container_image)) # noqa: E501 + else: + changed = True + if not module.check_mode: + rc, cmd, out, err = exec_commands(module, create_user(module, container_image=container_image)) # noqa: E501 + else: + rc = 0 + + elif state == "absent": + if rc == 0: + if not module.check_mode: + rc, cmd, out, err = exec_commands(module, remove_user(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "User {} doesn't exist".format(name) + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/radosgw_zone.py b/plugins/modules/radosgw_zone.py new file mode 100644 index 0000000..8ddab13 --- /dev/null +++ b/plugins/modules/radosgw_zone.py @@ -0,0 +1,543 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import fatal +except ImportError: + from module_utils.ca_common import fatal +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: radosgw_zone + +short_description: Manage RADOS Gateway Zone + +version_added: "2.8" + +description: + - Manage RADOS Gateway zone(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the RADOS Gateway zone. + required: true + state: + description: + If 'present' is used, the module creates a zone if it doesn't + exist or update it if it already exists. 
+          If 'absent' is used, the module will simply delete the zone.
+          If 'info' is used, the module will return all details about the
+          existing zone (json formatted).
+          If 'set' is used, the module will apply the provided zone
+          document (zone_doc) with 'radosgw-admin zone set'.
+        required: false
+        choices: ['present', 'absent', 'info', 'set']
+        default: present
+    realm:
+        description:
+            - name of the RADOS Gateway realm.
+        required: true
+    zonegroup:
+        description:
+            - name of the RADOS Gateway zonegroup.
+        required: true
+    endpoints:
+        description:
+            - endpoints of the RADOS Gateway zone.
+        required: false
+        default: []
+    access_key:
+        description:
+            - set the S3 access key of the user.
+        required: false
+        default: None
+    secret_key:
+        description:
+            - set the S3 secret key of the user.
+        required: false
+        default: None
+    default:
+        description:
+            - set the default flag on the zone.
+        required: false
+        default: false
+    master:
+        description:
+            - set the master flag on the zone.
+        required: false
+        default: false
+    zone_doc:
+        description:
+            - the full zone document to apply when state is 'set'.
+        required: false
+        default: {}
+
+author:
+    - Dimitri Savineau
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway default zone
+  radosgw_zone:
+    name: z1
+    realm: foo
+    zonegroup: bar
+    endpoints:
+      - http://192.168.1.10:8080
+      - http://192.168.1.11:8080
+    default: true
+
+- name: get a RADOS Gateway zone information
+  radosgw_zone:
+    name: z1
+    state: info
+
+- name: delete a RADOS Gateway zone
+  radosgw_zone:
+    name: z1
+    state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image, container_args=[]):
+    '''
+    Build the docker CLI to run a command inside a container
+    '''
+
+    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+
+    command_exec = [container_binary, 'run', '--rm', '--net=host']
+    command_exec.extend(container_args)
+    command_exec.extend([
+        '-v', '/etc/ceph:/etc/ceph:z',
+        '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+        '-v', '/var/log/ceph/:/var/log/ceph/:z',
+        '--entrypoint=' + binary,
+        container_image,
+    ])
+
+    return command_exec
+
+
+def is_containerized():
+    '''
+    Check if we are running on a containerized cluster
+    '''
+
+    if 'CEPH_CONTAINER_IMAGE' in os.environ:
+        container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+    else:
+        container_image = None
+
+    return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None, container_args=[]):
+    '''
+    Generate radosgw-admin prefix command
+    '''
+    if container_image:
+        cmd = container_exec('radosgw-admin', container_image, container_args)
+    else:
+        cmd = ['radosgw-admin']
+
+    return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None, container_args=[]):
+    '''
+    Generate 'radosgw-admin' command line to execute
+    '''
+
+    cmd = pre_generate_radosgw_cmd(container_image=container_image, container_args=container_args)  # noqa: E501
+
+    base_cmd = [
+        '--cluster',
+        cluster,
+        'zone'
+    ]
+
+    cmd.extend(base_cmd + args)
+
+    return cmd
+
+
+def exec_commands(module, cmd):
+    '''
+    Execute command(s)
+    '''
+
+    rc, out, err = module.run_command(cmd)
+
+    return rc, cmd, out, err
+
+
+def create_zone(module, container_image=None):
+    '''
+    Create a new zone
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    realm = module.params.get('realm')
+    zonegroup = module.params.get('zonegroup')
+    endpoints = module.params.get('endpoints')
+    access_key = module.params.get('access_key')
+    secret_key = module.params.get('secret_key')
+    default = module.params.get('default')
+    master = module.params.get('master')
+
+    args = [
+        'create',
+        '--rgw-realm=' + realm,
+        '--rgw-zonegroup=' + zonegroup,
+        '--rgw-zone=' + name
+    ]
+
+    if endpoints:
+        args.extend(['--endpoints=' + ','.join(endpoints)])
+
+    if
access_key: + args.extend(['--access-key=' + access_key]) + + if secret_key: + args.extend(['--secret-key=' + secret_key]) + + if default: + args.append('--default') + + if master: + args.append('--master') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def modify_zone(module, container_image=None): + ''' + Modify a new zone + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + endpoints = module.params.get('endpoints') + access_key = module.params.get('access_key') + secret_key = module.params.get('secret_key') + default = module.params.get('default') + master = module.params.get('master') + + args = [ + 'modify', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--rgw-zone=' + name + ] + + if endpoints: + args.extend(['--endpoints=' + ','.join(endpoints)]) + + if access_key: + args.extend(['--access-key=' + access_key]) + + if secret_key: + args.extend(['--secret-key=' + secret_key]) + + if default: + args.append('--default') + + if master: + args.append('--master') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_zone(module, container_image=None): + ''' + Get existing zone + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + + args = [ + 'get', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--rgw-zone=' + name, + '--format=json' + ] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_zonegroup(module, container_image=None): + ''' + Get existing zonegroup + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + args = [ + '--cluster', + cluster, + 'zonegroup', + 'get', + '--rgw-zone=' + name, + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--format=json' + ] + + cmd.extend(args) + + return cmd + + +def get_realm(module, container_image=None): + ''' + Get existing realm + ''' + + cluster = module.params.get('cluster') + realm = module.params.get('realm') + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + args = [ + '--cluster', + cluster, + 'realm', + 'get', + '--rgw-realm=' + realm, + '--format=json' + ] + + cmd.extend(args) + + return cmd + + +def remove_zone(module, container_image=None): + ''' + Remove a zone + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + + args = [ + 'delete', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--rgw-zone=' + name + ] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def set_zone(module, container_image=None): + ''' + Set a zone + ''' + + cluster = module.params.get('cluster') + realm = module.params.get('realm') + zone_doc = module.params.get('zone_doc') + + # store the zone_doc in a file + filename = module.tmpdir + 'zone_doc.json' + with open(filename, 'w') as f: + json.dump(zone_doc, f) + + container_args = [ + '-v', filename + ':' + filename + ':ro' + ] + args = [ + 'set', 
+        '--rgw-realm=' + realm,
+        '--infile=' + filename,
+    ]
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image,
+                               container_args=container_args)
+
+    return cmd
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False):
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        rc=rc,
+        stdout=out.rstrip("\r\n"),
+        stderr=err.rstrip("\r\n"),
+        changed=changed,
+    )
+    module.exit_json(**result)
+
+
+def run_module():
+    module_args = dict(
+        cluster=dict(type='str', required=False, default='ceph'),
+        name=dict(type='str', required=True),
+        state=dict(type='str', required=False, choices=['present', 'absent', 'info', 'set'], default='present'),  # noqa: E501
+        realm=dict(type='str', required=True),
+        zonegroup=dict(type='str', required=True),
+        endpoints=dict(type='list', required=False, default=[]),
+        access_key=dict(type='str', required=False, no_log=True),
+        secret_key=dict(type='str', required=False, no_log=True),
+        default=dict(type='bool', required=False, default=False),
+        master=dict(type='bool', required=False, default=False),
+        zone_doc=dict(type='dict', required=False, default={})
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+    )
+
+    # Gather module parameters in variables
+    name = module.params.get('name')
+    state = module.params.get('state')
+    endpoints = module.params.get('endpoints')
+    access_key = module.params.get('access_key')
+    secret_key = module.params.get('secret_key')
+
+    if module.check_mode:
+        module.exit_json(
+            changed=False,
+            stdout='',
+            stderr='',
+            rc=0,
+            start='',
+            end='',
+            delta='',
+        )
+
+    startd = datetime.datetime.now()
+    changed = False
+
+    # will return either the image name or None
+    container_image = is_containerized()
+
+    rc, cmd, out, err = exec_commands(module, get_zone(module, container_image=container_image))  # noqa: E501
+
+    if state == "set":
+        zone = json.loads(out) if rc == 0 else {}
+        zone_doc = module.params.get('zone_doc')
+        if not zone_doc:
+            fatal("zone_doc is required when state is set", module)
+
+        changed = zone_doc != zone
+        if changed:
+            rc, cmd, out, err = exec_commands(module, set_zone(module, container_image=container_image))  # noqa: E501
+
+    if state == "present":
+        if rc == 0:
+            zone = json.loads(out)
+            _rc, _cmd, _out, _err = exec_commands(module, get_realm(module, container_image=container_image))  # noqa: E501
+            if _rc != 0:
+                fatal(_err, module)
+            realm = json.loads(_out)
+            _rc, _cmd, _out, _err = exec_commands(module, get_zonegroup(module, container_image=container_image))  # noqa: E501
+            if _rc != 0:
+                fatal(_err, module)
+            zonegroup = json.loads(_out)
+            if not access_key:
+                access_key = ''
+            if not secret_key:
+                secret_key = ''
+            current = {
+                'endpoints': next(zone['endpoints'] for zone in zonegroup['zones'] if zone['name'] == name),  # noqa: E501
+                'access_key': zone['system_key']['access_key'],
+                'secret_key': zone['system_key']['secret_key'],
+                'realm_id': zone['realm_id']
+            }
+            asked = {
+                'endpoints': endpoints,
+                'access_key': access_key,
+                'secret_key': secret_key,
+                'realm_id': realm['id']
+            }
+            if current != asked:
+                rc, cmd, out, err = exec_commands(module, modify_zone(module, container_image=container_image))  # noqa: E501
+                changed = True
+        else:
+            rc, cmd, out, err = exec_commands(module, create_zone(module, container_image=container_image))  # noqa: E501
+            changed = True
+
+    elif state == "absent":
+        if rc == 0:
+            rc, cmd, out, err =
exec_commands(module, remove_zone(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Zone {} doesn't exist".format(name) + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/radosgw_zonegroup.py b/plugins/modules/radosgw_zonegroup.py new file mode 100644 index 0000000..533d58f --- /dev/null +++ b/plugins/modules/radosgw_zonegroup.py @@ -0,0 +1,397 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import fatal +except ImportError: + from module_utils.ca_common import fatal +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: radosgw_zonegroup + +short_description: Manage RADOS Gateway Zonegroup + +version_added: "2.8" + +description: + - Manage RADOS Gateway zonegroup(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the RADOS Gateway zonegroup. + required: true + state: + description: + If 'present' is used, the module creates a zonegroup if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the zonegroup. + If 'info' is used, the module will return all details about the + existing zonegroup (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + realm: + description: + - name of the RADOS Gateway realm. + required: true + endpoints: + description: + - endpoints of the RADOS Gateway zonegroup. + required: false + default: [] + default: + description: + - set the default flag on the zonegroup. + required: false + default: false + master: + description: + - set the master flag on the zonegroup. 
+        required: false
+        default: false
+
+author:
+    - Dimitri Savineau
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway default zonegroup
+  radosgw_zonegroup:
+    name: foo
+    realm: bar
+    endpoints:
+      - http://192.168.1.10:8080
+      - http://192.168.1.11:8080
+    default: true
+
+- name: get a RADOS Gateway zonegroup information
+  radosgw_zonegroup:
+    name: foo
+    realm: bar
+    state: info
+
+- name: delete a RADOS Gateway zonegroup
+  radosgw_zonegroup:
+    name: foo
+    realm: bar
+    state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image):
+    '''
+    Build the docker CLI to run a command inside a container
+    '''
+
+    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+    command_exec = [container_binary,
+                    'run',
+                    '--rm',
+                    '--net=host',
+                    '-v', '/etc/ceph:/etc/ceph:z',
+                    '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                    '-v', '/var/log/ceph/:/var/log/ceph/:z',
+                    '--entrypoint=' + binary, container_image]
+    return command_exec
+
+
+def is_containerized():
+    '''
+    Check if we are running on a containerized cluster
+    '''
+
+    if 'CEPH_CONTAINER_IMAGE' in os.environ:
+        container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+    else:
+        container_image = None
+
+    return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+    '''
+    Generate radosgw-admin prefix command
+    '''
+    if container_image:
+        cmd = container_exec('radosgw-admin', container_image)
+    else:
+        cmd = ['radosgw-admin']
+
+    return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+    '''
+    Generate 'radosgw-admin' command line to execute
+    '''
+
+    cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+    base_cmd = [
+        '--cluster',
+        cluster,
+        'zonegroup'
+    ]
+
+    cmd.extend(base_cmd + args)
+
+    return cmd
+
+
+def exec_commands(module, cmd):
+    '''
+    Execute command(s)
+    '''
+
+    rc, out, err = module.run_command(cmd)
+
+    return rc, cmd, out, err
+
+
+def create_zonegroup(module, container_image=None):
+    '''
+    Create a new zonegroup
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    realm = module.params.get('realm')
+    endpoints = module.params.get('endpoints')
+    default = module.params.get('default')
+    master = module.params.get('master')
+
+    args = ['create', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name]
+
+    if endpoints:
+        args.extend(['--endpoints=' + ','.join(endpoints)])
+
+    if default:
+        args.append('--default')
+
+    if master:
+        args.append('--master')
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
+
+
+def modify_zonegroup(module, container_image=None):
+    '''
+    Modify an existing zonegroup
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    realm = module.params.get('realm')
+    endpoints = module.params.get('endpoints')
+    default = module.params.get('default')
+    master = module.params.get('master')
+
+    args = ['modify', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name]
+
+    if endpoints:
+        args.extend(['--endpoints=' + ','.join(endpoints)])
+
+    if default:
+        args.append('--default')
+
+    if master:
+        args.append('--master')
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
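Editor's note: combining the command prefix with create_zonegroup, the first EXAMPLES task above would execute roughly this argv on a bare-metal host (values taken from the example):

    # Editor's sketch, not part of the patch.
    ['radosgw-admin', '--cluster', 'ceph', 'zonegroup', 'create',
     '--rgw-realm=bar', '--rgw-zonegroup=foo',
     '--endpoints=http://192.168.1.10:8080,http://192.168.1.11:8080',
     '--default']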
+
+
+def get_zonegroup(module, container_image=None):
+    '''
+    Get existing zonegroup
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    realm = module.params.get('realm')
+
+    args = [
+        'get',
+        '--rgw-realm=' + realm,
+        '--rgw-zonegroup=' + name,
+        '--format=json'
+    ]
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
+
+
+def get_realm(module, container_image=None):
+    '''
+    Get existing realm
+    '''
+
+    cluster = module.params.get('cluster')
+    realm = module.params.get('realm')
+
+    cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+    args = [
+        '--cluster',
+        cluster,
+        'realm',
+        'get',
+        '--rgw-realm=' + realm,
+        '--format=json'
+    ]
+
+    cmd.extend(args)
+
+    return cmd
+
+
+def remove_zonegroup(module, container_image=None):
+    '''
+    Remove a zonegroup
+    '''
+
+    cluster = module.params.get('cluster')
+    name = module.params.get('name')
+    realm = module.params.get('realm')
+
+    args = ['delete', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name]
+
+    cmd = generate_radosgw_cmd(cluster=cluster,
+                               args=args,
+                               container_image=container_image)
+
+    return cmd
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False):
+    endd = datetime.datetime.now()
+    delta = endd - startd
+
+    result = dict(
+        cmd=cmd,
+        start=str(startd),
+        end=str(endd),
+        delta=str(delta),
+        rc=rc,
+        stdout=out.rstrip("\r\n"),
+        stderr=err.rstrip("\r\n"),
+        changed=changed,
+    )
+    module.exit_json(**result)
+
+
+def run_module():
+    module_args = dict(
+        cluster=dict(type='str', required=False, default='ceph'),
+        name=dict(type='str', required=True),
+        state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'),  # noqa: E501
+        realm=dict(type='str', required=True),
+        endpoints=dict(type='list', required=False, default=[]),
+        default=dict(type='bool', required=False, default=False),
+        master=dict(type='bool', required=False, default=False),
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+    )
+
+    # Gather module parameters in variables
+    name = module.params.get('name')
+    state = module.params.get('state')
+    endpoints = module.params.get('endpoints')
+    master = module.params.get('master')
+
+    startd = datetime.datetime.now()
+    changed = False
+
+    # will return either the image name or None
+    container_image = is_containerized()
+
+    rc, cmd, out, err = exec_commands(module, get_zonegroup(module, container_image=container_image))  # noqa: E501
+    if state == "present":
+        if rc == 0:
+            zonegroup = json.loads(out)
+            _rc, _cmd, _out, _err = exec_commands(module, get_realm(module, container_image=container_image))  # noqa: E501
+            if _rc != 0:
+                fatal(_err, module)
+            realm = json.loads(_out)
+            current = {
+                'endpoints': zonegroup['endpoints'],
+                'master': zonegroup.get('is_master', False),
+                'realm_id': zonegroup['realm_id']
+            }
+            asked = {
+                'endpoints': endpoints,
+                'master': master,
+                'realm_id': realm['id']
+            }
+            changed = current != asked
+            if changed and not module.check_mode:
+                rc, cmd, out, err = exec_commands(module, modify_zonegroup(module, container_image=container_image))  # noqa: E501
+        else:
+            if not module.check_mode:
+                rc, cmd, out, err = exec_commands(module, create_zonegroup(module, container_image=container_image))  # noqa: E501
+            changed = True
+
+    elif state == "absent":
+        if rc == 0:
+            if not module.check_mode:
+                rc, cmd, out, err = exec_commands(module, remove_zonegroup(module, container_image=container_image))  # noqa: E501
+            changed = True
+        else:
+            rc = 0
+            out = "Zonegroup {} doesn't exist".format(name)
+
+    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)  # noqa: E501
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git
a/tests/unit/module_utils/test_ca_common.py b/tests/unit/module_utils/test_ca_common.py new file mode 100644 index 0000000..e562bae --- /dev/null +++ b/tests/unit/module_utils/test_ca_common.py @@ -0,0 +1,144 @@ +from mock.mock import patch, MagicMock +import os +import ca_common +import pytest + +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' + + +class TestCommon(object): + + def setup_method(self): + self.fake_binary = 'ceph' + self.fake_cluster = 'ceph' + self.fake_container_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + self.fake_binary, + fake_container_image + ] + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_container_exec(self): + cmd = ca_common.container_exec(self.fake_binary, fake_container_image) + assert cmd == self.fake_container_cmd + + def test_not_is_containerized(self): + assert ca_common.is_containerized() is None + + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_is_containerized(self): + assert ca_common.is_containerized() == fake_container_image + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_pre_generate_cmd(self, image): + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + assert ca_common.pre_generate_cmd(self.fake_binary, image) == expected_cmd # noqa: E501 + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + self.fake_cluster, + 'osd', 'pool', + 'create', 'foo' + ]) + assert ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster=self.fake_cluster, container_image=image) == expected_cmd # noqa: E501 + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd_different_cluster_name(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.admin', + '-k', '/etc/ceph/foo.client.admin.keyring', + '--cluster', + 'foo', + 'osd', 'pool', + 'create', 'foo' + ]) + result = ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster='foo', container_image=image) # noqa: E501 + assert result == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd_different_cluster_name_and_user(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.foo', + '-k', '/etc/ceph/foo.client.foo.keyring', + '--cluster', + 'foo', + 'osd', 'pool', + 'create', 'foo' + ]) + result = ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster='foo', 
user='client.foo', container_image=image) # noqa: E501 + assert result == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd_different_user(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.foo', + '-k', '/etc/ceph/ceph.client.foo.keyring', + '--cluster', + 'ceph', + 'osd', 'pool', + 'create', 'foo' + ]) + result = ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, user='client.foo', container_image=image) # noqa: E501 + assert result == expected_cmd + + @pytest.mark.parametrize('stdin', [None, 'foo']) + def test_exec_command(self, stdin): + fake_module = MagicMock() + rc = 0 + stderr = '' + stdout = 'ceph version 1.2.3' + fake_module.run_command.return_value = 0, stdout, stderr + expected_cmd = [self.fake_binary, '--version'] + _rc, _cmd, _out, _err = ca_common.exec_command(fake_module, expected_cmd, stdin=stdin) # noqa: E501 + assert _rc == rc + assert _cmd == expected_cmd + assert _err == stderr + assert _out == stdout diff --git a/tests/unit/modules/ca_test_common.py b/tests/unit/modules/ca_test_common.py new file mode 100644 index 0000000..eaa0bd6 --- /dev/null +++ b/tests/unit/modules/ca_test_common.py @@ -0,0 +1,29 @@ +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import json + + +def set_module_args(args): + if '_ansible_remote_tmp' not in args: + args['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in args: + args['_ansible_keep_remote_files'] = False + + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) + + +class AnsibleExitJson(Exception): + pass + + +class AnsibleFailJson(Exception): + pass + + +def exit_json(*args, **kwargs): + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): + raise AnsibleFailJson(kwargs) diff --git a/tests/unit/modules/test_ceph_crush.py b/tests/unit/modules/test_ceph_crush.py new file mode 100644 index 0000000..d24586c --- /dev/null +++ b/tests/unit/modules/test_ceph_crush.py @@ -0,0 +1,93 @@ +import sys +import pytest + +sys.path.append('./library') +import ceph_crush # noqa: E402 + + +class TestCephCrushModule(object): + + def test_no_host(self): + location = [ + ("chassis", "monchassis"), + ("rack", "monrack"), + ("row", "marow"), + ("pdu", "monpdu"), + ("pod", "monpod"), + ("room", "maroom"), + ("datacenter", "mondc"), + ("region", "maregion"), + ("root", "maroute"), + ] + with pytest.raises(Exception): + ceph_crush.sort_osd_crush_location(location, None) + + def test_lower_than_two_bucket(self): + location = [ + ("chassis", "monchassis"), + ] + with pytest.raises(Exception): + ceph_crush.sort_osd_crush_location(location, None) + + def test_invalid_bucket_type(self): + location = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rackyyyyy", "monrack"), + ] + with pytest.raises(Exception): + ceph_crush.sort_osd_crush_location(location, None) + + def test_ordering(self): + expected_result = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rack", "monrack"), + ("row", "marow"), + ("pdu", "monpdu"), + ("pod", "monpod"), + ("room", "maroom"), + ("datacenter", "mondc"), + ("region", "maregion"), + ("root", "maroute"), + ] + expected_result_reverse = expected_result[::-1] + result = 
ceph_crush.sort_osd_crush_location(expected_result_reverse, None) + assert expected_result == result + + def test_generate_commands(self): + cluster = "test" + expected_command_list = [ + ['ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monhost", "host"], + ['ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monchassis", "chassis"], + ['ceph', '--cluster', cluster, 'osd', 'crush', "move", "monhost", "chassis=monchassis"], + ['ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monrack", "rack"], + ['ceph', '--cluster', cluster, 'osd', 'crush', "move", "monchassis", "rack=monrack"], + ] + + location = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rack", "monrack"), + ] + result = ceph_crush.create_and_move_buckets_list(cluster, location) + assert result == expected_command_list + + def test_generate_commands_container(self): + cluster = "test" + containerized = "docker exec -ti ceph-mon" + expected_command_list = [ + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monhost", "host"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monchassis", "chassis"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "move", "monhost", "chassis=monchassis"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monrack", "rack"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "move", "monchassis", "rack=monrack"], + ] + + location = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rack", "monrack"), + ] + result = ceph_crush.create_and_move_buckets_list(cluster, location, containerized) + assert result == expected_command_list diff --git a/tests/unit/modules/test_ceph_crush_rule.py b/tests/unit/modules/test_ceph_crush_rule.py new file mode 100644 index 0000000..ecaaf25 --- /dev/null +++ b/tests/unit/modules/test_ceph_crush_rule.py @@ -0,0 +1,442 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_crush_rule + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_name = 'foo' +fake_bucket_root = 'default' +fake_bucket_type = 'host' +fake_device_class = 'ssd' +fake_profile = 'default' +fake_user = 'client.admin' +fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) + + +class TestCephCrushRuleModule(object): + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_with_name_only(self, m_fail_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['msg'] == 'state is present but all of the following are missing: rule_type' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not 
result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_non_existing_replicated_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 2 + get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + get_stdout = '' + create_rc = 0 + create_stderr = '' + create_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'create-replicated', fake_name, fake_bucket_root, fake_bucket_type] + assert result['rc'] == create_rc + assert result['stderr'] == create_stderr + assert result['stdout'] == create_stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_existing_replicated_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_non_existing_replicated_rule_device_class(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + 'device_class': fake_device_class + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 2 + get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + get_stdout = '' + create_rc = 0 + create_stderr = '' + create_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'create-replicated', fake_name, fake_bucket_root, fake_bucket_type, fake_device_class] + assert result['rc'] == create_rc + assert result['stderr'] == create_stderr + assert result['stdout'] == create_stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + 
@patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_existing_replicated_rule_device_class(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + 'device_class': fake_device_class + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_non_existing_erasure_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'erasure', + 'profile': fake_profile + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 2 + get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + get_stdout = '' + create_rc = 0 + create_stderr = '' + create_stdout = 'created rule {} at 1'.format(fake_name) + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'create-erasure', fake_name, fake_profile] + assert result['rc'] == create_rc + assert result['stderr'] == create_stderr + assert result['stdout'] == create_stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_existing_erasure_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'erasure', + 'profile': fake_profile + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_update_existing_replicated_rule(self, m_run_command, m_fail_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + 'device_class': fake_device_class + }) 
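+ # The mocked 'rule dump' below reports "type":3, i.e. an erasure rule, + # while the task asks for a replicated one: the module refuses to convert + # between rule types (type 1 = replicated, type 3 = erasure) and fails.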
+ m_fail_json.side_effect = ca_test_common.fail_json + rc = 0 + stderr = '' + stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['msg'] == 'Can not convert crush rule {} to replicated'.format(fake_name) + assert result['rc'] == 1 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_update_existing_erasure_rule(self, m_run_command, m_fail_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'erasure', + 'profile': fake_profile + }) + m_fail_json.side_effect = ca_test_common.fail_json + rc = 0 + stderr = '' + stdout = '{{"type":1,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['msg'] == 'Can not convert crush rule {} to erasure'.format(fake_name) + assert result['rc'] == 1 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_remove_non_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'state': 'absent' + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 2 + stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + stdout = '' + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == "Crush Rule {} doesn't exist".format(fake_name) + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_remove_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'state': 'absent' + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 0 + get_stderr = '' + get_stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + remove_rc = 0 + remove_stderr = '' + remove_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (remove_rc, remove_stdout, remove_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'rm', fake_name] + assert result['rc'] == remove_rc + assert result['stderr'] == remove_stderr + assert result['stdout'] == remove_stdout
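 + + # The 'get' tests below rely on 'osd crush rule dump' returning rc 2 + # (Error ENOENT) when the rule is missing: state=info propagates that rc + # as-is, while state=absent (test_remove_non_existing_rule above) rewrites + # it to rc 0 with a friendly message. + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def 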
test_get_non_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'state': 'info' + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 2 + stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + stdout = '' + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_get_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'state': 'info' + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_get_all_rules(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': str(), + 'state': 'info' + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', '', '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'state': 'info' + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == [fake_container_binary, 'run', '--rm', 
'--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', fake_container_image, + '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', + 'rule', 'dump', fake_name, '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/unit/modules/test_ceph_dashboard_user.py b/tests/unit/modules/test_ceph_dashboard_user.py new file mode 100644 index 0000000..d0d6e9c --- /dev/null +++ b/tests/unit/modules/test_ceph_dashboard_user.py @@ -0,0 +1,170 @@ +from mock.mock import MagicMock, patch +import pytest +import os +import ca_test_common +import ceph_dashboard_user + +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' + + +class TestCephDashboardUserModule(object): + def setup_method(self): + self.fake_binary = 'ceph' + self.fake_cluster = 'ceph' + self.fake_name = 'foo' + self.fake_user = 'foo' + self.fake_password = 'bar' + self.fake_roles = ['read-only', 'block-manager'] + self.fake_params = {'cluster': self.fake_cluster, + 'name': self.fake_user, + 'password': self.fake_password, + 'roles': self.fake_roles} + self.fake_module = MagicMock() + self.fake_module.params = self.fake_params + + def test_create_user(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-create', + '-i', '-', + self.fake_user + ] + + assert ceph_dashboard_user.create_user(self.fake_module) == expected_cmd + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_create_user_container(self): + fake_container_cmd = [ + fake_container_binary, + 'run', + '--interactive', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + self.fake_binary, + fake_container_image + ] + self.fake_module.params = self.fake_params + expected_cmd = fake_container_cmd + [ + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-create', + '-i', '-', + self.fake_user + ] + + assert ceph_dashboard_user.create_user(self.fake_module, container_image=fake_container_image) == expected_cmd + + def test_set_roles(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-set-roles', + self.fake_user + ] + expected_cmd.extend(self.fake_roles) + + assert ceph_dashboard_user.set_roles(self.fake_module) == expected_cmd + + def test_set_password(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-set-password', + '-i', '-', + self.fake_user + ] + + assert ceph_dashboard_user.set_password(self.fake_module) == expected_cmd + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_set_password_container(self): + fake_container_cmd = [ + 
fake_container_binary, + 'run', + '--interactive', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + self.fake_binary, + fake_container_image + ] + self.fake_module.params = self.fake_params + expected_cmd = fake_container_cmd + [ + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-set-password', + '-i', '-', + self.fake_user + ] + + assert ceph_dashboard_user.set_password(self.fake_module, container_image=fake_container_image) == expected_cmd + + def test_get_user(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-show', + self.fake_user, + '--format=json' + ] + + assert ceph_dashboard_user.get_user(self.fake_module) == expected_cmd + + def test_remove_user(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-delete', + self.fake_user + ] + + assert ceph_dashboard_user.remove_user(self.fake_module) == expected_cmd + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_user_fail_with_weak_password(self, m_run_command, m_fail_json): + ca_test_common.set_module_args(self.fake_module.params) + m_fail_json.side_effect = ca_test_common.fail_json + get_rc = 2 + get_stderr = 'Error ENOENT: User {} does not exist.'.format(self.fake_user) + get_stdout = '' + create_rc = 22 + create_stderr = 'Error EINVAL: Password is too weak.' 
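+ # The password is piped on stdin (note '-i', '-' in the create command) + # rather than passed as an argument; the CLI's EINVAL (rc 22 above) is + # then reported back through fail_json with rc=1, which is why the final + # assertion expects 1.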
+ create_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_dashboard_user.main() + + result = result.value.args[0] + assert result['msg'] == create_stderr + assert result['rc'] == 1 diff --git a/tests/unit/modules/test_ceph_ec_profile.py b/tests/unit/modules/test_ceph_ec_profile.py new file mode 100644 index 0000000..955148f --- /dev/null +++ b/tests/unit/modules/test_ceph_ec_profile.py @@ -0,0 +1,239 @@ +from mock.mock import MagicMock, patch +import ca_test_common +import ceph_ec_profile +import pytest + + +class TestCephEcProfile(object): + def setup_method(self): + self.fake_params = [] + self.fake_binary = 'ceph' + self.fake_cluster = 'ceph' + self.fake_name = 'foo' + self.fake_k = 2 + self.fake_m = 4 + self.fake_module = MagicMock() + self.fake_module.params = self.fake_params + + def test_get_profile(self): + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'osd', 'erasure-code-profile', + 'get', self.fake_name, + '--format=json' + ] + + assert ceph_ec_profile.get_profile(self.fake_module, self.fake_name) == expected_cmd + + @pytest.mark.parametrize("stripe_unit,crush_device_class,force", [(False, None, False), + (32, None, True), + (False, None, True), + (32, None, False), + (False, 'hdd', False), + (32, 'ssd', True), + (False, 'nvme', True), + (32, 'hdd', False)]) + def test_create_profile(self, stripe_unit, crush_device_class, force): + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'osd', 'erasure-code-profile', + 'set', self.fake_name, + 'k={}'.format(self.fake_k), 'm={}'.format(self.fake_m), + ] + if stripe_unit: + expected_cmd.append('stripe_unit={}'.format(stripe_unit)) + if crush_device_class: + expected_cmd.append('crush-device-class={}'.format(crush_device_class)) + if force: + expected_cmd.append('--force') + + assert ceph_ec_profile.create_profile(self.fake_module, + self.fake_name, + self.fake_k, + self.fake_m, + stripe_unit, + crush_device_class, + self.fake_cluster, + force) == expected_cmd + + def test_delete_profile(self): + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'osd', 'erasure-code-profile', + 'rm', self.fake_name + ] + + assert ceph_ec_profile.delete_profile(self.fake_module, + self.fake_name, + self.fake_cluster) == expected_cmd + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_present_nothing_to_update(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "present", + "name": "foo", + "k": 2, + "m": 4, + "stripe_unit": 32, + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.return_value = (0, + ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'], + '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501 + '') + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + 
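# Note the profile JSON mocked above reports k and m as strings ("k":"2") + # while the task passes ints; since this test expects no change, the module + # presumably normalises the types before diffing current against requested. + 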
ceph_ec_profile.run_module() + + result = r.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'] + assert result['stdout'] == '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}' # noqa: E501 + assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_present_profile_to_update(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "present", + "name": "foo", + "k": 2, + "m": 6, + "stripe_unit": 32 + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.side_effect = [ + (0, + ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'], + '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501 + ''), + (0, + ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force'], + '', + '' + ) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force'] + assert not result['stdout'] + assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_present_profile_doesnt_exist(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "present", + "name": "foo", + "k": 2, + "m": 4, + "stripe_unit": 32 + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.side_effect = [ + (2, + ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'], + '', + "Error ENOENT: unknown erasure code profile 'foo'"), + (0, + ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force'], + '', + '' + ) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force'] + assert not result['stdout'] + assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_absent_on_existing_profile(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "absent", + "name": "foo" + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.return_value = (0, + ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'], + '', + '') + + with 
pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'] + assert result['stdout'] == 'Profile foo removed.' + assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_absent_on_nonexisting_profile(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "absent", + "name": "foo" + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.return_value = (0, + ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'], + '', + 'erasure-code-profile foo does not exist') + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'] + assert result['stdout'] == "Skipping, the profile foo doesn't exist" + assert result['stderr'] == 'erasure-code-profile foo does not exist' + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': 'foo', + 'k': 2, + 'm': 4, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_ec_profile.run_module() + + result = result.value.args[0] + assert not result['changed'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] diff --git a/tests/unit/modules/test_ceph_fs.py b/tests/unit/modules/test_ceph_fs.py new file mode 100644 index 0000000..f18b506 --- /dev/null +++ b/tests/unit/modules/test_ceph_fs.py @@ -0,0 +1,107 @@ +from mock.mock import MagicMock +import ceph_fs + + +fake_binary = 'ceph' +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' +fake_container_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image +] +fake_fs = 'foo' +fake_data_pool = 'bar_data' +fake_metadata_pool = 'bar_metadata' +fake_max_mds = 2 +fake_params = {'cluster': fake_cluster, + 'name': fake_fs, + 'data': fake_data_pool, + 'metadata': fake_metadata_pool, + 'max_mds': fake_max_mds} + + +class TestCephFsModule(object): + + def test_create_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'new', + fake_fs, + fake_metadata_pool, + fake_data_pool + ] + + assert ceph_fs.create_fs(fake_module) == expected_cmd + + def test_set_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'set', + fake_fs, + 'max_mds', + str(fake_max_mds) + ] + + assert ceph_fs.set_fs(fake_module) == expected_cmd + + def test_get_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + 
expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'get', + fake_fs, + '--format=json' + ] + + assert ceph_fs.get_fs(fake_module) == expected_cmd + + def test_remove_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'rm', + fake_fs, + '--yes-i-really-mean-it' + ] + + assert ceph_fs.remove_fs(fake_module) == expected_cmd + + def test_fail_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'fail', + fake_fs + ] + + assert ceph_fs.fail_fs(fake_module) == expected_cmd diff --git a/tests/unit/modules/test_ceph_key.py b/tests/unit/modules/test_ceph_key.py new file mode 100644 index 0000000..4b19754 --- /dev/null +++ b/tests/unit/modules/test_ceph_key.py @@ -0,0 +1,588 @@ +import json +import os +import mock +import pytest +import ca_test_common +import ceph_key + + +@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'}) +class TestCephKeyModule(object): + + def test_generate_secret(self): + expected_length = 40 + result = len(ceph_key.generate_secret()) + assert result == expected_length + + def test_generate_caps_ceph_authtool(self): + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_type = "ceph-authtool" + expected_command_list = [ + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx' + ] + result = ceph_key.generate_caps(fake_type, fake_caps) + assert result == expected_command_list + + def test_generate_caps_not_ceph_authtool(self): + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_type = "" + expected_command_list = [ + 'mon', + 'allow *', + 'osd', + 'allow rwx' + ] + result = ceph_key.generate_caps(fake_type, fake_caps) + assert result == expected_command_list + + def test_generate_ceph_cmd_list_non_container(self): + fake_cluster = "fake" + fake_args = ['arg'] + fake_user = "fake-user" + fake_user_key = "/tmp/my-key" + expected_command_list = [ + 'ceph', + '-n', + "fake-user", + '-k', + "/tmp/my-key", + '--cluster', + fake_cluster, + 'auth', + 'arg' + ] + result = ceph_key.generate_cmd( + sub_cmd=['auth'], + args=fake_args, + cluster=fake_cluster, + user=fake_user, + user_key=fake_user_key) + assert result == expected_command_list + + def test_generate_ceph_cmd_list_container(self): + fake_cluster = "fake" + fake_args = ['arg'] + fake_user = "fake-user" + fake_user_key = "/tmp/my-key" + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', + 'run', + '--rm', + '--net=host', # noqa E501 + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', + "fake-user", + '-k', + "/tmp/my-key", + '--cluster', + fake_cluster, + 'auth', + 'arg'] + result = ceph_key.generate_cmd( + sub_cmd=['auth'], + args=fake_args, + cluster=fake_cluster, + user=fake_user, + user_key=fake_user_key, + container_image=fake_container_image) + assert result == expected_command_list + + def test_generate_ceph_authtool_cmd_non_container_no_auid(self): + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 
'allow rwx', + } + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [ + 'ceph-authtool', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx', + ] + result = ceph_key.generate_ceph_authtool_cmd( + fake_cluster, fake_name, fake_secret, fake_caps, fake_file_destination) # noqa E501 + assert result == expected_command_list + + def test_generate_ceph_authtool_cmd_container(self): + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph-authtool', + 'quay.io/ceph/daemon:latest-luminous', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx'] + result = ceph_key.generate_ceph_authtool_cmd( + fake_cluster, fake_name, fake_secret, fake_caps, fake_file_destination, fake_container_image) # noqa E501 + assert result == expected_command_list + + def test_create_key_non_container(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_import_key = True + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [ + ['ceph-authtool', '--create-keyring', fake_file_destination, '--name', fake_name, + '--add-key', fake_secret, '--cap', 'mon', 'allow *', '--cap', 'osd', 'allow rwx'], + ['ceph', '-n', fake_user, '-k', fake_user_key, '--cluster', fake_cluster, 'auth', + 'import', '-i', fake_file_destination], + ] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, + fake_name, fake_secret, fake_caps, fake_import_key, + fake_file_destination) + assert result == expected_command_list + + def test_create_key_container(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_import_key = True + fake_keyring_filename = fake_cluster + "." 
+ fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [ + ['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph-authtool', + 'quay.io/ceph/daemon:latest-luminous', + '--create-keyring', fake_file_destination, + '--name', fake_name, + '--add-key', fake_secret, + '--cap', 'mon', 'allow *', + '--cap', 'osd', 'allow rwx'], + ['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', 'client.admin', + '-k', '/etc/ceph/fake.client.admin.keyring', + '--cluster', fake_cluster, + 'auth', 'import', + '-i', fake_file_destination]] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, fake_name, + fake_secret, fake_caps, fake_import_key, fake_file_destination, + fake_container_image) + assert result == expected_command_list + + def test_create_key_non_container_no_import(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_import_key = False + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + # create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501 + expected_command_list = [[ + 'ceph-authtool', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx', ] + ] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, + fake_name, fake_secret, fake_caps, fake_import_key, + fake_file_destination) # noqa E501 + assert result == expected_command_list + + def test_create_key_container_no_import(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_import_key = False + fake_keyring_filename = fake_cluster + "." 
+ fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + # create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501 + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', # noqa E128 + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph-authtool', + 'quay.io/ceph/daemon:latest-luminous', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx']] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, fake_name, + fake_secret, fake_caps, fake_import_key, fake_file_destination, + fake_container_image) + assert result == expected_command_list + + def test_delete_key_non_container(self): + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + expected_command_list = [ + ['ceph', '-n', 'client.admin', '-k', '/etc/ceph/fake.client.admin.keyring', + '--cluster', fake_cluster, 'auth', 'del', fake_name], + ] + result = ceph_key.delete_key(fake_cluster, fake_user, fake_user_key, fake_name) + assert result == expected_command_list + + def test_delete_key_container(self): + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', 'client.admin', + '-k', '/etc/ceph/fake.client.admin.keyring', + '--cluster', fake_cluster, + 'auth', 'del', fake_name]] + result = ceph_key.delete_key( + fake_cluster, fake_user, fake_user_key, fake_name, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml']) + def test_info_key_non_container(self, output_format): + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_user = "fake-user" + expected_command_list = [ + ['ceph', '-n', fake_user, '-k', fake_user_key, '--cluster', fake_cluster, 'auth', + 'get', fake_name, '-f', output_format], + ] + result = ceph_key.info_key( + fake_cluster, fake_name, fake_user, fake_user_key, output_format) + assert result == expected_command_list + + @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml']) + def test_info_key_container_json(self, output_format): + fake_cluster = "fake" + fake_name = "client.fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', # noqa E128 + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', fake_user, + '-k', fake_user_key, + '--cluster', fake_cluster, + 'auth', 'get', fake_name, + '-f', output_format]] + result 
= ceph_key.info_key( + fake_cluster, fake_name, fake_user, fake_user_key, output_format, fake_container_image) # noqa E501 + assert result == expected_command_list + + def test_list_key_non_container(self): + fake_cluster = "fake" + fake_user = "fake-user" + fake_key = "/tmp/my-key" + expected_command_list = [ + ['ceph', '-n', "fake-user", '-k', "/tmp/my-key", + '--cluster', fake_cluster, 'auth', 'ls', '-f', 'json'], + ] + result = ceph_key.list_keys(fake_cluster, fake_user, fake_key) + assert result == expected_command_list + + def test_get_key_container(self): + fake_cluster = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_name = "client.fake" + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [['docker', # noqa E128 + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', fake_user, + '-k', fake_user_key, + '--cluster', fake_cluster, + 'auth', 'get', + fake_name, '-o', fake_file_destination]] + result = ceph_key.get_key( + fake_cluster, fake_user, fake_user_key, fake_name, fake_file_destination, fake_container_image) + assert result == expected_command_list + + def test_get_key_non_container(self): + fake_cluster = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_dest = "/fake/ceph" + fake_name = "client.fake" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [ + ['ceph', '-n', fake_user, '-k', fake_user_key, + '--cluster', fake_cluster, 'auth', 'get', fake_name, '-o', fake_file_destination], + ] + result = ceph_key.get_key( + fake_cluster, fake_user, fake_user_key, fake_name, fake_file_destination) + assert result == expected_command_list + + def test_list_key_non_container_with_mon_key(self): + fake_hostname = "mon01" + fake_cluster = "fake" + fake_user = "mon." + fake_keyring_dirname = fake_cluster + "-" + fake_hostname + fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring') + expected_command_list = [ + ['ceph', '-n', "mon.", '-k', "/var/lib/ceph/mon/fake-mon01/keyring", + '--cluster', fake_cluster, 'auth', 'ls', '-f', 'json'], + ] + result = ceph_key.list_keys(fake_cluster, fake_user, fake_key) + assert result == expected_command_list + + def test_list_key_container_with_mon_key(self): + fake_hostname = "mon01" + fake_cluster = "fake" + fake_user = "mon." 
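+ # The mon keyring path is /var/lib/ceph/mon/<cluster>-<hostname>/keyring; + # it resolves inside the container too because /var/lib/ceph/ is one of + # the bind mounts in the expected docker command below.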
+ fake_keyring_dirname = fake_cluster + "-" + fake_hostname + fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring') + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', "mon.", + '-k', "/var/lib/ceph/mon/fake-mon01/keyring", + '--cluster', fake_cluster, + 'auth', 'ls', + '-f', 'json'], ] + result = ceph_key.list_keys(fake_cluster, fake_user, fake_key, fake_container_image) + assert result == expected_command_list + + def test_list_key_container(self): + fake_cluster = "fake" + fake_user = "fake-user" + fake_key = "/tmp/my-key" + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', "fake-user", + '-k', "/tmp/my-key", + '--cluster', fake_cluster, + 'auth', 'ls', + '-f', 'json'], ] + result = ceph_key.list_keys( + fake_cluster, fake_user, fake_key, fake_container_image) + assert result == expected_command_list + + def test_lookup_ceph_initial_entities(self): + fake_module = "fake" + fake_ceph_dict = { "auth_dump":[ { "entity":"osd.0", "key":"AQAJkMhbszeBBBAA4/V1tDFXGlft1GnHJS5wWg==", "caps":{ "mgr":"allow profile osd", "mon":"allow profile osd", "osd":"allow *" } }, { "entity":"osd.1", "key":"AQAjkMhbshueAhAAjZec50aBgd1NObLz57SQvg==", "caps":{ "mgr":"allow profile osd", "mon":"allow profile osd", "osd":"allow *" } }, { "entity":"client.admin", "key":"AQDZjshbrJv6EhAAY9v6LzLYNDpPdlC3HD5KHA==", "auid":0, "caps":{ "mds":"allow", "mgr":"allow *", "mon":"allow *", "osd":"allow *" } }, { "entity":"client.bootstrap-mds", "key":"AQDojshbc4QCHhAA1ZTrkt9dbSZRVU2GzI6U4A==", "caps":{ "mon":"allow profile bootstrap-mds" } }, { "entity":"client.bootstrap-mgr", "key":"AQBfiu5bAAAAABAARcNG24hUMlk4AdstVA5MVQ==", "caps":{ "mon":"allow profile bootstrap-mgr" } }, { "entity":"client.bootstrap-osd", "key":"AQDjjshbYW+uGxAAyHcPCXXmVoL8VsTBI8z1Ng==", "caps":{ "mon":"allow profile bootstrap-osd" } }, { "entity":"client.bootstrap-rbd", "key":"AQDyjshb522eIhAAtAz6nUPMOdG4H9u0NgpXhA==", "caps":{ "mon":"allow profile bootstrap-rbd" } }, { "entity":"client.bootstrap-rbd-mirror", "key":"AQDfh+5bAAAAABAAEGBD59Lj2vAKIdN8pq4lbQ==", "caps":{ "mon":"allow profile bootstrap-rbd-mirror" } }, { "entity":"client.bootstrap-rgw", "key":"AQDtjshbDl8oIBAAq1SfSYQKDR49hJNWJVwDQw==", "caps":{ "mon":"allow profile bootstrap-rgw" } }, { "entity":"mgr.mon0", "key":"AQA0j8hbgGapORAAoDkyAvXVkM5ej4wNn4cwTQ==", "caps":{ "mds":"allow *", "mon":"allow profile mgr", "osd":"allow *" } } ] } # noqa E501 + fake_ceph_dict_str = json.dumps(fake_ceph_dict) # convert to string + expected_entity_list = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa E501 + 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa E501 + result = ceph_key.lookup_ceph_initial_entities(fake_module, fake_ceph_dict_str) + assert result == expected_entity_list + + def test_build_key_path_admin(self): + fake_cluster = "fake" + entity = "client.admin" + expected_result = "/etc/ceph/fake.client.admin.keyring" + result = 
ceph_key.build_key_path(fake_cluster, entity) + assert result == expected_result + + def test_build_key_path_bootstrap_osd(self): + fake_cluster = "fake" + entity = "client.bootstrap-osd" + expected_result = "/var/lib/ceph/bootstrap-osd/fake.keyring" + result = ceph_key.build_key_path(fake_cluster, entity) + assert result == expected_result + + @mock.patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @mock.patch('ceph_key.exec_commands') + @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml']) + def test_state_info(self, m_exec_commands, m_exit_json, output_format): + ca_test_common.set_module_args({"state": "info", + "cluster": "ceph", + "name": "client.admin", + "output_format": output_format}) + m_exit_json.side_effect = ca_test_common.exit_json + m_exec_commands.return_value = (0, + ['ceph', 'auth', 'get', 'client.admin', '-f', output_format], + '[{"entity":"client.admin","key":"AQC1tw5fF156GhAAoJCvHGX/jl/k7/N4VZm8iQ==","caps":{"mds":"allow *","mgr":"allow *","mon":"allow *","osd":"allow *"}}]', # noqa: E501 + 'exported keyring for client.admin') + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_key.run_module() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', 'auth', 'get', 'client.admin', '-f', output_format] + assert result['stdout'] == '[{"entity":"client.admin","key":"AQC1tw5fF156GhAAoJCvHGX/jl/k7/N4VZm8iQ==","caps":{"mds":"allow *","mgr":"allow *","mon":"allow *","osd":"allow *"}}]' # noqa: E501 + assert result['stderr'] == 'exported keyring for client.admin' + assert result['rc'] == 0 + + @mock.patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_state_info_invalid_format(self, m_fail_json): + invalid_format = 'txt' + ca_test_common.set_module_args({"state": "info", + "cluster": "ceph", + "name": "client.admin", + "output_format": invalid_format}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_key.run_module() + + result = result.value.args[0] + assert result['msg'] == 'value of output_format must be one of: json, plain, xml, yaml, got: {}'.format(invalid_format) + + @mock.patch('ceph_key.generate_secret') + @mock.patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_generate_key(self, m_exit_json, m_generate_secret): + fake_secret = b'AQDaLb1fAAAAABAAsIMKdGEKu+lGOyXnRfT0Hg==' + ca_test_common.set_module_args({"state": "generate_secret"}) + m_exit_json.side_effect = ca_test_common.exit_json + m_generate_secret.return_value = fake_secret + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_key.run_module() + assert result.value.args[0]['stdout'] == fake_secret.decode() diff --git a/tests/unit/modules/test_ceph_mgr_module.py b/tests/unit/modules/test_ceph_mgr_module.py new file mode 100644 index 0000000..d426a95 --- /dev/null +++ b/tests/unit/modules/test_ceph_mgr_module.py @@ -0,0 +1,162 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_mgr_module + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_module = 'noup' +fake_user = 'client.admin' +fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) + + +class TestCephMgrModuleModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = 
ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required arguments: name' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'Error ENOENT: all mgr daemons do not support module \'{}\', pass --force to force enablement'.format(fake_module) + rc = 2 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_enable_module(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_already_enable_module(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stderr = 'module \'{}\' is already enabled'.format(fake_module) + stdout = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_disable_module(self, m_run_command, m_exit_json): + 
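+        # Note: run_command is mocked here, so no real ceph binary is
+        # needed; the test only checks that state=disable maps to a
+        # 'mgr module disable' command line and that rc/stdout/stderr
+        # are propagated into the module result unchanged.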
ca_test_common.set_module_args({ + 'name': fake_module, + 'state': 'disable' + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'disable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '{} is set'.format(fake_module) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', fake_container_image, + '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/unit/modules/test_ceph_osd.py b/tests/unit/modules/test_ceph_osd.py new file mode 100644 index 0000000..6d1f314 --- /dev/null +++ b/tests/unit/modules/test_ceph_osd.py @@ -0,0 +1,244 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_osd + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_id = '42' +fake_ids = ['0', '7', '13'] +fake_user = 'client.admin' +fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) +invalid_state = 'foo' + + +class TestCephOSDModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required arguments: ids, state' + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_with_invalid_state(self, m_fail_json): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': invalid_state, + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['msg'] == ('value of state must be one of: destroy, down, ' + 'in, out, purge, rm, got: {}'.format(invalid_state)) + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': 'rm', + '_ansible_check_mode': True + }) + m_exit_json.side_effect = 
ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': 'rm' + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'Error EBUSY: osd.{} is still up; must be down before removal.'.format(fake_id) + rc = 16 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm']) + def test_set_state(self, m_run_command, m_exit_json, state): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': state + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'marked {} osd.{}'.format(state, fake_id) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id] + if state in ['destroy', 'purge']: + cmd.append('--yes-i-really-mean-it') + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == cmd + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm']) + def test_set_state_multiple_ids(self, m_run_command, m_exit_json, state): + ca_test_common.set_module_args({ + 'ids': fake_ids, + 'state': state + }) + m_exit_json.side_effect = ca_test_common.exit_json + stderr = '' + stdout = '' + for osd in fake_ids: + stderr += 'marked {} osd.{} '.format(state, osd) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state] + cmd.extend(fake_ids) + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == cmd + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['destroy', 'purge']) + def test_invalid_state_multiple_ids(self, m_run_command, m_fail_json, state): + ca_test_common.set_module_args({ + 'ids': fake_ids, + 'state': state + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with 
pytest.raises(ca_test_common.AnsibleFailJson) as result:
+            ceph_osd.main()
+
+        result = result.value.args[0]
+        # the message (including the doubled 'at') is asserted verbatim,
+        # matching the wording produced by the ceph_osd module
+        assert result['msg'] == 'destroy and purge only support one OSD at at time'
+        assert result['rc'] == 1
+
+    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+    @pytest.mark.parametrize('state', ['down', 'in', 'out'])
+    def test_already_set_state(self, m_run_command, m_exit_json, state):
+        ca_test_common.set_module_args({
+            'ids': fake_id,
+            'state': state
+        })
+        m_exit_json.side_effect = ca_test_common.exit_json
+        stdout = ''
+        stderr = 'osd.{} is already {}.'.format(fake_id, state)
+        rc = 0
+        m_run_command.return_value = rc, stdout, stderr
+        cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id]
+
+        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+            ceph_osd.main()
+
+        result = result.value.args[0]
+        assert not result['changed']
+        assert result['cmd'] == cmd
+        assert result['rc'] == rc
+        assert result['stderr'] == stderr
+        assert result['stdout'] == stdout
+
+    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+    @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm'])
+    def test_one_already_set_state_multiple_ids(self, m_run_command, m_exit_json, state):
+        ca_test_common.set_module_args({
+            'ids': fake_ids,
+            'state': state
+        })
+        m_exit_json.side_effect = ca_test_common.exit_json
+        stdout = ''
+        stderr = 'marked {} osd.{}. osd.{} does not exist. osd.{} does not exist.'.format(state, fake_ids[0], fake_ids[1], fake_ids[2])
+        rc = 0
+        m_run_command.return_value = rc, stdout, stderr
+        cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state]
+        cmd.extend(fake_ids)
+        if state in ['destroy', 'purge']:
+            cmd.append('--yes-i-really-mean-it')
+
+        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+            ceph_osd.main()
+
+        result = result.value.args[0]
+        assert result['changed']
+        assert result['cmd'] == cmd
+        assert result['rc'] == rc
+        assert result['stderr'] == stderr
+        assert result['stdout'] == stdout
+
+    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+    @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm'])
+    def test_set_state_with_container(self, m_run_command, m_exit_json, state):
+        ca_test_common.set_module_args({
+            'ids': fake_id,
+            'state': state
+        })
+        m_exit_json.side_effect = ca_test_common.exit_json
+        stdout = ''
+        stderr = 'marked {} osd.{}'.format(state, fake_id)
+        rc = 0
+        m_run_command.return_value = rc, stdout, stderr
+        cmd = [fake_container_binary, 'run', '--rm', '--net=host',
+               '-v', '/etc/ceph:/etc/ceph:z',
+               '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+               '-v', '/var/log/ceph/:/var/log/ceph/:z',
+               '--entrypoint=ceph', fake_container_image,
+               '-n', fake_user, '-k', fake_keyring,
+               '--cluster', fake_cluster, 'osd', state, fake_id]
+        if state in ['destroy', 'purge']:
+            cmd.append('--yes-i-really-mean-it')
+
+        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+            ceph_osd.main()
+
+        result = result.value.args[0]
+        assert result['changed']
+        assert result['cmd'] == cmd
+        assert result['rc'] == rc
+        assert result['stderr'] == stderr
+        assert result['stdout'] ==
stdout diff --git a/tests/unit/modules/test_ceph_osd_flag.py b/tests/unit/modules/test_ceph_osd_flag.py new file mode 100644 index 0000000..bbc865e --- /dev/null +++ b/tests/unit/modules/test_ceph_osd_flag.py @@ -0,0 +1,156 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_osd_flag + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_flag = 'noup' +fake_user = 'client.admin' +fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) +invalid_flag = 'nofoo' + + +class TestCephOSDFlagModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_osd_flag.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required arguments: name' + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_with_invalid_flag(self, m_fail_json): + ca_test_common.set_module_args({ + 'name': invalid_flag, + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_osd_flag.main() + + result = result.value.args[0] + assert result['msg'] == ('value of name must be one of: noup, nodown, ' + 'noout, nobackfill, norebalance, norecover, ' + 'noscrub, nodeep-scrub, got: {}'.format(invalid_flag)) + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_flag, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd_flag.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_flag + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'Error EINVAL: invalid command' + rc = 22 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd_flag.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_set_flag(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_flag, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '{} is set'.format(fake_flag) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd_flag.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', 
fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
+        assert result['rc'] == rc
+        assert result['stderr'] == stderr
+        assert result['stdout'] == stdout
+
+    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+    def test_unset_flag(self, m_run_command, m_exit_json):
+        ca_test_common.set_module_args({
+            'name': fake_flag,
+            'state': 'absent'
+        })
+        m_exit_json.side_effect = ca_test_common.exit_json
+        stdout = ''
+        stderr = '{} is unset'.format(fake_flag)
+        rc = 0
+        m_run_command.return_value = rc, stdout, stderr
+
+        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+            ceph_osd_flag.main()
+
+        result = result.value.args[0]
+        assert result['changed']
+        assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'unset', fake_flag]
+        assert result['rc'] == rc
+        assert result['stderr'] == stderr
+        assert result['stdout'] == stdout
+
+    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+    def test_with_container(self, m_run_command, m_exit_json):
+        ca_test_common.set_module_args({
+            'name': fake_flag,
+        })
+        m_exit_json.side_effect = ca_test_common.exit_json
+        stdout = ''
+        stderr = '{} is set'.format(fake_flag)
+        rc = 0
+        m_run_command.return_value = rc, stdout, stderr
+
+        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+            ceph_osd_flag.main()
+
+        result = result.value.args[0]
+        assert result['changed']
+        assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
+                                 '-v', '/etc/ceph:/etc/ceph:z',
+                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+                                 '-v', '/var/log/ceph/:/var/log/ceph/:z',
+                                 '--entrypoint=ceph', fake_container_image,
+                                 '-n', fake_user, '-k', fake_keyring,
+                                 '--cluster', fake_cluster, 'osd', 'set', fake_flag]
+        assert result['rc'] == rc
+        assert result['stderr'] == stderr
+        assert result['stdout'] == stdout
diff --git a/tests/unit/modules/test_ceph_pool.py b/tests/unit/modules/test_ceph_pool.py
new file mode 100644
index 0000000..f072c26
--- /dev/null
+++ b/tests/unit/modules/test_ceph_pool.py
@@ -0,0 +1,667 @@
+import os
+import sys
+from mock.mock import patch
+import pytest
+
+sys.path.append('./library')
+import ceph_pool  # noqa: E402
+fake_user = 'client.admin'
+fake_user_key = '/etc/ceph/ceph.client.admin.keyring'
+fake_pool_name = 'foo'
+fake_cluster_name = 'ceph'
+fake_container_image_name = 'quay.io/ceph/daemon:latest-luminous'
+
+
+@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'podman'})
+class TestCephPoolModule(object):
+    def setup_method(self):
+        self.fake_running_pool_details = {
+            'pool_id': 39,
+            'pool_name': 'foo2',
+            'create_time': '2020-05-12T12:32:03.696673+0000',
+            'flags': 32769,
+            'flags_names': 'hashpspool,creating',
+            'type': 1,
+            'size': 2,
+            'min_size': 1,
+            'crush_rule': 0,
+            'object_hash': 2,
+            'pg_autoscale_mode': 'on',
+            'pg_num': 32,
+            'pg_placement_num': 32,
+            'pg_placement_num_target': 32,
+            'pg_num_target': 32,
+            'pg_num_pending': 32,
+            'last_pg_merge_meta': {
+                'source_pgid': '0.0',
+                'ready_epoch': 0,
+                'last_epoch_started': 0,
+                'last_epoch_clean': 0,
+                'source_version': "0'0",
+                'target_version': "0'0"
+            },
+            'last_change': '109',
+            'last_force_op_resend': '0',
+            'last_force_op_resend_prenautilus': '0',
+            'last_force_op_resend_preluminous': '0',
+            'auid': 0,
'snap_mode': 'selfmanaged', + 'snap_seq': 0, + 'snap_epoch': 0, + 'pool_snaps': [], + 'removed_snaps': '[]', + 'quota_max_bytes': 0, + 'quota_max_objects': 0, + 'tiers': [], + 'tier_of': -1, + 'read_tier': -1, + 'write_tier': -1, + 'cache_mode': 'none', + 'target_max_bytes': 0, + 'target_max_objects': 0, + 'cache_target_dirty_ratio_micro': 400000, + 'cache_target_dirty_high_ratio_micro': 600000, + 'cache_target_full_ratio_micro': 800000, + 'cache_min_flush_age': 0, + 'cache_min_evict_age': 0, + 'erasure_code_profile': '', + 'hit_set_params': { + 'type': 'none' + }, + 'hit_set_period': 0, + 'hit_set_count': 0, + 'use_gmt_hitset': True, + 'min_read_recency_for_promote': 0, + 'min_write_recency_for_promote': 0, + 'hit_set_grade_decay_rate': 0, + 'hit_set_search_last_n': 0, + 'grade_table': [], + 'stripe_width': 0, + 'expected_num_objects': 0, + 'fast_read': False, + 'options': {}, + # 'target_size_ratio' is a key present in the dict above + # 'options': {} + # see comment in get_pool_details() for more details + 'target_size_ratio': 0.3, + 'application_metadata': { + 'rbd': {} + }, + 'application': 'rbd' + } + self.fake_user_pool_config = { + 'pool_name': { + 'value': 'foo2' + }, + 'pg_num': { + 'value': '32', + 'cli_set_opt': 'pg_num' + }, + 'pgp_num': { + 'value': '0', + 'cli_set_opt': 'pgp_num' + }, + 'pg_autoscale_mode': { + 'value': 'on', + 'cli_set_opt': 'pg_autoscale_mode' + }, + 'target_size_ratio': { + 'value': '0.3', + 'cli_set_opt': 'target_size_ratio' + }, + 'application': { + 'value': 'rbd' + }, + 'type': { + 'value': 'replicated' + }, + 'erasure_profile': { + 'value': 'default' + }, + 'crush_rule': { + 'value': 'replicated_rule', + 'cli_set_opt': 'crush_rule' + }, + 'expected_num_objects': { + 'value': '0' + }, + 'size': { + 'value': '2', + 'cli_set_opt': 'size' + }, + 'min_size': { + 'value': '0', + 'cli_set_opt': 'min_size' + }, + 'pg_placement_num': { + 'value': '32', + 'cli_set_opt': 'pgp_num' + }} + + def test_check_pool_exist(self): + expected_command_list = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + fake_user, + '-k', + fake_user_key, + '--cluster', + 'ceph', + 'osd', + 'pool', + 'stats', + self.fake_user_pool_config['pool_name']['value'], + '-f', + 'json' + ] + + cmd = ceph_pool.check_pool_exist(fake_cluster_name, + self.fake_user_pool_config['pool_name']['value'], + fake_user, fake_user_key, output_format='json', + container_image=fake_container_image_name) + assert cmd == expected_command_list + + def test_get_default_running_config(self): + params = ['osd_pool_default_size', + 'osd_pool_default_min_size', + 'osd_pool_default_pg_num', + 'osd_pool_default_pgp_num'] + + expected_command_list = [] + cmd_list = [] + + for param in params: + expected_command_list.append([ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'config', + 'get', + 'mon.*', + param + ]) + cmd_list.append(ceph_pool.generate_get_config_cmd(param, + fake_cluster_name, + fake_user, fake_user_key, + container_image=fake_container_image_name)) + assert cmd_list == expected_command_list + + def test_get_application_pool(self): + 
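+        # Note: as with the other tests in this class, only the generated
+        # command line is asserted (here 'osd pool application get <pool>
+        # -f json' behind the podman wrapper); nothing is executed.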
expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'application', + 'get', + self.fake_user_pool_config['pool_name']['value'], + '-f', + 'json' + ] + + cmd = ceph_pool.get_application_pool(fake_cluster_name, + self.fake_user_pool_config['pool_name']['value'], + fake_user, fake_user_key, 'json', + container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_enable_application_pool(self): + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'application', + 'enable', + self.fake_user_pool_config['pool_name']['value'], + 'rbd' + ] + + cmd = ceph_pool.enable_application_pool(fake_cluster_name, + self.fake_user_pool_config['pool_name']['value'], + 'rbd', fake_user, fake_user_key, + container_image=fake_container_image_name) + + assert cmd == expected_command + + @pytest.mark.parametrize("container_image", [None, fake_container_image_name]) + def test_init_rbd_pool(self, container_image): + if container_image: + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=rbd', + fake_container_image_name, + '-n', + fake_user, + '-k', + fake_user_key, + '--cluster', + fake_cluster_name, + 'pool', + 'init', + self.fake_user_pool_config['pool_name']['value'] + ] + else: + expected_command = [ + 'rbd', + '-n', + fake_user, + '-k', + fake_user_key, + '--cluster', + fake_cluster_name, + 'pool', + 'init', + self.fake_user_pool_config['pool_name']['value'] + ] + + cmd = ceph_pool.init_rbd_pool(fake_cluster_name, + self.fake_user_pool_config['pool_name']['value'], + fake_user, fake_user_key, + container_image) + + assert cmd == expected_command + + def test_disable_application_pool(self): + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'application', + 'disable', + self.fake_user_pool_config['pool_name']['value'], + 'rbd', + '--yes-i-really-mean-it' + ] + + cmd = ceph_pool.disable_application_pool(fake_cluster_name, + self.fake_user_pool_config['pool_name']['value'], + 'rbd', fake_user, fake_user_key, + container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_compare_pool_config_no_diff(self): + delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details) + + assert delta == {} + + def test_compare_pool_config_std_diff(self): + self.fake_user_pool_config['size']['value'] = '3' + delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details) + + assert delta == {'size': {'cli_set_opt': 'size', 
'value': '3'}} + + def test_compare_pool_config_target_size_ratio_diff(self): + self.fake_user_pool_config['target_size_ratio']['value'] = '0.5' + delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details) + + assert delta == {'target_size_ratio': {'cli_set_opt': 'target_size_ratio', 'value': '0.5'}} + + def test_compare_pool_config_application_diff(self): + self.fake_user_pool_config['application']['value'] = 'foo' + delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details) + + assert delta == {'application': {'new_application': 'foo', 'old_application': 'rbd', 'value': 'foo'}} + + def test_list_pools_details(self): + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'ls', + 'detail', + '-f', + 'json' + ] + + cmd = ceph_pool.list_pools(fake_cluster_name, fake_user, fake_user_key, True, 'json', container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_list_pools_nodetails(self): + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'ls', + '-f', + 'json' + ] + + cmd = ceph_pool.list_pools(fake_cluster_name, fake_user, fake_user_key, False, 'json', container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_create_replicated_pool_pg_autoscaler_enabled(self): + self.fake_user_pool_config['type']['value'] = 'replicated' + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'create', + self.fake_user_pool_config['pool_name']['value'], + self.fake_user_pool_config['type']['value'], + '--target_size_ratio', + self.fake_user_pool_config['target_size_ratio']['value'], + self.fake_user_pool_config['crush_rule']['value'], + '--expected_num_objects', + self.fake_user_pool_config['expected_num_objects']['value'], + '--autoscale-mode', + self.fake_user_pool_config['pg_autoscale_mode']['value'], + '--size', + self.fake_user_pool_config['size']['value'] + ] + + cmd = ceph_pool.create_pool(fake_cluster_name, + fake_user, fake_user_key, self.fake_user_pool_config, + container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_create_replicated_pool_pg_autoscaler_disabled(self): + self.fake_user_pool_config['type']['value'] = 'replicated' + self.fake_user_pool_config['pg_autoscale_mode']['value'] = 'off' + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + 
'/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'create', + self.fake_user_pool_config['pool_name']['value'], + self.fake_user_pool_config['type']['value'], + '--pg_num', + self.fake_user_pool_config['pg_num']['value'], + '--pgp_num', + self.fake_user_pool_config['pgp_num']['value'], + self.fake_user_pool_config['crush_rule']['value'], + '--expected_num_objects', + self.fake_user_pool_config['expected_num_objects']['value'], + '--autoscale-mode', + self.fake_user_pool_config['pg_autoscale_mode']['value'], + '--size', + self.fake_user_pool_config['size']['value'] + ] + + cmd = ceph_pool.create_pool(fake_cluster_name, + fake_user, fake_user_key, + self.fake_user_pool_config, + container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_create_erasure_pool_pg_autoscaler_enabled(self): + self.fake_user_pool_config['type']['value'] = 'erasure' + self.fake_user_pool_config['erasure_profile']['value'] = 'erasure-default' + self.fake_user_pool_config['crush_rule']['value'] = 'erasure_rule' + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'create', + self.fake_user_pool_config['pool_name']['value'], + self.fake_user_pool_config['type']['value'], + '--target_size_ratio', + self.fake_user_pool_config['target_size_ratio']['value'], + self.fake_user_pool_config['erasure_profile']['value'], + self.fake_user_pool_config['crush_rule']['value'], + '--expected_num_objects', + self.fake_user_pool_config['expected_num_objects']['value'], + '--autoscale-mode', + self.fake_user_pool_config['pg_autoscale_mode']['value'] + ] + + cmd = ceph_pool.create_pool(fake_cluster_name, + fake_user, fake_user_key, self.fake_user_pool_config, + container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_create_erasure_pool_pg_autoscaler_disabled(self): + self.fake_user_pool_config['type']['value'] = 'erasure' + self.fake_user_pool_config['erasure_profile']['value'] = 'erasure-default' + self.fake_user_pool_config['crush_rule']['value'] = 'erasure_rule' + self.fake_user_pool_config['pg_autoscale_mode']['value'] = 'off' + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'create', + self.fake_user_pool_config['pool_name']['value'], + self.fake_user_pool_config['type']['value'], + '--pg_num', + self.fake_user_pool_config['pg_num']['value'], + '--pgp_num', + self.fake_user_pool_config['pgp_num']['value'], + self.fake_user_pool_config['erasure_profile']['value'], + self.fake_user_pool_config['crush_rule']['value'], + '--expected_num_objects', + self.fake_user_pool_config['expected_num_objects']['value'], + '--autoscale-mode', + self.fake_user_pool_config['pg_autoscale_mode']['value'] + ] + + cmd = ceph_pool.create_pool(fake_cluster_name, + fake_user, fake_user_key, self.fake_user_pool_config, + container_image=fake_container_image_name) + + assert cmd == expected_command + + def test_remove_pool(self): + 
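+        # Note: 'ceph osd pool rm' requires the pool name to be given twice
+        # plus --yes-i-really-really-mean-it as a safety guard, hence the
+        # repeated pool name in the expected command below.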
expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'rm', + self.fake_user_pool_config['pool_name']['value'], + self.fake_user_pool_config['pool_name']['value'], + '--yes-i-really-really-mean-it' + ] + + cmd = ceph_pool.remove_pool(fake_cluster_name, self.fake_user_pool_config['pool_name']['value'], + fake_user, fake_user_key, container_image=fake_container_image_name) + + assert cmd == expected_command diff --git a/tests/unit/modules/test_ceph_volume.py b/tests/unit/modules/test_ceph_volume.py new file mode 100644 index 0000000..0499a53 --- /dev/null +++ b/tests/unit/modules/test_ceph_volume.py @@ -0,0 +1,480 @@ +import sys +import mock +import os +import pytest +import ca_test_common +sys.path.append('./library') +import ceph_volume # noqa: E402 + + +# Python 3 +try: + from unittest.mock import MagicMock, patch +except ImportError: + # Python 2 + try: + from mock import MagicMock, patch + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +def get_mounts(mounts=None): + volumes = {} + volumes['/run/lock/lvm'] = '/run/lock/lvm:z' + volumes['/var/run/udev'] = '/var/run/udev:z' + volumes['/dev'] = '/dev' + volumes['/etc/ceph'] = '/etc/ceph:z' + volumes['/run/lvm'] = '/run/lvm' + volumes['/var/lib/ceph'] = '/var/lib/ceph:z' + volumes['/var/log/ceph'] = '/var/log/ceph:z' + if mounts is not None: + volumes.update(mounts) + + return sum([['-v', '{}:{}'.format(src_dir, dst_dir)] for src_dir, dst_dir in volumes.items()], []) + + +def get_container_cmd(mounts=None): + + return ['docker', 'run', '--rm', '--privileged', + '--net=host', '--ipc=host'] + \ + get_mounts(mounts) + ['--entrypoint=ceph-volume'] + + +@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'}) +class TestCephVolumeModule(object): + + def test_data_no_vg(self): + result = ceph_volume.get_data("/dev/sda", None) + assert result == "/dev/sda" + + def test_data_with_vg(self): + result = ceph_volume.get_data("data-lv", "data-vg") + assert result == "data-vg/data-lv" + + def test_journal_no_vg(self): + result = ceph_volume.get_journal("/dev/sda1", None) + assert result == "/dev/sda1" + + def test_journal_with_vg(self): + result = ceph_volume.get_journal("journal-lv", "journal-vg") + assert result == "journal-vg/journal-lv" + + def test_db_no_vg(self): + result = ceph_volume.get_db("/dev/sda1", None) + assert result == "/dev/sda1" + + def test_db_with_vg(self): + result = ceph_volume.get_db("db-lv", "db-vg") + assert result == "db-vg/db-lv" + + def test_wal_no_vg(self): + result = ceph_volume.get_wal("/dev/sda1", None) + assert result == "/dev/sda1" + + def test_wal_with_vg(self): + result = ceph_volume.get_wal("wal-lv", "wal-vg") + assert result == "wal-vg/wal-lv" + + def test_container_exec(self): + fake_binary = "ceph-volume" + fake_container_image = "quay.io/ceph/daemon:latest" + expected_command_list = get_container_cmd() + [fake_container_image] + result = ceph_volume.container_exec(fake_binary, fake_container_image) + assert result == expected_command_list + + def test_zap_osd_container(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda'} + fake_container_image = "quay.io/ceph/daemon:latest" + expected_command_list = get_container_cmd() + \ 
+ [fake_container_image, + '--cluster', + 'ceph', + 'lvm', + 'zap', + '--destroy', + '/dev/sda'] + result = ceph_volume.zap_devices(fake_module, fake_container_image) + assert result == expected_command_list + + def test_zap_osd(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda'} + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'zap', + '--destroy', + '/dev/sda'] + result = ceph_volume.zap_devices(fake_module, fake_container_image) + assert result == expected_command_list + + def test_zap_osd_fsid(self): + fake_module = MagicMock() + fake_module.params = {'osd_fsid': 'a_uuid'} + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'zap', + '--destroy', + '--osd-fsid', + 'a_uuid'] + result = ceph_volume.zap_devices(fake_module, fake_container_image) + assert result == expected_command_list + + def test_zap_osd_id(self): + fake_module = MagicMock() + fake_module.params = {'osd_id': '123'} + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'zap', + '--destroy', + '--osd-id', + '123'] + result = ceph_volume.zap_devices(fake_module, fake_container_image) + assert result == expected_command_list + + def test_activate_osd(self): + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'activate', + '--all'] + result = ceph_volume.activate_osd() + assert result == expected_command_list + + def test_list_osd(self): + fake_module = MagicMock() + fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'} + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'list', + '/dev/sda', + '--format=json'] + result = ceph_volume.list_osd(fake_module, fake_container_image) + assert result == expected_command_list + + def test_list_osd_container(self): + fake_module = MagicMock() + fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'} + fake_container_image = "quay.io/ceph/daemon:latest" + expected_command_list = get_container_cmd( + { + '/var/lib/ceph': '/var/lib/ceph:ro' + }) + \ + [fake_container_image, + '--cluster', + 'ceph', + 'lvm', + 'list', + '/dev/sda', + '--format=json'] + result = ceph_volume.list_osd(fake_module, fake_container_image) + assert result == expected_command_list + + def test_list_storage_inventory(self): + fake_module = MagicMock() + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'inventory', + '--format=json', + ] + result = ceph_volume.list_storage_inventory(fake_module, fake_container_image) + assert result == expected_command_list + + def test_list_storage_inventory_container(self): + fake_module = MagicMock() + fake_container_image = "quay.io/ceph/daemon:latest" + expected_command_list = get_container_cmd() + \ + [fake_container_image, + '--cluster', + 'ceph', + 'inventory', + '--format=json'] + result = ceph_volume.list_storage_inventory(fake_module, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('objectstore', ['bluestore']) + def test_create_osd_container(self, objectstore): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': objectstore, + 'cluster': 'ceph', } + + fake_action = "create" + fake_container_image = "quay.io/ceph/daemon:latest" + expected_command_list = get_container_cmd() + \ + [fake_container_image, + '--cluster', + 'ceph', + 'lvm', + 'create', + '--%s' % 
objectstore, + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('objectstore', ['bluestore']) + def test_create_osd(self, objectstore): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': objectstore, + 'cluster': 'ceph', } + + fake_container_image = None + fake_action = "create" + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'create', + '--%s' % objectstore, + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('objectstore', ['bluestore']) + def test_prepare_osd_container(self, objectstore): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': objectstore, + 'cluster': 'ceph', } + + fake_action = "prepare" + fake_container_image = "quay.io/ceph/daemon:latest" + expected_command_list = get_container_cmd() + \ + [fake_container_image, + '--cluster', + 'ceph', + 'lvm', + 'prepare', + '--%s' % objectstore, + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('objectstore', ['bluestore']) + def test_prepare_osd(self, objectstore): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': objectstore, + 'cluster': 'ceph', } + + fake_container_image = None + fake_action = "prepare" + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'prepare', + '--%s' % objectstore, + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('objectstore', ['bluestore']) + def test_batch_osd_container(self, objectstore): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': objectstore, + 'block_db_size': '4096', + 'journal_size': '4096', + 'cluster': 'ceph', + 'batch_devices': ["/dev/sda", "/dev/sdb"]} + + fake_container_image = "quay.io/ceph/daemon:latest" + expected_command_list = get_container_cmd() + \ + [fake_container_image, + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--%s' % objectstore, + '--yes', + '--prepare', + '--block-db-size', + '4096', + '/dev/sda', + '/dev/sdb'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('objectstore', ['bluestore']) + def test_batch_osd(self, objectstore): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': objectstore, + 'block_db_size': '4096', + 'journal_size': '4096', + 'cluster': 'ceph', + 'batch_devices': ["/dev/sda", "/dev/sdb"]} + + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--%s' % objectstore, + '--yes', + '--block-db-size', + '4096', + '/dev/sda', + '/dev/sdb'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + def test_batch_bluestore_with_dedicated_db(self): + fake_module = MagicMock() + fake_module.params = {'objectstore': 'bluestore', + 'block_db_size': '-1', + 'cluster': 'ceph', + 'batch_devices': ["/dev/sda", "/dev/sdb"], + 'block_db_devices': ["/dev/sdc", 
"/dev/sdd"]} + + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--bluestore', + '--yes', + '/dev/sda', + '/dev/sdb', + '--db-devices', + '/dev/sdc', + '/dev/sdd'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + def test_batch_bluestore_with_dedicated_wal(self): + fake_module = MagicMock() + fake_module.params = {'objectstore': 'bluestore', + 'cluster': 'ceph', + 'block_db_size': '-1', + 'batch_devices': ["/dev/sda", "/dev/sdb"], + 'wal_devices': ["/dev/sdc", "/dev/sdd"]} + + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--bluestore', + '--yes', + '/dev/sda', + '/dev/sdb', + '--wal-devices', + '/dev/sdc', + '/dev/sdd'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_prepare_no_keyring_in_output(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({'data': '/dev/sda', + 'objectstore': 'bluestore', + 'cluster': 'ceph', + 'action': 'prepare'}) + keyring = 'AQBqkhNhQDlqEhAAXKxu87L3Mh3mHY+agonKZA==' + m_exit_json.side_effect = ca_test_common.exit_json + list_rc = 0 + list_stderr = '' + list_stdout = '{}' + prepare_rc = 0 + prepare_stderr = """ + Running command: /usr/bin/ceph-authtool --gen-print-key + Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 + Running command: /usr/bin/chown -h ceph:ceph /dev/test_group/data-lv1 + Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 + Running command: /usr/bin/ln -s /dev/test_group/data-lv1 /var/lib/ceph/osd/ceph-1/block + stderr: got monmap epoch 1 + Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key {} + stdout: creating /var/lib/ceph/osd/ceph-1/keyring + added entity osd.1 auth(key={}) +""".format(keyring, keyring) + prepare_stdout = '' + m_run_command.side_effect = [ + (list_rc, list_stdout, list_stderr), + (prepare_rc, prepare_stdout, prepare_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sda'] + assert result['rc'] == 0 + assert keyring not in result['stderr'] + assert '*' * 8 in result['stderr'] + assert not result['stdout'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_batch_no_keyring_in_output(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({'batch_devices': ['/dev/sda'], + 'objectstore': 'bluestore', + 'cluster': 'ceph', + 'action': 'batch'}) + keyring = 'AQBUixJhnDF1NRAAhl2xrnmOHCCI/T+W6FjqmA==' + m_exit_json.side_effect = ca_test_common.exit_json + report_rc = 0 + report_stderr = '' + report_stdout = '[{"data": "/dev/sda", "data_size": "50.00 GB", "encryption": "None"}]' + batch_rc = 0 + batch_stderr = """ + Running command: /usr/bin/ceph-authtool --gen-print-key + Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 + Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-863337c4-bef9-4b96-aaac-27cde8c42b8f/osd-block-b1d1036f-0d6e-493b-9d1a-6f6b96df64b1 + Running command: /usr/bin/chown 
-R ceph:ceph /dev/mapper/ceph--863337c4--bef9--4b96--aaac--27cde8c42b8f-osd--block--b1d1036f--0d6e--493b--9d1a--6f6b96df64b1 + Running command: /usr/bin/ln -s /dev/ceph-863337c4-bef9-4b96-aaac-27cde8c42b8f/osd-block-b1d1036f-0d6e-493b-9d1a-6f6b96df64b1 /var/lib/ceph/osd/ceph-0/block + stderr: got monmap epoch 1 + Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key {} + stdout: creating /var/lib/ceph/osd/ceph-0/keyring + added entity osd.0 auth(key={}) +""".format(keyring, keyring) + batch_stdout = '' + m_run_command.side_effect = [ + (report_rc, report_stdout, report_stderr), + (batch_rc, batch_stdout, batch_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', 'ceph', 'lvm', 'batch', '--bluestore', '--yes', '/dev/sda'] + assert result['rc'] == 0 + assert keyring not in result['stderr'] + assert '*' * 8 in result['stderr'] + assert not result['stdout'] diff --git a/tests/unit/modules/test_ceph_volume_simple_activate.py b/tests/unit/modules/test_ceph_volume_simple_activate.py new file mode 100644 index 0000000..8eb1707 --- /dev/null +++ b/tests/unit/modules/test_ceph_volume_simple_activate.py @@ -0,0 +1,174 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_volume_simple_activate + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_id = '42' +fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52' +fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid) + + +class TestCephVolumeSimpleActivateModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'error' + rc = 2 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_all_osds(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_all': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, 
stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=True) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=False) + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_activate_path_not_exists(self, m_fail_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['msg'] == '{} does not exist'.format(fake_path) + assert result['rc'] == 1 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_without_systemd(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid, + 'systemd': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == [fake_container_binary, + 'run', '--rm', '--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', 
'/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', fake_container_image, + '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/unit/modules/test_ceph_volume_simple_scan.py b/tests/unit/modules/test_ceph_volume_simple_scan.py new file mode 100644 index 0000000..f43dec4 --- /dev/null +++ b/tests/unit/modules/test_ceph_volume_simple_scan.py @@ -0,0 +1,166 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_volume_simple_scan + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_path = '/var/lib/ceph/osd/ceph-0' + + +class TestCephVolumeSimpleScanModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'error' + rc = 2 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_all_osds(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=True) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_path_exists(self, m_run_command, m_exit_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', fake_path] + assert 
result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=False) + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_scan_path_not_exists(self, m_fail_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['msg'] == '{} does not exist'.format(fake_path) + assert result['rc'] == 1 + + @patch.object(os.path, 'exists', return_value=True) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_path_stdout_force(self, m_run_command, m_exit_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path, + 'force': True, + 'stdout': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', '--force', '--stdout', fake_path] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == [fake_container_binary, + 'run', '--rm', '--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', fake_container_image, + '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/unit/modules/test_cephadm_adopt.py b/tests/unit/modules/test_cephadm_adopt.py new file mode 100644 index 0000000..36e3bbf --- /dev/null +++ b/tests/unit/modules/test_cephadm_adopt.py @@ -0,0 +1,208 @@ +from mock.mock import patch +import pytest +import ca_test_common +import cephadm_adopt + +fake_cluster = 'ceph' +fake_image = 'quay.io/ceph/daemon-base:latest' +fake_name = 'mon.foo01' + + +class TestCephadmAdoptModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required arguments: name' + + 
@patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['cephadm', 'ls', '--no-detail'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_fail_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_fail_json.side_effect = ca_test_common.fail_json + stdout = '' + stderr = 'ERROR: cephadm should be run as root' + rc = 1 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['rc'] == 1 + assert result['msg'] == 'ERROR: cephadm should be run as root' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_default_values(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = 'Stopping old systemd unit ceph-mon@{}...\n' \ + 'Disabling old systemd unit ceph-mon@{}...\n' \ + 'Moving data...\n' \ + 'Chowning content...\n' \ + 'Moving logs...\n' \ + 'Creating new units...\n' \ + 'firewalld ready'.format(fake_name, fake_name) + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_already_adopted(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_exit_json.side_effect = ca_test_common.exit_json + stderr = '' + stdout = '[{{"style":"cephadm:v1","name":"{}"}}]'.format(fake_name) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['cephadm', 'ls', '--no-detail'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == '{} is already adopted'.format(fake_name) + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_docker(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'docker': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + 
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', '--docker', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_custom_image(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'image': fake_image + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', '--image', fake_image, 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_pull(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'pull': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy', '--skip-pull'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_firewalld(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'firewalld': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy', '--skip-firewalld'] + assert result['rc'] == 0 diff --git a/tests/unit/modules/test_radosgw_caps.py b/tests/unit/modules/test_radosgw_caps.py new file mode 100644 index 0000000..1fd7a16 --- /dev/null +++ b/tests/unit/modules/test_radosgw_caps.py @@ -0,0 +1,101 @@ +import os +import sys +from mock.mock import patch, MagicMock +import pytest + +sys.path.append("./library") +import radosgw_caps # noqa: E402 + + +fake_binary = "radosgw-admin" +fake_cluster = "ceph" +fake_container_binary = "podman" +fake_container_image = "docker.io/ceph/daemon:latest" +fake_container_cmd = [ + fake_container_binary, + "run", + "--rm", + "--net=host", + "-v", + "/etc/ceph:/etc/ceph:z", + "-v", + "/var/lib/ceph/:/var/lib/ceph/:z", + "-v", + "/var/log/ceph/:/var/log/ceph/:z", + "--entrypoint=" + fake_binary, + fake_container_image, +] +fake_user = "foo" +fake_caps = ["users=write", "zone=*", 
"metadata=read,write"] +fake_params = { + "cluster": fake_cluster, + "name": fake_user, + "caps": fake_caps, +} + + +class TestRadosgwCapsModule(object): + @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary}) + def test_container_exec(self): + cmd = radosgw_caps.container_exec(fake_binary, fake_container_image) + assert cmd == fake_container_cmd + + def test_not_is_containerized(self): + assert radosgw_caps.is_containerized() is None + + @patch.dict(os.environ, {"CEPH_CONTAINER_IMAGE": fake_container_image}) + def test_is_containerized(self): + assert radosgw_caps.is_containerized() == fake_container_image + + @pytest.mark.parametrize("image", [None, fake_container_image]) + @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary}) + def test_pre_generate_radosgw_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + assert radosgw_caps.pre_generate_radosgw_cmd(image) == expected_cmd + + @pytest.mark.parametrize("image", [None, fake_container_image]) + @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary}) + def test_generate_radosgw_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + expected_cmd.extend(["--cluster", fake_cluster, "caps"]) + assert ( + radosgw_caps.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd + ) + + def test_add_caps(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + "--cluster", + fake_cluster, + "caps", + "add", + "--uid=" + fake_user, + "--caps=" + ";".join(fake_caps), + ] + + assert radosgw_caps.add_caps(fake_module) == expected_cmd + + def test_remove_caps(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + "--cluster", + fake_cluster, + "caps", + "rm", + "--uid=" + fake_user, + "--caps=" + ";".join(fake_caps), + ] + + assert radosgw_caps.remove_caps(fake_module) == expected_cmd diff --git a/tests/unit/modules/test_radosgw_realm.py b/tests/unit/modules/test_radosgw_realm.py new file mode 100644 index 0000000..a1a7707 --- /dev/null +++ b/tests/unit/modules/test_radosgw_realm.py @@ -0,0 +1,124 @@ +import os +import sys +from mock.mock import patch, MagicMock +import pytest +sys.path.append('./library') +import radosgw_realm # noqa: E402 + + +fake_binary = 'radosgw-admin' +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' +fake_container_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image +] +fake_realm = 'foo' +fake_params = {'cluster': fake_cluster, + 'name': fake_realm, + 'default': True} +fake_url = 'http://192.168.42.100:8080' +fake_access_key = '8XQHmFxixz7LCM2AdM2p' +fake_secret_key = 'XC8IhEPJprL6SrpaJDmolVs7jbOvoe2E3AaWKGRx' + + +class TestRadosgwRealmModule(object): + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_container_exec(self): + cmd = radosgw_realm.container_exec(fake_binary, fake_container_image) + assert cmd == fake_container_cmd + + def test_not_is_containerized(self): + assert radosgw_realm.is_containerized() is None + + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_is_containerized(self): + assert radosgw_realm.is_containerized() == 
fake_container_image + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_pre_generate_radosgw_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + assert radosgw_realm.pre_generate_radosgw_cmd(image) == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_radosgw_cmd(self, image): + if image: + expected_cmd = list(fake_container_cmd) + else: + expected_cmd = [fake_binary] + + expected_cmd.extend([ + '--cluster', + fake_cluster, + 'realm' + ]) + assert radosgw_realm.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd + + def test_create_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'create', + '--rgw-realm=' + fake_realm, + '--default' + ] + + assert radosgw_realm.create_realm(fake_module) == expected_cmd + + def test_get_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'get', + '--rgw-realm=' + fake_realm, + '--format=json' + ] + + assert radosgw_realm.get_realm(fake_module) == expected_cmd + + def test_remove_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'delete', + '--rgw-realm=' + fake_realm + ] + + assert radosgw_realm.remove_realm(fake_module) == expected_cmd + + def test_pull_realm(self): + fake_module = MagicMock() + fake_module.params = dict(fake_params) + fake_module.params.update({'url': fake_url, 'access_key': fake_access_key, 'secret_key': fake_secret_key}) + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'pull', + '--rgw-realm=' + fake_realm, + '--url=' + fake_url, + '--access-key=' + fake_access_key, + '--secret=' + fake_secret_key + ] + + assert radosgw_realm.pull_realm(fake_module) == expected_cmd diff --git a/tests/unit/modules/test_radosgw_user.py b/tests/unit/modules/test_radosgw_user.py new file mode 100644 index 0000000..a5d629c --- /dev/null +++ b/tests/unit/modules/test_radosgw_user.py @@ -0,0 +1,151 @@ +import os +import sys +from mock.mock import patch, MagicMock +import pytest +sys.path.append('./library') +import radosgw_user  # noqa: E402 + + +fake_binary = 'radosgw-admin' +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' +fake_container_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image +] +fake_user = 'foo' +fake_realm = 'canada' +fake_zonegroup = 'quebec' +fake_zone = 'montreal' +fake_params = {'cluster': fake_cluster, + 'name': fake_user, + 'display_name': fake_user, + 'email': fake_user, + 'access_key': 'PC7NPg87QWhOzXTkXIhX', + 'secret_key': 'jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz', + 'realm': fake_realm, + 'zonegroup': fake_zonegroup, + 'zone': fake_zone, + 'system': True, + 'admin': True} + + +class TestRadosgwUserModule(object): + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_container_exec(self): + cmd = radosgw_user.container_exec(fake_binary, fake_container_image) 
+ assert cmd == fake_container_cmd + + def test_not_is_containerized(self): + assert radosgw_user.is_containerized() is None + + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_is_containerized(self): + assert radosgw_user.is_containerized() == fake_container_image + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_pre_generate_radosgw_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + assert radosgw_user.pre_generate_radosgw_cmd(image) == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_radosgw_cmd(self, image): + if image: + expected_cmd = list(fake_container_cmd) + else: + expected_cmd = [fake_binary] + + expected_cmd.extend([ + '--cluster', + fake_cluster, + 'user' + ]) + assert radosgw_user.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd + + def test_create_user(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'user', 'create', + '--uid=' + fake_user, + '--display_name=' + fake_user, + '--email=' + fake_user, + '--access-key=PC7NPg87QWhOzXTkXIhX', + '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone, + '--system', + '--admin' + ] + + assert radosgw_user.create_user(fake_module) == expected_cmd + + def test_modify_user(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'user', 'modify', + '--uid=' + fake_user, + '--display_name=' + fake_user, + '--email=' + fake_user, + '--access-key=PC7NPg87QWhOzXTkXIhX', + '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone, + '--system', + '--admin' + ] + + assert radosgw_user.modify_user(fake_module) == expected_cmd + + def test_get_user(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'user', 'info', + '--uid=' + fake_user, + '--format=json', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone + ] + + assert radosgw_user.get_user(fake_module) == expected_cmd + + def test_remove_user(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'user', 'rm', + '--uid=' + fake_user, + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone + ] + + assert radosgw_user.remove_user(fake_module) == expected_cmd diff --git a/tests/unit/modules/test_radosgw_zone.py b/tests/unit/modules/test_radosgw_zone.py new file mode 100644 index 0000000..08a42b4 --- /dev/null +++ b/tests/unit/modules/test_radosgw_zone.py @@ -0,0 +1,213 @@ +import os +import sys +from mock.mock import patch, MagicMock +import pytest +sys.path.append('./library') +import radosgw_zone  # noqa: E402 + + +fake_binary = 'radosgw-admin' +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' +fake_container_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', 
'/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image +] +fake_realm = 'foo' +fake_zonegroup = 'bar' +fake_zone = 'z1' +fake_endpoints = ['http://192.168.1.10:8080', 'http://192.168.1.11:8080'] +fake_params = {'cluster': fake_cluster, + 'name': fake_zone, + 'realm': fake_realm, + 'zonegroup': fake_zonegroup, + 'endpoints': fake_endpoints, + 'default': True, + 'master': True} + + +class TestRadosgwZoneModule(object): + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_container_exec(self): + cmd = radosgw_zone.container_exec(fake_binary, fake_container_image) + assert cmd == fake_container_cmd + + def test_not_is_containerized(self): + assert radosgw_zone.is_containerized() is None + + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_is_containerized(self): + assert radosgw_zone.is_containerized() == fake_container_image + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_pre_generate_radosgw_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + assert radosgw_zone.pre_generate_radosgw_cmd(image) == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_radosgw_cmd(self, image): + if image: + expected_cmd = list(fake_container_cmd) + else: + expected_cmd = [fake_binary] + + expected_cmd.extend([ + '--cluster', + fake_cluster, + 'zone' + ]) + assert radosgw_zone.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd + + @pytest.mark.parametrize('image', [fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_radosgw_cmd_container_args(self, image): + container_args = [ + '-v', '/test:/test:ro', + ] + expected_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/test:/test:ro', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image + ] + + expected_cmd.extend([ + '--cluster', + fake_cluster, + 'zone' + ]) + assert radosgw_zone.generate_radosgw_cmd(fake_cluster, [], image, container_args) == expected_cmd + + def test_create_zone(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zone', 'create', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone, + '--endpoints=' + ','.join(fake_endpoints), + '--default', + '--master' + ] + + assert radosgw_zone.create_zone(fake_module) == expected_cmd + + def test_modify_zone(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zone', 'modify', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone, + '--endpoints=' + ','.join(fake_endpoints), + '--default', + '--master' + ] + + assert radosgw_zone.modify_zone(fake_module) == expected_cmd + + def test_get_zone(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zone', 'get', + '--rgw-realm=' + fake_realm, + 
'--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone, + '--format=json' + ] + + assert radosgw_zone.get_zone(fake_module) == expected_cmd + + def test_get_zonegroup(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zonegroup', 'get', + '--rgw-zone=' + fake_zone, + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--format=json' + ] + + assert radosgw_zone.get_zonegroup(fake_module) == expected_cmd + + def test_get_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'get', + '--rgw-realm=' + fake_realm, + '--format=json' + ] + + assert radosgw_zone.get_realm(fake_module) == expected_cmd + + def test_remove_zone(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zone', 'delete', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--rgw-zone=' + fake_zone + ] + + assert radosgw_zone.remove_zone(fake_module) == expected_cmd + + def test_set_zone(self): + fake_module = MagicMock() + fake_module.params = { + 'cluster': fake_cluster, + 'name': fake_zone, + 'realm': fake_realm, + 'zonegroup': fake_zonegroup, + 'zone_doc': {'id': 'fake_id'}, + } + + zonefile = fake_module.tmpdir + '/zone.json' + + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zone', 'set', + '--rgw-realm=' + fake_realm, + '--infile=' + zonefile, + ] + + assert radosgw_zone.set_zone(fake_module) == expected_cmd diff --git a/tests/unit/modules/test_radosgw_zonegroup.py b/tests/unit/modules/test_radosgw_zonegroup.py new file mode 100644 index 0000000..a56fb9d --- /dev/null +++ b/tests/unit/modules/test_radosgw_zonegroup.py @@ -0,0 +1,144 @@ +import os +import sys +from mock.mock import patch, MagicMock +import pytest +sys.path.append('./library') +import radosgw_zonegroup # noqa: E402 + + +fake_binary = 'radosgw-admin' +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' +fake_container_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image +] +fake_realm = 'foo' +fake_zonegroup = 'bar' +fake_endpoints = ['http://192.168.1.10:8080', 'http://192.168.1.11:8080'] +fake_params = {'cluster': fake_cluster, + 'name': fake_zonegroup, + 'realm': fake_realm, + 'endpoints': fake_endpoints, + 'default': True, + 'master': True} + + +class TestRadosgwZonegroupModule(object): + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_container_exec(self): + cmd = radosgw_zonegroup.container_exec(fake_binary, fake_container_image) + assert cmd == fake_container_cmd + + def test_not_is_containerized(self): + assert radosgw_zonegroup.is_containerized() is None + + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_is_containerized(self): + assert radosgw_zonegroup.is_containerized() == fake_container_image + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_pre_generate_radosgw_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + assert 
radosgw_zonegroup.pre_generate_radosgw_cmd(image) == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_radosgw_cmd(self, image): + if image: + expected_cmd = list(fake_container_cmd) + else: + expected_cmd = [fake_binary] + + expected_cmd.extend([ + '--cluster', + fake_cluster, + 'zonegroup' + ]) + assert radosgw_zonegroup.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd + + def test_create_zonegroup(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zonegroup', 'create', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--endpoints=' + ','.join(fake_endpoints), + '--default', + '--master' + ] + + assert radosgw_zonegroup.create_zonegroup(fake_module) == expected_cmd + + def test_modify_zonegroup(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zonegroup', 'modify', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--endpoints=' + ','.join(fake_endpoints), + '--default', + '--master' + ] + + assert radosgw_zonegroup.modify_zonegroup(fake_module) == expected_cmd + + def test_get_zonegroup(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zonegroup', 'get', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup, + '--format=json' + ] + + assert radosgw_zonegroup.get_zonegroup(fake_module) == expected_cmd + + def test_get_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'get', + '--rgw-realm=' + fake_realm, + '--format=json' + ] + + assert radosgw_zonegroup.get_realm(fake_module) == expected_cmd + + def test_remove_zonegroup(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'zonegroup', 'delete', + '--rgw-realm=' + fake_realm, + '--rgw-zonegroup=' + fake_zonegroup + ] + + assert radosgw_zonegroup.remove_zonegroup(fake_module) == expected_cmd
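
A note for reviewers: every test in these files drives the module through the ca_test_common helper, which is not part of this diff. The pattern is the standard Ansible unit-test harness: set_module_args() injects the task parameters via basic._ANSIBLE_ARGS, while exit_json/fail_json are patched with side effects that raise exceptions pytest.raises can catch, exposing the result dict as result.value.args[0]. Below is a minimal sketch of what such a helper presumably looks like, inferred purely from how the tests above use it; the collection's actual implementation may differ in detail.

import json

from ansible.module_utils import basic
from ansible.module_utils.common.text.converters import to_bytes


class AnsibleExitJson(Exception):
    # Raised in place of AnsibleModule.exit_json() so tests can intercept it.
    pass


class AnsibleFailJson(Exception):
    # Raised in place of AnsibleModule.fail_json() so tests can intercept it.
    pass


def set_module_args(args):
    # Serialize the task arguments the same way Ansible hands them to a
    # module process (including internal keys like _ansible_check_mode).
    basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))


def exit_json(*args, **kwargs):
    # side_effect for the mocked AnsibleModule.exit_json: surface the
    # result dict as the first exception argument.
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):
    # side_effect for the mocked AnsibleModule.fail_json.
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)

With a harness of this shape, result.value.args[0] in the tests above is simply the kwargs dict that the module under test passed to exit_json() or fail_json().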