diff --git a/README b/README index fe32bd2..679f2b2 100644 --- a/README +++ b/README @@ -17,17 +17,17 @@ o- / ........................................................................... o- cluster .................................................................. [Clusters: 1] | o- ceph ..................................................................... [HEALTH_OK] | o- pools ................................................................... [Pools: 3] - | | o- ec ........................................ [(2+1), Commit: 0b/40G (0%), Used: 0b] + | | o- ec ....................................... [(2+2), Commit: 0b/40G (0%), Used: 24K] | | o- iscsi ..................................... [(x3), Commit: 0b/20G (0%), Used: 18b] | | o- rbd ....................................... [(x3), Commit: 8G/20G (40%), Used: 5K] | o- topology ......................................................... [OSDs: 3,MONs: 3] o- disks ................................................................... [8G, Disks: 5] | o- rbd ....................................................................... [rbd (8G)] - | o- disk_1 ............................................................... [disk_1 (1G)] - | o- disk_2 ............................................................... [disk_2 (2G)] - | o- disk_3 ............................................................... [disk_3 (2G)] - | o- disk_4 ............................................................... [disk_4 (1G)] - | o- disk_5 ............................................................... [disk_5 (2G)] + | o- disk_1 ........................................................... [rbd/disk_1 (1G)] + | o- disk_2 ........................................................... [rbd/disk_2 (2G)] + | o- disk_3 ........................................................... [rbd/disk_3 (2G)] + | o- disk_4 ........................................................ [rbd/ec/disk_4 (1G)] + | o- disk_5 ........................................................ 
[rbd/ec/disk_5 (2G)] o- iscsi-targets ............................................................. [Targets: 1] o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw1 ................... [Auth: CHAP, Gateways: 2] | o- disks ................................................................... [Disks: 1] @@ -38,7 +38,7 @@ o- / ........................................................................... o- host-groups ........................................................... [Groups : 0] o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1] | o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: CHAP, Disks: 1(2G)] - | o- lun 0 ......................................... [rbd.disk_1(2G), Owner: rh7-gw2] + | o- lun 0 ......................................... [rbd/disk_1(2G), Owner: rh7-gw2] o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw2 ................... [Auth: None, Gateways: 2] o- disks ................................................................... [Disks: 1] | o- rbd/disk_2 .............................................. [Owner: rh7-gw1, Lun: 0] @@ -48,7 +48,17 @@ o- / ........................................................................... o- host-groups ........................................................... [Groups : 0] o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1] o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: None, Disks: 1(2G)] - o- lun 0 ......................................... [rbd.disk_2(2G), Owner: rh7-gw1] + o- lun 0 ......................................... [rbd/disk_2(2G), Owner: rh7-gw1] + o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw3 ................... [Auth: None, Gateways: 2] + o- disks ................................................................... [Disks: 1] + | o- rbd/ec/disk_4 ........................................... [Owner: rh7-gw2, Lun: 0] + o- gateways ..................................................... 
[Up: 2/2, Portals: 2] + | o- rh7-gw1 ................................................... [2006:ac81::1103 (UP)] + | o- rh7-gw2 ................................................... [2006:ac81::1104 (UP)] + o- host-groups ........................................................... [Groups : 0] + o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1] + o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: None, Disks: 1(1G)] + o- lun 0 ...................................... [rbd/ec/disk_4(1G), Owner: rh7-gw1] @@ -95,6 +105,30 @@ curl --user admin:admin -d ip_address=2006:ac81::1104 \ NOTE: please make sure both the IPv4 and IPv6 addresses are in the trusted ip list in iscsi-gateway.cfg. +Erasure Pool Support: +For the erasure pool, you need to specify the "ecpool=" parameter to store the +data when creating a disk, and the "pool=" will continue to be a replicated pool, +which will store the metadata only. + +When creating a disk and disk snapshot, for the rest api there is a little difference for +erasure pool. 
+ +You need to use "ecdisk" instead of "disk" in URL:"http://.../disk/...": + +curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d ecpool=ec -d count=5 + -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new0_ +curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d ecpool=ec -d create_image=false + -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new1 +curl --user admin:admin -X GET http://192.168.122.69:5000/api/ecdisk/rbd/ec/new2 +curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/ecdisk/rbd/ec/new3 + +You need to use "ecdisksnap" instead of "disksnap" in URL:"http://.../disksnap/...": + +curl --user admin:admin -d mode=create + -X PUT http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1 +curl --user admin:admin + -X DELETE http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1 + ## Installation ### Via RPM diff --git a/ceph_iscsi_config/.device_status.py.swp b/ceph_iscsi_config/.device_status.py.swp new file mode 100644 index 0000000..7dd960a Binary files /dev/null and b/ceph_iscsi_config/.device_status.py.swp differ diff --git a/ceph_iscsi_config/client.py b/ceph_iscsi_config/client.py index 7aca1fc..71ccad6 100644 --- a/ceph_iscsi_config/client.py +++ b/ceph_iscsi_config/client.py @@ -98,7 +98,7 @@ def __init__(self, logger, client_iqn, image_list, username, password, mutual_us self.error = True self.error_msg = err - # image_list is normally a list of strings (pool/image_name) but + # image_list is normally a list of strings (pool[/ecpool]/image_name) but # group processing forces a specific lun id allocation to masked disks # in this scenario the image list is a tuple if image_list: diff --git a/ceph_iscsi_config/lun.py b/ceph_iscsi_config/lun.py index c169c3d..bd6423d 100644 --- a/ceph_iscsi_config/lun.py +++ b/ceph_iscsi_config/lun.py @@ -1,3 +1,4 @@ +import json import rados import rbd import re @@ -13,8 +14,9 @@ from ceph_iscsi_config.backstore import USER_RBD from ceph_iscsi_config.utils import 
(convert_2_bytes, gen_control_string, valid_size, get_pool_id, ip_addresses, - get_pools, get_rbd_size, this_host, - human_size, CephiSCSIError) + get_pools, get_rbd_size, run_shell_cmd, + human_size, CephiSCSIError, this_host, + parse_disk_meta) from ceph_iscsi_config.gateway_object import GWObject from ceph_iscsi_config.target import GWTarget from ceph_iscsi_config.client import GWClient, CHAP @@ -46,13 +48,14 @@ class RBDDev(object): ] } - def __init__(self, image, size, backstore, pool=None): + def __init__(self, image, size, backstore, pool=None, ecpool=None): self.image = image self.size_bytes = convert_2_bytes(size) self.backstore = backstore if pool is None: pool = settings.config.pool self.pool = pool + self.ecpool = ecpool self.pool_id = get_pool_id(pool_name=self.pool) self.error = False self.error_msg = '' @@ -74,13 +77,14 @@ def create(self): self.image, self.size_bytes, features=RBDDev.default_features(self.backstore), - old_format=False) + old_format=False, + data_pool=self.ecpool) except (rbd.ImageExists, rbd.InvalidArgument) as err: self.error = True self.error_msg = ("Failed to create rbd image {} in " - "pool {} : {}".format(self.image, - self.pool, + "pool {}, ecpool {} : {}".format(self.image, + self.pool, self.ecpool, err)) def delete(self): @@ -289,14 +293,18 @@ class LUN(GWObject): USER_RBD: TCMU_SETTINGS } - def __init__(self, logger, pool, image, size, allocating_host, + def __init__(self, logger, pool, ecpool, image, size, allocating_host, backstore, backstore_object_name): self.logger = logger self.image = image self.pool = pool + self.ecpool = ecpool self.pool_id = 0 self.size_bytes = convert_2_bytes(size) - self.config_key = '{}/{}'.format(self.pool, self.image) + if ecpool: + self.config_key = '{}/{}/{}'.format(pool, ecpool, image) + else: + self.config_key = '{}/{}'.format(pool, image) self.allocating_host = allocating_host self.backstore = backstore @@ -351,7 +359,7 @@ def remove_lun(self, preserve_image): if self.error: return - 
rbd_image = RBDDev(self.image, '0G', self.backstore, self.pool) + rbd_image = RBDDev(self.image, '0G', self.backstore, self.pool, self.ecpool) if local_gw == self.allocating_host: # by using the allocating host we ensure the delete is not @@ -574,6 +582,38 @@ def activate(self): if client_err: raise CephiSCSIError(client_err) + def _erasure_pool_check(self): + if not self.ecpool: + return None + + data, err = run_shell_cmd( + "ceph -n {client_name} --conf {cephconf} osd metadata --format=json". + format(client_name=settings.config.cluster_client_name, + cephconf=settings.config.cephconf)) + if err: + self.logger.error("Cannot get the objectstore type") + return err + bluestore = False + for _osd in json.loads(data): + store_type = _osd['osd_objectstore'] + self.logger.debug(f"pool ({self.pool}) objectstore type is ({store_type})") + if store_type == 'bluestore': + bluestore = True + break + + if not bluestore: + return None + + data, err = run_shell_cmd( + "ceph -n {client_name} --conf {cephconf} osd pool get {pool} allow_ec_overwrites". + format(client_name=settings.config.cluster_client_name, + cephconf=settings.config.cephconf, pool=self.ecpool)) + if err: + self.logger.error(f"Cannot get allow_ec_overwrites from pool ({self.pool})") + return err + self.logger.debug(f"erasure pool ({self.pool}) allow_ec_overwrites is enabled") + return None + def allocate(self, keep_dev_in_lio=True, in_wwn=None): """ Create image and add to LIO and config. @@ -583,6 +623,10 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None): :return: LIO storage object if successful and keep_dev_in_lio=True else None. 
""" + err = self._erasure_pool_check() + if err: + return None + self.logger.debug("LUN.allocate starting, listing rbd devices") disk_list = RBDDev.rbd_list(pool=self.pool) self.logger.debug("rados pool '{}' contains the following - " @@ -593,7 +637,8 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None): "allocations is {}".format(local_gw, self.allocating_host)) - rbd_image = RBDDev(self.image, self.size_bytes, self.backstore, self.pool) + rbd_image = RBDDev(self.image, self.size_bytes, self.backstore, self.pool, + self.ecpool) self.pool_id = rbd_image.pool_id # if the image required isn't defined, create it! @@ -703,6 +748,7 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None): disk_attr = {"wwn": wwn, "image": self.image, "pool": self.pool, + "ecpool": self.ecpool, "allocating_host": self.allocating_host, "pool_id": rbd_image.pool_id, "controls": self.controls, @@ -963,7 +1009,10 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs): :param ceph_iscsi_config: Config object :param logger: logger object - :param image_id: (str) . 
format + :param pool: (str) pool name + :param ecpool: (str) ecpool name + :param image: (str) image name + :param size: (str) size :return: (str) either 'ok' or an error description """ @@ -993,12 +1042,18 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs): config = ceph_iscsi_config.config - disk_key = "{}/{}".format(kwargs['pool'], kwargs['image']) + ecpool = kwargs.get('ecpool', None) + if ecpool: + disk_key = "{}/{}/{}".format(kwargs['pool'], ecpool, kwargs['image']) + else: + disk_key = "{}/{}".format(kwargs['pool'], kwargs['image']) if mode in ['create', 'resize']: if kwargs['pool'] not in get_pools(): return "pool name is invalid" + if ecpool and ecpool not in get_pools(): + return "ecpool name is invalid" if mode == 'create': if kwargs['size'] and not valid_size(kwargs['size']): @@ -1010,6 +1065,8 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs): disk_regex = re.compile(r"^[a-zA-Z0-9\-_\.]+$") if not disk_regex.search(kwargs['pool']): return "Invalid pool name (use alphanumeric, '_', '.', or '-' characters)" + if ecpool and not disk_regex.search(ecpool): + return "Invalid ecpool name (use alphanumeric, '_', '.', or '-' characters)" if not disk_regex.search(kwargs['image']): return "Invalid image name (use alphanumeric, '_', '.', or '-' characters)" @@ -1040,9 +1097,7 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs): if mode in ["resize", "delete", "reconfigure"]: # disk must exist in the config if disk_key not in config['disks']: - return ("rbd {}/{} is not defined to the " - "configuration".format(kwargs['pool'], - kwargs['image'])) + return ("rbd {} is not defined to the configuration".format(disk_key)) if mode == 'resize': @@ -1112,7 +1167,7 @@ def _backstore_object_name_exists(disks_config, backstore_object_name_exists): if disk['backstore_object_name'] == backstore_object_name_exists]) > 0 @staticmethod - def get_backstore_object_name(pool, image, disks_config): + def get_backstore_object_name(pool, ecpool, image, disks_config): """ 
Determine the backstore storage object name based on the pool name, image name, and existing storage object names to avoid conflicts. @@ -1126,7 +1181,10 @@ def get_backstore_object_name(pool, image, disks_config): :param disks_config: disks configuration from `gateway.conf` :return: the backstore storage object name to be used """ - base_name = '{}.{}'.format(pool, image) + if ecpool: + base_name = '{}.{}.{}'.format(pool, ecpool, image) + else: + base_name = '{}.{}'.format(pool, image) candidate = base_name counter = 0 while LUN._backstore_object_name_exists(disks_config, candidate): @@ -1230,14 +1288,15 @@ def define_luns(logger, config, target): if disk_key.startswith(pool + '/')] for disk_key in pool_disks: - pool, image_name = disk_key.split('/') + pool, ecpool, image_name = parse_disk_meta(disk_key) + with rbd.Image(ioctx, image_name) as rbd_image: disk_config = config.config['disks'][disk_key] backstore = disk_config['backstore'] backstore_object_name = disk_config['backstore_object_name'] - lun = LUN(logger, pool, image_name, + lun = LUN(logger, pool, ecpool, image_name, rbd_image.size(), local_gw, backstore, backstore_object_name) diff --git a/ceph_iscsi_config/utils.py b/ceph_iscsi_config/utils.py index 4ce47d7..91ed94f 100644 --- a/ceph_iscsi_config/utils.py +++ b/ceph_iscsi_config/utils.py @@ -28,6 +28,18 @@ class CephiSCSIInval(CephiSCSIError): pass +def parse_disk_meta(disk): + pool = None + ecpool = None + image = None + try: + pool, ecpool, image = disk.split('/') + except ValueError: + pool, image = disk.split('/') + pass + return pool, ecpool, image + + def run_shell_cmd(cmd, stderr=None, shell=True): if not stderr: stderr = subprocess.STDOUT diff --git a/gwcli/client.py b/gwcli/client.py index 040bbe3..e8cba7a 100644 --- a/gwcli/client.py +++ b/gwcli/client.py @@ -4,7 +4,7 @@ from ceph_iscsi_config.client import CHAP, GWClient import ceph_iscsi_config.settings as settings -from ceph_iscsi_config.utils import human_size, this_host +from 
ceph_iscsi_config.utils import human_size, this_host, parse_disk_meta from rtslib_fb.utils import normalize_wwn, RTSLibError @@ -540,7 +540,7 @@ def ui_command_disk(self, action='add', disk=None, size=None): # a disk given here would be of the form pool.image try: - pool, image = disk.split('/') + pool, ecpool, image = parse_disk_meta(disk) except ValueError: self.logger.error("Invalid format. Use pool_name/disk_name") return diff --git a/gwcli/storage.py b/gwcli/storage.py index 1488856..1880d1f 100644 --- a/gwcli/storage.py +++ b/gwcli/storage.py @@ -16,7 +16,8 @@ APIRequest, valid_snapshot_name, get_config, refresh_control_values) -from ceph_iscsi_config.utils import valid_size, convert_2_bytes, human_size, this_host +from ceph_iscsi_config.utils import (valid_size, convert_2_bytes, human_size, + this_host, parse_disk_meta) from ceph_iscsi_config.lun import LUN import ceph_iscsi_config.settings as settings @@ -53,6 +54,14 @@ def __init__(self, parent): self.scan_queue = None self.scan_mutex = None + def _get_pool_type(self, pool): + root = self.get_ui_root() + pools = root.ceph.cluster.pools + pool_object = pools.pool_lookup.get(pool, None) + if pool_object: + return pool_object.type + return None + def _get_disk_meta(self, cluster_ioctx, disk_meta): """ Use the provided cluster context to take an rbd image name from the @@ -72,7 +81,7 @@ def _get_disk_meta(self, cluster_ioctx, disk_meta): except Queue.Empty: break else: - pool, image = rbd_name.split('/') + pool, ecpool, image = parse_disk_meta(rbd_name) disk_meta[rbd_name] = {} with cluster_ioctx.open_ioctx(pool) as ioctx: try: @@ -142,7 +151,7 @@ def refresh(self, disk_info): def _group_disks_by_pool(self, disks_config): result = {} for disk_id, disk_config in disks_config.items(): - pool, image = disk_id.split('/') + pool, ecpool, image = parse_disk_meta(disk_id) if pool not in result: result[pool] = [] result[pool].append(disk_config) @@ -176,7 +185,7 @@ def ui_command_attach(self, pool=None, image=None, 
backstore=None, wwn=None): # shorthand version of the command self.logger.debug("user provided pool/image format request") - pool, image = pool.split('/') + pool, ecpool, image = parse_disk_meta(pool) else: # long format request @@ -187,10 +196,11 @@ def ui_command_attach(self, pool=None, image=None, backstore=None, wwn=None): self.logger.debug("CMD: /disks/ attach pool={} " "image={}".format(pool, image)) - self.create_disk(pool=pool, image=image, create_image=False, backstore=backstore, wwn=wwn) + self.create_disk(pool=pool, ecpool=ecpool, image=image, create_image=False, + backstore=backstore, wwn=wwn) - def ui_command_create(self, pool=None, image=None, size=None, backstore=None, wwn=None, - count=1): + def ui_command_create(self, pool=None, ecpool=None, image=None, size=None, backstore=None, + wwn=None, count=1): """ Create a RBD image and assign to the gateway(s). @@ -205,7 +215,8 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww The syntax of each parameter is as follows; pool : Pool and image name may contain a-z, A-Z, 0-9, '_', or '-' - image characters. + ecpool: Data pool name for erasure code pool may contain a-z, A-Z, 0-9, '_', or '-' + image : characters. 
size : integer, suffixed by the allocation unit - either m/M, g/G or t/T representing the MB/GB/TB [1] backstore : lio backstore @@ -238,7 +249,7 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww "({} ?)".format(size)) return size = image - pool, image = pool.split('/') + pool, ecpool, image = parse_disk_meta(pool) else: # long format request @@ -260,8 +271,8 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww self.logger.debug("CMD: /disks/ create pool={} " "image={} size={} " "count={} ".format(pool, image, size, count)) - self.create_disk(pool=pool, image=image, size=size, count=count, backstore=backstore, - wwn=wwn) + self.create_disk(pool=pool, ecpool=ecpool, image=image, size=size, count=count, + backstore=backstore, wwn=wwn) def _valid_pool(self, pool=None): """ @@ -277,15 +288,15 @@ def _valid_pool(self, pool=None): pools = root.ceph.cluster.pools pool_object = pools.pool_lookup.get(pool, None) if pool_object: - if pool_object.type == 'replicated': - self.logger.debug("pool '{}' is ok to use".format(pool)) + if pool_object.type in ['replicated', 'erasure']: + self.logger.debug(f"pool '{pool}' is ok to use") return True - self.logger.error("Invalid pool ({}). Must already exist and " - "be replicated".format(pool)) + self.logger.error(f"Invalid pool ({pool}), the type is ({pool_object.type})." 
+ " Must already exist and be erasure or replicated") return False - def create_disk(self, pool=None, image=None, size=None, count=1, + def create_disk(self, pool=None, ecpool=None, image=None, size=None, count=1, parent=None, create_image=True, backstore=None, wwn=None): rc = 0 @@ -296,18 +307,21 @@ def create_disk(self, pool=None, image=None, size=None, count=1, local_gw = this_host() disk_key = "{}/{}".format(pool, image) + if ecpool: + disk_key = "{}/{}/{}".format(pool, ecpool, image) if not self._valid_pool(pool): return - self.logger.debug("Creating/mapping disk {}/{}".format(pool, - image)) + self.logger.debug("Creating/mapping disk {}".format(disk_key)) # make call to local api server's disk endpoint - disk_api = '{}://localhost:{}/api/disk/{}'.format(self.http_mode, - settings.config.api_port, - disk_key) - api_vars = {'pool': pool, 'owner': local_gw, + disk_api = ('{}://localhost:{}/api/{}/' + '{}'.format(self.http_mode, + settings.config.api_port, + "ecdisk" if ecpool else "disk", + disk_key)) + api_vars = {'pool': pool, 'ecpool': ecpool, 'owner': local_gw, 'count': count, 'mode': 'create', 'create_image': 'true' if create_image else 'false', 'backstore': backstore, 'wwn': wwn} @@ -329,16 +343,16 @@ def create_disk(self, pool=None, image=None, size=None, count=1, for n in range(1, (int(count) + 1), 1): if int(count) > 1: - disk_key = "{}/{}{}".format(pool, image, n) - else: - disk_key = "{}/{}".format(pool, image) + disk_key = "{}/{}/{}{}".format(pool, ecpool, image, n) - disk_api = ('{}://localhost:{}/api/disk/' + api_vars = {'ecpool': ecpool} + disk_api = ('{}://localhost:{}/api/{}/' '{}'.format(self.http_mode, settings.config.api_port, + "ecdisk" if ecpool else "disk", disk_key)) - api = APIRequest(disk_api) + api = APIRequest(disk_api, data=api_vars) api.get() if api.response.status_code == 200: @@ -513,10 +527,12 @@ def delete_disk(self, image_id, preserve_image): 'preserve_image': 'true' if preserve_image else 'false' } - disk_api = 
'{}://{}:{}/api/disk/{}'.format(self.http_mode, - local_gw, - settings.config.api_port, - image_id) + pool, ecpool, image = parse_disk_meta(image_id) + disk_api = '{}://{}:{}/api/{}/{}'.format(self.http_mode, + local_gw, + settings.config.api_port, + "ecdisk" if ecpool else "disk", + image_id) api = APIRequest(disk_api, data=api_vars) api.delete() @@ -524,7 +540,7 @@ def delete_disk(self, image_id, preserve_image): self.logger.debug("- rbd removed from all gateways, and deleted") disk_object = [disk for disk in all_disks if disk.image_id == image_id][0] - pool, _ = image_id.split('/') + pool, ecpool, image = parse_disk_meta(image_id) pool_object = [pool_object for pool_object in self.children if pool_object.name == pool][0] pool_object.remove_child(disk_object) @@ -603,9 +619,21 @@ def __init__(self, parent, pool, pool_disks_config, disks_meta=None): self.disks_meta = disks_meta self.refresh() + def _get_pool_type(self, pool): + root = self.get_ui_root() + pools = root.ceph.cluster.pools + pool_object = pools.pool_lookup.get(pool, None) + if pool_object: + return pool_object.type + return None + def refresh(self): for pool_disk_config in self.pool_disks_config: disk_id = '{}/{}'.format(pool_disk_config['pool'], pool_disk_config['image']) + if pool_disk_config.get('ecpool', None): + disk_id = '{}/{}/{}'.format(pool_disk_config['pool'], pool_disk_config['ecpool'], + pool_disk_config['image']) + size = self.disks_meta[disk_id].get('size', 0) if self.disks_meta else None features = self.disks_meta[disk_id].get('features', 0) if self.disks_meta else None snapshots = self.disks_meta[disk_id].get('snapshots', []) if self.disks_meta else None @@ -641,11 +669,13 @@ def __init__(self, parent, image_id, image_config, size=None, :param image_config: meta data for this image :return: """ - self.pool, self.rbd_image = image_id.split('/', 1) + self.pool, self.ecpool, self.rbd_image = parse_disk_meta(image_id) UINode.__init__(self, self.rbd_image, parent) self.image_id = image_id 
+ pool, ecpool, image = parse_disk_meta(self.image_id) + self.is_erasure = True if ecpool is not None else False self.size = 0 self.size_h = '' self.features = 0 @@ -688,8 +718,9 @@ def __init__(self, parent, image_id, image_config, size=None, def _apply_status(self): disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, - settings.config.api_port, self.image_id)) + '{}/{}'.format(self.http_mode, settings.config.api_port, + "ecdisk" if self.is_erasure else "disk", + self.image_id)) self.logger.debug("disk GET status for {}".format(self.image_id)) api = APIRequest(disk_api) api.get() @@ -733,10 +764,10 @@ def summary(self): status = True disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, settings.config.api_port, - self.image_id)) + '{}/{}'.format(self.http_mode, settings.config.api_port, + "ecdisk" if self.is_erasure else "disk", + self.image_id)) - self.logger.debug("disk GET status for {}".format(self.image_id)) api = APIRequest(disk_api) api.get() @@ -859,9 +890,10 @@ def reconfigure(self, attribute, value): # Issue the api request for reconfigure disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, - settings.config.api_port, - self.image_id)) + '{}/{}'.format(self.http_mode, + settings.config.api_port, + "ecdisk" if self.is_erasure else "disk", + self.image_id)) api_vars = {'pool': self.pool, 'owner': local_gw, 'controls': controls_json, 'mode': 'reconfigure'} @@ -902,9 +934,10 @@ def resize(self, size): # Issue the api request for the resize disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, - settings.config.api_port, - self.image_id)) + '{}/{}'.format(self.http_mode, + settings.config.api_port, + "ecdisk" if self.is_erasure else "disk", + self.image_id)) api_vars = {'pool': self.pool, 'size': size_rqst, 'owner': local_gw, 'mode': 'resize'} @@ -953,12 +986,10 @@ def snapshot(self, action, name): self.logger.warning("Please be patient, rollback might take time") 
self.logger.debug("Issuing snapshot {} request".format(action)) - disk_api = ('{}://localhost:{}/api/' - 'disksnap/{}/{}/{}'.format(self.http_mode, - settings.config.api_port, - self.pool, - self.rbd_image, - name)) + disk_api = ('{}://localhost:{}/api/{}/{}/' + '{}'.format(self.http_mode, settings.config.api_port, + "ecdisksnap" if self.is_erasure else "disksnap", + self.image_id, name)) if action == 'delete': api = APIRequest(disk_api) diff --git a/rbd-target-api.py b/rbd-target-api.py index 49ea5a4..ad59b5b 100644 --- a/rbd-target-api.py +++ b/rbd-target-api.py @@ -35,7 +35,7 @@ from ceph_iscsi_config.common import Config from ceph_iscsi_config.utils import (normalize_ip_literal, resolve_ip_addresses, ip_addresses, read_os_release, encryption_available, - CephiSCSIError, this_host) + CephiSCSIError, this_host, parse_disk_meta) from ceph_iscsi_config.device_status import DeviceStatusWatcher from gwcli.utils import (APIRequest, valid_gateway, valid_client, @@ -749,12 +749,12 @@ def target_disk(target_iqn=None): """ Coordinate the addition(PUT) and removal(DELETE) of a disk for a target :param target_iqn: (str) IQN of the target - :param disk: (str) rbd image name on the format pool/image + :param disk: (str) rbd image name on the format pool[/ecpool]/image **RESTRICTED** Examples: - curl --user admin:admin -d disk=rbd/new2_1 + curl --user admin:admin -d disk=rbd[/ecpool]/new2_1 -X PUT http://192.168.122.69:5000/api/targetlun/iqn.2003-01.com.redhat.iscsi-gw - curl --user admin:admin -d disk=rbd/new2_1 + curl --user admin:admin -d disk=rbd[/ecpool]/new2_1 -X DELETE http://192.168.122.69:5000/api/targetlun/iqn.2003-01.com.redhat.iscsi-gw """ @@ -791,10 +791,10 @@ def target_disk(target_iqn=None): return jsonify(message="Disk {} cannot be used because it is already mapped on " "target {}".format(disk, iqn)), 400 - pool, image_name = disk.split('/') + pool, ecpool, image_name = parse_disk_meta(disk) try: backstore = config.config['disks'][disk] - rbd_image = 
RBDDev(image_name, 0, backstore, pool) + rbd_image = RBDDev(image_name, 0, backstore, pool, ecpool) size = rbd_image.current_size logger.debug("{} size is {}".format(disk, size)) except rbd.ImageNotFound: @@ -873,7 +873,7 @@ def _target_disk(target_iqn=None): config.refresh() disk = request.form.get('disk') - pool, image = disk.split('/', 1) + pool, ecpool, image = parse_disk_meta(disk) disk_config = config.config['disks'][disk] backstore = disk_config['backstore'] backstore_object_name = disk_config['backstore_object_name'] @@ -897,6 +897,7 @@ def _target_disk(target_iqn=None): size = rbd_image.current_size lun = LUN(logger, pool, + ecpool, image, size, allocating_host, @@ -925,6 +926,7 @@ def _target_disk(target_iqn=None): lun = LUN(logger, pool, + ecpool, image, 0, purge_host, @@ -970,9 +972,7 @@ def get_disks(): return jsonify(response), 200 -@app.route('/api/disk//', methods=['GET', 'PUT', 'DELETE']) -@requires_restricted_auth -def disk(pool, image): +def common_disk(pool, ecpool, image): """ Coordinate the create/delete of rbd images across the gateway nodes This method calls the corresponding disk api entrypoints across each @@ -980,33 +980,15 @@ def disk(pool, image): then other gateways - whereas, rbd deletion is performed first against remote gateways and then the local machine is used to perform the actual rbd delete. 
- - :param pool: (str) pool name - :param image: (str) rbd image name - :param mode: (str) 'create' or 'resize' the rbd image - :param size: (str) the size of the rbd image - :param pool: (str) the pool name the rbd image will be in - :param count: (str) the number of images will be created - :param owner: (str) the owner of the rbd image - :param controls: (JSON dict) valid control overrides - :param preserve_image: (bool, 'true/false') do NOT delete RBD image - :param create_image: (bool, 'true/false') create RBD image if not exists, true as default - :param backstore: (str) lio backstore - :param wwn: (str) unit serial number - **RESTRICTED** - Examples: - curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d count=5 - -X PUT http://192.168.122.69:5000/api/disk/rbd/new0_ - curl --user admin:admin -d mode=create -d size=10g -d pool=rbd -d create_image=false - -X PUT http://192.168.122.69:5000/api/disk/rbd/new1 - curl --user admin:admin -X GET http://192.168.122.69:5000/api/disk/rbd/new2 - curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/disk/rbd/new3 """ local_gw = this_host() logger.debug("this host is {}".format(local_gw)) image_id = '{}/{}'.format(pool, image) + _image_id = image_id + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) config.refresh() @@ -1015,7 +997,7 @@ def disk(pool, image): if image_id in config.config['disks']: disk_dict = config.config["disks"][image_id] global dev_status_watcher - disk_status = dev_status_watcher.get_dev_status(image_id) + disk_status = dev_status_watcher.get_dev_status(_image_id) if disk_status: disk_dict['status'] = disk_status.get_status_dict() else: @@ -1062,7 +1044,7 @@ def disk(pool, image): logger.debug("{} controls {}".format(mode, controls)) wwn = request.form.get('wwn') - disk_usable = LUN.valid_disk(config, logger, pool=pool, + disk_usable = LUN.valid_disk(config, logger, pool=pool, ecpool=ecpool, image=image, size=size, mode=mode, count=count, controls=controls, 
backstore=backstore, wwn=wwn) @@ -1078,7 +1060,7 @@ def disk(pool, image): try: # no size implies not intention to create an image, try to # check whether it exists - rbd_image = RBDDev(image, 0, backstore, pool) + rbd_image = RBDDev(image, 0, backstore, pool, ecpool) size = rbd_image.current_size except rbd.ImageNotFound: # the create_image=true will be implied if size is specified @@ -1107,6 +1089,7 @@ def disk(pool, image): sfx) api_vars = {'pool': pool, + 'ecpool': ecpool, 'image': image, 'size': size, 'owner': local_gw, @@ -1130,13 +1113,14 @@ def disk(pool, image): else: # this is a DELETE request - disk_usable = LUN.valid_disk(config, logger, mode='delete', - pool=pool, image=image, backstore=backstore) + disk_usable = LUN.valid_disk(config, logger, mode='delete', pool=pool, + ecpool=ecpool, image=image, backstore=backstore) if disk_usable != 'ok': return jsonify(message=disk_usable), 400 api_vars = { + 'ecpool': ecpool, 'purge_host': local_gw, 'preserve_image': request.form.get('preserve_image'), 'backstore': backstore @@ -1154,6 +1138,79 @@ def disk(pool, image): resp_code +@app.route('/api/ecdisk///', methods=['GET', 'PUT', 'DELETE']) +@requires_restricted_auth +def ecdisk(pool, ecpool, image): + """ + Coordinate the create/delete of rbd images across the gateway nodes + This method calls the corresponding disk api entrypoints across each + gateway. Processing is done serially: creation is done locally first, + then other gateways - whereas, rbd deletion is performed first against + remote gateways and then the local machine is used to perform the actual + rbd delete. 
+ + :param pool: (str) pool name + :param ecpool: (str) erasure data pool name + :param image: (str) rbd image name + :param mode: (str) 'create' or 'resize' the rbd image + :param size: (str) the size of the rbd image + :param pool: (str) the pool name the rbd image will be in + :param count: (str) the number of images will be created + :param owner: (str) the owner of the rbd image + :param controls: (JSON dict) valid control overrides + :param preserve_image: (bool, 'true/false') do NOT delete RBD image + :param create_image: (bool, 'true/false') create RBD image if not exists, true as default + :param backstore: (str) lio backstore + :param wwn: (str) unit serial number + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d ecpool=ec -d count=5 + -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new0_ + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d create_image=false + -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new1 + curl --user admin:admin -X GET http://192.168.122.69:5000/api/ecdisk/rbd/ec/new2 + curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/ecdisk/rbd/ec/new3 + """ + + return common_disk(pool, ecpool, image) + + +@app.route('/api/disk//', methods=['GET', 'PUT', 'DELETE']) +@requires_restricted_auth +def disk(pool, image): + """ + Coordinate the create/delete of rbd images across the gateway nodes + This method calls the corresponding disk api entrypoints across each + gateway. Processing is done serially: creation is done locally first, + then other gateways - whereas, rbd deletion is performed first against + remote gateways and then the local machine is used to perform the actual + rbd delete. 
+ + :param pool: (str) pool name + :param image: (str) rbd image name + :param mode: (str) 'create' or 'resize' the rbd image + :param size: (str) the size of the rbd image + :param pool: (str) the pool name the rbd image will be in + :param count: (str) the number of images will be created + :param owner: (str) the owner of the rbd image + :param controls: (JSON dict) valid control overrides + :param preserve_image: (bool, 'true/false') do NOT delete RBD image + :param create_image: (bool, 'true/false') create RBD image if not exists, true as default + :param backstore: (str) lio backstore + :param wwn: (str) unit serial number + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d count=5 + -X PUT http://192.168.122.69:5000/api/disk/rbd/new0_ + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d create_image=false + -X PUT http://192.168.122.69:5000/api/disk/rbd/new1 + curl --user admin:admin -X GET http://192.168.122.69:5000/api/disk/rbd/new2 + curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/disk/rbd/new3 + """ + + return common_disk(pool, None, image) + + @app.route('/api/_disk//', methods=['GET', 'PUT', 'DELETE']) @requires_restricted_auth def _disk(pool, image): @@ -1167,7 +1224,11 @@ def _disk(pool, image): **RESTRICTED** """ - image_id = '{}/{}'.format(pool, image) + ecpool = request.form.get('ecpool', None) + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) + else: + image_id = '{}/{}'.format(pool, image) config.refresh() @@ -1188,7 +1249,7 @@ def _disk(pool, image): if mode == 'create': backstore = request.form['backstore'] backstore_object_name = LUN.get_backstore_object_name(str(request.form['pool']), - image, + ecpool, image, config.config['disks']) else: disk_config = config.config['disks'][image_id] @@ -1214,6 +1275,7 @@ def _disk(pool, image): lun = LUN(logger, str(request.form['pool']), + ecpool, image, str(request.form['size']), str(request.form['owner']),
@@ -1252,11 +1314,11 @@ def _disk(pool, image): return jsonify(message="LUN {} failure".format(mode)), 500 if 'owner' not in disk: - msg = "Disk {}/{} must be assigned to a target".format(disk['pool'], disk['image']) + msg = "Disk {} must be assigned to a target".format(image_id) logger.error("LUN owner not defined - {}".format(msg)) return jsonify(message="LUN {} failure - {}".format(mode, msg)), 400 - lun = LUN(logger, pool, image, size, disk['owner'], + lun = LUN(logger, pool, ecpool, image, size, disk['owner'], backstore, backstore_object_name) if mode == 'deactivate': try: @@ -1283,13 +1345,14 @@ def _disk(pool, image): purge_host = request.form['purge_host'] preserve_image = request.form.get('preserve_image') == 'true' logger.debug("delete request for disk image '{}'".format(image_id)) - pool, image = image_id.split('/', 1) + pool, ecpool, image = parse_disk_meta(image_id) disk_config = config.config['disks'][image_id] backstore = disk_config['backstore'] backstore_object_name = disk_config['backstore_object_name'] lun = LUN(logger, pool, + ecpool, image, 0, purge_host, @@ -1334,22 +1397,23 @@ def lun_reconfigure(image_id, controls, backstore): gateways.insert(0, 'localhost') + pool_name, ecpool_name, image_name = parse_disk_meta(image_id) + # deactivate disk - api_vars = {'mode': 'deactivate'} + api_vars = {'mode': 'deactivate', 'ecpool': ecpool_name} + _image_id = '{}/{}'.format(pool_name, image_name) logger.debug("deactivating disk") resp_text, resp_code = call_api(gateways, '_disk', - image_id, http_method='put', + _image_id, http_method='put', api_vars=api_vars) if resp_code != 200: return "failed to deactivate disk: {}".format(resp_text), resp_code - pool_name, image_name = image_id.split('/', 1) - - rbd_image = RBDDev(image_name, 0, backstore, pool_name) + rbd_image = RBDDev(image_name, 0, backstore, pool_name, ecpool_name) size = rbd_image.current_size - lun = LUN(logger, pool_name, image_name, size, disk['owner'], + lun = LUN(logger, pool_name, 
ecpool_name, image_name, size, disk['owner'], disk['backstore'], disk['backstore_object_name']) for k, v in controls.items(): @@ -1367,11 +1431,11 @@ def lun_reconfigure(image_id, controls, backstore): api_vars['controls'] = json.dumps(controls) # activate disk - api_vars['mode'] = 'activate' + api_vars['mode'] = 'activate' logger.debug("activating disk") activate_resp_text, activate_resp_code = call_api(gateways, '_disk', - image_id, http_method='put', + _image_id, http_method='put', api_vars=api_vars) if resp_code == 200 and activate_resp_code != 200: resp_text = activate_resp_text @@ -1389,31 +1453,15 @@ def lun_reconfigure(image_id, controls, backstore): return resp_text, resp_code -@app.route('/api/disksnap///', methods=['PUT', 'DELETE']) -@requires_restricted_auth -def disksnap(pool, image, name): - """ - Coordinate the management of rbd image snapshots across the gateway - nodes. This method calls the corresponding disk api entrypoints across - each gateway. Processing is done serially: rollback is done locally - first, then other gateways. Other actions are only performed locally.
- - :param image_id: (str) rbd image name of the format pool/image - :param name: (str) rbd snapshot name - :param mode: (str) 'create' or 'rollback' the rbd snapshot - **RESTRICTED** - Examples: - curl --user admin:admin -d mode=create - -X PUT http://192.168.122.69:5000/api/disksnap/rbd.image/new1 - curl --user admin:admin - -X DELETE http://192.168.122.69:5000/api/disksnap/rbd.image/new1 - """ - +def common_disksnap(pool, ecpool, image, name): if not valid_snapshot_name(name): logger.debug("snapshot request rejected due to invalid snapshot name") return jsonify(message="snapshot name is invalid"), 400 - image_id = '{}/{}'.format(pool, image) + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) + else: + image_id = '{}/{}'.format(pool, image) if image_id not in config.config['disks']: return jsonify(message="rbd image {} not " @@ -1424,7 +1472,7 @@ def disksnap(pool, image, name): if mode == 'create': resp_text, resp_code = _disksnap_create(pool, image, name) elif mode == 'rollback': - resp_text, resp_code = _disksnap_rollback(image_id, pool, + resp_text, resp_code = _disksnap_rollback(pool, ecpool, image, name) else: logger.debug("snapshot request rejected due to invalid mode") @@ -1439,6 +1487,55 @@ def disksnap(pool, image, name): return jsonify(message=resp_text), resp_code +@app.route('/api/ecdisksnap////', methods=['PUT', 'DELETE']) +@requires_restricted_auth +def ecdisksnap(pool, ecpool, image, name): + """ + Coordinate the management of rbd image snapshots across the gateway + nodes. This method calls the corresponding disk api entrypoints across + each gateway. Processing is done serially: rollback is done locally + first, then other gateways. Other actions are only performed locally. 
+ + :param pool: (str) pool name + :param ecpool: (str) erasure pool name + :param image: (str) rbd image name + :param name: (str) rbd snapshot name + :param mode: (str) 'create' or 'rollback' the rbd snapshot + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create + -X PUT http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1 + curl --user admin:admin + -X DELETE http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1 + """ + + return common_disksnap(pool, ecpool, image, name) + + +@app.route('/api/disksnap///', methods=['PUT', 'DELETE']) +@requires_restricted_auth +def disksnap(pool, image, name): + """ + Coordinate the management of rbd image snapshots across the gateway + nodes. This method calls the corresponding disk api entrypoints across + each gateway. Processing is done serially: rollback is done locally + first, then other gateways. Other actions are only performed locally. + + :param pool: (str) pool name + :param image: (str) rbd image name + :param name: (str) rbd snapshot name + :param mode: (str) 'create' or 'rollback' the rbd snapshot + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create + -X PUT http://192.168.122.69:5000/api/disksnap/rbd.image/new1 + curl --user admin:admin + -X DELETE http://192.168.122.69:5000/api/disksnap/rbd.image/new1 + """ + + return common_disksnap(pool, None, image, name) + + def _disksnap_create(pool_name, image_name, name): logger.debug("snapshot create request") try: @@ -1481,9 +1578,17 @@ def _disksnap_delete(pool_name, image_name, name): return resp_text, resp_code -def _disksnap_rollback(image_id, pool_name, image_name, name): +def _disksnap_rollback(pool, ecpool, image, name): logger.debug("snapshot rollback request") + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) + else: + image_id = '{}/{}'.format(pool, image) + + # _disk rest api will retrieve the ecpool from request parameters + _image_id = '{}/{}'.format(pool, image) + disk = 
config.config['disks'].get(image_id, None) if not disk: return "rbd image {} not found".format(image_id), 404 @@ -1495,12 +1600,13 @@ def _disksnap_rollback(image_id, pool_name, image_name, name): gateways.append(this_host()) api_vars = { + 'ecpool': ecpool, 'mode': 'deactivate'} need_active = True logger.debug("deactivating disk") resp_text, resp_code = call_api(gateways, '_disk', - image_id, + _image_id, http_method='put', api_vars=api_vars) if resp_code == 200 or resp_code == 400: @@ -1509,8 +1615,8 @@ def _disksnap_rollback(image_id, pool_name, image_name, name): try: with rados.Rados(conffile=settings.config.cephconf, name=settings.config.cluster_client_name) as cluster, \ - cluster.open_ioctx(pool_name) as ioctx, \ - rbd.Image(ioctx, image_name) as image: + cluster.open_ioctx(pool) as ioctx, \ + rbd.Image(ioctx, image) as image: try: logger.debug("rolling back to snapshot") @@ -1533,7 +1639,7 @@ def _disksnap_rollback(image_id, pool_name, image_name, name): logger.debug("activating disk") api_vars['mode'] = 'activate' activate_resp_text, activate_resp_code = call_api(gateways, '_disk', - image_id, + _image_id, http_method='put', api_vars=api_vars) if resp_code == 200 and activate_resp_code != 200: @@ -2015,9 +2121,9 @@ def clientlun(target_iqn, client_iqn): Examples: TARGET_IQN = iqn.2017-08.org.ceph:iscsi-gw CLIENT_IQN = iqn.1994-05.com.redhat:myhost4 - curl --user admin:admin -d disk=rbd/new2_1 + curl --user admin:admin -d disk=rbd[/ecpool]/new2_1 -X PUT http://192.168.122.69:5000/api/clientlun/$TARGET_IQN/$CLIENT_IQN - curl --user admin:admin -d disk=rbd/new2_1 + curl --user admin:admin -d disk=rbd[/ecpool]/new2_1 -X DELETE http://192.168.122.69:5000/api/clientlun/$TARGET_IQN/$CLIENT_IQN """ @@ -2379,7 +2485,7 @@ def hostgroup(target_iqn, group_name): Examples: curl --user admin:admin -X GET http://192.168.122.69:5000/api/hostgroup/group_name curl --user admin:admin -d members=iqn.1994-05.com.redhat:myhost4 - -d disks=rbd.disk1 -X PUT 
http://192.168.122.69:5000/api/hostgroup/group_name + -d disks=rbd[.ecpool].disk1 -X PUT http://192.168.122.69:5000/api/hostgroup/group_name curl --user admin:admin -d action=remove -d disks=rbd.disk1 -X PUT http://192.168.122.69:5000/api/hostgroup/group_name curl --user admin:admin diff --git a/tmp/keyring b/tmp/keyring new file mode 100644 index 0000000..b41743d --- /dev/null +++ b/tmp/keyring @@ -0,0 +1,3 @@ +[mon.] + key = AQAW27BgXYBCOhAAn1qekaFOPMIA2fKfIUcAXg== + caps mon = "allow *" diff --git a/tmp/kv_backend b/tmp/kv_backend new file mode 100644 index 0000000..67fb287 Binary files /dev/null and b/tmp/kv_backend differ