From 104ad8f560e23b6a983d7660a223c2d0c7cc4bab Mon Sep 17 00:00:00 2001
From: Xiubo Li
Date: Fri, 18 Jun 2021 10:23:46 +0800
Subject: [PATCH] ceph-iscsi: add erasure pool support

Erasure coded pools do not support omap, so only the image data is stored
in the erasure coded pool, while the image metadata is kept in the
replicated 'rbd' pool.

Signed-off-by: Xiubo Li
---
 README                     |  5 +++
 ceph_iscsi_config/lun.py   | 78 ++++++++++++++++++++++++++++-------
 ceph_iscsi_config/utils.py | 11 +++++
 gwcli/client.py            |  2 +-
 gwcli/storage.py           | 84 ++++++++++++++++++++++++-----------
 rbd-target-api.py          | 52 ++++++++++++++---------
 6 files changed, 171 insertions(+), 61 deletions(-)

diff --git a/README b/README
index fe32bd24..5aa13bec 100644
--- a/README
+++ b/README
@@ -95,6 +95,11 @@ curl --user admin:admin -d ip_address=2006:ac81::1104 \
 NOTE: please make sure both the IPv4 and IPv6 addresses are in the
 trusted ip list in iscsi-gateway.cfg.
 
+Erasure Pool Support:
+An erasure coded pool needs no special handling beforehand; you can use it
+just like a replicated pool. Internally, ceph-iscsi stores the metadata of
+all images in the replicated 'rbd' pool by default and stores their data in
+the erasure coded pool.
 
 ## Installation
 ### Via RPM
diff --git a/ceph_iscsi_config/lun.py b/ceph_iscsi_config/lun.py
index c169c3d7..1778d79b 100644
--- a/ceph_iscsi_config/lun.py
+++ b/ceph_iscsi_config/lun.py
@@ -1,3 +1,4 @@
+import json
 import rados
 import rbd
 import re
@@ -13,8 +14,9 @@
 from ceph_iscsi_config.backstore import USER_RBD
 from ceph_iscsi_config.utils import (convert_2_bytes, gen_control_string,
                                      valid_size, get_pool_id, ip_addresses,
-                                     get_pools, get_rbd_size, this_host,
-                                     human_size, CephiSCSIError)
+                                     get_pools, get_rbd_size, run_shell_cmd,
+                                     human_size, CephiSCSIError, this_host,
+                                     parse_disk_meta)
 from ceph_iscsi_config.gateway_object import GWObject
 from ceph_iscsi_config.target import GWTarget
 from ceph_iscsi_config.client import GWClient, CHAP
@@ -46,13 +48,14 @@ class RBDDev(object):
         ]
     }
 
-    def __init__(self, image, size, backstore, pool=None):
+    def __init__(self, image, size, backstore, pool=None, ecpool=None):
         self.image = image
         self.size_bytes = convert_2_bytes(size)
         self.backstore = backstore
         if pool is None:
             pool = settings.config.pool
         self.pool = pool
+        self.ecpool = ecpool
         self.pool_id = get_pool_id(pool_name=self.pool)
         self.error = False
         self.error_msg = ''
@@ -74,13 +77,14 @@ def create(self):
                 self.image,
                 self.size_bytes,
                 features=RBDDev.default_features(self.backstore),
-                old_format=False)
+                old_format=False,
+                data_pool=self.ecpool)
 
         except (rbd.ImageExists, rbd.InvalidArgument) as err:
             self.error = True
             self.error_msg = ("Failed to create rbd image {} in "
-                              "pool {} : {}".format(self.image,
-                                                    self.pool,
+                              "pool {}, ecpool {} : {}".format(self.image,
                                                                self.pool, self.ecpool,
                                                                err))
 
     def delete(self):
@@ -289,14 +293,18 @@ class LUN(GWObject):
         USER_RBD: TCMU_SETTINGS
     }
 
-    def __init__(self, logger, pool, image, size, allocating_host,
+    def __init__(self, logger, pool, ecpool, image, size, allocating_host,
                  backstore, backstore_object_name):
         self.logger = logger
         self.image = image
         self.pool = pool
+        self.ecpool = ecpool
         self.pool_id = 0
         self.size_bytes = convert_2_bytes(size)
-        self.config_key = '{}/{}'.format(self.pool, self.image)
+        if ecpool:
+            self.config_key = '{}/{}/{}'.format(pool, ecpool, image)
+        else:
+            self.config_key = '{}/{}'.format(pool, image)
 
         self.allocating_host = allocating_host
         self.backstore = backstore
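For reference, the data_pool behaviour that RBDDev.create() now relies on can be
exercised directly with the librbd python bindings: the image's data objects land
in the pool named by data_pool, while the image header and its omap metadata stay
in the pool the ioctx was opened on. A minimal sketch, assuming a replicated pool
'rbd', an erasure coded pool 'ec_datapool' and an image 'disk_1' (all names are
examples, not part of the patch):

    import rados
    import rbd

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    ioctx = cluster.open_ioctx('rbd')                  # replicated pool: header/omap
    try:
        rbd.RBD().create(ioctx, 'disk_1', 1024 ** 3,   # 1 GiB
                         old_format=False,
                         data_pool='ec_datapool')      # EC pool: data objects only
    finally:
        ioctx.close()
        cluster.shutdown()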
@@ -351,7 +359,7 @@ def remove_lun(self, preserve_image):
         if self.error:
             return
 
-        rbd_image = RBDDev(self.image, '0G', self.backstore, self.pool)
+        rbd_image = RBDDev(self.image, '0G', self.backstore, self.pool, self.ecpool)
 
         if local_gw == self.allocating_host:
             # by using the allocating host we ensure the delete is not
@@ -574,6 +582,38 @@ def activate(self):
         if client_err:
             raise CephiSCSIError(client_err)
 
+    def _erasure_pool_check(self):
+        if not self.ecpool:
+            return None
+
+        data, err = run_shell_cmd(
+            "ceph -n {client_name} --conf {cephconf} osd metadata --format=json".
+            format(client_name=settings.config.cluster_client_name,
+                   cephconf=settings.config.cephconf))
+        if err:
+            self.logger.error("Cannot get the objectstore type")
+            return err
+        bluestore = False
+        for _osd in json.loads(data):
+            store_type = _osd['osd_objectstore']
+            self.logger.debug(f"osd objectstore type is ({store_type})")
+            if store_type == 'bluestore':
+                bluestore = True
+                break
+
+        if not bluestore:
+            return None
+
+        data, err = run_shell_cmd(
+            "ceph -n {client_name} --conf {cephconf} osd pool get {pool} allow_ec_overwrites".
+            format(client_name=settings.config.cluster_client_name,
+                   cephconf=settings.config.cephconf, pool=self.ecpool))
+        if err:
+            self.logger.error(f"Cannot get allow_ec_overwrites from pool ({self.ecpool})")
+            return err
+        self.logger.debug(f"erasure pool ({self.ecpool}) allow_ec_overwrites is enabled")
+        return None
+
     def allocate(self, keep_dev_in_lio=True, in_wwn=None):
         """
         Create image and add to LIO and config.
@@ -583,6 +623,10 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None):
         :return: LIO storage object if successful and keep_dev_in_lio=True
                  else None.
         """
+        err = self._erasure_pool_check()
+        if err:
+            return None
+
         self.logger.debug("LUN.allocate starting, listing rbd devices")
         disk_list = RBDDev.rbd_list(pool=self.pool)
         self.logger.debug("rados pool '{}' contains the following - "
@@ -593,7 +637,8 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None):
                           "allocations is {}".format(local_gw,
                                                      self.allocating_host))
 
-        rbd_image = RBDDev(self.image, self.size_bytes, self.backstore, self.pool)
+        rbd_image = RBDDev(self.image, self.size_bytes, self.backstore, self.pool,
+                           self.ecpool)
         self.pool_id = rbd_image.pool_id
 
         # if the image required isn't defined, create it!
@@ -703,6 +748,7 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None):
             disk_attr = {"wwn": wwn,
                          "image": self.image,
                          "pool": self.pool,
                         "ecpool": self.ecpool,
                          "allocating_host": self.allocating_host,
                          "pool_id": rbd_image.pool_id,
                          "controls": self.controls,
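_erasure_pool_check() only probes the OSD objectstore type and the pool's
allow_ec_overwrites flag; the erasure coded pool itself still has to be created
and have overwrites enabled up front, which is the usual prerequisite for RBD on
BlueStore. A hedged sketch of those one-off admin steps, in the spirit of
run_shell_cmd() (the pool name 'ec_datapool' and PG counts are examples only):

    import subprocess

    def ceph(*args):
        # thin wrapper comparable to ceph_iscsi_config.utils.run_shell_cmd()
        return subprocess.run(('ceph',) + args, check=True,
                              capture_output=True, text=True).stdout

    ceph('osd', 'pool', 'create', 'ec_datapool', '64', '64', 'erasure')
    ceph('osd', 'pool', 'set', 'ec_datapool', 'allow_ec_overwrites', 'true')
    # the replicated pool (settings.config.pool, 'rbd' by default) keeps the
    # image headers and omap metadata, so it needs no extra configuration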
@@ -1112,7 +1158,7 @@ def _backstore_object_name_exists(disks_config, backstore_object_name_exists):
                     if disk['backstore_object_name'] == backstore_object_name_exists]) > 0
 
     @staticmethod
-    def get_backstore_object_name(pool, image, disks_config):
+    def get_backstore_object_name(pool, ecpool, image, disks_config):
         """
         Determine the backstore storage object name based on the pool name,
         image name, and existing storage object names to avoid conflicts.
@@ -1126,7 +1172,10 @@ def get_backstore_object_name(pool, image, disks_config):
         :param disks_config: disks configuration from `gateway.conf`
         :return: the backstore storage object name to be used
         """
-        base_name = '{}.{}'.format(pool, image)
+        if ecpool:
+            base_name = '{}.{}.{}'.format(pool, ecpool, image)
+        else:
+            base_name = '{}.{}'.format(pool, image)
         candidate = base_name
         counter = 0
         while LUN._backstore_object_name_exists(disks_config, candidate):
@@ -1230,14 +1279,15 @@ def define_luns(logger, config, target):
                                   if disk_key.startswith(pool + '/')]
 
                     for disk_key in pool_disks:
-                        pool, image_name = disk_key.split('/')
+                        pool, ecpool, image_name = parse_disk_meta(disk_key)
+
                         with rbd.Image(ioctx, image_name) as rbd_image:
 
                             disk_config = config.config['disks'][disk_key]
                             backstore = disk_config['backstore']
                             backstore_object_name = disk_config['backstore_object_name']
 
-                            lun = LUN(logger, pool, image_name,
+                            lun = LUN(logger, pool, ecpool, image_name,
                                       rbd_image.size(), local_gw,
                                       backstore, backstore_object_name)
 
diff --git a/ceph_iscsi_config/utils.py b/ceph_iscsi_config/utils.py
index 4ce47d74..bac0f2fe 100644
--- a/ceph_iscsi_config/utils.py
+++ b/ceph_iscsi_config/utils.py
@@ -6,6 +6,7 @@
 import re
 import datetime
 import os
+import json
 
 import ceph_iscsi_config.settings as settings
 
@@ -27,6 +28,16 @@ class CephiSCSIInval(CephiSCSIError):
     '''
     pass
 
+def parse_disk_meta(disk):
+    pool = None
+    ecpool = None
+    image = None
+    try:
+        pool, ecpool, image = disk.split('/')
+    except ValueError:
+        pool, image = disk.split('/')
+        pass
+    return pool, ecpool, image
 
 def run_shell_cmd(cmd, stderr=None, shell=True):
     if not stderr:
diff --git a/gwcli/client.py b/gwcli/client.py
index 040bbe39..cc53a96f 100644
--- a/gwcli/client.py
+++ b/gwcli/client.py
@@ -540,7 +540,7 @@ def ui_command_disk(self, action='add', disk=None, size=None):
 
         # a disk given here would be of the form pool.image
         try:
-            pool, image = disk.split('/')
+            pool, ecpool, image = parse_disk_meta(disk)
         except ValueError:
             self.logger.error("Invalid format. Use pool_name/disk_name")
             return
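The two accepted disk key formats, and the object naming that follows from them,
can be seen with parse_disk_meta() alone; pool and image names below are examples:

    from ceph_iscsi_config.utils import parse_disk_meta

    # replicated-only image: ecpool comes back as None
    assert parse_disk_meta('rbd/disk_1') == ('rbd', None, 'disk_1')

    # image with an erasure coded data pool: pool/ecpool/image
    assert parse_disk_meta('rbd/ec_datapool/disk_1') == ('rbd', 'ec_datapool', 'disk_1')

    # the matching LUN.config_key and backstore object base name would then be
    # 'rbd/ec_datapool/disk_1' and 'rbd.ec_datapool.disk_1' respectively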
Use pool_name/disk_name") return diff --git a/gwcli/storage.py b/gwcli/storage.py index 14888567..d9fac7b7 100644 --- a/gwcli/storage.py +++ b/gwcli/storage.py @@ -16,7 +16,8 @@ APIRequest, valid_snapshot_name, get_config, refresh_control_values) -from ceph_iscsi_config.utils import valid_size, convert_2_bytes, human_size, this_host +from ceph_iscsi_config.utils import (valid_size, convert_2_bytes, human_size, + this_host, parse_disk_meta) from ceph_iscsi_config.lun import LUN import ceph_iscsi_config.settings as settings @@ -53,6 +54,14 @@ def __init__(self, parent): self.scan_queue = None self.scan_mutex = None + def _get_pool_type(self, pool): + root = self.get_ui_root() + pools = root.ceph.cluster.pools + pool_object = pools.pool_lookup.get(pool, None) + if pool_object: + return pool_object.type + return None + def _get_disk_meta(self, cluster_ioctx, disk_meta): """ Use the provided cluster context to take an rbd image name from the @@ -72,7 +81,7 @@ def _get_disk_meta(self, cluster_ioctx, disk_meta): except Queue.Empty: break else: - pool, image = rbd_name.split('/') + pool, ecpool, image = parse_disk_meta(rbd_name) disk_meta[rbd_name] = {} with cluster_ioctx.open_ioctx(pool) as ioctx: try: @@ -114,6 +123,7 @@ def refresh(self, disk_info): # Load the queue for disk_name in disk_info.keys(): + #_disk_name = self._get_erasure_image_id(disk_name) self.scan_queue.put(disk_name) start_time = int(time.time()) @@ -142,7 +152,7 @@ def refresh(self, disk_info): def _group_disks_by_pool(self, disks_config): result = {} for disk_id, disk_config in disks_config.items(): - pool, image = disk_id.split('/') + pool, ecpool, image = parse_disk_meta(disk_id) if pool not in result: result[pool] = [] result[pool].append(disk_config) @@ -176,7 +186,7 @@ def ui_command_attach(self, pool=None, image=None, backstore=None, wwn=None): # shorthand version of the command self.logger.debug("user provided pool/image format request") - pool, image = pool.split('/') + pool, ecpool, image = parse_disk_meta(pool) else: # long format request @@ -187,10 +197,10 @@ def ui_command_attach(self, pool=None, image=None, backstore=None, wwn=None): self.logger.debug("CMD: /disks/ attach pool={} " "image={}".format(pool, image)) - self.create_disk(pool=pool, image=image, create_image=False, backstore=backstore, wwn=wwn) + self.create_disk(pool=pool, ecpool=ecpool, image=image, create_image=False, backstore=backstore, wwn=wwn) - def ui_command_create(self, pool=None, image=None, size=None, backstore=None, wwn=None, - count=1): + def ui_command_create(self, pool=None, ecpool=None, image=None, size=None, backstore=None, + wwn=None, count=1): """ Create a RBD image and assign to the gateway(s). @@ -205,7 +215,8 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww The syntax of each parameter is as follows; pool : Pool and image name may contain a-z, A-Z, 0-9, '_', or '-' - image characters. + ecpool: Data pool name for erasure code pool may contain a-z, A-Z, 0-9, '_', or '-' + image : characters. 
@@ -238,7 +249,7 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww
                                   "({} ?)".format(size))
                 return
             size = image
-            pool, image = pool.split('/')
+            pool, ecpool, image = parse_disk_meta(pool)
         else:
             # long format request
 
@@ -260,8 +271,8 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww
         self.logger.debug("CMD: /disks/ create pool={} "
                           "image={} size={} "
                           "count={} ".format(pool, image, size, count))
 
-        self.create_disk(pool=pool, image=image, size=size, count=count, backstore=backstore,
-                         wwn=wwn)
+        self.create_disk(pool=pool, ecpool=ecpool, image=image, size=size, count=count,
+                         backstore=backstore, wwn=wwn)
 
     def _valid_pool(self, pool=None):
         """
@@ -277,15 +288,15 @@ def _valid_pool(self, pool=None):
         pools = root.ceph.cluster.pools
         pool_object = pools.pool_lookup.get(pool, None)
         if pool_object:
-            if pool_object.type == 'replicated':
-                self.logger.debug("pool '{}' is ok to use".format(pool))
+            if pool_object.type in ['replicated', 'erasure']:
+                self.logger.debug(f"pool '{pool}' is ok to use")
                 return True
 
-        self.logger.error("Invalid pool ({}). Must already exist and "
-                          "be replicated".format(pool))
+        pool_type = pool_object.type if pool_object else 'unknown'
+        self.logger.error(f"Invalid pool ({pool}), the type is ({pool_type})."
+                          " Must already exist and be replicated or erasure coded")
         return False
 
-    def create_disk(self, pool=None, image=None, size=None, count=1,
+    def create_disk(self, pool=None, ecpool=None, image=None, size=None, count=1,
                     parent=None, create_image=True, backstore=None, wwn=None):
 
         rc = 0
@@ -296,6 +307,9 @@ def create_disk(self, pool=None, image=None, size=None, count=1,
         local_gw = this_host()
 
         disk_key = "{}/{}".format(pool, image)
+        _disk_key = disk_key
+        if ecpool:
+            disk_key = "{}/{}/{}".format(pool, ecpool, image)
 
         if not self._valid_pool(pool):
             return
@@ -306,8 +320,8 @@ def create_disk(self, pool=None, image=None, size=None, count=1,
             # make call to local api server's disk endpoint
             disk_api = '{}://localhost:{}/api/disk/{}'.format(self.http_mode,
                                                               settings.config.api_port,
-                                                              disk_key)
-            api_vars = {'pool': pool, 'owner': local_gw,
+                                                              _disk_key)
+            api_vars = {'pool': pool, 'ecpool': ecpool, 'owner': local_gw,
                         'count': count, 'mode': 'create',
                         'create_image': 'true' if create_image else 'false',
                         'backstore': backstore, 'wwn': wwn}
@@ -329,16 +343,17 @@ def create_disk(self, pool=None, image=None, size=None, count=1,
 
             for n in range(1, (int(count) + 1), 1):
                 if int(count) > 1:
-                    disk_key = "{}/{}{}".format(pool, image, n)
+                    _disk_key = "{}/{}{}".format(pool, image, n)
                 else:
-                    disk_key = "{}/{}".format(pool, image)
+                    _disk_key = "{}/{}".format(pool, image)
 
+                api_vars = {'ecpool': ecpool}
                 disk_api = ('{}://localhost:{}/api/disk/'
                             '{}'.format(self.http_mode,
                                         settings.config.api_port,
-                                        disk_key))
+                                        _disk_key))
 
-                api = APIRequest(disk_api)
+                api = APIRequest(disk_api, data=api_vars)
                 api.get()
 
                 if api.response.status_code == 200:
@@ -524,7 +539,7 @@ def delete_disk(self, image_id, preserve_image):
         self.logger.debug("- rbd removed from all gateways, and deleted")
         disk_object = [disk for disk in all_disks
                        if disk.image_id == image_id][0]
-        pool, _ = image_id.split('/')
+        pool, ecpool, image = parse_disk_meta(image_id)
         pool_object = [pool_object for pool_object in self.children
                        if pool_object.name == pool][0]
         pool_object.remove_child(disk_object)
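Seen from the REST side, the per-image URL keeps the two-part pool/image form
(which is what _disk_key preserves), and the data pool only travels in the
'ecpool' form variable. A hedged sketch of the equivalent request made by hand;
the port (5000 is the usual api_port default), the admin:admin credentials from
the README example, and all pool/image names are assumptions, not guaranteed by
the patch:

    import requests

    api_vars = {'pool': 'rbd', 'ecpool': 'ec_datapool', 'owner': 'gw1',
                'count': 1, 'mode': 'create', 'size': '10g',
                'create_image': 'true', 'backstore': 'user:rbd'}

    resp = requests.put('http://localhost:5000/api/disk/rbd/disk_1',
                        data=api_vars, auth=('admin', 'admin'), verify=False)
    print(resp.status_code, resp.text)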
@@ -603,9 +618,22 @@ def __init__(self, parent, pool, pool_disks_config, disks_meta=None):
         self.disks_meta = disks_meta
         self.refresh()
 
+    def _get_pool_type(self, pool):
+        root = self.get_ui_root()
+        pools = root.ceph.cluster.pools
+        pool_object = pools.pool_lookup.get(pool, None)
+        if pool_object:
+            return pool_object.type
+        return None
+
     def refresh(self):
         for pool_disk_config in self.pool_disks_config:
             disk_id = '{}/{}'.format(pool_disk_config['pool'], pool_disk_config['image'])
+            if pool_disk_config.get('ecpool'):
+                disk_id = '{}/{}/{}'.format(pool_disk_config['pool'],
+                                            pool_disk_config['ecpool'],
+                                            pool_disk_config['image'])
+
             size = self.disks_meta[disk_id].get('size', 0) if self.disks_meta else None
             features = self.disks_meta[disk_id].get('features', 0) if self.disks_meta else None
             snapshots = self.disks_meta[disk_id].get('snapshots', []) if self.disks_meta else None
@@ -641,7 +669,7 @@ def __init__(self, parent, image_id, image_config, size=None,
         :param image_config: meta data for this image
         :return:
         """
-        self.pool, self.rbd_image = image_id.split('/', 1)
+        self.pool, self.ecpool, self.rbd_image = parse_disk_meta(image_id)
 
         UINode.__init__(self, self.rbd_image, parent)
 
@@ -732,12 +760,14 @@ def summary(self):
 
         status = True
 
+        pool, ecpool, image = parse_disk_meta(self.image_id)
+        _image_id = '{}/{}'.format(pool, image)
+        api_vars = {'ecpool': ecpool}
         disk_api = ('{}://localhost:{}/api/'
                     'disk/{}'.format(self.http_mode,
                                      settings.config.api_port,
-                                     self.image_id))
+                                     _image_id))
 
-        self.logger.debug("disk GET status for {}".format(self.image_id))
-        api = APIRequest(disk_api)
+        api = APIRequest(disk_api, data=api_vars)
         api.get()
 
         state = "State unknown"
diff --git a/rbd-target-api.py b/rbd-target-api.py
index 07e7507e..9559b57b 100644
--- a/rbd-target-api.py
+++ b/rbd-target-api.py
@@ -35,7 +35,7 @@
 from ceph_iscsi_config.common import Config
 from ceph_iscsi_config.utils import (normalize_ip_literal, resolve_ip_addresses,
                                      ip_addresses, read_os_release, encryption_available,
-                                     CephiSCSIError, this_host)
+                                     CephiSCSIError, this_host, parse_disk_meta)
 from ceph_iscsi_config.device_status import DeviceStatusWatcher
 from gwcli.utils import (APIRequest, valid_gateway,
                          valid_client,
@@ -791,10 +791,10 @@ def target_disk(target_iqn=None):
             return jsonify(message="Disk {} cannot be used because it is already mapped on "
                                    "target {}".format(disk, iqn)), 400
 
-        pool, image_name = disk.split('/')
+        pool, ecpool, image_name = parse_disk_meta(disk)
         try:
             backstore = config.config['disks'][disk]
-            rbd_image = RBDDev(image_name, 0, backstore, pool)
+            rbd_image = RBDDev(image_name, 0, backstore, pool, ecpool)
             size = rbd_image.current_size
             logger.debug("{} size is {}".format(disk, size))
         except rbd.ImageNotFound:
@@ -873,7 +873,7 @@ def _target_disk(target_iqn=None):
         config.refresh()
 
         disk = request.form.get('disk')
-        pool, image = disk.split('/', 1)
+        pool, ecpool, image = parse_disk_meta(disk)
 
         disk_config = config.config['disks'][disk]
         backstore = disk_config['backstore']
         backstore_object_name = disk_config['backstore_object_name']
@@ -897,6 +897,7 @@ def _target_disk(target_iqn=None):
             size = rbd_image.current_size
 
             lun = LUN(logger,
                       pool,
+                      ecpool,
                       image,
                       size,
                       allocating_host,
@@ -925,6 +926,7 @@ def _target_disk(target_iqn=None):
 
             lun = LUN(logger,
                       pool,
+                      ecpool,
                       image,
                       0,
                       purge_host,
@@ -982,6 +984,7 @@ def disk(pool, image):
     rbd delete.
 
     :param pool: (str) pool name
+    :param ecpool: (str) erasure coded data pool name
     :param image: (str) rbd image name
     :param mode: (str) 'create' or 'resize' the rbd image
     :param size: (str) the size of the rbd image
@@ -1006,7 +1009,11 @@ def disk(pool, image):
     local_gw = this_host()
     logger.debug("this host is {}".format(local_gw))
 
+    ecpool = request.form.get('ecpool', None)
     image_id = '{}/{}'.format(pool, image)
+    _image_id = image_id
+    if ecpool:
+        image_id = '{}/{}/{}'.format(pool, ecpool, image)
 
     config.refresh()
 
@@ -1015,7 +1022,7 @@ def disk(pool, image):
         if image_id in config.config['disks']:
             disk_dict = config.config["disks"][image_id]
             global dev_status_watcher
-            disk_status = dev_status_watcher.get_dev_status(image_id)
+            disk_status = dev_status_watcher.get_dev_status(_image_id)
             if disk_status:
                 disk_dict['status'] = disk_status.get_status_dict()
             else:
@@ -1078,7 +1085,7 @@ def disk(pool, image):
         try:
             # no size implies not intention to create an image, try to
             # check whether it exists
-            rbd_image = RBDDev(image, 0, backstore, pool)
+            rbd_image = RBDDev(image, 0, backstore, pool, ecpool)
             size = rbd_image.current_size
         except rbd.ImageNotFound:
             # the create_image=true will be implied if size is specified
@@ -1107,6 +1114,7 @@ def disk(pool, image):
                                sfx)
 
         api_vars = {'pool': pool,
+                    'ecpool': ecpool,
                     'image': image,
                     'size': size,
                     'owner': local_gw,
@@ -1117,7 +1125,7 @@ def disk(pool, image):
             api_vars['controls'] = request.form['controls']
 
         resp_text, resp_code = call_api(gateways, '_disk',
-                                        '{}/{}'.format(pool, image_name),
+                                        _image_id,
                                         http_method='put',
                                         api_vars=api_vars)
 
@@ -1163,11 +1171,14 @@ def _disk(pool, image):
     Disks can be created and added to each gateway, or deleted through this call
 
     :param pool: (str) pool name
+    :param ecpool: (str) erasure coded data pool name
     :param image: (str) image name
     **RESTRICTED**
     """
 
+    ecpool = request.form.get('ecpool', None)
     image_id = '{}/{}'.format(pool, image)
+    if ecpool:
+        image_id = '{}/{}/{}'.format(pool, ecpool, image)
 
     config.refresh()
 
@@ -1188,7 +1199,7 @@ def _disk(pool, image):
         if mode == 'create':
             backstore = request.form['backstore']
             backstore_object_name = LUN.get_backstore_object_name(str(request.form['pool']),
-                                                                  image,
+                                                                  ecpool, image,
                                                                   config.config['disks'])
         else:
             disk_config = config.config['disks'][image_id]
@@ -1214,6 +1225,7 @@ def _disk(pool, image):
 
         lun = LUN(logger,
                   str(request.form['pool']),
+                  ecpool,
                   image,
                   str(request.form['size']),
                   str(request.form['owner']),
@@ -1256,7 +1268,7 @@ def _disk(pool, image):
             logger.error("LUN owner not defined - {}".format(msg))
             return jsonify(message="LUN {} failure - {}".format(mode, msg)), 400
 
-        lun = LUN(logger, pool, image, size, disk['owner'],
+        lun = LUN(logger, pool, ecpool, image, size, disk['owner'],
                   backstore, backstore_object_name)
 
         if mode == 'deactivate':
             try:
@@ -1283,13 +1295,14 @@ def _disk(pool, image):
         purge_host = request.form['purge_host']
         preserve_image = request.form.get('preserve_image') == 'true'
         logger.debug("delete request for disk image '{}'".format(image_id))
-        pool, image = image_id.split('/', 1)
+        pool, ecpool, image = parse_disk_meta(image_id)
 
         disk_config = config.config['disks'][image_id]
         backstore = disk_config['backstore']
         backstore_object_name = disk_config['backstore_object_name']
 
         lun = LUN(logger,
                   pool,
+                  ecpool,
                   image,
                   0,
                   purge_host,
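For orientation, this is roughly what the resulting 'disks' entry in the
gateway.conf object looks like for an EC-backed image, using the fields that
LUN.allocate() writes into disk_attr; every value below is illustrative only:

    # illustrative gateway.conf 'disks' entry; all values are examples
    disks = {
        "rbd/ec_datapool/disk_1": {
            "wwn": "6e57fcdd-5804-4674-a215-ba6e05d05f66",   # example wwn
            "image": "disk_1",
            "pool": "rbd",
            "ecpool": "ec_datapool",
            "allocating_host": "gw1",
            "pool_id": 2,
            "controls": {},
            "backstore": "user:rbd",
            "backstore_object_name": "rbd.ec_datapool.disk_1"
        }
    }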
@@ -1334,22 +1347,23 @@ def lun_reconfigure(image_id, controls, backstore):
 
     gateways.insert(0, 'localhost')
 
+    pool_name, ecpool_name, image_name = parse_disk_meta(image_id)
+    _image_id = '{}/{}'.format(pool_name, image_name)
+
     # deactivate disk
-    api_vars = {'mode': 'deactivate'}
+    api_vars = {'mode': 'deactivate', 'ecpool': ecpool_name}
 
     logger.debug("deactivating disk")
 
     resp_text, resp_code = call_api(gateways, '_disk',
-                                    image_id, http_method='put',
+                                    _image_id, http_method='put',
                                     api_vars=api_vars)
     if resp_code != 200:
         return "failed to deactivate disk: {}".format(resp_text), resp_code
 
-    pool_name, image_name = image_id.split('/', 1)
-
-    rbd_image = RBDDev(image_name, 0, backstore, pool_name)
+    rbd_image = RBDDev(image_name, 0, backstore, pool_name, ecpool_name)
     size = rbd_image.current_size
 
-    lun = LUN(logger, pool_name, image_name, size, disk['owner'],
+    lun = LUN(logger, pool_name, ecpool_name, image_name, size, disk['owner'],
               disk['backstore'], disk['backstore_object_name'])
 
     for k, v in controls.items():
@@ -1367,11 +1381,11 @@ def lun_reconfigure(image_id, controls, backstore):
         api_vars['controls'] = json.dumps(controls)
 
     # activate disk
     api_vars['mode'] = 'activate'
     logger.debug("activating disk")
 
     activate_resp_text, activate_resp_code = call_api(gateways, '_disk',
-                                                      image_id, http_method='put',
+                                                      _image_id, http_method='put',
                                                       api_vars=api_vars)
     if resp_code == 200 and activate_resp_code != 200:
         resp_text = activate_resp_text