From f1b89066fa17c24a9fdbefaa2153650506942f9b Mon Sep 17 00:00:00 2001
From: Xiubo Li
Date: Fri, 18 Jun 2021 10:23:46 +0800
Subject: [PATCH] ceph-iscsi: add erasure pool support

An erasure-coded pool does not support omap, so only the data is stored
in it while the metadata is kept in a replicated pool.

This adds an "ecpool=" parameter to specify the erasure-coded pool that
stores the data when creating a disk; "pool=" (which defaults to "rbd")
continues to hold the metadata.

For the REST API, when creating a disk or a disk snapshot you must
replace "disk" with "ecdisk" in the URL; see the README for details.

Signed-off-by: Xiubo Li
---
 README                                  |  50 ++++-
 ceph_iscsi_config/.device_status.py.swp | Bin 0 -> 16384 bytes
 ceph_iscsi_config/client.py             |   2 +-
 ceph_iscsi_config/lun.py                |  97 +++++++--
 ceph_iscsi_config/utils.py              |  12 ++
 gwcli/client.py                         |   4 +-
 gwcli/storage.py                        | 131 +++++++-----
 rbd-target-api.py                       | 264 +++++++++++++++++-------
 tmp/keyring                             |   3 +
 tmp/kv_backend                          | Bin 0 -> 216 bytes
 10 files changed, 404 insertions(+), 159 deletions(-)
 create mode 100644 ceph_iscsi_config/.device_status.py.swp
 create mode 100644 tmp/keyring
 create mode 100644 tmp/kv_backend

diff --git a/README b/README
index fe32bd24..679f2b2f 100644
--- a/README
+++ b/README
@@ -17,17 +17,17 @@ o- / ...........................................................................
 o- cluster .................................................................. [Clusters: 1]
 | o- ceph ..................................................................... [HEALTH_OK]
 | o- pools ................................................................... [Pools: 3]
- | | o- ec ........................................ [(2+1), Commit: 0b/40G (0%), Used: 0b]
+ | | o- ec ....................................... [(2+2), Commit: 0b/40G (0%), Used: 24K]
 | | o- iscsi ..................................... [(x3), Commit: 0b/20G (0%), Used: 18b]
 | | o- rbd ....................................... [(x3), Commit: 8G/20G (40%), Used: 5K]
 | o- topology ......................................................... [OSDs: 3,MONs: 3]
 o- disks ................................................................... [8G, Disks: 5]
 | o- rbd ....................................................................... [rbd (8G)]
- | o- disk_1 ............................................................... [disk_1 (1G)]
- | o- disk_2 ............................................................... [disk_2 (2G)]
- | o- disk_3 ............................................................... [disk_3 (2G)]
- | o- disk_4 ............................................................... [disk_4 (1G)]
- | o- disk_5 ............................................................... [disk_5 (2G)]
+ | o- disk_1 ........................................................... [rbd/disk_1 (1G)]
+ | o- disk_2 ........................................................... [rbd/disk_2 (2G)]
+ | o- disk_3 ........................................................... [rbd/disk_3 (2G)]
+ | o- disk_4 ........................................................ [rbd/ec/disk_4 (1G)]
+ | o- disk_5 ........................................................ [rbd/ec/disk_5 (2G)]
 o- iscsi-targets ............................................................. [Targets: 1]
 o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw1 ................... [Auth: CHAP, Gateways: 2]
 | o- disks ................................................................... [Disks: 1]
@@ -38,7 +38,7 @@ o- / ...........................................................................
 o- host-groups ........................................................... [Groups : 0]
 o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1]
 | o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: CHAP, Disks: 1(2G)]
- | o- lun 0 ......................................... [rbd.disk_1(2G), Owner: rh7-gw2]
+ | o- lun 0 ......................................... [rbd/disk_1(2G), Owner: rh7-gw2]
 o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw2 ................... [Auth: None, Gateways: 2]
 o- disks ................................................................... [Disks: 1]
 | o- rbd/disk_2 .............................................. [Owner: rh7-gw1, Lun: 0]
@@ -48,7 +48,17 @@ o- / ...........................................................................
 o- host-groups ........................................................... [Groups : 0]
 o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1]
 o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: None, Disks: 1(2G)]
- o- lun 0 ......................................... [rbd.disk_2(2G), Owner: rh7-gw1]
+ o- lun 0 ......................................... [rbd/disk_2(2G), Owner: rh7-gw1]
+ o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw3 ................... [Auth: None, Gateways: 2]
+ o- disks ................................................................... [Disks: 1]
+ | o- rbd/ec/disk_4 ........................................... [Owner: rh7-gw2, Lun: 0]
+ o- gateways ..................................................... [Up: 2/2, Portals: 2]
+ | o- rh7-gw1 ................................................... [2006:ac81::1103 (UP)]
+ | o- rh7-gw2 ................................................... [2006:ac81::1104 (UP)]
+ o- host-groups ........................................................... [Groups : 0]
+ o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1]
+ o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: None, Disks: 1(1G)]
+ o- lun 0 ...................................... [rbd/ec/disk_4(1G), Owner: rh7-gw1]
@@ -95,6 +105,30 @@ curl --user admin:admin -d ip_address=2006:ac81::1104 \
 NOTE: please make sure both the IPv4 and IPv6 addresses are in the trusted ip list in iscsi-gateway.cfg.

+Erasure Pool Support:
+For an erasure-coded pool you need to specify the "ecpool=" parameter when creating
+a disk to choose where the data is stored, while "pool=" will continue to be a
+replicated pool, which stores the metadata only.
+
+When creating a disk or a disk snapshot, the REST API is slightly different for
+erasure-coded pools.
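Illustration (not part of this patch): an "ecpool" disk is an ordinary RBD image whose
metadata and omap objects live in the replicated "pool=", while its data objects go to
the erasure-coded "ecpool=", which presumably maps to RBD's separate data-pool feature.
A minimal sketch with the python rbd bindings, using illustrative pool and image names:

import rados
import rbd

# Sketch only: 1G image whose data objects land in the erasure-coded pool
# 'ec' while the image header/metadata stays in the replicated pool 'rbd'.
with rados.Rados(conffile='/etc/ceph/ceph.conf') as cluster:
    with cluster.open_ioctx('rbd') as ioctx:            # replicated metadata pool
        rbd.RBD().create(ioctx, 'disk_4', 1024 ** 3,    # image name, size in bytes (1 GiB)
                         data_pool='ec')                # erasure-coded data pool

In gwcli the equivalent disk is created with the new "ecpool=" option added by this
patch, e.g. "create pool=rbd ecpool=ec image=disk_4 size=1g" from the /disks node.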
+
+You need to use "ecdisk" instead of "disk" in the URL "http://.../disk/...":
+
+curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d ecpool=ec -d count=5
+  -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new0_
+curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d ecpool=ec -d create_image=false
+  -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new1
+curl --user admin:admin -X GET http://192.168.122.69:5000/api/ecdisk/rbd/ec/new2
+curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/ecdisk/rbd/ec/new3
+
+You need to use "ecdisksnap" instead of "disksnap" in the URL "http://.../disksnap/...":
+
+curl --user admin:admin -d mode=create
+  -X PUT http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1
+curl --user admin:admin
+  -X DELETE http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1
+
 ## Installation

 ### Via RPM
diff --git a/ceph_iscsi_config/.device_status.py.swp b/ceph_iscsi_config/.device_status.py.swp
new file mode 100644
index 0000000000000000000000000000000000000000..7dd960ad71ed4890f40737591847bf3098710aa3
GIT binary patch
literal 16384
format + :param pool: (str) pool name + :param ecpool: (str) ecpool name + :param image: (str) image name + :param size: (str) size :return: (str) either 'ok' or an error description """ @@ -993,12 +1042,18 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs): config = ceph_iscsi_config.config - disk_key = "{}/{}".format(kwargs['pool'], kwargs['image']) + ecpool = kwargs.get('ecpool', None) + if ecpool: + disk_key = "{}/{}/{}".format(kwargs['pool'], ecpool, kwargs['image']) + else: + disk_key = "{}/{}".format(kwargs['pool'], kwargs['image']) if mode in ['create', 'resize']: if kwargs['pool'] not in get_pools(): return "pool name is invalid" + if ecpool and ecpool not in get_pools(): + return "ecpool name is invalid" if mode == 'create': if kwargs['size'] and not valid_size(kwargs['size']): @@ -1010,6 +1065,8 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs): disk_regex = re.compile(r"^[a-zA-Z0-9\-_\.]+$") if not disk_regex.search(kwargs['pool']): return "Invalid pool name (use alphanumeric, '_', '.', or '-' characters)" + if ecpool and not disk_regex.search(ecpool): + return "Invalid ecpool name (use alphanumeric, '_', '.', or '-' characters)" if not disk_regex.search(kwargs['image']): return "Invalid image name (use alphanumeric, '_', '.', or '-' characters)" @@ -1040,9 +1097,7 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs): if mode in ["resize", "delete", "reconfigure"]: # disk must exist in the config if disk_key not in config['disks']: - return ("rbd {}/{} is not defined to the " - "configuration".format(kwargs['pool'], - kwargs['image'])) + return ("rbd {} is not defined to the configuration".format(disk_key)) if mode == 'resize': @@ -1112,7 +1167,7 @@ def _backstore_object_name_exists(disks_config, backstore_object_name_exists): if disk['backstore_object_name'] == backstore_object_name_exists]) > 0 @staticmethod - def get_backstore_object_name(pool, image, disks_config): + def get_backstore_object_name(pool, ecpool, image, disks_config): """ Determine the backstore storage object name based on the pool name, image name, and existing storage object names to avoid conflicts. 
@@ -1126,7 +1181,10 @@ def get_backstore_object_name(pool, image, disks_config): :param disks_config: disks configuration from `gateway.conf` :return: the backstore storage object name to be used """ - base_name = '{}.{}'.format(pool, image) + if ecpool: + base_name = '{}.{}.{}'.format(pool, ecpool, image) + else: + base_name = '{}.{}'.format(pool, image) candidate = base_name counter = 0 while LUN._backstore_object_name_exists(disks_config, candidate): @@ -1230,14 +1288,15 @@ def define_luns(logger, config, target): if disk_key.startswith(pool + '/')] for disk_key in pool_disks: - pool, image_name = disk_key.split('/') + pool, ecpool, image_name = parse_disk_meta(disk_key) + with rbd.Image(ioctx, image_name) as rbd_image: disk_config = config.config['disks'][disk_key] backstore = disk_config['backstore'] backstore_object_name = disk_config['backstore_object_name'] - lun = LUN(logger, pool, image_name, + lun = LUN(logger, pool, ecpool, image_name, rbd_image.size(), local_gw, backstore, backstore_object_name) diff --git a/ceph_iscsi_config/utils.py b/ceph_iscsi_config/utils.py index 4ce47d74..91ed94fa 100644 --- a/ceph_iscsi_config/utils.py +++ b/ceph_iscsi_config/utils.py @@ -28,6 +28,18 @@ class CephiSCSIInval(CephiSCSIError): pass +def parse_disk_meta(disk): + pool = None + ecpool = None + image = None + try: + pool, ecpool, image = disk.split('/') + except ValueError: + pool, image = disk.split('/') + pass + return pool, ecpool, image + + def run_shell_cmd(cmd, stderr=None, shell=True): if not stderr: stderr = subprocess.STDOUT diff --git a/gwcli/client.py b/gwcli/client.py index 040bbe39..e8cba7a0 100644 --- a/gwcli/client.py +++ b/gwcli/client.py @@ -4,7 +4,7 @@ from ceph_iscsi_config.client import CHAP, GWClient import ceph_iscsi_config.settings as settings -from ceph_iscsi_config.utils import human_size, this_host +from ceph_iscsi_config.utils import human_size, this_host, parse_disk_meta from rtslib_fb.utils import normalize_wwn, RTSLibError @@ -540,7 +540,7 @@ def ui_command_disk(self, action='add', disk=None, size=None): # a disk given here would be of the form pool.image try: - pool, image = disk.split('/') + pool, ecpool, image = parse_disk_meta(disk) except ValueError: self.logger.error("Invalid format. 
Use pool_name/disk_name") return diff --git a/gwcli/storage.py b/gwcli/storage.py index 14888567..1880d1f2 100644 --- a/gwcli/storage.py +++ b/gwcli/storage.py @@ -16,7 +16,8 @@ APIRequest, valid_snapshot_name, get_config, refresh_control_values) -from ceph_iscsi_config.utils import valid_size, convert_2_bytes, human_size, this_host +from ceph_iscsi_config.utils import (valid_size, convert_2_bytes, human_size, + this_host, parse_disk_meta) from ceph_iscsi_config.lun import LUN import ceph_iscsi_config.settings as settings @@ -53,6 +54,14 @@ def __init__(self, parent): self.scan_queue = None self.scan_mutex = None + def _get_pool_type(self, pool): + root = self.get_ui_root() + pools = root.ceph.cluster.pools + pool_object = pools.pool_lookup.get(pool, None) + if pool_object: + return pool_object.type + return None + def _get_disk_meta(self, cluster_ioctx, disk_meta): """ Use the provided cluster context to take an rbd image name from the @@ -72,7 +81,7 @@ def _get_disk_meta(self, cluster_ioctx, disk_meta): except Queue.Empty: break else: - pool, image = rbd_name.split('/') + pool, ecpool, image = parse_disk_meta(rbd_name) disk_meta[rbd_name] = {} with cluster_ioctx.open_ioctx(pool) as ioctx: try: @@ -142,7 +151,7 @@ def refresh(self, disk_info): def _group_disks_by_pool(self, disks_config): result = {} for disk_id, disk_config in disks_config.items(): - pool, image = disk_id.split('/') + pool, ecpool, image = parse_disk_meta(disk_id) if pool not in result: result[pool] = [] result[pool].append(disk_config) @@ -176,7 +185,7 @@ def ui_command_attach(self, pool=None, image=None, backstore=None, wwn=None): # shorthand version of the command self.logger.debug("user provided pool/image format request") - pool, image = pool.split('/') + pool, ecpool, image = parse_disk_meta(pool) else: # long format request @@ -187,10 +196,11 @@ def ui_command_attach(self, pool=None, image=None, backstore=None, wwn=None): self.logger.debug("CMD: /disks/ attach pool={} " "image={}".format(pool, image)) - self.create_disk(pool=pool, image=image, create_image=False, backstore=backstore, wwn=wwn) + self.create_disk(pool=pool, ecpool=ecpool, image=image, create_image=False, + backstore=backstore, wwn=wwn) - def ui_command_create(self, pool=None, image=None, size=None, backstore=None, wwn=None, - count=1): + def ui_command_create(self, pool=None, ecpool=None, image=None, size=None, backstore=None, + wwn=None, count=1): """ Create a RBD image and assign to the gateway(s). @@ -205,7 +215,8 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww The syntax of each parameter is as follows; pool : Pool and image name may contain a-z, A-Z, 0-9, '_', or '-' - image characters. + ecpool: Data pool name for erasure code pool may contain a-z, A-Z, 0-9, '_', or '-' + image : characters. 
size : integer, suffixed by the allocation unit - either m/M, g/G or t/T representing the MB/GB/TB [1] backstore : lio backstore @@ -238,7 +249,7 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww "({} ?)".format(size)) return size = image - pool, image = pool.split('/') + pool, ecpool, image = parse_disk_meta(pool) else: # long format request @@ -260,8 +271,8 @@ def ui_command_create(self, pool=None, image=None, size=None, backstore=None, ww self.logger.debug("CMD: /disks/ create pool={} " "image={} size={} " "count={} ".format(pool, image, size, count)) - self.create_disk(pool=pool, image=image, size=size, count=count, backstore=backstore, - wwn=wwn) + self.create_disk(pool=pool, ecpool=ecpool, image=image, size=size, count=count, + backstore=backstore, wwn=wwn) def _valid_pool(self, pool=None): """ @@ -277,15 +288,15 @@ def _valid_pool(self, pool=None): pools = root.ceph.cluster.pools pool_object = pools.pool_lookup.get(pool, None) if pool_object: - if pool_object.type == 'replicated': - self.logger.debug("pool '{}' is ok to use".format(pool)) + if pool_object.type in ['replicated', 'erasure']: + self.logger.debug(f"pool '{pool}' is ok to use") return True - self.logger.error("Invalid pool ({}). Must already exist and " - "be replicated".format(pool)) + self.logger.error(f"Invalid pool ({pool}), the type is ({pool_object.type})." + " Must already exist and be erasure or replicated") return False - def create_disk(self, pool=None, image=None, size=None, count=1, + def create_disk(self, pool=None, ecpool=None, image=None, size=None, count=1, parent=None, create_image=True, backstore=None, wwn=None): rc = 0 @@ -296,18 +307,21 @@ def create_disk(self, pool=None, image=None, size=None, count=1, local_gw = this_host() disk_key = "{}/{}".format(pool, image) + if ecpool: + disk_key = "{}/{}/{}".format(pool, ecpool, image) if not self._valid_pool(pool): return - self.logger.debug("Creating/mapping disk {}/{}".format(pool, - image)) + self.logger.debug("Creating/mapping disk {}".format(disk_key)) # make call to local api server's disk endpoint - disk_api = '{}://localhost:{}/api/disk/{}'.format(self.http_mode, - settings.config.api_port, - disk_key) - api_vars = {'pool': pool, 'owner': local_gw, + disk_api = ('{}://localhost:{}/api/{}/' + '{}'.format(self.http_mode, + settings.config.api_port, + "ecdisk" if ecpool else "disk", + disk_key)) + api_vars = {'pool': pool, 'ecpool': ecpool, 'owner': local_gw, 'count': count, 'mode': 'create', 'create_image': 'true' if create_image else 'false', 'backstore': backstore, 'wwn': wwn} @@ -329,16 +343,16 @@ def create_disk(self, pool=None, image=None, size=None, count=1, for n in range(1, (int(count) + 1), 1): if int(count) > 1: - disk_key = "{}/{}{}".format(pool, image, n) - else: - disk_key = "{}/{}".format(pool, image) + disk_key = "{}/{}/{}{}".format(pool, ecpool, image, n) - disk_api = ('{}://localhost:{}/api/disk/' + api_vars = {'ecpool': ecpool} + disk_api = ('{}://localhost:{}/api/{}/' '{}'.format(self.http_mode, settings.config.api_port, + "ecdisk" if ecpool else "disk", disk_key)) - api = APIRequest(disk_api) + api = APIRequest(disk_api, data=api_vars) api.get() if api.response.status_code == 200: @@ -513,10 +527,12 @@ def delete_disk(self, image_id, preserve_image): 'preserve_image': 'true' if preserve_image else 'false' } - disk_api = '{}://{}:{}/api/disk/{}'.format(self.http_mode, - local_gw, - settings.config.api_port, - image_id) + pool, ecpool, image = parse_disk_meta(image_id) + disk_api = 
'{}://{}:{}/api/{}/{}'.format(self.http_mode, + local_gw, + settings.config.api_port, + "ecdisk" if ecpool else "disk", + image_id) api = APIRequest(disk_api, data=api_vars) api.delete() @@ -524,7 +540,7 @@ def delete_disk(self, image_id, preserve_image): self.logger.debug("- rbd removed from all gateways, and deleted") disk_object = [disk for disk in all_disks if disk.image_id == image_id][0] - pool, _ = image_id.split('/') + pool, ecpool, image = parse_disk_meta(image_id) pool_object = [pool_object for pool_object in self.children if pool_object.name == pool][0] pool_object.remove_child(disk_object) @@ -603,9 +619,21 @@ def __init__(self, parent, pool, pool_disks_config, disks_meta=None): self.disks_meta = disks_meta self.refresh() + def _get_pool_type(self, pool): + root = self.get_ui_root() + pools = root.ceph.cluster.pools + pool_object = pools.pool_lookup.get(pool, None) + if pool_object: + return pool_object.type + return None + def refresh(self): for pool_disk_config in self.pool_disks_config: disk_id = '{}/{}'.format(pool_disk_config['pool'], pool_disk_config['image']) + if pool_disk_config.get('ecpool', None): + disk_id = '{}/{}/{}'.format(pool_disk_config['pool'], pool_disk_config['ecpool'], + pool_disk_config['image']) + size = self.disks_meta[disk_id].get('size', 0) if self.disks_meta else None features = self.disks_meta[disk_id].get('features', 0) if self.disks_meta else None snapshots = self.disks_meta[disk_id].get('snapshots', []) if self.disks_meta else None @@ -641,11 +669,13 @@ def __init__(self, parent, image_id, image_config, size=None, :param image_config: meta data for this image :return: """ - self.pool, self.rbd_image = image_id.split('/', 1) + self.pool, self.ecpool, self.rbd_image = parse_disk_meta(image_id) UINode.__init__(self, self.rbd_image, parent) self.image_id = image_id + pool, ecpool, image = parse_disk_meta(self.image_id) + self.is_erasure = True if ecpool is not None else False self.size = 0 self.size_h = '' self.features = 0 @@ -688,8 +718,9 @@ def __init__(self, parent, image_id, image_config, size=None, def _apply_status(self): disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, - settings.config.api_port, self.image_id)) + '{}/{}'.format(self.http_mode, settings.config.api_port, + "ecdisk" if self.is_erasure else "disk", + self.image_id)) self.logger.debug("disk GET status for {}".format(self.image_id)) api = APIRequest(disk_api) api.get() @@ -733,10 +764,10 @@ def summary(self): status = True disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, settings.config.api_port, - self.image_id)) + '{}/{}'.format(self.http_mode, settings.config.api_port, + "ecdisk" if self.is_erasure else "disk", + self.image_id)) - self.logger.debug("disk GET status for {}".format(self.image_id)) api = APIRequest(disk_api) api.get() @@ -859,9 +890,10 @@ def reconfigure(self, attribute, value): # Issue the api request for reconfigure disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, - settings.config.api_port, - self.image_id)) + '{}/{}'.format(self.http_mode, + settings.config.api_port, + "ecdisk" if self.is_erasure else "disk", + self.image_id)) api_vars = {'pool': self.pool, 'owner': local_gw, 'controls': controls_json, 'mode': 'reconfigure'} @@ -902,9 +934,10 @@ def resize(self, size): # Issue the api request for the resize disk_api = ('{}://localhost:{}/api/' - 'disk/{}'.format(self.http_mode, - settings.config.api_port, - self.image_id)) + '{}/{}'.format(self.http_mode, + settings.config.api_port, + 
"ecdisk" if self.is_erasure else "disk", + self.image_id)) api_vars = {'pool': self.pool, 'size': size_rqst, 'owner': local_gw, 'mode': 'resize'} @@ -953,12 +986,10 @@ def snapshot(self, action, name): self.logger.warning("Please be patient, rollback might take time") self.logger.debug("Issuing snapshot {} request".format(action)) - disk_api = ('{}://localhost:{}/api/' - 'disksnap/{}/{}/{}'.format(self.http_mode, - settings.config.api_port, - self.pool, - self.rbd_image, - name)) + disk_api = ('{}://localhost:{}/api/{}/{}/' + '{}'.format(self.http_mode, settings.config.api_port, + "ecdisksnap" if self.is_erasure else "disksnap", + self.image_id, name)) if action == 'delete': api = APIRequest(disk_api) diff --git a/rbd-target-api.py b/rbd-target-api.py index 49ea5a44..ad59b5b0 100644 --- a/rbd-target-api.py +++ b/rbd-target-api.py @@ -35,7 +35,7 @@ from ceph_iscsi_config.common import Config from ceph_iscsi_config.utils import (normalize_ip_literal, resolve_ip_addresses, ip_addresses, read_os_release, encryption_available, - CephiSCSIError, this_host) + CephiSCSIError, this_host, parse_disk_meta) from ceph_iscsi_config.device_status import DeviceStatusWatcher from gwcli.utils import (APIRequest, valid_gateway, valid_client, @@ -749,12 +749,12 @@ def target_disk(target_iqn=None): """ Coordinate the addition(PUT) and removal(DELETE) of a disk for a target :param target_iqn: (str) IQN of the target - :param disk: (str) rbd image name on the format pool/image + :param disk: (str) rbd image name on the format pool[/ecpool]/image **RESTRICTED** Examples: - curl --user admin:admin -d disk=rbd/new2_1 + curl --user admin:admin -d disk=rbd[/ecpool]/new2_1 -X PUT http://192.168.122.69:5000/api/targetlun/iqn.2003-01.com.redhat.iscsi-gw - curl --user admin:admin -d disk=rbd/new2_1 + curl --user admin:admin -d disk=rbd[/ecpool]/new2_1 -X DELETE http://192.168.122.69:5000/api/targetlun/iqn.2003-01.com.redhat.iscsi-gw """ @@ -791,10 +791,10 @@ def target_disk(target_iqn=None): return jsonify(message="Disk {} cannot be used because it is already mapped on " "target {}".format(disk, iqn)), 400 - pool, image_name = disk.split('/') + pool, ecpool, image_name = parse_disk_meta(disk) try: backstore = config.config['disks'][disk] - rbd_image = RBDDev(image_name, 0, backstore, pool) + rbd_image = RBDDev(image_name, 0, backstore, pool, ecpool) size = rbd_image.current_size logger.debug("{} size is {}".format(disk, size)) except rbd.ImageNotFound: @@ -873,7 +873,7 @@ def _target_disk(target_iqn=None): config.refresh() disk = request.form.get('disk') - pool, image = disk.split('/', 1) + pool, ecpool, image = parse_disk_meta(disk) disk_config = config.config['disks'][disk] backstore = disk_config['backstore'] backstore_object_name = disk_config['backstore_object_name'] @@ -897,6 +897,7 @@ def _target_disk(target_iqn=None): size = rbd_image.current_size lun = LUN(logger, pool, + ecpool, image, size, allocating_host, @@ -925,6 +926,7 @@ def _target_disk(target_iqn=None): lun = LUN(logger, pool, + ecpool, image, 0, purge_host, @@ -970,9 +972,7 @@ def get_disks(): return jsonify(response), 200 -@app.route('/api/disk//', methods=['GET', 'PUT', 'DELETE']) -@requires_restricted_auth -def disk(pool, image): +def common_disk(pool, ecpool, image): """ Coordinate the create/delete of rbd images across the gateway nodes This method calls the corresponding disk api entrypoints across each @@ -980,33 +980,15 @@ def disk(pool, image): then other gateways - whereas, rbd deletion is performed first against remote gateways and then the 
local machine is used to perform the actual rbd delete. - - :param pool: (str) pool name - :param image: (str) rbd image name - :param mode: (str) 'create' or 'resize' the rbd image - :param size: (str) the size of the rbd image - :param pool: (str) the pool name the rbd image will be in - :param count: (str) the number of images will be created - :param owner: (str) the owner of the rbd image - :param controls: (JSON dict) valid control overrides - :param preserve_image: (bool, 'true/false') do NOT delete RBD image - :param create_image: (bool, 'true/false') create RBD image if not exists, true as default - :param backstore: (str) lio backstore - :param wwn: (str) unit serial number - **RESTRICTED** - Examples: - curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d count=5 - -X PUT http://192.168.122.69:5000/api/disk/rbd/new0_ - curl --user admin:admin -d mode=create -d size=10g -d pool=rbd -d create_image=false - -X PUT http://192.168.122.69:5000/api/disk/rbd/new1 - curl --user admin:admin -X GET http://192.168.122.69:5000/api/disk/rbd/new2 - curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/disk/rbd/new3 """ local_gw = this_host() logger.debug("this host is {}".format(local_gw)) image_id = '{}/{}'.format(pool, image) + _image_id = image_id + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) config.refresh() @@ -1015,7 +997,7 @@ def disk(pool, image): if image_id in config.config['disks']: disk_dict = config.config["disks"][image_id] global dev_status_watcher - disk_status = dev_status_watcher.get_dev_status(image_id) + disk_status = dev_status_watcher.get_dev_status(_image_id) if disk_status: disk_dict['status'] = disk_status.get_status_dict() else: @@ -1062,7 +1044,7 @@ def disk(pool, image): logger.debug("{} controls {}".format(mode, controls)) wwn = request.form.get('wwn') - disk_usable = LUN.valid_disk(config, logger, pool=pool, + disk_usable = LUN.valid_disk(config, logger, pool=pool, ecpool=ecpool, image=image, size=size, mode=mode, count=count, controls=controls, backstore=backstore, wwn=wwn) @@ -1078,7 +1060,7 @@ def disk(pool, image): try: # no size implies not intention to create an image, try to # check whether it exists - rbd_image = RBDDev(image, 0, backstore, pool) + rbd_image = RBDDev(image, 0, backstore, pool, ecpool) size = rbd_image.current_size except rbd.ImageNotFound: # the create_image=true will be implied if size is specified @@ -1107,6 +1089,7 @@ def disk(pool, image): sfx) api_vars = {'pool': pool, + 'ecpool': ecpool, 'image': image, 'size': size, 'owner': local_gw, @@ -1130,13 +1113,14 @@ def disk(pool, image): else: # this is a DELETE request - disk_usable = LUN.valid_disk(config, logger, mode='delete', - pool=pool, image=image, backstore=backstore) + disk_usable = LUN.valid_disk(config, logger, mode='delete', pool=pool, + ecpool=ecpool, image=image, backstore=backstore) if disk_usable != 'ok': return jsonify(message=disk_usable), 400 api_vars = { + 'ecpool': ecpool, 'purge_host': local_gw, 'preserve_image': request.form.get('preserve_image'), 'backstore': backstore @@ -1154,6 +1138,79 @@ def disk(pool, image): resp_code +@app.route('/api/ecdisk///', methods=['GET', 'PUT', 'DELETE']) +@requires_restricted_auth +def ecdisk(pool, ecpool, image): + """ + Coordinate the create/delete of rbd images across the gateway nodes + This method calls the corresponding disk api entrypoints across each + gateway. 
Processing is done serially: creation is done locally first, + then other gateways - whereas, rbd deletion is performed first against + remote gateways and then the local machine is used to perform the actual + rbd delete. + + :param pool: (str) pool name + :param ecpool: (str) erasure data pool name + :param image: (str) rbd image name + :param mode: (str) 'create' or 'resize' the rbd image + :param size: (str) the size of the rbd image + :param pool: (str) the pool name the rbd image will be in + :param count: (str) the number of images will be created + :param owner: (str) the owner of the rbd image + :param controls: (JSON dict) valid control overrides + :param preserve_image: (bool, 'true/false') do NOT delete RBD image + :param create_image: (bool, 'true/false') create RBD image if not exists, true as default + :param backstore: (str) lio backstore + :param wwn: (str) unit serial number + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d ecpool=ec -d count=5 + -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new0_ + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d create_image=false + -X PUT http://192.168.122.69:5000/api/ecdisk/rbd/ec/new1 + curl --user admin:admin -X GET http://192.168.122.69:5000/api/ecdisk/rbd/ec/new2 + curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/ecdisk/rbd/ec/new3 + """ + + return common_disk(pool, ecpool, image) + + +@app.route('/api/disk//', methods=['GET', 'PUT', 'DELETE']) +@requires_restricted_auth +def disk(pool, image): + """ + Coordinate the create/delete of rbd images across the gateway nodes + This method calls the corresponding disk api entrypoints across each + gateway. Processing is done serially: creation is done locally first, + then other gateways - whereas, rbd deletion is performed first against + remote gateways and then the local machine is used to perform the actual + rbd delete. 
+ + :param pool: (str) pool name + :param image: (str) rbd image name + :param mode: (str) 'create' or 'resize' the rbd image + :param size: (str) the size of the rbd image + :param pool: (str) the pool name the rbd image will be in + :param count: (str) the number of images will be created + :param owner: (str) the owner of the rbd image + :param controls: (JSON dict) valid control overrides + :param preserve_image: (bool, 'true/false') do NOT delete RBD image + :param create_image: (bool, 'true/false') create RBD image if not exists, true as default + :param backstore: (str) lio backstore + :param wwn: (str) unit serial number + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d count=5 + -X PUT http://192.168.122.69:5000/api/disk/rbd/ec/new0_ + curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d create_image=false + -X PUT http://192.168.122.69:5000/api/disk/rbd/ec/new1 + curl --user admin:admin -X GET http://192.168.122.69:5000/api/disk/rbd/new2 + curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/disk/rbd/new3 + """ + + return common_disk(pool, None, image) + + @app.route('/api/_disk//', methods=['GET', 'PUT', 'DELETE']) @requires_restricted_auth def _disk(pool, image): @@ -1167,7 +1224,11 @@ def _disk(pool, image): **RESTRICTED** """ - image_id = '{}/{}'.format(pool, image) + ecpool = request.form.get('ecpool', None) + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) + else: + image_id = '{}/{}'.format(pool, image) config.refresh() @@ -1188,7 +1249,7 @@ def _disk(pool, image): if mode == 'create': backstore = request.form['backstore'] backstore_object_name = LUN.get_backstore_object_name(str(request.form['pool']), - image, + ecpool, image, config.config['disks']) else: disk_config = config.config['disks'][image_id] @@ -1214,6 +1275,7 @@ def _disk(pool, image): lun = LUN(logger, str(request.form['pool']), + ecpool, image, str(request.form['size']), str(request.form['owner']), @@ -1252,11 +1314,11 @@ def _disk(pool, image): return jsonify(message="LUN {} failure".format(mode)), 500 if 'owner' not in disk: - msg = "Disk {}/{} must be assigned to a target".format(disk['pool'], disk['image']) + msg = "Disk {} must be assigned to a target".format(image_id) logger.error("LUN owner not defined - {}".format(msg)) return jsonify(message="LUN {} failure - {}".format(mode, msg)), 400 - lun = LUN(logger, pool, image, size, disk['owner'], + lun = LUN(logger, pool, ecpool, image, size, disk['owner'], backstore, backstore_object_name) if mode == 'deactivate': try: @@ -1283,13 +1345,14 @@ def _disk(pool, image): purge_host = request.form['purge_host'] preserve_image = request.form.get('preserve_image') == 'true' logger.debug("delete request for disk image '{}'".format(image_id)) - pool, image = image_id.split('/', 1) + pool, ecpool, image = parse_disk_meta(image_id) disk_config = config.config['disks'][image_id] backstore = disk_config['backstore'] backstore_object_name = disk_config['backstore_object_name'] lun = LUN(logger, pool, + ecpool, image, 0, purge_host, @@ -1334,22 +1397,23 @@ def lun_reconfigure(image_id, controls, backstore): gateways.insert(0, 'localhost') + pool_name, ecpool_name, image_name = parse_disk_meta(image_id) + # deactivate disk - api_vars = {'mode': 'deactivate'} + api_vars = {'mode': 'deactivate', 'ecpool': ecpool_name} + _image_id = '{}/{}'.format(pool_name, image_name) logger.debug("deactivating disk") resp_text, resp_code = call_api(gateways, '_disk', - image_id, http_method='put', + 
_image_id, http_method='put', api_vars=api_vars) if resp_code != 200: return "failed to deactivate disk: {}".format(resp_text), resp_code - pool_name, image_name = image_id.split('/', 1) - - rbd_image = RBDDev(image_name, 0, backstore, pool_name) + rbd_image = RBDDev(image_name, 0, backstore, pool_name, ecpool_name) size = rbd_image.current_size - lun = LUN(logger, pool_name, image_name, size, disk['owner'], + lun = LUN(logger, pool_name, ecpool_name, image_name, size, disk['owner'], disk['backstore'], disk['backstore_object_name']) for k, v in controls.items(): @@ -1367,11 +1431,11 @@ def lun_reconfigure(image_id, controls, backstore): api_vars['controls'] = json.dumps(controls) # activate disk - api_vars['mode'] = 'activate' + api_vars = {'mode': 'activate', 'ecpool': ecpool_name} logger.debug("activating disk") activate_resp_text, activate_resp_code = call_api(gateways, '_disk', - image_id, http_method='put', + _image_id, http_method='put', api_vars=api_vars) if resp_code == 200 and activate_resp_code != 200: resp_text = activate_resp_text @@ -1389,31 +1453,15 @@ def lun_reconfigure(image_id, controls, backstore): return resp_text, resp_code -@app.route('/api/disksnap///', methods=['PUT', 'DELETE']) -@requires_restricted_auth -def disksnap(pool, image, name): - """ - Coordinate the management of rbd image snapshots across the gateway - nodes. This method calls the corresponding disk api entrypoints across - each gateway. Processing is done serially: rollback is done locally - first, then other gateways. Other actions are only performed locally. - - :param image_id: (str) rbd image name of the format pool/image - :param name: (str) rbd snapshot name - :param mode: (str) 'create' or 'rollback' the rbd snapshot - **RESTRICTED** - Examples: - curl --user admin:admin -d mode=create - -X PUT http://192.168.122.69:5000/api/disksnap/rbd.image/new1 - curl --user admin:admin - -X DELETE http://192.168.122.69:5000/api/disksnap/rbd.image/new1 - """ - +def common_disksnap(pool, ecpool, image, name): if not valid_snapshot_name(name): logger.debug("snapshot request rejected due to invalid snapshot name") return jsonify(message="snapshot name is invalid"), 400 - image_id = '{}/{}'.format(pool, image) + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) + else: + image_id = '{}/{}'.format(pool, image) if image_id not in config.config['disks']: return jsonify(message="rbd image {} not " @@ -1424,7 +1472,7 @@ def disksnap(pool, image, name): if mode == 'create': resp_text, resp_code = _disksnap_create(pool, image, name) elif mode == 'rollback': - resp_text, resp_code = _disksnap_rollback(image_id, pool, + resp_text, resp_code = _disksnap_rollback(pool, ecpool, image, name) else: logger.debug("snapshot request rejected due to invalid mode") @@ -1439,6 +1487,55 @@ def disksnap(pool, image, name): return jsonify(message=resp_text), resp_code +@app.route('/api/ecdisksnap////', methods=['PUT', 'DELETE']) +@requires_restricted_auth +def ecdisksnap(pool, ecpool, image, name): + """ + Coordinate the management of rbd image snapshots across the gateway + nodes. This method calls the corresponding disk api entrypoints across + each gateway. Processing is done serially: rollback is done locally + first, then other gateways. Other actions are only performed locally. 
+ + :param pool: (str) pool name + :param ecpool: (str) erasure pool name + :param image: (str) rbd image name + :param name: (str) rbd snapshot name + :param mode: (str) 'create' or 'rollback' the rbd snapshot + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create + -X PUT http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1 + curl --user admin:admin + -X DELETE http://192.168.122.69:5000/api/ecdisksnap/rbd/ec/image/new1 + """ + + return common_disksnap(pool, ecpool, image, name) + + +@app.route('/api/disksnap///', methods=['PUT', 'DELETE']) +@requires_restricted_auth +def disksnap(pool, image, name): + """ + Coordinate the management of rbd image snapshots across the gateway + nodes. This method calls the corresponding disk api entrypoints across + each gateway. Processing is done serially: rollback is done locally + first, then other gateways. Other actions are only performed locally. + + :param pool: (str) pool name + :param image: (str) rbd image name + :param name: (str) rbd snapshot name + :param mode: (str) 'create' or 'rollback' the rbd snapshot + **RESTRICTED** + Examples: + curl --user admin:admin -d mode=create + -X PUT http://192.168.122.69:5000/api/disksnap/rbd.image/new1 + curl --user admin:admin + -X DELETE http://192.168.122.69:5000/api/disksnap/rbd.image/new1 + """ + + return common_disksnap(pool, None, image, name) + + def _disksnap_create(pool_name, image_name, name): logger.debug("snapshot create request") try: @@ -1481,9 +1578,17 @@ def _disksnap_delete(pool_name, image_name, name): return resp_text, resp_code -def _disksnap_rollback(image_id, pool_name, image_name, name): +def _disksnap_rollback(pool, ecpool, image, name): logger.debug("snapshot rollback request") + if ecpool: + image_id = '{}/{}/{}'.format(pool, ecpool, image) + else: + image_id = '{}/{}'.format(pool, image) + + # _disk rest api will retrieve the ecpool from request parameters + _image_id = '{}/{}'.format(pool, image) + disk = config.config['disks'].get(image_id, None) if not disk: return "rbd image {} not found".format(image_id), 404 @@ -1495,12 +1600,13 @@ def _disksnap_rollback(image_id, pool_name, image_name, name): gateways.append(this_host()) api_vars = { + 'ecpool': ecpool, 'mode': 'deactivate'} need_active = True logger.debug("deactivating disk") resp_text, resp_code = call_api(gateways, '_disk', - image_id, + _image_id, http_method='put', api_vars=api_vars) if resp_code == 200 or resp_code == 400: @@ -1509,8 +1615,8 @@ def _disksnap_rollback(image_id, pool_name, image_name, name): try: with rados.Rados(conffile=settings.config.cephconf, name=settings.config.cluster_client_name) as cluster, \ - cluster.open_ioctx(pool_name) as ioctx, \ - rbd.Image(ioctx, image_name) as image: + cluster.open_ioctx(pool) as ioctx, \ + rbd.Image(ioctx, image) as image: try: logger.debug("rolling back to snapshot") @@ -1533,7 +1639,7 @@ def _disksnap_rollback(image_id, pool_name, image_name, name): logger.debug("activating disk") api_vars['mode'] = 'activate' activate_resp_text, activate_resp_code = call_api(gateways, '_disk', - image_id, + _image_id, http_method='put', api_vars=api_vars) if resp_code == 200 and activate_resp_code != 200: @@ -2015,9 +2121,9 @@ def clientlun(target_iqn, client_iqn): Examples: TARGET_IQN = iqn.2017-08.org.ceph:iscsi-gw CLIENT_IQN = iqn.1994-05.com.redhat:myhost4 - curl --user admin:admin -d disk=rbd/new2_1 + curl --user admin:admin -d disk=rbd[/ecpool]/new2_1 -X PUT http://192.168.122.69:5000/api/clientlun/$TARGET_IQN/$CLIENT_IQN - curl --user admin:admin -d 
disk=rbd/new2_1
+    curl --user admin:admin -d disk=rbd[/ecpool]/new2_1
     -X DELETE http://192.168.122.69:5000/api/clientlun/$TARGET_IQN/$CLIENT_IQN
     """
@@ -2379,7 +2485,7 @@ def hostgroup(target_iqn, group_name):
     Examples:
     curl --user admin:admin -X GET http://192.168.122.69:5000/api/hostgroup/group_name
     curl --user admin:admin -d members=iqn.1994-05.com.redhat:myhost4
-    -d disks=rbd.disk1 -X PUT http://192.168.122.69:5000/api/hostgroup/group_name
+    -d disks=rbd[.ecpool].disk1 -X PUT http://192.168.122.69:5000/api/hostgroup/group_name
     curl --user admin:admin -d action=remove -d disks=rbd.disk1
     -X PUT http://192.168.122.69:5000/api/hostgroup/group_name
     curl --user admin:admin
diff --git a/tmp/keyring b/tmp/keyring
new file mode 100644
index 00000000..b41743d8
--- /dev/null
+++ b/tmp/keyring
@@ -0,0 +1,3 @@
+[mon.]
+    key = AQAW27BgXYBCOhAAn1qekaFOPMIA2fKfIUcAXg==
+    caps mon = "allow *"
diff --git a/tmp/kv_backend b/tmp/kv_backend
new file mode 100644
index 0000000000000000000000000000000000000000..67fb28701f6e8bbab6c59c2b213e749aae81d4e0
GIT binary patch
literal 216
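For reference, the new three-part disk keys ("pool/ecpool/image") used throughout this
patch can be exercised with the parse_disk_meta() helper added in
ceph_iscsi_config/utils.py. The standalone copy below is for illustration only; the
sample keys and print calls are not part of the patch:

# Copy of parse_disk_meta() from the ceph_iscsi_config/utils.py hunk above,
# shown standalone to illustrate how two- and three-part disk keys parse.
def parse_disk_meta(disk):
    pool = None
    ecpool = None
    image = None
    try:
        pool, ecpool, image = disk.split('/')
    except ValueError:
        pool, image = disk.split('/')
    return pool, ecpool, image

print(parse_disk_meta('rbd/disk_1'))     # ('rbd', None, 'disk_1')
print(parse_disk_meta('rbd/ec/disk_4'))  # ('rbd', 'ec', 'disk_4')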