diff --git a/tendrl/gluster_integration/objects/definition/gluster.yaml b/tendrl/gluster_integration/objects/definition/gluster.yaml
index fcee069..997643f 100644
--- a/tendrl/gluster_integration/objects/definition/gluster.yaml
+++ b/tendrl/gluster_integration/objects/definition/gluster.yaml
@@ -141,21 +141,6 @@ namespace.gluster:
       value: clusters/$TendrlContext.integration_id/raw_map/
       list: clusters/TendrlContext.integration_id/TendrlContext/raw_map/
       help: gluster cluster details
-    VolumeOptions:
-      attrs:
-        cluster_id:
-          help: "Tendrl managed/generated cluster id for the sds being managed by Tendrl"
-          type: String
-        vol_id:
-          help: Volume id
-          type: String
-        options:
-          help: options
-          type: dict
-      enabled: true
-      value: clusters/$TendrlContext.integration_id/Volumes/$Volume.vol_id/options
-      list: clusters/$TendrlContext.integration_id/Volumes/$Volume.vol_id/options
-      help: gluster volume options
     RebalanceDetails:
       attrs:
         vol_id:
diff --git a/tendrl/gluster_integration/objects/volume/__init__.py b/tendrl/gluster_integration/objects/volume/__init__.py
index 18722c9..2cd2cde 100644
--- a/tendrl/gluster_integration/objects/volume/__init__.py
+++ b/tendrl/gluster_integration/objects/volume/__init__.py
@@ -29,6 +29,7 @@ def __init__(
         profiling_enabled=None,
         client_count=None,
         rebal_estimated_time=None,
+        options=None,
         *args,
         **kwargs
     ):
@@ -59,6 +60,7 @@ def __init__(
         self.profiling_enabled = profiling_enabled
         self.client_count = client_count
         self.rebal_estimated_time = rebal_estimated_time
+        self.options = options
         self.value = 'clusters/{0}/Volumes/{1}'
 
     def render(self):
diff --git a/tendrl/gluster_integration/objects/volume_options/__init__.py b/tendrl/gluster_integration/objects/volume_options/__init__.py
deleted file mode 100644
index 3fe20be..0000000
--- a/tendrl/gluster_integration/objects/volume_options/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from tendrl.commons import objects
-
-
-class VolumeOptions(objects.BaseObject):
-    def __init__(
-        self,
-        vol_id=None,
-        options=None,
-    ):
-        super(VolumeOptions, self).__init__()
-
-        self.vol_id = vol_id
-        self.options = options
-        self.value = 'clusters/{0}/Volumes/{1}'
-
-    def render(self):
-        self.value = self.value.format(
-            NS.tendrl_context.integration_id,
-            self.vol_id
-        )
-        return super(VolumeOptions, self).render()
diff --git a/tendrl/gluster_integration/sds_sync/__init__.py b/tendrl/gluster_integration/sds_sync/__init__.py
index 720a616..f4e6beb 100644
--- a/tendrl/gluster_integration/sds_sync/__init__.py
+++ b/tendrl/gluster_integration/sds_sync/__init__.py
@@ -49,12 +49,11 @@ def _run(self):
         gluster_brick_dir = NS.gluster.objects.GlusterBrickDir()
         gluster_brick_dir.save()
 
-        try:
-            etcd_utils.read(
-                "clusters/%s/"
-                "cluster_network" % NS.tendrl_context.integration_id
-            )
-        except etcd.EtcdKeyNotFound:
+        cluster = NS.tendrl.objects.Cluster(
+            integration_id=NS.tendrl_context.integration_id
+        ).load()
+        if cluster.cluster_network is None or\
+                cluster.cluster_network == "":
             try:
                 node_networks = etcd_utils.read(
                     "nodes/%s/Networks" % NS.node_context.node_id
@@ -64,9 +63,6 @@ def _run(self):
                 node_network = NS.tendrl.objects.NodeNetwork(
                     interface=node_networks.leaves.next().key.split('/')[-1]
                 ).load()
-                cluster = NS.tendrl.objects.Cluster(
-                    integration_id=NS.tendrl_context.integration_id
-                ).load()
                 cluster.cluster_network = node_network.subnet
                 cluster.save()
             except etcd.EtcdKeyNotFound as ex:
@@ -81,18 +77,18 @@ def _run(self):
                 )
                 raise ex
 
+        NS.node_context = NS.tendrl.objects.NodeContext().load()
         while not self._complete.is_set():
             try:
                 gevent.sleep(
                     int(NS.config.data.get("sync_interval", 10))
                 )
                 try:
-                    NS._int.wclient.write(
-                        "clusters/%s/"
-                        "sync_status" % NS.tendrl_context.integration_id,
-                        "in_progress",
-                        prevExist=False
-                    )
+                    cluster = NS.tendrl.objects.Cluster(
+                        integration_id=NS.tendrl_context.integration_id
+                    ).load()
+                    cluster.sync_status = "in_progress"
+                    cluster.save()
                 except (etcd.EtcdAlreadyExist,
                         etcd.EtcdCompareFailed) as ex:
                     pass
@@ -184,10 +180,13 @@ def _run(self):
                     if k.startswith('%s.options' % volname):
                         dict1['.'.join(k.split(".")[2:])] = v
                         options.pop(k, None)
-                NS.gluster.objects.VolumeOptions(
-                    vol_id=vol_id,
-                    options=dict1
-                ).save()
+                volume = NS.gluster.objects.Volume(
+                    vol_id=vol_id
+                ).load()
+                dest = dict(volume.options)
+                dest.update(dict1)
+                volume.options = dest
+                volume.save()
 
         # Sync cluster global details
         if "provisioner/%s" % NS.tendrl_context.integration_id \
@@ -208,7 +207,7 @@ def _run(self):
 
                 _cluster = NS.tendrl.objects.Cluster(
                     integration_id=NS.tendrl_context.integration_id
-                )
+                ).load()
                 if _cluster.exists():
                     _cluster.sync_status = "done"
                     _cluster.last_sync = str(tendrl_now())
@@ -293,25 +292,22 @@ def sync_volumes(volumes, index, vol_options):
         devicetree = b.devicetree
 
     SYNC_TTL = int(NS.config.data.get("sync_interval", 10)) + 250
-    node_context = NS.node_context.load()
-    tag_list = node_context.tags
+    NS.node_context = NS.tendrl.objects.NodeContext().load()
+    tag_list = NS.node_context.tags
     # Raise alerts for volume state change.
     cluster_provisioner = "provisioner/%s" % NS.tendrl_context.integration_id
     if cluster_provisioner in tag_list:
         try:
-            stored_volume_status = NS._int.client.read(
-                "clusters/%s/Volumes/%s/status" % (
-                    NS.tendrl_context.integration_id,
-                    volumes['volume%s.id' % index]
-                )
-            ).value
+            stored_volume = NS.gluster.objects.Volume(
+                vol_id=volumes['volume%s.id' % index]
+            ).load()
             current_status = volumes['volume%s.status' % index]
-            if stored_volume_status != "" and \
-                    current_status != stored_volume_status:
+            if stored_volume.status and stored_volume.status != "" \
+                    and current_status != stored_volume.status:
                 msg = "Status of volume: %s " + \
                     "changed from %s to %s" % (
                         volumes['volume%s.name' % index],
-                        stored_volume_status,
+                        stored_volume.status,
                         current_status
                     )
                 instance = "volume_%s" % volumes[
@@ -359,10 +355,8 @@ def sync_volumes(volumes, index, vol_options):
             ] = vol_options[
                 'volume%s.options.value%s' % (index, opt_count)
             ]
-        NS.gluster.objects.VolumeOptions(
-            vol_id=volume.vol_id,
-            options=vol_opt_dict
-        ).save(ttl=SYNC_TTL)
+        volume.options = vol_opt_dict
+        volume.save()
 
     rebal_det = NS.gluster.objects.RebalanceDetails(
         vol_id=volumes['volume%s.id' % index],
@@ -429,18 +423,14 @@ def sync_volumes(volumes, index, vol_options):
 
             # Raise alerts if the brick path changes
             try:
-                sbs = NS._int.client.read(
-                    "clusters/%s/Bricks/all/"
-                    "%s/status" % (
-                        NS.tendrl_context.
-                        integration_id,
-                        brick_name
-                    )
-                ).value
+                stored_brick = NS.gluster.objects.Brick(
+                    name=brick_name
+                ).load()
                 current_status = volumes.get(
                     'volume%s.brick%s.status' % (index, b_index)
                 )
-                if current_status != sbs:
+                if stored_brick.status and \
+                        current_status != stored_brick.status:
                     msg = "Status of brick: %s " + \
                         "under volume %s chan" + \
                         "ged from %s to %s" % (
@@ -449,7 +439,7 @@ def sync_volumes(volumes, index, vol_options):
                                 b_index
                             )],
                             volumes['volume%s.'
                                     'name' % index],
-                            sbs,
+                            stored_brick.status,
                             current_status
                         )
                     instance = "volume_%s|brick_%s" % (
diff --git a/tendrl/gluster_integration/sds_sync/brick_device_details.py b/tendrl/gluster_integration/sds_sync/brick_device_details.py
index be62203..186a111 100644
--- a/tendrl/gluster_integration/sds_sync/brick_device_details.py
+++ b/tendrl/gluster_integration/sds_sync/brick_device_details.py
@@ -54,14 +54,13 @@ def update_brick_device_details(brick_name, brick_path, devicetree):
         pvs = [dev.path for dev in device.disks]
 
     brick = NS.gluster.objects.Brick(
-        brick_name,
-        devices=disks,
-        mount_path=mount_point,
-        lv=lv,
-        vg=vg,
-        pool=pool,
-        pv=pvs,
-        size=size
-    )
-
+        name=brick_name
+    ).load()
+    brick.devices = disks
+    brick.mount_path = mount_point
+    brick.lv = lv
+    brick.vg = vg
+    brick.pool = pool
+    brick.pv = pvs
+    brick.size = size
     brick.save()
diff --git a/tendrl/gluster_integration/sds_sync/client_connections.py b/tendrl/gluster_integration/sds_sync/client_connections.py
index 3373bb4..0e8aad3 100644
--- a/tendrl/gluster_integration/sds_sync/client_connections.py
+++ b/tendrl/gluster_integration/sds_sync/client_connections.py
@@ -19,7 +19,8 @@ def sync_volume_connections(volumes):
                 fetched_brick = NS.gluster.objects.Brick(
                     name=brick_name
                 ).load()
-                vol_connections += 0 if fetched_brick.client_count == '' \
+                vol_connections += 0 if (fetched_brick.client_count == ''
+                    or fetched_brick.client_count is None) \
                     else int(fetched_brick.client_count)
                 subvol_count += 1
             except etcd.EtcdKeyNotFound:
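
Note on the sds_sync change above: the separate VolumeOptions save is replaced by a read-modify-write of Volume.options (load the volume, copy its stored option map, overlay the freshly parsed options, save). A minimal standalone sketch of that merge semantics, using plain dicts in place of the tendrl Volume object; the helper name is hypothetical, and unlike the patch's `dict(volume.options)` the sketch also guards a None options map:

    def merge_volume_options(stored_options, parsed_options):
        # Copy the stored map so the loaded object is not mutated in place,
        # then overlay freshly parsed values; parsed keys win on collision.
        dest = dict(stored_options or {})
        dest.update(parsed_options)
        return dest

    # A re-synced option overwrites the stored value; untouched keys persist.
    stored = {"features.shard": "on", "cluster.enable-shared-storage": "disable"}
    parsed = {"cluster.enable-shared-storage": "enable"}
    assert merge_volume_options(stored, parsed) == {
        "features.shard": "on",
        "cluster.enable-shared-storage": "enable",
    }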