Added logic to save/read whole object as single json
tendrl-spec: Tendrl/specifications#172
Signed-off-by: Shubhendu <[email protected]>
Shubhendu committed Aug 29, 2017
1 parent 5c72dc3 commit 6b4af99
Showing 6 changed files with 39 additions and 80 deletions.
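Every file below follows the same theme: piecemeal etcd key reads and writes are replaced by loading and saving whole objects, each persisted as a single JSON value. A minimal self-contained sketch of that pattern (toy dict store and toy class, not the real Tendrl API):

    import json

    class Volume(object):
        # Toy stand-in for a Tendrl object; illustrates only the
        # save/read-whole-object-as-single-json idea from this commit.
        def __init__(self, vol_id=None, options=None):
            self.vol_id = vol_id
            self.options = options or {}

        def save(self, store):
            # One key holding the whole object as JSON, instead of
            # one etcd key per attribute.
            store['volumes/%s' % self.vol_id] = json.dumps(vars(self))

        def load(self, store):
            self.__dict__.update(json.loads(store['volumes/%s' % self.vol_id]))
            return self

    store = {}
    Volume(vol_id='v1', options={'cluster.op-version': '31000'}).save(store)
    print(Volume(vol_id='v1').load(store).options)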
15 changes: 0 additions & 15 deletions tendrl/gluster_integration/objects/definition/gluster.yaml
@@ -141,21 +141,6 @@ namespace.gluster:
value: clusters/$TendrlContext.integration_id/raw_map/
list: clusters/TendrlContext.integration_id/TendrlContext/raw_map/
help: gluster cluster details
VolumeOptions:
attrs:
cluster_id:
help: "Tendrl managed/generated cluster id for the sds being managed by Tendrl"
type: String
vol_id:
help: Volume id
type: String
options:
help: options
type: dict
enabled: true
value: clusters/$TendrlContext.integration_id/Volumes/$Volume.vol_id/options
list: clusters/$TendrlContext.integration_id/Volumes/$Volume.vol_id/options
help: gluster volume options
RebalanceDetails:
attrs:
vol_id:
2 changes: 2 additions & 0 deletions tendrl/gluster_integration/objects/volume/__init__.py
@@ -29,6 +29,7 @@ def __init__(
profiling_enabled=None,
client_count=None,
rebal_estimated_time=None,
options=None,
*args,
**kwargs
):
@@ -59,6 +60,7 @@ def __init__(
self.profiling_enabled = profiling_enabled
self.client_count = client_count
self.rebal_estimated_time = rebal_estimated_time
self.options = options
self.value = 'clusters/{0}/Volumes/{1}'

def render(self):
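With options folded into the Volume object itself, a volume's options persist in the same write as the rest of the volume. A hedged usage sketch mirroring the diff above (NS and the constructor signature are taken from this commit's code, not verified here):

    volume = NS.gluster.objects.Volume(
        vol_id=vol_id,
        options={'performance.readdir-ahead': 'on'}  # illustrative option
    )
    volume.save()  # one write persists the volume and its options together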
21 changes: 0 additions & 21 deletions tendrl/gluster_integration/objects/volume_options/__init__.py

This file was deleted.

59 changes: 26 additions & 33 deletions tendrl/gluster_integration/sds_sync/__init__.py
@@ -87,12 +87,11 @@ def _run(self):
int(NS.config.data.get("sync_interval", 10))
)
try:
NS._int.wclient.write(
"clusters/%s/"
"sync_status" % NS.tendrl_context.integration_id,
"in_progress",
prevExist=False
)
cluster = NS.tendrl.objects.Cluster(
integration_id=NS.tendrl_context.integration_id
).load()
cluster.sync_status = "in_progress"
cluster.save()
except (etcd.EtcdAlreadyExist, etcd.EtcdCompareFailed) as ex:
pass

@@ -184,10 +183,13 @@ def _run(self):
if k.startswith('%s.options' % volname):
dict1['.'.join(k.split(".")[2:])] = v
options.pop(k, None)
NS.gluster.objects.VolumeOptions(
vol_id=vol_id,
options=dict1
).save()
volume = NS.gluster.objects.Volume(
vol_id=vol_id
).load()
dest = dict(volume.options)
dest.update(dict1)
volume.options = dest
volume.save()

# Sync cluster global details
if "provisioner/%s" % NS.tendrl_context.integration_id \
@@ -299,19 +301,16 @@ def sync_volumes(volumes, index, vol_options):
cluster_provisioner = "provisioner/%s" % NS.tendrl_context.integration_id
if cluster_provisioner in tag_list:
try:
stored_volume_status = NS._int.client.read(
"clusters/%s/Volumes/%s/status" % (
NS.tendrl_context.integration_id,
volumes['volume%s.id' % index]
)
).value
stored_volume = NS.gluster.objects.Volume(
vol_id=volumes['volume%s.id' % index]
).load()
current_status = volumes['volume%s.status' % index]
if stored_volume_status != "" and \
current_status != stored_volume_status:
if stored_volume.status and stored_volume.status != "" \
and current_status != stored_volume.status:
msg = "Status of volume: %s " + \
"changed from %s to %s" % (
volumes['volume%s.name' % index],
stored_volume_status,
stored_volume.status,
current_status
)
instance = "volume_%s" % volumes[
@@ -359,10 +358,8 @@ def sync_volumes(volumes, index, vol_options):
] = vol_options[
'volume%s.options.value%s' % (index, opt_count)
]
NS.gluster.objects.VolumeOptions(
vol_id=volume.vol_id,
options=vol_opt_dict
).save(ttl=SYNC_TTL)
volume.options = vol_opt_dict
volume.save()

rebal_det = NS.gluster.objects.RebalanceDetails(
vol_id=volumes['volume%s.id' % index],
@@ -429,18 +426,14 @@ def sync_volumes(volumes, index, vol_options):

# Raise alerts if the brick path changes
try:
sbs = NS._int.client.read(
"clusters/%s/Bricks/all/"
"%s/status" % (
NS.tendrl_context.
integration_id,
brick_name
)
).value
stored_brick = NS.gluster.objects.Brick(
name=brick_name
).load()
current_status = volumes.get(
'volume%s.brick%s.status' % (index, b_index)
)
if current_status != sbs:
if stored_brick.status and \
current_status != stored_brick.status:
msg = "Status of brick: %s " + \
"under volume %s chan" + \
"ged from %s to %s" % (
@@ -449,7 +442,7 @@ def sync_volumes(volumes, index, vol_options):
b_index
)],
volumes['volume%s.' 'name' % index],
sbs,
stored_brick.status,
current_status
)
instance = "volume_%s|brick_%s" % (
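The recurring shape in this file is read-merge-write: load the stored object, overlay the freshly synced values, and save the whole object back, instead of overwriting a standalone VolumeOptions key. A self-contained sketch of the merge step, assuming the stored options may be None:

    def merge_options(stored, synced):
        # Mirrors the volume.options update above: keep existing keys,
        # overlay newly synced values.
        dest = dict(stored or {})
        dest.update(synced)
        return dest

    assert merge_options({'a': 'off'}, {'a': 'on', 'b': '1'}) == \
        {'a': 'on', 'b': '1'}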
19 changes: 9 additions & 10 deletions tendrl/gluster_integration/sds_sync/brick_device_details.py
@@ -54,14 +54,13 @@ def update_brick_device_details(brick_name, brick_path, devicetree):
pvs = [dev.path for dev in device.disks]

brick = NS.gluster.objects.Brick(
brick_name,
devices=disks,
mount_path=mount_point,
lv=lv,
vg=vg,
pool=pool,
pv=pvs,
size=size
)

name=brick_name
).load()
brick.devices = disks
brick.mount_path = mount_point
brick.lv = lv
brick.vg = vg
brick.pool = pool
brick.pv = pvs
brick.size = size
brick.save()
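Calling load() before mutating matters here: rebuilding the Brick from keyword arguments would drop attributes persisted by other sync paths, while load-mutate-save touches only the device fields. A toy demonstration of the idiom (illustrative names, not the real API):

    class Brick(object):
        def __init__(self, name=None, status=None, devices=None):
            self.name, self.status, self.devices = name, status, devices

    stored = Brick(name='b1', status='Started')  # as if returned by load()
    stored.devices = ['/dev/sdb']                # update only device details
    assert stored.status == 'Started'            # unrelated attribute survives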
3 changes: 2 additions & 1 deletion tendrl/gluster_integration/sds_sync/client_connections.py
@@ -19,7 +19,8 @@ def sync_volume_connections(volumes):
fetched_brick = NS.gluster.objects.Brick(
name=brick_name
).load()
vol_connections += 0 if fetched_brick.client_count == '' \
vol_connections += 0 if (fetched_brick.client_count == ''
or fetched_brick.client_count is None) \
else int(fetched_brick.client_count)
subvol_count += 1
except etcd.EtcdKeyNotFound:
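The connection counter now treats both an empty string and a missing (None) client_count as zero before summing. The same coercion as a self-contained helper:

    def client_count_to_int(value):
        # '' and None both mean "no clients reported yet".
        return 0 if value in ('', None) else int(value)

    assert client_count_to_int('') == 0
    assert client_count_to_int(None) == 0
    assert client_count_to_int('7') == 7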
