Volume bug fix
Signed-off-by: pgodithi <[email protected]>
prudhvigodithi committed Jun 12, 2022
1 parent 4e82477 commit 45f951c
Showing 3 changed files with 69 additions and 61 deletions.
39 changes: 20 additions & 19 deletions docs/userguide/main.md
@@ -71,19 +71,13 @@ The minimal cluster you deployed in this section is only intended for demo purposes

By default, the Operator will create OpenSearch node pools with persistent storage from the default [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/). This behaviour can be changed per node pool. You may supply an alternative storage class and access mode, or configure hostPath or emptyDir storage. Please note that hostPath is strongly discouraged, and if you do choose this option, then you must also configure affinity for the node pool to ensure that multiple pods do not schedule to the same Kubernetes host:

### PVC
Persistent storage from a PVC is the default. To explicitly configure the PVC with a custom `storageClass`:
```yaml
nodePools:
  - component: masters
    replicas: 3
    diskSize: 30
    nodeSelector:
    resources:
      requests:
        memory: "2Gi"
        cpu: "500m"
      limits:
        memory: "2Gi"
        cpu: "500m"
    roles:
      - "data"
      - "master"
@@ -93,31 +87,38 @@ nodePools:
        accessModes:
          - ReadWriteOnce
```
### EmptyDir
Alternatively, use `emptyDir` as the persistence source:
```yaml
nodePools:
  - component: masters
    replicas: 3
    diskSize: 30
    nodeSelector:
    resources:
      requests:
        memory: "2Gi"
        cpu: "500m"
      limits:
        memory: "2Gi"
        cpu: "500m"
    roles:
      - "data"
      - "master"
    persistence:
      emptyDir: {}
```
If you are using emptyDir, it is recommended to set `spec.general.drainDataNodes` to `true`. This ensures that shards are drained from the pods before rolling upgrades or restart operations are performed.
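A minimal sketch of that setting in the cluster manifest, abbreviated to the relevant fields (the node pool shown mirrors the example above):
```yaml
spec:
  general:
    drainDataNodes: true  # drain shards from a pod before it is stopped
  nodePools:
    - component: masters
      replicas: 3
      diskSize: 30
      persistence:
        emptyDir: {}
```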

### HostPath
To use `hostPath` as the persistence source:

```yaml
nodePools:
  - component: masters
    replicas: 3
    diskSize: 30
    roles:
      - "data"
      - "master"
    persistence:
      hostPath:
        path: "/var/opensearch"
```
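As noted earlier, a hostPath node pool should also declare anti-affinity so that no two pods land on the same Kubernetes host. A minimal sketch, assuming the node pool accepts a standard Kubernetes `affinity` block and that the operator labels pods with `opster.io/opensearch-cluster` (both are assumptions; adjust to the labels your pods actually carry):
```yaml
nodePools:
  - component: masters
    replicas: 3
    diskSize: 30
    roles:
      - "data"
      - "master"
    persistence:
      hostPath:
        path: "/var/opensearch"
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                opster.io/opensearch-cluster: my-cluster  # assumed label key; my-cluster is a placeholder
            topologyKey: kubernetes.io/hostname  # never co-locate two pods on one host
```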

## Configuring opensearch.yml

The Operator automatically generates the main OpenSearch configuration file `opensearch.yml` based on the parameters you provide in the different sections (e.g. TLS configuration). If you need to add your own settings, you can do that using the `additionalConfig` field in the custom resource:
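As an illustrative sketch (the placement under `spec.general` and the flat map of string keys to string values are assumptions here, and the keys shown are just example OpenSearch settings):
```yaml
spec:
  general:
    additionalConfig:
      # Each key/value pair is rendered into the generated opensearch.yml
      cluster.routing.allocation.disk.watermark.low: "85%"
      cluster.routing.allocation.disk.watermark.high: "90%"
```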
11 changes: 8 additions & 3 deletions opensearch-operator/controllers/scaler_test.go
@@ -106,7 +106,9 @@ var _ = Describe("Scaler Reconciler", func() {
if err := k8sClient.Get(context.Background(), client.ObjectKey{Namespace: namespace, Name: OpensearchCluster.Name}, &OpensearchCluster); err != nil {
return err
}
OpensearchCluster.Spec.NodePools[0].DiskSize = "32Gi"
if OpensearchCluster.Spec.NodePools[0].Persistence == nil || OpensearchCluster.Spec.NodePools[0].Persistence.PersistenceSource.PVC != nil {
OpensearchCluster.Spec.NodePools[0].DiskSize = "32Gi"
}

return k8sClient.Update(context.Background(), &OpensearchCluster)
})
@@ -130,8 +132,11 @@
if err := k8sClient.Get(context.Background(), client.ObjectKey{Namespace: namespace, Name: clusterName + "-" + cluster2.Spec.NodePools[0].Component}, &nodePool); err != nil {
return false
}
existingDisk := nodePool.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
return existingDisk == "32Gi"
if OpensearchCluster.Spec.NodePools[0].Persistence == nil || OpensearchCluster.Spec.NodePools[0].Persistence.PersistenceSource.PVC != nil {
existingDisk := nodePool.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
return existingDisk == "32Gi"
}
return true
}, timeout, interval).Should(BeTrue())
})
})
80 changes: 41 additions & 39 deletions opensearch-operator/pkg/reconcilers/cluster.go
@@ -140,52 +140,54 @@ func (r *ClusterReconciler) reconcileNodeStatefulSet(nodePool opsterv1.NodePool,
if err != nil {
return result, err
}

//Checking for existing statefulset disksize
existingDisk := existing.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
r.logger.Info("The existing statefulset VolumeClaimTemplate disk size is: " + existingDisk)
r.logger.Info("The cluster definition nodePool disk size is: " + nodePool.DiskSize)
if existingDisk == nodePool.DiskSize {
r.logger.Info("The existing disk size " + existingDisk + " is same as passed in disk size " + nodePool.DiskSize)
} else {
//Removing statefulset while allowing pods to run
r.logger.Info("deleting statefulset while orphaning pods " + existing.Name)
opts := client.DeleteOptions{}
client.PropagationPolicy(metav1.DeletePropagationOrphan).ApplyToDelete(&opts)
if err := r.Delete(r.ctx, existing, &opts); err != nil {
r.logger.Info("failed to delete statefulset" + existing.Name)
return result, err
}
//Identifying the PVC per statefulset pod and patching the new size
for i := 0; i < int(*existing.Spec.Replicas); i++ {
clusterName := r.instance.Name
claimName := fmt.Sprintf("data-%s-%s-%d", clusterName, nodePool.Component, i)
r.logger.Info("The claimName identified as " + claimName)
var pvc corev1.PersistentVolumeClaim
nsn := types.NamespacedName{
Namespace: existing.Namespace,
Name: claimName,
}
if err := r.Get(r.ctx, nsn, &pvc); err != nil {
r.logger.Info("failed to get pvc" + pvc.Name)

//Default is PVC, or explicit check for PersistenceSource as PVC
if nodePool.Persistence == nil || nodePool.Persistence.PersistenceSource.PVC != nil {
existingDisk := existing.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
r.logger.Info("The existing statefulset VolumeClaimTemplate disk size is: " + existingDisk)
r.logger.Info("The cluster definition nodePool disk size is: " + nodePool.DiskSize)
if existingDisk == nodePool.DiskSize {
r.logger.Info("The existing disk size " + existingDisk + " is same as passed in disk size " + nodePool.DiskSize)
} else {
//Removing statefulset while allowing pods to run
r.logger.Info("deleting statefulset while orphaning pods " + existing.Name)
opts := client.DeleteOptions{}
client.PropagationPolicy(metav1.DeletePropagationOrphan).ApplyToDelete(&opts)
if err := r.Delete(r.ctx, existing, &opts); err != nil {
r.logger.Info("failed to delete statefulset" + existing.Name)
return result, err
}
newDiskSize, err := resource.ParseQuantity(nodePool.DiskSize)
if err != nil {
r.logger.Info("failed to parse size " + nodePool.DiskSize)
return result, err
//Identifying the PVC per statefulset pod and patching the new size
for i := 0; i < int(*existing.Spec.Replicas); i++ {
clusterName := r.instance.Name
claimName := fmt.Sprintf("data-%s-%s-%d", clusterName, nodePool.Component, i)
r.logger.Info("The claimName identified as " + claimName)
var pvc corev1.PersistentVolumeClaim
nsn := types.NamespacedName{
Namespace: existing.Namespace,
Name: claimName,
}
if err := r.Get(r.ctx, nsn, &pvc); err != nil {
r.logger.Info("failed to get pvc" + pvc.Name)
return result, err
}
newDiskSize, err := resource.ParseQuantity(nodePool.DiskSize)
if err != nil {
r.logger.Info("failed to parse size " + nodePool.DiskSize)
return result, err
}

pvc.Spec.Resources.Requests["storage"] = newDiskSize

if err := r.Update(r.ctx, &pvc); err != nil {
r.logger.Info("failed to resize statefulset pvc " + pvc.Name)
return result, err
}
}

pvc.Spec.Resources.Requests["storage"] = newDiskSize

if err := r.Update(r.ctx, &pvc); err != nil {
r.logger.Info("failed to resize statefulset pvc " + pvc.Name)
return result, err
}
}

}

// Now set the desired replicas to be the existing replicas
// This will allow the scaler reconciler to function correctly
sts.Spec.Replicas = existing.Spec.Replicas
