From 45f951ce6220fbc723ea70e22693dceb88b95faf Mon Sep 17 00:00:00 2001
From: pgodithi
Date: Sun, 12 Jun 2022 11:28:34 -0400
Subject: [PATCH] Volume bug fix

Signed-off-by: pgodithi
---
 docs/userguide/main.md                 | 39 ++++-----
 .../controllers/scaler_test.go         | 11 ++-
 .../pkg/reconcilers/cluster.go         | 80 ++++++++++---------
 3 files changed, 69 insertions(+), 61 deletions(-)

diff --git a/docs/userguide/main.md b/docs/userguide/main.md
index 2fcf276e..931a2f93 100644
--- a/docs/userguide/main.md
+++ b/docs/userguide/main.md
@@ -71,19 +71,13 @@ The minimal cluster you deployed in this section is only intended for demo purpo
 By default, the Operator will create OpenSearch node pools with persistent storage from the default [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/). This behaviour can be changed per node pool. You may supply an alternative storage class and access mode, or configure hostPath or emptyDir storage. Please note that hostPath is strongly discouraged, and if you do choose this option, then you must also configure affinity for the node pool to ensure that multiple pods do not schedule to the same Kubernetes host:
 
+### PVC
+Persistent storage through a PVC is the default option. To explicitly request a PVC with a custom `storageClass`:
 ```yaml
 nodePools:
 - component: masters
   replicas: 3
   diskSize: 30
-  NodeSelector:
-  resources:
-    requests:
-      memory: "2Gi"
-      cpu: "500m"
-    limits:
-      memory: "2Gi"
-      cpu: "500m"
   roles:
     - "data"
     - "master"
   persistence:
@@ -93,31 +87,38 @@ nodePools:
     pvc:
       storageClass: mystorageclass
       accessModes:
        - ReadWriteOnce
 ```
-
-or
+### EmptyDir
+To use an `emptyDir` volume as the persistence source:
 ```yaml
 nodePools:
 - component: masters
   replicas: 3
   diskSize: 30
-  NodeSelector:
-  resources:
-    requests:
-      memory: "2Gi"
-      cpu: "500m"
-    limits:
-      memory: "2Gi"
-      cpu: "500m"
   roles:
     - "data"
     - "master"
   persistence:
     emptyDir: {}
 ```
-
 If you are using emptyDir, it is recommended that you set `spec.general.drainDataNodes` to be `true`. This will ensure that shards are drained from the pods before rolling upgrades or restart operations are performed.
 
+### HostPath
+To use a `hostPath` volume as the persistence source:
+
+```yaml
+nodePools:
+- component: masters
+  replicas: 3
+  diskSize: 30
+  roles:
+    - "data"
+    - "master"
+  persistence:
+    hostPath:
+      path: "/var/opensearch"
+```
+
 ## Configuring opensearch.yml
 The Operator automatically generates the main OpenSearch configuration file `opensearch.yml` based on the parameters you provide in the different sections (e.g. TLS configuration).
 If you need to add your own settings, you can do that using the `additionalConfig` field in the custom resource:
diff --git a/opensearch-operator/controllers/scaler_test.go b/opensearch-operator/controllers/scaler_test.go
index 2e6e9282..8448ff96 100644
--- a/opensearch-operator/controllers/scaler_test.go
+++ b/opensearch-operator/controllers/scaler_test.go
@@ -106,7 +106,9 @@ var _ = Describe("Scaler Reconciler", func() {
 			if err := k8sClient.Get(context.Background(), client.ObjectKey{Namespace: namespace, Name: OpensearchCluster.Name}, &OpensearchCluster); err != nil {
 				return err
 			}
-			OpensearchCluster.Spec.NodePools[0].DiskSize = "32Gi"
+			if OpensearchCluster.Spec.NodePools[0].Persistence == nil || OpensearchCluster.Spec.NodePools[0].Persistence.PersistenceSource.PVC != nil {
+				OpensearchCluster.Spec.NodePools[0].DiskSize = "32Gi"
+			}
 			return k8sClient.Update(context.Background(), &OpensearchCluster)
 		})
@@ -130,8 +132,11 @@
 			if err := k8sClient.Get(context.Background(), client.ObjectKey{Namespace: namespace, Name: clusterName + "-" + cluster2.Spec.NodePools[0].Component}, &nodePool); err != nil {
 				return false
 			}
-			existingDisk := nodePool.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
-			return existingDisk == "32Gi"
+			if OpensearchCluster.Spec.NodePools[0].Persistence == nil || OpensearchCluster.Spec.NodePools[0].Persistence.PersistenceSource.PVC != nil {
+				existingDisk := nodePool.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
+				return existingDisk == "32Gi"
+			}
+			return true
 		}, timeout, interval).Should(BeTrue())
 	})
 })
diff --git a/opensearch-operator/pkg/reconcilers/cluster.go b/opensearch-operator/pkg/reconcilers/cluster.go
index a6ebfd7b..81f9c73d 100644
--- a/opensearch-operator/pkg/reconcilers/cluster.go
+++ b/opensearch-operator/pkg/reconcilers/cluster.go
@@ -140,52 +140,54 @@ func (r *ClusterReconciler) reconcileNodeStatefulSet(nodePool opsterv1.NodePool,
 	if err != nil {
 		return result, err
 	}
-	//Checking for existing statefulset disksize
-	existingDisk := existing.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
-	r.logger.Info("The existing statefulset VolumeClaimTemplate disk size is: " + existingDisk)
-	r.logger.Info("The cluster definition nodePool disk size is: " + nodePool.DiskSize)
-	if existingDisk == nodePool.DiskSize {
-		r.logger.Info("The existing disk size " + existingDisk + " is same as passed in disk size " + nodePool.DiskSize)
-	} else {
-		//Removing statefulset while allowing pods to run
-		r.logger.Info("deleting statefulset while orphaning pods " + existing.Name)
-		opts := client.DeleteOptions{}
-		client.PropagationPolicy(metav1.DeletePropagationOrphan).ApplyToDelete(&opts)
-		if err := r.Delete(r.ctx, existing, &opts); err != nil {
-			r.logger.Info("failed to delete statefulset" + existing.Name)
-			return result, err
-		}
-		//Identifying the PVC per statefulset pod and patching the new size
-		for i := 0; i < int(*existing.Spec.Replicas); i++ {
-			clusterName := r.instance.Name
-			claimName := fmt.Sprintf("data-%s-%s-%d", clusterName, nodePool.Component, i)
-			r.logger.Info("The claimName identified as " + claimName)
-			var pvc corev1.PersistentVolumeClaim
-			nsn := types.NamespacedName{
-				Namespace: existing.Namespace,
-				Name:      claimName,
-			}
-			if err := r.Get(r.ctx, nsn, &pvc); err != nil {
-				r.logger.Info("failed to get pvc" + pvc.Name)
+
+	// Disk-size reconciliation only applies to PVC-backed node pools: a nil Persistence defaults to PVC, otherwise the PVC source must be set explicitly
+	if nodePool.Persistence == nil ||
+		nodePool.Persistence.PersistenceSource.PVC != nil {
+		existingDisk := existing.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
+		r.logger.Info("The existing statefulset VolumeClaimTemplate disk size is: " + existingDisk)
+		r.logger.Info("The cluster definition nodePool disk size is: " + nodePool.DiskSize)
+		if existingDisk == nodePool.DiskSize {
+			r.logger.Info("The existing disk size " + existingDisk + " is the same as the requested disk size " + nodePool.DiskSize)
+		} else {
+			// Removing the statefulset while allowing its pods to keep running
+			r.logger.Info("deleting statefulset while orphaning pods " + existing.Name)
+			opts := client.DeleteOptions{}
+			client.PropagationPolicy(metav1.DeletePropagationOrphan).ApplyToDelete(&opts)
+			if err := r.Delete(r.ctx, existing, &opts); err != nil {
+				r.logger.Info("failed to delete statefulset " + existing.Name)
 				return result, err
 			}
-			newDiskSize, err := resource.ParseQuantity(nodePool.DiskSize)
-			if err != nil {
-				r.logger.Info("failed to parse size " + nodePool.DiskSize)
-				return result, err
+			// Identifying the PVC of each statefulset pod and patching in the new size
+			for i := 0; i < int(*existing.Spec.Replicas); i++ {
+				clusterName := r.instance.Name
+				claimName := fmt.Sprintf("data-%s-%s-%d", clusterName, nodePool.Component, i)
+				r.logger.Info("The claimName identified as " + claimName)
+				var pvc corev1.PersistentVolumeClaim
+				nsn := types.NamespacedName{
+					Namespace: existing.Namespace,
+					Name:      claimName,
+				}
+				if err := r.Get(r.ctx, nsn, &pvc); err != nil {
+					r.logger.Info("failed to get pvc " + pvc.Name)
+					return result, err
+				}
+				newDiskSize, err := resource.ParseQuantity(nodePool.DiskSize)
+				if err != nil {
+					r.logger.Info("failed to parse size " + nodePool.DiskSize)
+					return result, err
+				}
+
+				pvc.Spec.Resources.Requests["storage"] = newDiskSize
+
+				if err := r.Update(r.ctx, &pvc); err != nil {
+					r.logger.Info("failed to resize statefulset pvc " + pvc.Name)
+					return result, err
+				}
 			}
-			pvc.Spec.Resources.Requests["storage"] = newDiskSize
-
-			if err := r.Update(r.ctx, &pvc); err != nil {
-				r.logger.Info("failed to resize statefulset pvc " + pvc.Name)
-				return result, err
-			}
 		}
 	}
-
 	// Now set the desired replicas to be the existing replicas
 	// This will allow the scaler reconciler to function correctly
 	sts.Spec.Replicas = existing.Spec.Replicas
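
For reference, the userguide change above pairs `emptyDir` persistence with the cluster-level `spec.general.drainDataNodes` setting. Below is a minimal sketch of a complete custom resource combining the two; the `apiVersion`, `kind`, metadata name, and `serviceName` values are assumptions based on the operator's usual sample manifests, not part of this patch:

```yaml
apiVersion: opensearch.opster.io/v1
kind: OpenSearchCluster
metadata:
  name: my-cluster # hypothetical cluster name
spec:
  general:
    serviceName: my-cluster
    # Recommended with emptyDir: drain shards off each pod before restart
    # or upgrade operations, since emptyDir data does not survive pod deletion.
    drainDataNodes: true
  nodePools:
  - component: masters
    replicas: 3
    diskSize: 30
    roles:
      - "data"
      - "master"
    persistence:
      emptyDir: {}
```

An emptyDir-backed pool has no VolumeClaimTemplates, so there is nothing to resize; that is why the reconciler change above runs the disk-size comparison only when persistence is unset (defaulting to a PVC) or explicitly PVC-backed.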