diff --git a/infrastructure/file-sharing/README.md b/infrastructure/file-sharing/README.md
index a1662bb68..7708dcf2f 100644
--- a/infrastructure/file-sharing/README.md
+++ b/infrastructure/file-sharing/README.md
@@ -15,11 +15,17 @@ We can significantly improve our Nextcloud server performance with memory cachin
 Having multiple Nextcloud server instances a memory caching is indispensable in order to prevent conflicts when same file is requested by different users at the same time.
 **Redis** is an excellent modern memcache to use for distributed caching, and as a key-value store for Transactional File Locking because it guarantees that cached objects are available for as long as they are needed.
-To run a Redis cluster we need the [KubeDB Operator](https://kubedb.com). We can install it with a bash script or Helm. To keep it quick’n’easy we’ll use their bash script for that:
+
+To run a Redis cluster we need the [KubeDB Operator](https://kubedb.com), which can be installed with Helm after obtaining a [licence for the community edition](https://license-issuer.appscode.com/):
+
 ```bash
-curl -fsSL https://github.com/kubedb/installer/raw/v0.13.0-rc.0/deploy/kubedb.sh | bash -s -- --namespace=kubedb
+helm repo add appscode https://charts.appscode.com/stable/
+helm repo update
+helm install --create-namespace --namespace kubedb kubedb appscode/kubedb \
+  --values=manifests/kubedb-values.yaml \
+  --version v2021.04.16 \
+  --set-file global.license=
 ```
-Here we decided to deploy the KubeDb Operator in a namespace called **kubedb**
 
 Then we install **redis** applying the [nextcloud-redis-cluster-manifest.yaml](manifests/nextcloud-redis-cluster-manifest.yaml):
 
 ```bash
diff --git a/infrastructure/file-sharing/manifests/kubedb-values.yaml b/infrastructure/file-sharing/manifests/kubedb-values.yaml
new file mode 100644
index 000000000..a7b1c7a16
--- /dev/null
+++ b/infrastructure/file-sharing/manifests/kubedb-values.yaml
@@ -0,0 +1,48 @@
+# Default values for kubedb.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+global:
+  # License for the product.
+  # Get a license by following the steps from [here](https://kubedb.com/docs/latest/setup/install/enterprise#get-a-trial-license).
+  # Example:
+  # helm install appscode/kubedb \
+  #   --set-file global.license=/path/to/license/file
+  # or
+  # helm install appscode/kubedb \
+  #   --set global.license=<license file content>
+  license: ""
+
+  # Docker registry used to pull KubeDB related images
+  registry: ""
+
+  # Docker registry fqdn used to pull KubeDB related images.
+  # Set this to use docker registry hosted at ${registryFQDN}/${registry}/${image}
+  registryFQDN: ""
+
+  # Specify an array of imagePullSecrets.
+  # Secrets must be manually created in the namespace.
+  #
+  # Example:
+  # helm template charts/kubedb \
+  #   --set global.imagePullSecrets[0].name=sec0 \
+  #   --set global.imagePullSecrets[1].name=sec1
+  imagePullSecrets: []
+
+  # Skip generating cleaner job YAML
+  skipCleaner: false
+
+kubedb-catalog:
+  # If enabled, installs the kubedb-catalog chart
+  enabled: true
+kubedb-community:
+  # If enabled, installs the kubedb-community chart
+  enabled: true
+  enableAnalytics: false
+kubedb-enterprise:
+  # If enabled, installs the kubedb-enterprise chart
+  enabled: false
+kubedb-autoscaler:
+  # If enabled, installs the kubedb-autoscaler chart
+  enabled: false
+
diff --git a/infrastructure/file-sharing/manifests/nextcloud-redis-cluster-manifest.yaml b/infrastructure/file-sharing/manifests/nextcloud-redis-cluster-manifest.yaml
index 4f69a9646..51e270c40 100644
--- a/infrastructure/file-sharing/manifests/nextcloud-redis-cluster-manifest.yaml
+++ b/infrastructure/file-sharing/manifests/nextcloud-redis-cluster-manifest.yaml
@@ -6,39 +6,69 @@ metadata:
     app.kubernetes.io/part-of: nextcloud
   name: redis-nextcloud
 spec:
-  version: "5.0.3-v1"
-  mode: Standalone
   cluster:
     master: 1
     replicas: 3
-  storage:
-    storageClassName: "rook-ceph-block"
-    accessModes:
-    - ReadWriteOnce
-    resources:
-      requests:
-        storage: 250Mi
+  configSecret:
+    name: redis-nextcloud
+  mode: Standalone
+  monitor:
+    agent: prometheus.io/operator
+    prometheus:
+      exporter:
+        port: 56790
+      serviceMonitor:
+        interval: 10s
+        labels:
+          app: kubedb
   podTemplate:
     spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - podAffinityTerm:
+              labelSelector:
+                matchLabels:
+                  app.kubernetes.io/instance: redis-nextcloud
+                  app.kubernetes.io/managed-by: kubedb.com
+                  app.kubernetes.io/name: redises.kubedb.com
+              namespaces:
+              - nextcloud
+              topologyKey: kubernetes.io/hostname
+            weight: 100
+          - podAffinityTerm:
+              labelSelector:
+                matchLabels:
+                  app.kubernetes.io/instance: redis-nextcloud
+                  app.kubernetes.io/managed-by: kubedb.com
+                  app.kubernetes.io/name: redises.kubedb.com
+              namespaces:
+              - nextcloud
+              topologyKey: failure-domain.beta.kubernetes.io/zone
+            weight: 50
       resources:
-        requests:
-          memory: "128Mi"
-          cpu: "250m"
        limits:
-          memory: "256Mi"
-          cpu: "500m"
-  serviceTemplate:
+          cpu: 500m
+          memory: 256Mi
+        requests:
+          cpu: 250m
+          memory: 128Mi
+      serviceAccountName: redis-nextcloud
+  replicas: 1
+  serviceTemplates:
+  - alias: primary
     spec:
-      type: ClusterIP
       ports:
       - name: http
        port: 9200
-        targetPort: http
-  terminationPolicy: WipeOut
-  updateStrategy:
-    type: RollingUpdate
-  monitor:
-    agent: prometheus.io/coreos-operator
-    prometheus:
-      namespace: monitoring
-      interval: 10s
+      type: ClusterIP
+  storage:
+    accessModes:
+    - ReadWriteOnce
+    resources:
+      requests:
+        storage: 250Mi
+    storageClassName: rook-ceph-block
+  storageType: Durable
+  terminationPolicy: Halt
+  version: 5.0.3-v1
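After the manifest is applied, it is worth confirming that KubeDB actually reconciled the Redis object before pointing Nextcloud at it. The commands below are a minimal sketch: they assume the custom resource is created in the `nextcloud` namespace and that KubeDB names the StatefulSet pods after the resource (e.g. `redis-nextcloud-0`); adjust the names to whatever `kubectl get pods -n nextcloud` actually reports.

```bash
# The Redis custom resource should eventually report a Ready status
kubectl get redis redis-nextcloud -n nextcloud

# KubeDB exposes the instance through a ClusterIP service of the same name,
# using the port defined in the primary serviceTemplate above
kubectl get svc redis-nextcloud -n nextcloud

# Smoke test against the first pod (pod name is an assumption, see above)
kubectl exec -n nextcloud redis-nextcloud-0 -- redis-cli ping
```

If the last command returns `PONG`, Nextcloud's `memcache.distributed` and `memcache.locking` settings can be pointed at the `redis-nextcloud` service for distributed caching and Transactional File Locking.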