From 374d5beaa4dbc96b9e17526b068bc36a188309e1 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Thu, 2 Nov 2023 07:08:56 +0100 Subject: [PATCH 01/58] Switch to internal load balancer and reorder --- google/multi-region/active-active/dns-lb.yaml | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/google/multi-region/active-active/dns-lb.yaml b/google/multi-region/active-active/dns-lb.yaml index 0578bcd2..8e56fc90 100644 --- a/google/multi-region/active-active/dns-lb.yaml +++ b/google/multi-region/active-active/dns-lb.yaml @@ -1,24 +1,22 @@ apiVersion: v1 kind: Service metadata: - annotations: - # TODO: Check whether AWS/Azure can use internal load balancers. Google - # can't, unfortunately. - # service.beta.kubernetes.io/aws-load-balancer-internal: "true" - # service.beta.kubernetes.io/azure-load-balancer-internal: "true" - # TODO Falko try this: - # cloud.google.com/load-balancer-type: "Internal" - labels: - k8s-app: kube-dns name: kube-dns-lb namespace: kube-system + labels: + k8s-app: kube-dns + annotations: + # see: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + networking.gke.io/load-balancer-type: "Internal" spec: + type: LoadBalancer + sessionAffinity: None + selector: + k8s-app: kube-dns ports: - name: dns port: 53 protocol: UDP targetPort: 53 - selector: - k8s-app: kube-dns - sessionAffinity: None - type: LoadBalancer From 0bb7847735e825206564d2229ff43aa7cdbaa288 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Thu, 2 Nov 2023 07:09:38 +0100 Subject: [PATCH 02/58] Remove dead code --- .../multi-region/active-active/setup-zeebe.py | 86 ++----------------- 1 file changed, 5 insertions(+), 81 deletions(-) diff --git a/google/multi-region/active-active/setup-zeebe.py b/google/multi-region/active-active/setup-zeebe.py index 9b1b71ac..83c13545 100755 --- a/google/multi-region/active-active/setup-zeebe.py +++ b/google/multi-region/active-active/setup-zeebe.py @@ -26,42 +26,15 @@ 'us-east1-b': 'gke_camunda-researchanddevelopment_us-east1-b_cdame-region-1', } -# Fill in the `regions` map with the zones and corresponding regions of your -# clusters. -# -# Setting regions is optional, but recommended, because it improves cockroach's -# ability to diversify data placement if you use more than one zone in the same -# region. If you aren't specifying regions, just leave the map empty. -# -# example: -# regions = { -# 'us-central1-a': 'us-central1', -# 'us-central1-b': 'us-central1', -# 'us-west1-b': 'us-west1', -# } -regions = { - 'europe-west4-b': 'europe-west4', - 'us-east1-b': 'us-east1', -} - -# Paths to directories in which to store certificates and generated YAML files. -certs_dir = './certs' -ca_key_dir = './my-safe-directory' +# Path to directory generated YAML files. generated_files_dir = './generated' -# Path to the cockroach binary on your local machine that you want to use -# generate certificates. Defaults to trying to find cockroach in your PATH. -cockroach_path = 'cockroach' - # ------------------------------------------------------------------------------ # First, do some basic input validation. 
if len(contexts) == 0: exit("must provide at least one Kubernetes cluster in the `contexts` map at the top of the script") -if len(regions) != 0 and len(regions) != len(contexts): - exit("regions not specified for all kubectl contexts (%d regions, %d contexts)" % (len(regions), len(contexts))) - for zone, context in contexts.items(): try: @@ -69,37 +42,15 @@ except: exit("unable to make basic API call using kubectl context '%s' for cluster in zone '%s'; please check if the context is correct and your Kubernetes cluster is working" % (context, zone)) -# Set up the necessary directories and certificates. Ignore errors because they may already exist. -try: - os.mkdir(certs_dir) -except OSError: - pass -try: - os.mkdir(ca_key_dir) -except OSError: - pass +# Set up the necessary directory. Ignore errors because they may already exist. try: os.mkdir(generated_files_dir) except OSError: pass -# check_call([cockroach_path, 'cert', 'create-ca', '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key']) -# check_call([cockroach_path, 'cert', 'create-client', 'root', '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key']) -# For each cluster, create secrets containing the node and client certificates. -# Note that we create the root client certificate in both the zone namespace -# and the default namespace so that it's easier for clients in the default -# namespace to use without additional steps. -# -# Also create a load balancer to each cluster's DNS pods. +# For each cluster, create a load balancer to its DNS pod. for zone, context in contexts.items(): - #check_call(['kubectl', 'create', 'namespace', zone, '--context', context]) - # check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.client.root', '--from-file', certs_dir, '--context', context]) - # check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.client.root', '--namespace', zone, '--from-file', certs_dir, '--context', context]) - # check_call([cockroach_path, 'cert', 'create-node', '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key', 'localhost', '127.0.0.1', 'cockroachdb-public', 'cockroachdb-public.default', 'cockroachdb-public.'+zone, 'cockroachdb-public.%s.svc.cluster.local' % (zone), '*.cockroachdb', '*.cockroachdb.'+zone, '*.cockroachdb.%s.svc.cluster.local' % (zone)]) - # check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.node', '--namespace', zone, '--from-file', certs_dir, '--context', context]) - # check_call('rm %s/node.*' % (certs_dir), shell=True) - check_call(['kubectl', 'apply', '-f', 'dns-lb.yaml', '--context', context]) # Set up each cluster to forward DNS requests for zone-scoped namespaces to the @@ -144,37 +95,10 @@ check_call(['kubectl', 'apply', '-f', config_filename, '--namespace', 'kube-system', '--context', context]) check_call(['kubectl', 'delete', 'pods', '-l', 'k8s-app=kube-dns', '--namespace', 'kube-system', '--context', context]) -# Create a cockroachdb-public service in the default namespace in each cluster. -# for zone, context in contexts.items(): -# yaml_file = '%s/external-name-svc-%s.yaml' % (generated_files_dir, zone) -# with open(yaml_file, 'w') as f: -# check_call(['sed', 's/YOUR_ZONE_HERE/%s/g' % (zone), 'external-name-svc.yaml'], stdout=f) -# check_call(['kubectl', 'apply', '-f', yaml_file, '--context', context]) - -# Generate the join string to be used. 
+# Generate ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS join_addrs = [] for zone in contexts: for i in range(3): join_addrs.append('camunda-zeebe-%d.camunda-zeebe.%s' % (i, zone)) join_str = ','.join(join_addrs) -print(join_str) - -# Create the cockroach resources in each cluster. -# for zone, context in contexts.items(): -# if zone in regions: -# locality = 'region=%s,zone=%s' % (regions[zone], zone) -# else: -# locality = 'zone=%s' % (zone) -# yaml_file = '%s/cockroachdb-statefulset-%s.yaml' % (generated_files_dir, zone) -# with open(yaml_file, 'w') as f: -# check_call(['sed', 's/JOINLIST/%s/g;s/LOCALITYLIST/%s/g' % (join_str, locality), 'cockroachdb-statefulset-secure.yaml'], stdout=f) -# check_call(['kubectl', 'apply', '-f', yaml_file, '--namespace', zone, '--context', context]) - -# Finally, initialize the cluster. -# print 'Sleeping 30 seconds before attempting to initialize cluster to give time for volumes to be created and pods started.' -# sleep(30) -# for zone, context in contexts.items(): -# check_call(['kubectl', 'create', '-f', 'cluster-init-secure.yaml', '--namespace', zone, '--context', context]) -# # We only need run the init command in one zone given that all the zones are -# # joined together as one cluster. -# break +print(join_str) \ No newline at end of file From b3fb685482ee33f7b353789209136079a51dce7c Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Wed, 13 Dec 2023 00:22:51 +0100 Subject: [PATCH 03/58] Sync and improve Makefiles --- .../active-active/region0/Makefile | 33 +++++++++---------- .../active-active/region1/Makefile | 31 ++++++++--------- 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index c37664dd..2877aaef 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -3,13 +3,14 @@ # Already have a Cluster? Set these values to point to your existing environment # Otherwise, these values will be used to create a new Cluster -#project ?= camunda-researchanddevelopment +# GCP project project ?= camunda-researchanddevelopment -#region ?= us-east1-b # see: https://cloud.withgoogle.com/region-picker/ -region ?= europe-west4-b -clusterName ?= cdame-region-0 - -machineType ?= n2-standard-8 +# GCP region (see: https://cloud.withgoogle.com/region-picker/) +region ?= us-east1 +# GKE cluster name +clusterName ?= falko-region-0 +# GCP machine type +machineType ?= n2-standard-2 minSize ?= 1 maxSize ?= 24 @@ -26,26 +27,22 @@ namespace ?= $(region) # Helm release name release ?= camunda # Helm chart coordinates for Camunda -chart ?= $(root)/../camunda-platform-helm/charts/camunda-platform - +chart ?= camunda/camunda-platform +# Helm chart values chartValues ?= camunda-values.yaml .PHONY: all all: use-kube namespace prepare-elastic-backup-key camunda external-urls -# 0 kube from aks.mk: Create Kubernetes cluster. (No aplication gateway required) -.PHONY: kube +.PHONY: kube # Create Kubernetes cluster. 
(No application gateway required)
 kube: kube-gke
 
-# 2 helm install camunda from camunda.mk
-
-# 3 Show external URLs
-.PHONY: external-urls
+.PHONY: external-urls # Show external URLs
 external-urls: external-urls-no-ingress
 
 ### <--- End of setup --->
 
-#: Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum
+.PHONY: fail-over-region1 # Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum
 fail-over-region1:
 	-kubectl create namespace $(namespace)-failover
 	-kubectl config set-context --current --namespace=$(namespace)-failover
@@ -59,7 +56,7 @@ fail-over-region1:
 # TODO connect to existing elastic in current region
 # TODO importers
 
-fail-back: use-kube
+fail-back: use-kube namespace prepare-elastic-backup-key
 	helm install --namespace $(region) $(release) $(chart) -f $(chartValues) --skip-crds \
 		--set global.installationType=failBack \
 		--set operate.enabled=false \
@@ -103,7 +100,7 @@ prepare-elastic-backup-key:
 .PHONY: prepare-elastic-backup-repo
 prepare-elastic-backup-repo:
 	kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "cdame-elasticsearch-backup", "base_path": "backups"}}'
-	
+
 .PHONY: operate-snapshot
 operate-snapshot:
 	kubectl exec $$(kubectl get pod --namespace $(namespace) --selector="app=camunda-platform,app.kubernetes.io/component=operate,app.kubernetes.io/instance=camunda,app.kubernetes.io/managed-by=Helm,app.kubernetes.io/name=operate,app.kubernetes.io/part-of=camunda-platform" --output jsonpath='{.items[0].metadata.name}') --namespace $(namespace) -c operate -- curl -i http://localhost:8080/actuator/backups -XPOST -H 'Content-Type: application/json' -d'{"backupId": 3}'
@@ -115,4 +112,4 @@ restore-operate-snapshot:
 	kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true
 	kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_4_of_6/_restore?wait_for_completion=true
 	kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_5_of_6/_restore?wait_for_completion=true
-	kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true
\ No newline at end of file
+	kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true
diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile
index 9de2ad14..3d2e8c6f 100644
--- a/google/multi-region/active-active/region1/Makefile
+++ b/google/multi-region/active-active/region1/Makefile
@@ -3,13 +3,14 @@
 # Already have a Cluster? 
Set these values to point to your existing environment # Otherwise, these values will be used to create a new Cluster -#project ?= camunda-researchanddevelopment +# GCP project project ?= camunda-researchanddevelopment -#region ?= us-east1-b # see: https://cloud.withgoogle.com/region-picker/ -region ?= us-east1-b -clusterName ?= cdame-region-1 - -machineType ?= n2-standard-8 +# GCP region (see: https://cloud.withgoogle.com/region-picker/) +region ?= europe-west1 +# GKE cluster name +clusterName ?= falko-region-1 +# GCP machine type +machineType ?= n2-standard-2 minSize ?= 1 maxSize ?= 24 @@ -26,27 +27,23 @@ namespace ?= $(region) # Helm release name release ?= camunda # Helm chart coordinates for Camunda -chart ?= $(root)/../camunda-platform-helm/charts/camunda-platform - +chart ?= camunda/camunda-platform +# Helm chart values chartValues ?= camunda-values.yaml .PHONY: all all: use-kube namespace prepare-elastic-backup-key camunda external-urls -# 0 kube from aks.mk: Create Kubernetes cluster. (No aplication gateway required) -.PHONY: kube +.PHONY: kube # Create Kubernetes cluster. (No aplication gateway required) kube: kube-gke -# 2 helm install camunda from camunda.mk - -# 3 Show external URLs -.PHONY: external-urls +.PHONY: external-urls # Show external URLs external-urls: external-urls-no-ingress ### <--- End of setup ---> -#: Create temporary brokers that impersonate half of the ones lost in region 0 to backfill and restore quorum -fail-over-region0: +.PHONY: fail-over-region0 # Create temporary brokers that impersonate half of the ones lost in region 0 to backfill and restore quorum +fail-over-region1: -kubectl create namespace $(namespace)-failover -kubectl config set-context --current --namespace=$(namespace)-failover helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ @@ -115,4 +112,4 @@ restore-operate-snapshot: kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_4_of_6/_restore?wait_for_completion=true kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_5_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true \ No newline at end of file + kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true From b2741a1cb95e26a3f032a888874bd13484bdd07c Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 12:37:38 +0100 Subject: [PATCH 04/58] Remove generated configmaps --- .../generated/dns-configmap-europe-west1.yaml | 8 ++++++++ .../active-active/generated/dns-configmap-us-east1.yaml | 8 ++++++++ 2 files changed, 16 insertions(+) create mode 100644 google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml create mode 100644 google/multi-region/active-active/generated/dns-configmap-us-east1.yaml diff --git 
a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml new file mode 100644 index 00000000..055bddd7 --- /dev/null +++ b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system +data: + stubDomains: | + {"us-east1.svc.cluster.local": ["10.142.0.113"], "us-east1-failover.svc.cluster.local": ["10.142.0.113"]} diff --git a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml new file mode 100644 index 00000000..ec18a7ab --- /dev/null +++ b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system +data: + stubDomains: | + {"europe-west1.svc.cluster.local": ["10.132.0.112"], "europe-west1-failover.svc.cluster.local": ["10.132.0.112"]} From 563ad1efcad08d9ca0ac9574ab78ae876a054e5b Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 12:42:27 +0100 Subject: [PATCH 05/58] kube-gke: Delete kubectl context and improve command order --- google/include/kubernetes-gke.mk | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/google/include/kubernetes-gke.mk b/google/include/kubernetes-gke.mk index f77b6d73..915c128b 100644 --- a/google/include/kubernetes-gke.mk +++ b/google/include/kubernetes-gke.mk @@ -22,17 +22,16 @@ kube-gke: --maintenance-window=4:00 \ --release-channel=regular \ --cluster-version=latest - gcloud container clusters list - kubectl apply -f $(root)/google/include/ssd-storageclass-gke.yaml - gcloud config set project $(project) + gcloud container clusters list --filter "name=$(clusterName)" --location $(region) --project $(project) gcloud container clusters get-credentials $(clusterName) --region $(region) + kubectl apply -f $(root)/google/include/ssd-storageclass-gke.yaml .PHONY: node-pool # create an additional Kubernetes node pool node-pool: gcloud beta container node-pools create "pool-c3-standard-8" \ --project $(project) \ - --cluster $(clusterName) \ --region $(region) \ + --cluster $(clusterName) \ --machine-type "c3-standard-8" \ --disk-type "pd-ssd" \ --spot \ @@ -56,7 +55,8 @@ clean-kube-gke: use-kube # -kubectl delete pvc --all @echo "Please check the console if all PVCs have been deleted: https://console.cloud.google.com/compute/disks?authuser=0&project=$(project)&supportedpurview=project" gcloud container clusters delete $(clusterName) --region $(region) --async --quiet - gcloud container clusters list + gcloud container clusters list --filter "name=$(clusterName)" --location $(region) --project $(project) + kubectl config delete-context gke_$(project)_$(region)_$(clusterName) .PHONY: use-kube use-kube: From a12a7e3fa914f8d012475d081046b4e675d24034 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 12:45:51 +0100 Subject: [PATCH 06/58] Add lots of sample output & firewall screenshot --- google/multi-region/active-active/README.md | 700 ++++++++++++++++-- .../active-active/firewall-rule.png | Bin 0 -> 178721 bytes 2 files changed, 657 insertions(+), 43 deletions(-) create mode 100644 google/multi-region/active-active/firewall-rule.png diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index 696a2e61..c1114333 100644 --- 
a/google/multi-region/active-active/README.md
+++ b/google/multi-region/active-active/README.md
@@ -24,6 +24,7 @@ A multi-region setup in Kubernetes really means a multi-cluster setup and that c
 * [Google Kubernetes Engine (GKE) Fleet Management](https://cloud.google.com/kubernetes-engine/docs/fleets-overview)
 * [Azure Kubernetes Fleet Manager](https://azure.microsoft.com/en-us/products/kubernetes-fleet-manager)
 * [SIG Multicluster](https://multicluster.sigs.k8s.io/guides/)
+* K8s service with external route for each broker
 * etc.
 
 ## Special Case: Dual-Region Active-Active
@@ -34,18 +35,29 @@ We are basing our dual-region active-active setup on standard Kubernetes feature
 
 ### Prepare installation
 
-You should clone this repository as well as the [second one](https://github.com/camunda-consulting/camunda-platform-helm) locally. This repository references the first one in the makefile of each region : https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/d9168169ffe368a817e67c8cd70217ace1071285/google/multi-region/active-active/region0/Makefile#L29. So depending on how you clone these repositories you may want to change that line.
+You should clone this repository locally.
 
-The installation configurations are available at the beginning of these makefiles (clustername, region, project, machine type, etc). For this example, we decided to name our namespaces as our regions for an easier readability. You may want to change this. In such a case and if you want to use setup-zeebe.py to configure kube-dns, this script should be updated accordingly.
+The installation configuration is available at the beginning of these Makefiles (cluster name, region, project, machine type, etc.). For this example, we decided to name our namespaces after our regions for easier readability. You may want to change this. In such a case, and if you want to use setup-zeebe.py to configure kube-dns, the script should be updated accordingly.
 
 #### Prepare Kubernetes Clusters
 
 Edit [region0/Makefile](region0/Makefile) and [region1/Makefile](region1/Makefile)
 and adjust `project`, `region`, and `clusterName`.
-We recommend to include `region-0` into the `clusterName`
-to abstract away from physical region names like `us-east1-b`.
+We recommend including `region-0`/`region-1` in the `clusterName`
+to abstract away from physical region names like `us-east1`/`europe-west1`.
 The physical region name will however be used as a Kubernetes namespace.
+
+<table>
+<tr>
+<th>
Using GNU MakeManual Commands
+ ```sh cd region0 make kube @@ -53,37 +65,240 @@ cd ../region1 make kube cd .. ``` + + +```sh +gcloud config set project camunda-researchanddevelopment +cd region0 +gcloud container clusters create falko-region-0 \ + --region us-east1 \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest +kubectl apply -f ../../../include/ssd-storageclass-gke.yaml +cd ../region1 +gcloud container clusters create falko-region-1 \ + --region europe-west1 \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest +kubectl apply -f ../../../include/ssd-storageclass-gke.yaml +cd .. +``` +
+ +
+Example Command Output + +```sh +$ cd region0 +$ make kube +gcloud config set project camunda-researchanddevelopment +Updated property [core/project]. +gcloud container clusters create falko-region-0 \ + --region us-east1 \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest +Note: The Pod address range limits the maximum size of the cluster. Please refer to https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr to learn how to optimize IP address allocation. +Creating cluster falko-region-0 in us-east1... Cluster is being health-checked (master is healthy)...done. +Created [https://container.googleapis.com/v1/projects/camunda-researchanddevelopment/zones/us-east1/clusters/falko-region-0]. +To inspect the contents of your cluster, go to: https://console.cloud.google.com/kubernetes/workload_/gcloud/us-east1/falko-region-0?project=camunda-researchanddevelopment +kubeconfig entry generated for falko-region-0. +NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS +falko-region-0 us-east1 1.28.3-gke.1203001 104.196.17.109 n2-standard-2 1.28.3-gke.1203001 3 RUNNING +gcloud container clusters list --filter "name=falko-region-0" --location us-east1 --project camunda-researchanddevelopment +NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS +falko-region-0 us-east1 1.28.3-gke.1203001 104.196.17.109 n2-standard-2 1.28.3-gke.1203001 3 RUNNING +gcloud container clusters get-credentials falko-region-0 --region us-east1 +Fetching cluster endpoint and auth data. +kubeconfig entry generated for falko-region-0. +kubectl apply -f /home/falko/git/camunda-8-helm-profiles/google/multi-region/active-active/region0/../../../../google/include/ssd-storageclass-gke.yaml +storageclass.storage.k8s.io/ssd created +$ cd ../region1 +$ make kube +gcloud config set project camunda-researchanddevelopment +Updated property [core/project]. +gcloud container clusters create falko-region-1 \ + --region europe-west1 \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest +Note: The Pod address range limits the maximum size of the cluster. Please refer to https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr to learn how to optimize IP address allocation. +Creating cluster falko-region-1 in europe-west1... Cluster is being health-checked (master is healthy)...done. +Created [https://container.googleapis.com/v1/projects/camunda-researchanddevelopment/zones/europe-west1/clusters/falko-region-1]. +To inspect the contents of your cluster, go to: https://console.cloud.google.com/kubernetes/workload_/gcloud/europe-west1/falko-region-1?project=camunda-researchanddevelopment +kubeconfig entry generated for falko-region-1. 
+NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS +falko-region-1 europe-west1 1.28.3-gke.1203001 35.241.249.94 n2-standard-2 1.28.3-gke.1203001 3 RUNNING +gcloud container clusters list --filter "name=falko-region-1" --location europe-west1 --project camunda-researchanddevelopment +NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS +falko-region-1 europe-west1 1.28.3-gke.1203001 35.241.249.94 n2-standard-2 1.28.3-gke.1203001 3 RUNNING +gcloud container clusters get-credentials falko-region-1 --region europe-west1 +Fetching cluster endpoint and auth data. +kubeconfig entry generated for falko-region-1. +kubectl apply -f /home/falko/git/camunda-8-helm-profiles/google/multi-region/active-active/region1/../../../../google/include/ssd-storageclass-gke.yaml +storageclass.storage.k8s.io/ssd created +$ cd .. +``` +
#### Configure Kube-dns

-Note : this step should not be executed if you plan to user another solution for cross cluster communication.
+Note: this step should not be executed if you plan to use another solution for cross-cluster communication.

 Edit the Python script [setup-zeebe.py](./setup-zeebe.py)
-and adjust the lists of `contexts` and `regions`.
+and adjust the list of `contexts` and the `number_of_zeebe_brokers_per_region`.
 To get the names of your kubectl "contexts" for each of your clusters, run:

 ```sh
 kubectl config get-contexts
 ```
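+
+With the two clusters created above, the top of the script would then look something like this (a sketch — your context names will differ, see the example output below):
+
+```python
+# kubectl context per region, as printed by `kubectl config get-contexts`
+contexts = {
+    'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
+    'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
+}
+
+# 4 matches the camunda-zeebe-0..3 pods per region used throughout this guide
+number_of_zeebe_brokers_per_region = 4
+```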
+Example Command Output + +```sh +$ kubectl config get-contexts +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* gke_camunda-researchanddevelopment_europe-west1_falko-region-1 gke_camunda-researc gke_camunda-researchanddevelopment_europe-west1_falko-region-1 + gke_camunda-researchanddevelopment_us-east1_falko-region-0 gke_camunda-researc gke_camunda-researchanddevelopment_us-east1_falko-region-0 +``` +
+ Then run that script to adjust the DNS configuration of both Kubernetes clusters so that they can resolve each others service names. ```sh ./setup-zeebe.py ``` -For troubleshooting, you can test the DNS connection as described in the [Kubernetes Documentation on Debugging DNS Resolution](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/) (you could also build [your own](https://github.com/wkruse/dnsutils-docker) [dnsutils image](https://github.com/docker-archive/dnsutils) if you can't pull one). + +
+Example Command Output + +```sh +$ ./setup-zeebe.py +No resources found in default namespace. +No resources found in default namespace. +service/kube-dns-lb created +service/kube-dns-lb created +Waiting for DNS load balancer IP in us-east1... +DNS endpoint for zone us-east1: 10.142.0.113 +Waiting for DNS load balancer IP in europe-west1... +DNS endpoint for zone europe-west1: 10.132.0.112 +{'europe-west1.svc.cluster.local': ['10.132.0.112'], 'europe-west1-failover.svc.cluster.local': ['10.132.0.112']} +Warning: resource configmaps/kube-dns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. +configmap/kube-dns configured +pod "kube-dns-54594c4b9-lxp4c" deleted +pod "kube-dns-54594c4b9-s8n6r" deleted +{'us-east1.svc.cluster.local': ['10.142.0.113'], 'us-east1-failover.svc.cluster.local': ['10.142.0.113']} +Warning: resource configmaps/kube-dns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. +configmap/kube-dns configured +pod "kube-dns-54594c4b9-94wdv" deleted +pod "kube-dns-54594c4b9-fzcfv" deleted +camunda-zeebe-0.camunda-zeebe.us-east1,camunda-zeebe-1.camunda-zeebe.us-east1,camunda-zeebe-2.camunda-zeebe.us-east1,camunda-zeebe-3.camunda-zeebe.us-east1,camunda-zeebe-0.camunda-zeebe.europe-west1,camunda-zeebe-1.camunda-zeebe.europe-west1,camunda-zeebe-2.camunda-zeebe.europe-west1,camunda-zeebe-3.camunda-zeebe.europe-west1 +``` +
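+
+Under the hood, the script exposes each cluster's kube-dns through an internal load balancer (see [dns-lb.yaml](./dns-lb.yaml)) and then points the other cluster at it via a `stubDomains` entry. For example, the configmap generated for the `europe-west1` cluster looks like this (the IP will differ in your setup):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-dns
+  namespace: kube-system
+data:
+  stubDomains: |
+    {"us-east1.svc.cluster.local": ["10.142.0.113"], "us-east1-failover.svc.cluster.local": ["10.142.0.113"]}
+```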
+ +For troubleshooting, you can test the DNS connection as described in the [Kubernetes Documentation on Debugging DNS Resolution](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/) (you could also build [your own](https://github.com/wkruse/dnsutils-docker) [dnsutils image](https://github.com/docker-archive/dnsutils) if you can't pull one). + +To roll back the changes made by the Python script [setup-zeebe.py](./setup-zeebe.py), you can adjust and run [teardown-zeebe.py](./teardown-zeebe.py): + +```sh +./teardown-zeebe.py +``` + +
+Example Command Output + +```sh +$ ./teardown-zeebe.py +namespace "us-east1" deleted +service "kube-dns-lb" deleted +configmap "kube-dns" deleted +pod "kube-dns-54594c4b9-9qzz2" deleted +pod "kube-dns-54594c4b9-k84z8" deleted +namespace "europe-west1" deleted +service "kube-dns-lb" deleted +configmap "kube-dns" deleted +pod "kube-dns-54594c4b9-g4h6c" deleted +pod "kube-dns-54594c4b9-n9hrw" deleted +``` +
#### Enabling Firewall rules -To allow communication between the zeebe nodes and from the zeebe nodes to the Elasticsearch, we need to authorize the traffic. -The rule should have the correct : -- Target tags : can be retrieved from VM Instance => Network tags -- IP ranges : can be retrieved from cluster detailed => Cluster Pod IPv4 range (default) -- Protocols and ports : tcp:26502 and tcp:9200 +To allow communication among the Zeebe brokers and from the Zeebe brokers to the Elasticsearch, we need to authorize the traffic with a [firewall rule](https://console.cloud.google.com/net-security/firewall-manager/firewall-policies/details/zeebe-between-clusters?project=camunda-researchanddevelopment). +The rule should have the correct: +- Target tags: can be retrieved from any of the + [Kubernetes nodes](https://console.cloud.google.com/kubernetes/clusters/details/us-east1/falko-region-0/nodes?project=camunda-researchanddevelopment): + [Node details](https://console.cloud.google.com/kubernetes/node/us-east1/falko-region-0/gke-falko-region-0-default-pool-281239b8-p89s/details?project=camunda-researchanddevelopment) + => [VM Instance](https://console.cloud.google.com/compute/instancesDetail/zones/us-east1-c/instances/gke-falko-region-0-default-pool-281239b8-p89s?project=camunda-researchanddevelopment) + => Network tags, e.g. `gke-falko-region-0-deefc726-node` +- IP ranges: can be retrieved from [cluster details](https://console.cloud.google.com/kubernetes/clusters/details/us-east1/falko-region-0/details?project=camunda-researchanddevelopment) => Cluster Pod IPv4 range (default), e.g. `10.64.0.0/14` +- Protocols and ports: TCP `9600,26501,26502,9300,9200` and UDP `26502,9300,9200` + +Firewall Rule + +#### Storage Bucket for Elasticsearch Backup + +You need to [create Google Cloud Storage Bucket](https://console.cloud.google.com/storage/create-bucket). We named ours `falko-elasticsearch-backup`. We created a regional one. + +You need to [set up a service account](https://console.cloud.google.com/iam-admin/serviceaccounts/create) that will be used by Elasticsearch to Backup. You should grant it the "Storage Admin" role to allow it to access the bucket. + +Download the JSON API key and save it in each region as `gcs_backup_key.json` #### Installing Camunda -Adjust `ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS` in [region0/camunda-values.yaml](region0/camunda-values.yaml) and [region1/camunda-values.yaml](region1/camunda-values.yaml) +Adjust `ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS` and `ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL` in [region0/camunda-values.yaml](region0/camunda-values.yaml) and [region1/camunda-values.yaml](region1/camunda-values.yaml) with the values printed by the Python script [setup-zeebe.py](./setup-zeebe.py), e.g. + +```yaml +zeebe: + ... + env: + ... + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502" + ... + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL + value: "http://elasticsearch-master-headless.europe-west1.svc.cluster.local:9200" + ... 
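+  # NOTE (assumption): region1/camunda-values.yaml mirrors this with the regions
+  # swapped, i.e. its ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL points at
+  # http://elasticsearch-master-headless.us-east1.svc.cluster.local:9200, so that
+  # each region also exports to the other region's Elasticsearch cluster.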
+``` +Then install Camunda using: + + + + + + + + + + +
Using GNU MakeManual Commands
 ```sh
 cd region0
 make
 cd ../region1
 make
 ```

-##### What happens behind the scenes?
+> Hint: If you don't want to run the installation fully automated with GNU Make, you can still use `make --dry-run` to generate commands for your configuration and then run them manually as shown on the right.
+
+</td>
+<td>

-`make` is going to run commands similar to the following output of `make --dry-run` for region 0:
+`make` is going to run commands similar to the following for region 0:

 ```sh
 gcloud config set project camunda-researchanddevelopment
-gcloud container clusters get-credentials cdame-region-0 --region europe-west4-b
-kubectl create namespace europe-west4-b
-kubectl config set-context --current --namespace=europe-west4-b
+gcloud container clusters get-credentials falko-region-0 --region us-east1
+kubectl create namespace us-east1
+kubectl config set-context --current --namespace=us-east1
 kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json
-helm install --namespace europe-west4-b camunda ../../../../../camunda-platform-helm/charts/camunda-platform -f camunda-values.yaml --skip-crds
+helm install --namespace us-east1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds
 ```

 and for region 1:

 ```sh
 gcloud config set project camunda-researchanddevelopment
-gcloud container clusters get-credentials cdame-region-1 --region europe-west1-b
-kubectl create namespace europe-west1-b
-kubectl config set-context --current --namespace=europe-west1-b
+gcloud container clusters get-credentials falko-region-1 --region europe-west1
+kubectl create namespace europe-west1
+kubectl config set-context --current --namespace=europe-west1
 kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json
-helm install --namespace europe-west1-b camunda ../../../../../camunda-platform-helm/charts/camunda-platform -f camunda-values.yaml --skip-crds
+helm install --namespace europe-west1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds
 ```

-If you don't want to use make you can also run the above commands
+If you don't want to use `make` you can also run the above commands manually or with some other automation tool.
+ +
+Example Command Output + +```sh +$ cd region0 +$ make +gcloud config set project camunda-researchanddevelopment +Updated property [core/project]. +gcloud container clusters get-credentials falko-region-0 --region us-east1 +Fetching cluster endpoint and auth data. +kubeconfig entry generated for falko-region-0. +kubectl create namespace us-east1 +namespace/us-east1 created +kubectl config set-context --current --namespace=us-east1 +Context "gke_camunda-researchanddevelopment_us-east1_falko-region-0" modified. +kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json +secret/gcs-backup-key created +Attempting to install camunda using chartValues: camunda-values.yaml +helm repo add camunda https://helm.camunda.io +"camunda" already exists with the same configuration, skipping +helm repo update camunda +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "camunda" chart repository +Update Complete. ⎈Happy Helming!⎈ +helm search repo camunda/camunda-platform +WARNING: Repo "prometheus-community" is corrupt or missing. Try 'helm repo update'. +WARNING: open /home/falko/.cache/helm/repository/prometheus-community-index.yaml: no such file or directory +WARNING: Repo "stable" is corrupt or missing. Try 'helm repo update'. +WARNING: open /home/falko/.cache/helm/repository/stable-index.yaml: no such file or directory +NAME CHART VERSION APP VERSION DESCRIPTION +camunda/camunda-platform 8.3.4 8.3.x Camunda 8 Self-Managed Helm charts. Camunda's p... +helm install --namespace us-east1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds +W1213 13:36:32.695588 47912 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" +W1213 13:36:32.695633 47912 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" +W1213 13:36:32.903716 47912 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" +NAME: camunda +LAST DEPLOYED: Wed Dec 13 13:36:24 2023 +NAMESPACE: us-east1 +STATUS: deployed +REVISION: 1 +NOTES: +# (camunda-platform - 8.3.4) + + ###### ### ## ## ## ## ## ## ######## ### +## ## ## ## ### ### ## ## ### ## ## ## ## ## +## ## ## #### #### ## ## #### ## ## ## ## ## +## ## ## ## ### ## ## ## ## ## ## ## ## ## ## +## ######### ## ## ## ## ## #### ## ## ######### +## ## ## ## ## ## ## ## ## ### ## ## ## ## + ###### ## ## ## ## ####### ## ## ######## ## ## + +################################################################### + +## Installed Services: + +- Zeebe: + - Enabled: true + - Docker Image used for Zeebe: camunda/zeebe:8.3.4 + - Zeebe Cluster Name: "camunda-zeebe" + - Prometheus ServiceMonitor Enabled: false +- Operate: + - Enabled: true + - Docker Image used for Operate: camunda/operate:8.3.4 +- Tasklist: + - Enabled: true + - Docker Image used for Tasklist: camunda/tasklist:8.3.4 +- Optimize: + - Enabled: false +- Connectors: + - Enabled: true + - Docker Image used for Connectors: camunda/connectors-bundle:8.3.2 +- Identity: + - Enabled: false +- Web Modeler: + - Enabled: false +- Elasticsearch: + - Enabled: true + - Elasticsearch URL: http://camunda-elasticsearch:9200 + +### Zeebe + +The Cluster itself is not exposed as a service which means that you can use `kubectl port-forward` to access the Zeebe cluster from outside Kubernetes: + +> kubectl port-forward svc/camunda-zeebe-gateway 
26500:26500 -n us-east1 + +Now you can connect your workers and clients to `localhost:26500` +### Connecting to Web apps + + +As part of the Helm charts, an ingress definition can be deployed, but you require to have an Ingress Controller for that Ingress to be Exposed. +In order to deploy the ingress manifest, set `.ingress.enabled` to `true`. Example: `operate.ingress.enabled=true` + +If you don't have an ingress controller you can use `kubectl port-forward` to access the deployed web application from outside the cluster: + + +Operate: +> kubectl port-forward svc/camunda-operate 8081:80 +Tasklist: +> kubectl port-forward svc/camunda-tasklist 8082:80 + +Connectors: +> kubectl port-forward svc/camunda-connectors 8088:8080 + + +Now you can point your browser to one of the service's login pages. Example: http://localhost:8081 for Operate. + +Default user and password: "demo/demo" + + +## Console config +- name: camunda + namespace: us-east1 + version: 8.3.4 + components: + + + - name: Operate + url: http:// + readiness: http://camunda-operate.us-east1:80/actuator/health/readiness + + + + - name: Tasklist + url: http:// + readiness: http://camunda-tasklist.us-east1:80/actuator/health/readiness + + - name: Zeebe Gateway + url: grpc:// + readiness: http://camunda-zeebe-gateway.us-east1:9600/actuator/health/readiness +To access operate: make port-operate, then browse to: http://localhost:8081 +To access tasklist: make port-tasklist, then browse to: http://localhost:8082 +To access inbound connectors: make port-connectors, then browse to: http://localhost:8084/inbound +To deploy to the cluster: make port-zeebe, then: zbctl status --address localhost:26500 --insecure +$ cd ../region1 +$ make +gcloud config set project camunda-researchanddevelopment +Updated property [core/project]. +gcloud container clusters get-credentials falko-region-1 --region europe-west1 +Fetching cluster endpoint and auth data. +kubeconfig entry generated for falko-region-1. +kubectl create namespace europe-west1 +namespace/europe-west1 created +kubectl config set-context --current --namespace=europe-west1 +Context "gke_camunda-researchanddevelopment_europe-west1_falko-region-1" modified. +kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json +secret/gcs-backup-key created +Attempting to install camunda using chartValues: camunda-values.yaml +helm repo add camunda https://helm.camunda.io +"camunda" already exists with the same configuration, skipping +helm repo update camunda +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "camunda" chart repository +Update Complete. ⎈Happy Helming!⎈ +helm search repo camunda/camunda-platform +WARNING: Repo "prometheus-community" is corrupt or missing. Try 'helm repo update'. +WARNING: open /home/falko/.cache/helm/repository/prometheus-community-index.yaml: no such file or directory +WARNING: Repo "stable" is corrupt or missing. Try 'helm repo update'. +WARNING: open /home/falko/.cache/helm/repository/stable-index.yaml: no such file or directory +NAME CHART VERSION APP VERSION DESCRIPTION +camunda/camunda-platform 8.3.4 8.3.x Camunda 8 Self-Managed Helm charts. Camunda's p... 
+helm install --namespace europe-west1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds +W1213 13:41:59.312024 49320 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" +W1213 13:41:59.312065 49320 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" +W1213 13:41:59.428526 49320 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" +NAME: camunda +LAST DEPLOYED: Wed Dec 13 13:41:54 2023 +NAMESPACE: europe-west1 +STATUS: deployed +REVISION: 1 +NOTES: +# (camunda-platform - 8.3.4) + + ###### ### ## ## ## ## ## ## ######## ### +## ## ## ## ### ### ## ## ### ## ## ## ## ## +## ## ## #### #### ## ## #### ## ## ## ## ## +## ## ## ## ### ## ## ## ## ## ## ## ## ## ## +## ######### ## ## ## ## ## #### ## ## ######### +## ## ## ## ## ## ## ## ## ### ## ## ## ## + ###### ## ## ## ## ####### ## ## ######## ## ## + +################################################################### + +## Installed Services: + +- Zeebe: + - Enabled: true + - Docker Image used for Zeebe: camunda/zeebe:8.3.4 + - Zeebe Cluster Name: "camunda-zeebe" + - Prometheus ServiceMonitor Enabled: false +- Operate: + - Enabled: true + - Docker Image used for Operate: camunda/operate:8.3.4 +- Tasklist: + - Enabled: true + - Docker Image used for Tasklist: camunda/tasklist:8.3.4 +- Optimize: + - Enabled: false +- Connectors: + - Enabled: true + - Docker Image used for Connectors: camunda/connectors-bundle:8.3.2 +- Identity: + - Enabled: false +- Web Modeler: + - Enabled: false +- Elasticsearch: + - Enabled: true + - Elasticsearch URL: http://camunda-elasticsearch:9200 + +### Zeebe + +The Cluster itself is not exposed as a service which means that you can use `kubectl port-forward` to access the Zeebe cluster from outside Kubernetes: + +> kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 -n europe-west1 + +Now you can connect your workers and clients to `localhost:26500` +### Connecting to Web apps + + +As part of the Helm charts, an ingress definition can be deployed, but you require to have an Ingress Controller for that Ingress to be Exposed. +In order to deploy the ingress manifest, set `.ingress.enabled` to `true`. Example: `operate.ingress.enabled=true` + +If you don't have an ingress controller you can use `kubectl port-forward` to access the deployed web application from outside the cluster: + + +Operate: +> kubectl port-forward svc/camunda-operate 8081:80 +Tasklist: +> kubectl port-forward svc/camunda-tasklist 8082:80 + +Connectors: +> kubectl port-forward svc/camunda-connectors 8088:8080 + + +Now you can point your browser to one of the service's login pages. Example: http://localhost:8081 for Operate. 
+ +Default user and password: "demo/demo" + + +## Console config +- name: camunda + namespace: europe-west1 + version: 8.3.4 + components: + + + - name: Operate + url: http:// + readiness: http://camunda-operate.europe-west1:80/actuator/health/readiness + + + + - name: Tasklist + url: http:// + readiness: http://camunda-tasklist.europe-west1:80/actuator/health/readiness + + - name: Zeebe Gateway + url: grpc:// + readiness: http://camunda-zeebe-gateway.europe-west1:9600/actuator/health/readiness +To access operate: make port-operate, then browse to: http://localhost:8081 +To access tasklist: make port-tasklist, then browse to: http://localhost:8082 +To access inbound connectors: make port-connectors, then browse to: http://localhost:8084/inbound +To deploy to the cluster: make port-zeebe, then: zbctl status --address localhost:26500 --insecure +``` +
#### Verification @@ -131,7 +614,7 @@ make zbctl-status The output should look something like this (Note how brokers alternate between two Kubernetes namespaces -`europe-west4-b` and `us-east1-b` that represent the physical regions, +`us-east1` and `europe-west1` that represent the physical regions, in which they are hosted.): ```sh @@ -140,49 +623,49 @@ Partitions count: 8 Replication factor: 4 Gateway version: 8.2.8 Brokers: - Broker 0 - camunda-zeebe-0.camunda-zeebe.europe-west4-b.svc:26501 + Broker 0 - camunda-zeebe-0.camunda-zeebe.us-east1.svc:26501 Version: 8.2.8 Partition 1 : Leader, Healthy Partition 6 : Leader, Healthy Partition 7 : Leader, Healthy Partition 8 : Leader, Healthy - Broker 1 - camunda-zeebe-0.camunda-zeebe.us-east1-b.svc:26501 + Broker 1 - camunda-zeebe-0.camunda-zeebe.europe-west1.svc:26501 Version: 8.2.8 Partition 1 : Follower, Healthy Partition 2 : Follower, Healthy Partition 7 : Follower, Healthy Partition 8 : Follower, Healthy - Broker 2 - camunda-zeebe-1.camunda-zeebe.europe-west4-b.svc:26501 + Broker 2 - camunda-zeebe-1.camunda-zeebe.us-east1.svc:26501 Version: 8.2.8 Partition 1 : Follower, Healthy Partition 2 : Leader, Healthy Partition 3 : Leader, Healthy Partition 8 : Follower, Healthy - Broker 3 - camunda-zeebe-1.camunda-zeebe.us-east1-b.svc:26501 + Broker 3 - camunda-zeebe-1.camunda-zeebe.europe-west1.svc:26501 Version: 8.2.8 Partition 1 : Follower, Healthy Partition 2 : Follower, Healthy Partition 3 : Follower, Healthy Partition 4 : Follower, Healthy - Broker 4 - camunda-zeebe-2.camunda-zeebe.europe-west4-b.svc:26501 + Broker 4 - camunda-zeebe-2.camunda-zeebe.us-east1.svc:26501 Version: 8.2.8 Partition 2 : Follower, Healthy Partition 3 : Follower, Healthy Partition 4 : Leader, Healthy Partition 5 : Leader, Healthy - Broker 5 - camunda-zeebe-1.camunda-zeebe.us-east1-b.svc:26501 + Broker 5 - camunda-zeebe-1.camunda-zeebe.europe-west1.svc:26501 Version: 8.2.8 Partition 3 : Follower, Healthy Partition 4 : Follower, Healthy Partition 5 : Follower, Healthy Partition 6 : Follower, Healthy - Broker 6 - camunda-zeebe-3.camunda-zeebe.europe-west4-b.svc:26501 + Broker 6 - camunda-zeebe-3.camunda-zeebe.us-east1.svc:26501 Version: 8.2.8 Partition 4 : Follower, Healthy Partition 5 : Follower, Healthy Partition 6 : Follower, Healthy Partition 7 : Follower, Healthy - Broker 7 - camunda-zeebe-3.camunda-zeebe.us-east1-b.svc:26501 + Broker 7 - camunda-zeebe-3.camunda-zeebe.europe-west1.svc:26501 Version: 8.2.8 Partition 5 : Follower, Healthy Partition 6 : Follower, Healthy @@ -198,7 +681,7 @@ Operate has a defect for now and if the zeebe brokers negotiation takes too long ##### Elasticsearch -Elastic doesn't support a dual-region active-active setup. You would need a tie breaker in a 3rd region : https://www.elastic.co/guide/en/elasticsearch/reference/current/high-availability-cluster-design-large-clusters.html#high-availability-cluster-design-two-zones +Elastic doesn't support a dual-region active-active setup. You would need a tie breaker in a 3rd region: https://www.elastic.co/guide/en/elasticsearch/reference/current/high-availability-cluster-design-large-clusters.html#high-availability-cluster-design-two-zones Cross-Cluster Replication is an Active-Passive setup that doesn't fit the current requirement. So the current approach is to have 2 ES clusters in each region with their own Operate,Tasklist, Optimize on top of it. In case of disaster (loosing a region), procedure would be to pause the exporters & then start the failOver. 
@@ -210,13 +693,21 @@ You can check the status of the Elasticsearch cluster using: make elastic-nodes ``` +
+Example Command Output + +```sh +``` +
+ + ### Disaster -In case of disaster, if a region is lost, attempting to start a process instance would lead to an exception : +In case of disaster, if a region is lost, attempting to start a process instance would lead to an exception: io.grpc.StatusRuntimeException: RESOURCE_EXHAUSTED: Expected to execute the command on one of the partitions, but all failed; there are no more partitions available to retry. Please try again. If the error persists contact your zeebe operator -the procedure would be to : +the procedure would be to: * start temporary nodes that will restore the quorum in the surviving region * restore disaster region * restore missing nodes in the disastered region (wihtout operate and tasklist) @@ -242,6 +733,14 @@ cd region0 make fail-over-region1 ``` +
+Example Command Output + +```sh +``` +
+ + If region1 survived, the command would be ```sh @@ -249,7 +748,14 @@ cd region1 make fail-over-region0 ``` -> :information_source: As a result, we have a working zeebe engine but the exporters are stucked because one ES target is not yet available. +
+Example Command Output + +```sh +``` +
+ +> :information_source: As a result, we have a working zeebe engine but the exporters are stuck because one ES target is not yet available. ##### restore missing nodes in the disastered region (failBack) @@ -259,6 +765,13 @@ cd region0 make fail-back ``` +
+Example Command Output + +```sh +``` +
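+
+For reference, `fail-back` boils down to a Helm install with the `failBack` installation type (a sketch derived from region0/Makefile, with `$REGION` standing in for the Make variable `$(region)`; further `--set` flags are omitted here because the Makefile excerpt above truncates them):
+
+```sh
+helm install --namespace $REGION camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds \
+  --set global.installationType=failBack \
+  --set operate.enabled=false
+```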
+ > :information_source: This will indeed create all the brokers. But half of them (the ones in the failOver) will not be started (start script is altered in the configmap). Operate and tasklist are not restarted on purpose to avoid touching ES indices. ##### pause exporters @@ -269,34 +782,69 @@ cd region0 make pause-exporters ``` +
+Example Command Output + +```sh +``` +
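+
+If you need to pause exporting without the Make target, here is a sketch of the underlying call — assuming `pause-exporters` wraps Zeebe's exporting management API on the gateway's monitoring port 9600:
+
+```sh
+kubectl --namespace us-east1 port-forward svc/camunda-zeebe-gateway 9600:9600 &
+curl -XPOST http://localhost:9600/actuator/exporting/pause
+```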
 ##### Take Operate/Tasklist snapshots

-A preriquisite is that ES is configured to take/restore snapshots (skip if that was already done) :
+A prerequisite is that ES is configured to take/restore snapshots (skip if that was already done):

 ```sh
 cd region0
 make prepare-elastic-backup-repo
 ```
+<details>
+Example Command Output + +```sh +``` +
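+
+Under the hood, `prepare-elastic-backup-repo` registers a GCS snapshot repository named `camunda_backup` in Elasticsearch, roughly as follows (see the Makefile; make sure the bucket name matches the bucket you created earlier):
+
+```sh
+kubectl exec elasticsearch-master-0 -n us-east1 -c elasticsearch -- \
+  curl -XPUT http://localhost:9200/_snapshot/camunda_backup \
+  -H 'Content-Type: application/json' \
+  -d'{"type": "gcs", "settings": {"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}'
+```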
+ We have paused exporters. We can safely take our applications snapshots. ```sh cd region0 make operate-snapshot ``` +
+Example Command Output + +```sh +``` +
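+
+The `operate-snapshot` target calls Operate's backup actuator endpoint; stripped of the pod lookup it does in the Makefile, it is essentially:
+
+```sh
+curl -i http://localhost:8080/actuator/backups -XPOST \
+  -H 'Content-Type: application/json' -d'{"backupId": 3}'
+```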
 ##### Restore Operate/Tasklist snapshots in the lost region

-A preriquisite is that ES is configured to take/restore snapshots (skip if that was already done) :
+A prerequisite is that ES is configured to take/restore snapshots (skip if that was already done):

 ```sh
 cd region1
 make prepare-elastic-backup-repo
 ```
+<details>
+Example Command Output + +```sh +``` +
+ We can restore our applications snapshots ```sh cd region1 make restore-operate-snapshot ``` +
+Example Command Output + +```sh +``` +
+ ##### resume exporters We now have our 2 regions with our 2 ES in the same state and we can resume exporters @@ -305,6 +853,13 @@ cd region0 make resume-exporters ``` +
+Example Command Output + +```sh +``` +
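+
+As with pausing, the underlying call is presumably the counterpart `resume` endpoint on the gateway's monitoring port:
+
+```sh
+curl -XPOST http://localhost:9600/actuator/exporting/resume
+```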
 ##### clean the temporary nodes (prepare transition to initial state)

 You can safely delete the temporary nodes from the surviving region as the quorum is guaranteed by the restored brokers in the disastered region.

 ```sh
 cd region0
 make clean-fail-over-region1
 ```
+<details>
+Example Command Output + +```sh +``` +
+ ##### restore the initial setup (back to normal) You now want to recreate the missing brokers in the disastered region. @@ -323,17 +885,69 @@ cd region1 make fail-back-to-normal ``` +
+Example Command Output + +```sh +``` +
+ > :information_source: This will change the startup script in the configmap and delete the considered pods (to force recreation). The pod deletion should be changed depending on your initial setup. -#### Some extra work before the magic appears : +### Deleting the GEK clusters -You need to [create Google Cloud Storage Bucket](https://console.cloud.google.com/storage/create-bucket). We named ours cdame-elasticsearch-backup. We created a regional one. + + + + + + + + + +
Using GNU MakeManual Commands
+ +```sh +cd region0 +make clean-kube +cd ../region1 +make clean-kube +cd .. +``` + + +```sh +cd region0 +gcloud config set project camunda-researchanddevelopment +gcloud container clusters get-credentials falko-region-0 --region us-east1 +echo "Please check the console if all PVCs have been deleted: https://console.cloud.google.com/compute/disks?authuser=0&project=camunda-researchanddevelopment&supportedpurview=project" +gcloud container clusters delete falko-region-0 --region us-east1 --async --quiet +gcloud container clusters list +cd ../region1 +gcloud config set project camunda-researchanddevelopment +gcloud container clusters get-credentials falko-region-1 --region europe-west1 +echo "Please check the console if all PVCs have been deleted: https://console.cloud.google.com/compute/disks?authuser=0&project=camunda-researchanddevelopment&supportedpurview=project" +gcloud container clusters delete falko-region-1 --region europe-west1 --async --quiet +gcloud container clusters list +cd .. +``` +
+</tr>
+</table>
+
+Example Command Output
+
+```sh
+```
+
-You need to [set up a service account](https://console.cloud.google.com/iam-admin/serviceaccounts/create) that will be used by Elasticsearch to Backup. You should grant it the "Storage Admin" role to allow it to access the bucket.
-Download the JSON API key and save it in each region as gcs_backup_key.json

 ## FAQ

 ### Broker names are the same in all the regions instead of incremental, e.g. there is a camunda-zeebe-0 in every region?

 These pod names are correct. Kubernetes numbers the pods of each stateful set starting from zero. The fully qualified names are still globally unique due to the different namespace names, e.g. camunda-zeebe-0.camunda-zeebe.**eastus**.svc and camunda-zeebe-0.camunda-zeebe.**centralus**.svc. What matters is that the node IDs of the brokers are unique, e.g. even numbers in centralus and odd numbers in eastus.
+
+### I am getting the error `/usr/bin/env: ‘python’: No such file or directory`?
+
+```sh
+sudo apt install python-is-python3
+```
\ No newline at end of file
diff --git a/google/multi-region/active-active/firewall-rule.png b/google/multi-region/active-active/firewall-rule.png
new file mode 100644
index 0000000000000000000000000000000000000000..34209f8a4c62359f35620bc9a07cf82a08f19abd
GIT binary patch
literal 178721
[binary patch data for firewall-rule.png omitted]
zE_O0==?;2I6U;R1ENpa#DDyMAa>>;3n3T#nW0vF&kmFWQwv^t;F0+;X0=r3u1$&T6 z1-u~<%YJb9tSDup0Eh+M)bx?iK+qs&<5y05I}+lnQ8o$qavYI z*mx`DJtv;r`gwaKb`q(gYR9<-PW*vs5y^L;p7(Y^m9jwk9>}==&0tH?bdN(%D9~)~ znPSiB_T!~jxZ%<_;{?8F5H~TfY=(|GU^?E!_xCXSNQ-$9H1$$!kyvU_q;bo$RdCWd zqDj_IF)?cgM-KPQL9Tu^wjg4G=g|lIx$qzZcw#eoe#_1e$~@@`_YrzV07z-(=$jqJ zEsb!oc=&mhz_`g&iff!{+xAO z(;FS)E;O(2FMXYu4)5ty?kU`*BjHm~V^W~f z&22%Ld0O{-_s77uEklXOT4fP1ui>RhEWWQJT1ZhjhCxi^Sw%KY7f1J%r-q+VH5kwy z5(!v;U$cvj(Y8JS$Yafg0B}$G2Dj)X)K#0QK*4y*Uj+cD-uKmQ>vpnd259h^jP|lJ ze#P?X6L%ztcIlEn?@oI32XNEtYS@>Y&++f1I;3{xk5igq=sVAiD<0YD_h}LDYod9v zQXDybFX?4R0_v^No84@bbnVK#g^el)i^W6OT8TO#A%ZTk&ivx$slh}gg8z}q!q*IV7S~OkgbhkBi zTg0!uU#9`Vx<+)cR&YdLqz5&ZE(8)^(ndUP`>6t-=GSl(u6r4=pP{qED=yXx^GszfWxl)aIUn}%&sy+C`yAxiOth+R*9wK0EpPr$ z1Bc@-l39~GuYLqh^<3a;>SzF9c5TNIlI;Rxi?kb_&#%|^++_aZK++8qX`m~J@;q;A z1!AtWyBm&58sruk0uRjPf_g7A7|S9E!WT;fQ>8#Kkn#2Vq%piJS&O;+{OFPo|+26Pf zL7i|CGB;B=WqP*_(Ikv?fycmea_JSM!S|Q6T-nye?a&QT`Yaflhu9(%ML~Sp1Ku|# zHMGLhHszMkI&~L!tP`l@M>4QV6L6hvis#E|R&!d7Gi#qLX93dGas#d6?A}F zaF0Rz$iEBYC8F3pSb3L()GbDLP8Yt0>QOVQao@TXXq=x6i0e1zdTF&{S|`^dXW@UJ z3?v#jgm|6|7ur+y-y%I-y8l&wjsIm$qNc%T)ICRn^hL;y@u<&;+$&;GqKZz(oCoD& zruoVYxp4CO1HUg0$(s?O%ms>44D{w?UMvcgz`EIv8f}|*cUSPjlSXGoB~)<1GgY|pThNfeo_F2O9`V>UrC655>oo0nQSIJ$@$usoo`5^yO5n$T?W!A z6fZr}KC^_1bAWw%a`#bq9DT9KhOk;=40(>Z4xwZO8AzZ8eH^52vZ%f10>u|*xNM;F z*?({WF6=yf)lWoUwHDgXqTzr8QLmnO%mTHg&yPnQ8lRCt#li%bA7G&uUFR`gbtY5S z95#kGB{L8BWctem&oXF@3`hesBZUa22-i9IW6Kh7&ti}#DE>7jBJ}g^Uv$Wz?n|<+ zB_|$|uZUP6KWM0qhFvvz0exkj1Ag)0<0YUSO8Ni_BMwHUqz;Kc;~lLaZl&>z)oRbdkJ*UbW& zhydk@7JtKdVIl>!Xty;$8fJ9ykzE)p*JjADgfPc?#vnhq_0u*5+QX+z^QBoZ)Kjml64|hkC&p4vhaLbHZaLai9 z!*?WXD(?6W&rX0>9GKUuh*`AA>9y0TMx-kvCJyi#O~iXVnS>lhv~VMxSd6%)h>lg_ z(R(j&&oh+lAP!_imSf^z)Muez=D_3Zb ziXMc2=Y=SW!5_if0Jr67im|ArqgDBa7}H+vmb~l6^Gc9MZheLiD?q(}WcPToC&$FM zxQ7NGyzLP@cNg%)V@E=%dUpkF6H@-|7Gws~7E3;`cQWRc{$I8afMLRK9gt&#a*yr4 zmc5Sw)ww&$Xs7ra6+G-na78#Tlj*w!`^E?n80@)R!Q0@wf6kp($#?K$z$iOc<)?=9 z?cyanE6ro$R-2FoN;7DWI1J`xGFwt4T*!pTN=<#w6TMcwcOXw*f*+>&fud@m)Oki6fG@LSmBm#GuSR2exzfO-8>^>?hiEC|1u;R<#zm< zsD8fi=9&DR4`NRULW5Xp)U2T0Ffn`#Xr&)#f}_s*3l6RpHDPPI z@L|XY5tKkgq7S9c;2K{j!YWt>oM*M!R>zU=p`&H{s zMxgnLQIs^;w@&57dW)JFd&09W@U-*rmg5X{1*%_{slU5%2WTEHf=hsMsTy_beR~Ui zeXao{t#TD!^lRLs{AFiq7?Vq6#5?a&8CluOr547&9uzs!9P7cf>FG>u+hIUg?T^GC z+cTBr_MomRlRXI{c+=&=c@y5@nInk85Vv(riq_4K`wmw0ounG zKfHWeBnJYv1zt)U+nX;B-N8V~mr`|?04;1@VV>D*$Swb1%f+13$G%Oa;3})`EA_NP z=2@0p5tnIubGRh3vm9#XgZfgp{IiP^_hY#yo2Jw5CG_kii9E^fI9t28ul5MR{$3SP z{ag8rRVzIo_4Y{XuH`n&4ycS5^@?>pYT~4lkDO@uNdcsJpmgYzIqhUkBIh`6BZC(H z6PEAUg*ea0Da>)V_me)?e+OkGnTtrYef+(OPNeV)VwW?rC$UHm3ViW(BUp*^_qvjk z%~-R@5~8A$%1uO1aOfpprY;ES`v|7@#_=*}9HO(xR zX|tu&;nhgi+ymkv-NyC=E|bpBXDv?4dp8`f8LrAxd7}^T!o4xu0qV``3tDuFgD*>^ z4%RBRz6w?yI$FF`)v%y{Y-`A4hKK18?+(22jrNfxEC+wDp5vfk6c}&XV_l+Od}F$` z#D|HGsh0$u)xiTl@}GYb*!{)q%{@o+8F~}j4ZBs#lPW_q%MX!0$L+p)hfI0(49`lK zE@l$64Url(<16WY*~~cb*oA0h)g<8PReZ5*G(eSlOg(L~DV0(}%$*Im`uX}XUP-H9I--h5s_Fw53ixHO>YSCXeD*Audr;9{ zkwPzDU?!23N^5u;BUew?-mDxOc97~()fBAoPI4&vV!hg3QO$DgG>l)dgYw2l;5}XV zxp6`4){>c71Y`q?XM!LqwW_??$uR5=2<76GA?_@QS?Mz9S{sAid*6JyaMmg?ZQ7)8 z^m7#X83UsLI36opJCJx(+E5U)^VLzqYtwHeo{G#PxKKqd$%&1Iq0k$0Un!?MX7CO#qy1%-ZaVVRwhcyyxp z)sUb@J&7k2-7$MOj!E0-HFl%?^BW!mvG0pcVm7JFX{N0!O((W0yq)%Rd^hr=`6+J@ zvs%ROw|TCCqGyg4XMdLr8lf1-U3zvbu7zG~12)FqW$YJy8b<`};PZq~p&6B7Px&6Y zBR69?WAtK#zsZR#p2QR_T{?CRSJBU(H@8bSb8jMmGcO+JB}CcDDVYdoC2{?tXzPEG zT~Jxz?ZWI;df<-RIdD(g`xnG5soN?rN4IrMT}%zN7y1mvnvh7@CRi)ri8yXZ;IZZ6 zHIDsF!P|$211Q_E5O~JN8#?j3N#zU=5n(1cZL;9qe$wKJqKyR?`Q*jChF%UXlV`4{ z%+P*AZfN*Z7}n~eU?7z;;4j%fIJQ6K9dxPmn16hULD 
z)AExdCba|FDHy>RysMcVheG<%*TYTcv)7B}`Ho7Yq5@Pw%PAf}W1sCoQhzS_LgxeJfF? zJ0FbCQ13GvB#kVH`pA}Tj8M62E*o>L`GHyAV(_otgrd=+coHnBHc^jZwwT=6Kn;em zIRP7;_`MLvNOsZBuWzp#TYP-N(v%^MysT3LebaBRdXtDdhb}uP+;uyZPExC>rT0^vkr-S`VUH-ymeh?G`oTYQ*9u@@p@MeUvafOkI4J?$FClv zD?CWH68l!rFmUPEK3US#=P+3xvKx6b;d=4lS)B}KCoh*x8(ErUdoFrD&+C6}FR8a# zg~e#Gd^xQ8`a;-z{gp9G;$!tn~ zBwE~Jv!I5uh1~P&+sZD8YRL}W7Y7VG#WLEWCSH_|i)+Up%PsU$Q)V1kFVcl}xAXJJldx9`OK#bGcAGqR=$Gx3V~ho<-`#{}P7b}xrlso8GZWF!dR)u13*BP5DjenM zjr%9hno~I6iRza)n8%oo{dn}${VKWDG*;(cjiE-~Qe;bxZ3LZlO1m@z9fLq_m-k&r zU}7)L;&^XIk%;_Tqs1sfgbnPzays zwdxNrgdhfcL*JP&!6EkC-o-(O`qf2vTfKM`-asUFw${%6jv|Oa6{TJf4QU~gfe&r; z?%THHHo+wJ6wf-SJoe}i3Cdq9swrkg#GvSXw`h$EJ2NDHCv2cFq)i7rb!fH9u*;niDPPh=X$S`_$zC*qSN;wr?R^R8_Bb58;u5{jxc!tK zDG$0&Yy8G7k2P^F>Ai_G^fBBv3Igis+V?@n7!$+7>h^+PzV?fM)lg$fM2M$9W~roK zh(#@PLq?cbyn}rCyGyjj<_NjX9b+jb-o+OuCL|tT(!2SaL-vAv82gaet)>odEIyr_ zv}pAb?w5PpV0KCBsn^dB%sgMB1j}~$%1(;UY}#^DTA+Ee`dj?RZ8N6Y0H)BK^brDpG58+^+3| z6ZOSP?(GALXA*YEvtV)>c)cG?!z_W12&S!PS*ZLjPl%~|)6B}<+RU>15`mf1we6f# zOQp^Jc|=P0G?o9u%tF-7bc7|->8_vaQ{(Gk5DdTbr|AP~Q3}mUm!DDe-Ez(ax(-%M z`HQ>SxK`7RSdI}pbCPizR)wO$Vrjnr!P~I%PL=+WS|`s6L2V1V5Xn^i+UGt3?rj6` zSnd*qRIs178|xWJ9W9+Qf5?-z>8n;vxWJHUcnAIK-f*v%Od!QCvgWw`B6#P6H$^(4TG6AAnu{ zy04;zvVA+a;bteBWDoX-+!cJ|Oo=Gksa65jt5 zTGCpvOq5gm_LC^(&lC3o)u1m~ZU52ZC&~HGKAjK65;v|bI({ip8XhC9oxbE||a`>Ioe zTl({JT>EqH&J!U$$YB+lX?#U*8lqC^jl~cIZ)U?JR&1?(>1U3zh+p3!Q&9FYadhPj zNu9r7c|UZAjkv%}W0`OUzk+t%`~a}0u{##_aj;auTr zlZ6ZqGj+3Vt+q#bmd}QJ^20oVc8enHAN!GJvc2=tefMy7tR+^e?_*ST%bV0=FX z2k}Pw4#30^8@)a6Mawouos-!`?3=jFR+=h(Yq0V*E~N1UMvUc&N<~GV#}XVasrnu9 z^X*fe-TV<}_h?*F4lev5$Nkms4lVUUp|k6LrB_9NL{clwU23g_r%%VxB9+a%1+6uf zpS(FCw;->cOa5}b#Ko~y+agORylm3h#++0#AL4WCW%8ni(>-`dY*H)4XJ$X^h+v{Oh>F#oJLDl;i`?TO!pym$`& zc2SF;tj3ZetJ}90oGsrV>61ch+|?hixjP(}V2UXqkT_khO3n5=DU!BL7Q8ww^XBO@ zy?MQO;X}7Oo)VTi^Db07_ef1LTR*R8o6jjt*uGNKzHf)EW_^vMtlXUJ)>8H_YByJ& zfl4nKuJMKX+%L{ zBrQTvKoJK}5J5U5-)r+f=RE3pKECf-?}zh+#ac6a_P+OB*L_{Tcs(yvTFl%xDU%i& ze#J(9#$n_dEaIecfHMg^?kxK?%r+!Y-9&F~ScrqnfkayA(C}7%?YtqHvW+t9lx+A` z0gLXc%*LB3YjJ~GN8M{!&6x3L`R8jd)cSUaz|Qk@Ojts<)hveNQdLh*W0=jx=kfX_ ztMmUI)eAja2ox=}2>cRAKi(OFGNRGTr+D;+E#MJXd!0w^m&Q`^mN9I9rOb)5?=6D# zZqa&)Lv$}g8*IUx-9?6@u&CrCONx4r7ER3n_!dq=I+C6g`wparbXi(zvj9^ zBxvPX)reE?P#9wwy+0Oho+?3|zZY)CNl-X*|EO$t<@e5%aJtR5C`wHpt+)K;cXyWp z6KZEYGD_tNaM@@@I*MSS7sX7pqUbxIo`Ik}6{p`_Fyrg32I?VIUT}mOYA<8{U~!}I z8%{e~a5%H&PfQJH>fE@>L#C*4gvWiM;j+1m`Q}>pd8J=~{}GTjp>2Vy6RT zRi^#R-1Bvsx~nGEdHi~+9$Y19Ca&~#*Ujje`03cY>hyWxVA^PHg?rvl zCM?ukOLcn6f<93VrfuG&_($l_nKq~nDs{dU%a{}wQpZu;D^c<2!VT7D_jfvW^mv}< z`HQ**Ns>YO=j6YJlEn#LO0l^ySe#Og>(afl+n(Bc(bTr8gNp2S-G}Ncm2cd|#0$|@ z3pg!uaq;rhn7ifzM_4;=T1yE?Gp(vgbg^s9c;P2D0Rd@ySr$!e`l)yrGzRT|UM=vO z`w{!0hu@HeNDoC%W`61wdoW=zanFN19Px)Qx8F#65BJQEFF-N$v{6v;607*2o+ZLA z+z3GH=SheRG8KMH_JNGRRn%bBNA5?LdBm1q7ue|i>?eutxd)N?vE$D6mna{<5%Ja; zbx?cja@ogJ>Z*Tse&)%v1fJald&7Y!y{LK?VQ#&yq-^JCZ9SKJ+Q*V7wqJ_$7%t6< z?lk3U@fmTNCq{&y5NPA~;L~ZyUD!&Q^4(N!ZlFu9E)uyfZ;m5gCtbhnvyN{HH+UwP zE>WE{ro}|Fa{(6@VHnAwMl&Wl(nUq9cZ6O6o0FvPf_%q*RIWkS zBYY}(A==!NppZI6jV80!q+>C+;vOgbnwiQg(Bmw6+H}FVb`5#^r;l)RB{EZ63uqnZ zK9YDy-IPYcKYbaKDU(NGeu`TxRa;S<5G!bG5M;BI3PC${V=tq2OWYAOWd*KF9PiWd zioTh^`&UjtW1ytBR;bjk`xP3dB^wudUY!tisp-@aD2ZVe)&3PhuM~bn3I_0l%$MTG70ttK8-X9jn?VBj*(pO~|7(o($_A zDiu_?ex&Dc)>Aat7GRXaQVmV7dx!Pg#4N1wF`eL5xPI_)&++@07?O#ju3mf~(-Cu+ zQNt)JRbk9LUBk(a|I}8D=nXCw2qAw+|Oto-ko1LLZhO0HKIe^(UzG_ z@=$X!OIMUs$9yZ(Su@dbo>fxGh^?qXB9s!H-$rKJ{E&;?Y6!nVub_Xpn*lCtEOpx- zucVT8Nia$0qYTR;ZSbivuq|O|fKrRn=P9)Kjd+uisUYq^uU5ZcLo(i)B8QW&Vw8UYw9cMBuDeHL2uZVv4S}%rBpg&|e>0+lTh#G{Ak`?=~O! 
z1hTG#yrtfR7>2@If%N`*ek9hP$Q|-+?L}lYWNk%Go~&1#c|F+u!Z9>vlO|t_PEL!Q z2am-zJ|s%SY9(>@zarARq+nZeK`^1t6+yE*6e{NKPU`S()1&m|4m*<%Smo#hBuad* zIzCsh$cvruoS8Gc9Xw`+&Q9cPIi6uc@sr0xu(&SPUVQQTKRiZ>4L}wg$X-lvQN`O` zA8n=NJ@w$1CE2j4Fp0J+Mcw$!1&3F9QAq`;v%e}a=VKg}5_z7#ZmZPJ$s{R^$K8o2 zT0lJ=BBw2i;K<|twKX8p`%)utD^LzSn}+2fN@mZd++8}J$Fl~I19Ni2e-Y8CQvz`> zR46=#e}SICyoB}4aMUcIKjXx2F}XXOO=RZBPz#Bq*X6Kpe|E1UUBVW9Im08gdyrjZ znsKqU?uNdX+z+mE^L|7F2Dwg0G-&BHg=w_x6ldf>5VD=Nc8MW}Kq@pwxT^EH8kTd_ zQ!D5kkf>i$btLXX;8b2Abkrl?dz*4_FM8MFpa0E{zf9tz<7N9p#Pn7_iy!eVma}Wa zYQ=Yp=R_5#R~|3;{!WoW0Etdm6$1)dr222uWY0_I!tqL}hskV)8Nd_F268lieSK6l z=Mnf14p!bW7mYpN37CkVFj zt@;yhuazT7q$sXX@nGNfwFnUu@oTU&!JW7=B;i@>G`qX#^jAl~L_ULDa$+I?z7Uo% z^(VL#xqF9T?=nu^L;lV70!U>{OTd139ZFxF@t+29<^RPp*2<}cc7t=o?bF_Wdvd_d z!jM*n*pUC%W3=Re%qFaHHTPao{e{tlXFz^&o8`Zs?LaKjqfGI>VCf`6l%K};(KU`! zQO;ipm}dJORbSGcGkVS1{C5B%(kLCxoGFm;*#^KZwRF#)emKZWjWNi%6uQr)LSEvT zU)xZvrb6_1748Z}Aj7rj2X%Y$?)=`axW{E6YpGY>Ueo^jCO$DLJ(RR%OUXF(5-lH& zzuPRt3)vhwYq0#9_<)n#Z*+?x&jKi_?29Vn0q;e=x6!GL=rgk++9(xZ>_eM+Y=rjT zzcfQP?TxoX#4fgl?U^zr4ZsjBDq;B_eX(g~B1HWY(bmOLik8wLi%%Tw4+I~FDAhIl zBS>32z+hc@1`a3y5JC`LJ<6ab{Q}WJ-8&;W10WWm-7Q3v*ano5?_GUx$Y2{ZRd9%y z9#WBAc)+zIBnIj#R?@ElPJHUKF5)8%o+rl-46MVH5k(*Ooh|(yJc<`(}sex^8i-FK6#5AW9d~J^N@jKvGG5mNZaYHK_+; z#|xkdup@TTVZbVQRhhGiKwns_-1~|@Y_B)(aUf{*1@OfP2QgJ%ZW4VEE44#(V-c!m zqrUWQ7`P9dHW2YS#N9f+57e#Mg%+~7Y^;QC(Rd-KFhDob?MsXT_)(ry5dz?F2ynzG zQ#GcDiRU_Pv*MMT)}PO}3uwk2Im`m|kk%`2FYD$MAj{M13(u+ zO{Eh6OMa91-~dZ7aUDP((E-%AwmvlPGKYpiJ4?onT|($M@2oB@`41SKCKU)`Wt>Lv zSo$w3Ll0TY>QjoIDFvyT+3SEMdhgnhw6FlGLb8qEbu8Ue#-|{0xEX|THs7lA{en@k zwc89Y*v$8XhM0;IZl2ZtC77Pmbw>HkUACbL=k#%6LM^Q!<@4>u_<+)pZwMq2p5By}*k0xyPF?19Pt$tJf{JRFq(jFFbJ!$* zLE&`5)ldZ3Iq&;cqtA7PP5oCL#*6jk=gMoy{FF1W6{4U;WI*dT#-m?=U^D%bXBTk; z0LF958b=T@=5v>f!Xl>(YY}O2P3a0ucBbQ_-IJmU*UJDpB!d{u1a%*$I*;&5u1p}> z)>E0D*7wt{Rdow28hl2;ltY51+mR;=F9Ix6_x|K1S43oqT$o8w&@wlPr>pX1c8n0M zz$d*HZ*gFmluf+cLy(aO#Ir4eWEQ;84~qwXh2s(}t>TJd47vUN(#P)v^Nt}-$_qd` z=(s5>b2&o6*QRpbcS;_SgQ2q7nK63-j9*rSx}q{bwqpTyIyhibD1JFN4W8tYi8>p$ zh^HDNbG$;aPOuyd7)C)&Z|($%ho+}3sl5meo`76pg2d!BO)HHsM1+rq z_>omXJ)*^sfzy2TIp9twNahbUZ)m&{f)Xb6*M#T2P%r-QMD%d-3Sxh*??Hc{;$mp{ zi6TuBUXDwkm%YQ9Z$gQoi^#k1Rq=A^k}^PL3F(2XTqs%e8El!y@ezmeZ!uLO;^szl zb}0Rnj=N1O1^$l;Ru1;lBmCZ`9<6g)c?4>&Kpgtryh6XwzH-v>{yRx@Y6E1#mgL9B zw!g_BR!PULWni3cr>&{l*rs4IqhcQQOQPi}u1C-vxaWI;3B-@XacbpLfR^4L)&JHSR!NA%Y_<-Y>YiyTpvWYT&Io^nl2fr$5D)QNl{Zkovl;`N3)I~zYe zo7qnSbIr>YC8l+})qkxzI~4S`JR48KLeY*WhHotEB(i99q$&!;R+`S;1!eUuJfK61 z*&_R$?q$k>DU9)?>l0>Zs-cm|5PEaXmwVh|*BT_WJ7Bk}1hCn!E-P*~|3G!v7UdDZdC3MR_S!=sB zKO<9_T>etO?ij0l$;==n2QGFigw2d8?!H~vVS8Dn(2X?Ki5IgjGIMmr@n1Ip zTuuTm586-}HcRi6#g@1q&*$Fc!-JH**1sc;(!R5wN^ z)5Cg$fk&x*CJtcbC;iFn&Y&rXSZ`S*k(*8KLaw+3rQr{x0lF^rzwChW_`%w)%9t-_8 zZ3t{uHV9vEih{y9JQBD_;uSuTl8FwfC%+akK6`@>kJJ%aL8!~2h-AD?bu1;VUOmO& z#V52Kq1*{?DyMF4x*=xm9$#>6e9T;vdwN6ZSD0!`7Xr(_WQ#TUym_hQ=a3hiNJOw~ zLXZ&2%9!Ho7gLjGRirF|VDmZ6BG~CUwbWZYi{`c!Ri+gg@xF0>fKhOwOJI0%WsDA! zMoyDo=p>3In<0}>`WYY)4A!^=xdZ_>eWkEZL67d#?p8&pK$%k0)$BkegI_ zL)q;bw~9ke&wBditqc-51S!opAV2gFr%s0dh&V9^on+vS3c|TGE=b{RE~K8dZzDdZ ziK$4OCMb&w^dBR8z4z1=QR&sru(Qy!Dj+*Iw; zh&muu8Av97Rg$N(EB#T9H9a7Hj`TQ2-h>tQrR*;~HEMg>^F!Kn$AR#sdDgB_It_TKC|1D>0LX!fCz>yKAD_LRY-$=_<< zTZ(E9nMgI+x(zKBr^{a+mY05j2RcnB8;)`{w3_0sY@Vtxz`t_Tu zA73q}%W?b4&=W*v@Z<&06RlG$~b>MD4<>5aUjiXb{gb&(x8sPq~U4Z8|0<3GE{Ab>Iq(F{OS-ZH*??Q%4aNWA^m z*-L6A!&UKjuLv;f#Z})CU~ANRR56VTgsAnFC8@z4ptq>5DBtiv^VTLhZ5M4k$OoCztCup1TZ~ z-?-3bb321ugtFl`hn)ihrab3S_Z80YMN`rWr*PPCuS}ZFM;_o|BjU{zY*4!okT!Gl z_sIvxF{&L`v;q>h1Or~HKq<-J?+@3K(*zUJo@m? 
zQSinzV$~fdrkAVefE_MU*{`67wE-u%Ps^jtMOL~z30M>Yk}ia%>-bg^ zUFu7rXv&sQ&F7gR40>3|uWZUdUf+SZj->UstWz5C9Ui;oweb3j|Jz6P*SQkImVk%R zsdm>n+VZWz*clH2RfV_8m#fF#G%mDAd#^Db#CZx_9v(H<;@~-|k*tD1k!tRFg@1$; zM+NZ}YdHO>u#;Du;S4A~`~DG?Gs+l*n7?MF8kqhp$}_ME@g?;!{g75cXL;ULs^uk@ z_!sjs3Pa8_iuhbk9ksZ~leTIdXb}q|e9-n458)Q`+R~aGJIhrKce@)lMOj42TrR{d zB3yn(u3(ByNYC9MlJQnMSl{3G`%Myoyu%yv%*P#TnG(aY=`B=Fpg+robqkrIy}H6y zF8v^JEPo{;@xi{T{iW|SrOy0F@pC7uF!yAlj-c{Ru=Qs*>pu!5%Tp^X!Och)oz8dR z)=b2pS7`cF%?l!Vv)#m{y3f>7({0clh&1$G={r5~6#eN6=n5{5sRJTtjeyY}9e73c z<>aH3XNhM7ySEY?3e!~_Y1eg+I#k6*gtSBnbyk;}%IWS}0#U7mcaonU@+R7qvsb7N4!``Bulc*pmOGpHi5y zs3zDz9@ZO-T$*E_83~>Bm-Oi>`&X<>j89Qe)R67xtFRQs7O`HcEhAl|R@q&wg;dY3rX zOKZn=+{)Q3Dy8a$TPn$sJeQ51lV+{exjW<``JxiJ96C7S9s*zWOcJb=>D2PM+Yf7U zeX*G1@p@?qaY3>w86-XBSA0|zZQfn1?MfZUcIlySQ~H_eV$VNyDLZm@At)hfbdGqD zDEH_!updeB=ybtjocnkSDIdhN)D8u#}aI@=pPKdEXd zUkoyGrZMH?5*iDow98DMD{d`WIXV?7p$igj3Y;n3-@>PCSpko zUTsfx?wB@;4ptYga&)t(me!41&27)FOGL5^v(e^1oixtruf@cMUpbg$GLhT=Q~G-& z(_BbzvgxkqYJ7&Mf^z+%Q+Kw?G0!7ctaQfDCr)YCI$%<$8Qtc+_$rnxu!Ki|f zuH&AK;@kC*PaYW2Dra$~H?Kc1d}-7S4L6wxIqxBm2H!Oy6Hz#^m>Tg2hF#QonKLX; z<3(KuUrP7qE7}ZHcdgQYBfocT<$S+P|H+2%$p9h`SLC157+i1V%NO6`H9!9j~09M5~9)R|VNhrjN1w+Lhr|pnH?# zF^V5n8L*Jn4L0D%^UV1qDFY%;g@icqv;tZ2+>NB?EI1o!p5iXQ8A%2w$PcO?A9%KL z1pLH2vaAqeIl}}3X+j}2Q(-d&tGuj*79E~j^h28okxYy!I_HcpE^n2T^)t_|d4iEAk_ zGdcxL5{dc8bo>v+)vy!NqX%6M=A$w#c%lpM)Mz;Si}nvF-9lk_UY)@Eff~+cRIwC3 z{PyRbryV(drNuq33a!>ysHvQ%n(R3^((gPddh8Ku)Ld`sNTbB=36d~XLCzFgRf1#SXVq}}ArltK7(04qMKIG;3BhC!0 zQ7{O@^2`oY8bD!z{DUr%T4Y6$^y>xhfvN^7Jf z?TOSa)k>f`r4qVT^G@Bzt>=k69lQqjd=7yd&rHa?!Y%nZD>Eb@wp6>2Pl7wXD zer_jXST;)UVZ&?GDQaaGE_|jtLdRJ#tIxZ^$rC)UTW5P|ZTpH_YpGM~Td_|QpU$B- z40U<04Krnw2DVJH8|96}iIS7)4Lp5BPmxsN1|r>^HG+9*8x5E@1Wohghu~!HEXI;9 zGn1A;)KVLHv1|TWlCqM9;q^Vo%g-I-c{PH|0@H1mV_veY^Neeh;fhw9xQ3A`wyd(o z{Xv~R9TY2yN$IP_ndU|LbyevADE4Az$k)loxo&x%prDo}qNG)S!yEF6D>Pj7!0cC~ zCP7U|RND3M=t$atQ2@7cD-X%NF^bi?cY0F=uAfC*kKXk#`(lY5F=LPux}vG&f#7_) zAdne8Kcgf5{U4N%lfWRVA(@2d2bxSmvg?{FoVu=QwuUAwxp@D=o-vMWe*gB6_W=!1 zW%w7&hiM4y*rmToUcHML%0UzwgS8PYHTyV!ygl;quf%^0;hcY&3 zdx1(AnLQIMZa=qxsFpisXdI*^kVI}XOmSNTZCN&bS){+4^r?qPQnz2HM@7D|31f7B z?V>Pa50gh{M%NVrc|^ zm3Ay1X1tDjmcA_^`DlUr?1%2TsSD5gXPgUo-s{%d<}E^wxb6N$k#6I|S%U^F$lb(`4IdC)rT-qMDvIfy`ts(N1H*Jcf`f zWwfSw_`ojgbSpZBC}rgVrnUVl1E;OZD1pZ%m1#T7+nx3yRzZ2s>v|sNUQJ$Zx|y#a zK{&Zp4+K4kZ!hhAJI(bEwvI98D=&``RN8FKVHdA|6UjLA z4>k$rw=Ime^nCwZn&Od6ay*Yyi%Lb|3#Y-yV1`j6;mqjyL-HoQ-w^6_p*Us$vw6Uk zA)T1&=OqlbKHPlx(kF$}n8o&e$)k`(nRU)!ZQhR;O@EvNxZG~I;F#TJmn>DZ6XtRU8ywXE!>2e+z-2;B))e zay0xV-58wiz@AV&`D_K;T#wzlKY9l_191}y@I?@pl6;HprV1uTp&@L(Ea5huQ5yWs zjM+Q)f>cWsZ--;h-2ed$Ysn7U&ZJn_(A5d{U+eMoF3Ik$Jty93G6ZCn`YC`;(j%ZWrd7tf{Rt~?S2 zb?LJb-gchBrHRcwE?xyzl0ppHDP*R6<2+V}+#+3mIm>=)qk3UVBor4Kz zTk3nQtJPp#t}&hZ2AA!XL*4lj6gMq?qKgvhQqryzT7LgG7GSc6UnF91!56O_cL}4P zuOqdATI1(JX&RbwD}P7G%o97=aj&w&32@nsi_gcmvOaShrd>DN)aMnGY<+uLGSMfr zLRC46NrKOk%!t830d2PUyuCi)SMtbYLrg+anOP$0v251-#ph}z z^y#capF~+_H(qQ=)YI`}uP4joF=$;YS{Zfxbe@qjwZD!|W%!gp=Bt|#w~t3MGrj)0 zE5+=fFuR*`5VoBW0&gMd+y#l0Mmrm%jz)cq(E5h(b|*j`RZQtEGy?bSN>`)^ZBwP$pG#hRoo1d8Kd2$Fi+#>pQwnE% zBNAtT>d~H0X1+PXe=soBZkLu!KpJ5Ux4Rmiiofq=`ilN=T3Q9vo`=0dD8%lJGE|th zsk5=$)2%a?jO#U}2}upx4K`TT`#qtcjY*wn96M_II(^$>#lPqDp}qqsPI|si2A_H&*gq+!X7Ors3yoNp<;4$Q zriK)aobRIz>!niuvS2q!8jsZ=`9#6NmifCP_n0HT4gzgJPo`RvAO>e^KxQ_0d^4Kd&uo%f{>Z1VVVsZJSo;7^4rDmZw?uvyAsIA zemEF33~Cfi|Gxf!4rG^dNE&8+VP(CUHu|@t4Px&7>{C#zhYdd0$e!~rmu3|rVt76Y zIv*>q$2j#va&M0PRkxqEM>=A*@!~U3JpaE`*@G^^+dQHBShwc~GoB~*f$pg7li$%D zy^=&wiI_M4c=9}ANX@_8fK%^2^7H!d-$?+Z#FwLeSK;$HfzS9g9}TZ*3C(+d8B%ix 
z1w&IPgJ)8~@Y$z5Iqo+nF+}E3#S^iMqfxFcy-P`ZRQ-GCAN_#5N&@I9q6DAs;B#!f z^w8liz<|UGw1nD^tF|T_u5bU|{$JPA!1YOp_BsE}`WJt{XFGU$4iA-vdx+h)+sD6< zIEF~$_?MgiyYVns+|yul4Cmf6Xr~Zfrj}Z3K;AmX|#uBenI*4>WaXfy{5<+(s5k*1Y!HgxdeNSD&Z=M6jUv zjkwjAILd?TR0YV|JW0OYIfC(;X7yo5Hmu)YnGSLVjp9o0Vjf3QXZS&O76dJV`9wfT zTX4NPAsZjoC^1vz5K~V1@R1SAO<%!T%O36{7#G&A*Sw|7}J8SrGoW^ZuU* z1p>GK1seYe>i!pK{AZHD*7E;Wpz*}1zPxcsryi7l_U&~)1Ds0&5MH6DrtTjveKu2U z*UoIVFF_e?xg_<+Zm&`fkmhOW`1gkm$|-3*$NW5HAuxDBOshZ#V#BrjTAE;QDT$a5 z+JS8UzENej!ss)+HDZeh(vwtxx@{J*PU!(G?GOKKMF4);fh=IG@;yji;6V6z>=}*s zKJM>7HwIy7h|E(P4tyTAX5`#=hnt9#KOoRtOTD;nG?A<))!o8#XPOc%D+UUfc=(lh zc4_Kgdx(1HMs@Sf+_ilNGc7M!#BL6OJodmnt#EqB-c#l1jel@aC+ zBEtAA>m1bVUw|>C>3Y4jQqBi#I3h52{(38^-?4M>_XSAFv+V65u9grr<;@>A{rZD~ z>G|x|#f~~qFluL_nq?o`(5zp;Q9bBdKtwDNUCd7&A6<=P)12}m*%&wxGq{DBlJbt5 zbTS)PL2Biz;1R(Oh#!fqhCfI^M|8dg z_rwXSI|D@h2P+T_MLgt1e3~8h%~(KsvNN0hRwabJ?E!n=WSYN6zx=Yhv-YH!!yh!F zI+|_<&?hF&0kOGCsQ0{+Is?PHwj zcr{}pNVy?VJcSqwerT-S1+{sy&3Je{R(=x*80`fjLh3E?5CF|Z*UrDwJAMXK+e9}X zpVRgVoCDyvajk3rMMPfHKigP&9CYExnP~Z?Lp<(S`+Tne9C0}0`AWtr~J%TaR<=8pHem$*}6zAssCpYMdI!p@UROX zFC9-LryUhjT1i@HadZcd(nxRe% zN=j;HyA&4{`^QD7Hdiz5l>4ui;9=_glz+sGBOa%~sn%6os5AD}Onm7$ylYsx)V#NI zjVUzAD1xbvw2oYKzIo5TT@rVIo)oYBX9nSbKBS3M!_Z1cp!#z#OWK`g9uQ~6dhdtT zjvr>!=@&|w4tYib z&~kX!_q(o5RIOkd(;jqY{Em(@dZ1DTo70TGmfw9M^aK~^4mP<}-DvJYMXwZSCSXX& zD9bV2QU!W+tB7==$#dcK94_}>)y11L7ZKgt#j*RRXGlM$Fi`XtE~xNz!>nX1uLa2G z8U=GrP$1DBQ)NqA03brY|KR;DCBc&!) z0J|QeICrH|WBU~DY?Lfd26ijY(xkihm*z*t-+P?ek4TYM0nwok(UHIRUUJCw;5QYY zGhSYhg&vR^5rdzISC0FjmT#M9(__AKdZ9(@W18RE@Rq)_iC0(M*^+ub z9GQZabgMoHh$6xcI;4{=8PR2fjIS2YiuJNo2PzZo|p4{a+kA)#vqi1-QZvOrzv~rL{Rq&GtoVuzZhipvu3Nj4MsUY*N#9B^<7P$`` z4Mg5~ev8^J$dXh$;whq2PgN#p6%=_=bd|M_l$^;NP?;_Qboc~m({J==Fl+?DIv!Ir zaTla^UU2P7WO=j@l3@|0S;RItGLc-b0z9N~^?7@><$h&jwLsJEG=D`Tk8G8|=-7Go zc?P?H7)y1^v6$D6a11=t8mCI6KudzD5W)e$1DKt{=GRNJ%~*P6UK6_rYILpR6QUiU zksRUG$9$}H!3=~>5&pSJ@EOYCgP3o`99hr=Fz7C zAw}fbBkiOXw>lBg&idl)%)p2A651E4qB6n8%kSLHIyEouvWb3O@j0>|FlQ)WFAdl} zocv9vp^olw;*2mi_4Q9c9AbYbDItJgk-o+>qyV0u6Y;|O+HUPW?C*n*Rf`n-D z)RsJ^bHmpPH0bEnL^Msk(f}8|*)Lo*g=r>g6<&OhBX~Q5htnk|G+l+| zE)ZopGifFA*tB>H9T6|!F-3Kk4usu9%PxiwVk1VY*@6v)C$j8O+Ko3oEZ3O3J+@^% zY1|{nqi_Y}awjOHYB#kP4cbBgMfD*!P>@u1c!&4$tX>r$h}+nBqL0>@E1vNGC-497 zK49WAWCOSY4f#P^AOXkQZ@JKpJCT z!<)Pa*^mz3)}2(REbs9Qfb2Z=KDb{pk9eH;B#^^HrG(QWrBMnAI=ExB8nJA213HD5 zGU+(MVR;m=opU+>Ogw+rgyn4m;-fy@_h7=bpt~^I4IGl%UG@K@tJ!`b@xgehoX|?7 zxhT;e(fcn357tAI)-NsQ}ZV-d2v10`QA96S%!m9_P^r3F-LOhQ1&9AGUx zI1ZTp`H!e-%VLcJ$%|CS7`lTh`YC^47b=h)nT?5cOn%0bsZtX_0IRT*JWtC|=jk)K zpWGvSGGnwmUyBQroBsWp5xN{}wn`5Lt+t~ru1m*Lx{v3cX(H-Kl~+}sdImv3&*-4N zlW%Gj>4EfKMwCr=1PM2a-~o-A<(Vl?twg$Qu|X}Z4>>=HN!j0>Oc_>-H$EeMHpiM^ z4H8!|W?X>@esz;=fFBj|`88PXUt3;BoctZ!aaP#h!}dR9mQA=A?0T%amWNJ5i=(LK zGO5}@HC-cUO2QfTTmn9M1CyDX{$cH{L&KVtyc0p$RrAg3qTwo;YF(l_UB1#3Il@!7axNrtSC?(<8KR%N-DS9XhGG zb^mT1+1E?H$|pa2O{g3Utr686vn|%{!94M@nR*+{qQyK$6f< zzR*sF!4$UQj&XEPAyAqnt=gjz*i#@(y%l?ep3IF1-patlNjWYi~fb&CtWULyQmZB8xI%7$iKt`O4e* za<63;CoC(AhAqtMj^=O#a#&F zN?7i?3f2OA*v8)qMTrS4@{E~xDqmc|1$%AghQ z-*?}XG-R@FNQo+$Fme>I#jK=THx+InpCp=b_*NaJ?Z3ZpuN=1Rz{IA^De2LwQ zSh5i>2>B;S(om_;04Hu)gcMqtrBN5?;&J}ICv1f6{`3b`90`nQLH=J;q1=-GtBPZkro z(lYnW{wlq32i>5bw!r5OUN!M@k~S(A{IdqCLUaLalMQ=;sz9pho%Tm!9f|e10brl| zWe&bVmlFQ_Q4c8#29iKIF{gq{nr+0HiS7tkMabW~;(TszJz(!pxePcM+_C-;{ofaf z&`-}qgd^^b3y(7EfNV*&>s|sJw*QOi^iRItK^$=xJtw#ua&*M9%M@~Q_uBzobk?Ph zPzBQVKKH!bejg&pn%+e}?Sog1KdA|iE@{pAm*owVWuYc2A>nW3S%*{q@bop!U%B{c z7pNy%%{6t%moN0n9`E^m5+U;`=o?a&?RhNPW8yWf^JMsMA9E{RxbJ1F59d3T{UbVF@G)b)2K=gL75=mugNI>5&;hd5kCoKH 
zreO5a)bHh9=rVT>`1!{-hg;-Ikyd8?SMy^b>*5q+_JONxp?CF|JHcPi_bduZqGnGo zf61Ifs?H8y(2c?W9u|Y-)LcsEW&gP%h!8N*tP;4N&)@>jU#~v@i>EKA4ed`}aXkkk zL}eZ+9CO&PuxXc*(}o`Sq59X|_wz4BJ_G<|BEL_vIm9{cV0ZgBz`^&Qt9EF}2#z<~n~jOfc7Y+3)REEa0OD0W;A$EjZkUq<;s5)=FVQFfD~Cl+a? zPdWkPONgZS^mQO})Yp>FAwQX*?m|fmZ1O9zGldUua59_s+J;E*h!XIGThJWmAnWp? zE0K4ihrRORR`Q4!;!KOQT!_KZ70eg*lOF8fbsXK`P!bMlMTsmKa1&}DOhS5ht^O zbR8a%O#zzliI^V7{Y2V-@EQqKph~Uw8jjq4kN4f)5FvvF$knax#7vW!_IV8tAQ!mH z2O%5hC--+}@W@g3nouo}l|IAWuXzD|7l&}&_c{F}hm{580tc_7$x+m7ky1`|%0

(K~#`>o& z$U-)3Voaknbyf+Itrg&S6DscaT^En&?;*BEvqVOR(b5XwyyS<{u0xR2h)*z+c|9O* z=*4fx>@Qp=kUi|MiYvO>LcB>pQ8 zxZ1T>ps9yUEu9N0dex}fEBp{qB1*X)HH>JQJ1zUkQZ7moEJ!nm`2rc128>{Q>?-Sq zcY8MwA4*;^uoDTuB2E`ako?Vj&=7Kd(TYS;r!Ro?-8+v#Gj0P>o*EhMFCY`jfn-C~ z&eQ@8<;fT)DP6y%2P6ZV*2drrBcx?ZK7&@Hd!E?O%f`%!7>A_Qsm-81oE*L%Ed+Z4&-7Nqy}4#&?-HUh>E590Q3*1|83fdL>MEJou|WWwACXERZt zgj4+t36^>md=M$X=<0`8XBH%1Dm@IJZ&D63p6mVz`4Ac)u=~a=r2*x6D?^njckMv% z3ov+M2+*I4gRbS@ZJ@{7y-t&K+eSQ0W4Q6>L&FJ2EwjY-1C+CnrB9iC4;#!F9?{R< z07q9YhV*VZO%g zO?^VZvrWC?5WeqZpgwr**9Z1pg%ECei^~9f&Uw?zn+K@(&4;673MQ8PE_{tTbkiE` z9Dis9j%u6c%M?M%j4&^dTym|SLmIcyDn@7)*KQ{y78nGC>^hu;SLdHYM%ERvdE{B)O#gRUS*^cCUh( zyI5r#L(tg=j>xIzHS@zTl5y4AABj(NS&xe^fL8wt=x{DG;q=#2v2;#qKFj08$pL60 z%KERdXXV>yO@%=Txv?H|KlW~~j%MK+WE024KO9~Rj%)#&8>7V|n*m^=ip_?TJV9b3_hQJ>U}6IIkfGrUyU#f1DeOIy*=&G2|U`R#+^X1(CZmcg_lSWF5yL zSxKs-M5QEVvZa4yN?0r$6IpqJ`fNx-k@NJzjSPSN0dtqP zb2PUEk03KPF9K~x-`zgBjZ}j0{@ma>1%a8u_KaC`u@`59u^|<)gJ7(0S@gDcnSJ|!P=8~#oerIAi;O9p7TQ) zzk&j|)K!L&t|55WvE{OUuXF8jV}~His%8$78pNchqRk)Rh`$FR_GNm>JXQyg64b~) zC8!(=h*#ev{peqQDWYI_(+*;o@5^lMPstfCZoPISCr6VqSy;UUd2cRWxg2_jd9NEm zk^-AW^%qRNa+H5PpNaI~{&crnDU7@&`uIr@&si_frVC9d$L%8Nfw|M!%$#1oP9MRK zGHZ% z54);;Jb?j0*q5orQuijGBZ@%K*sJjLaL@kH$(9>N`~kIcsvhu3$$po{lw7W>FCni) z=aOOS7gb*y0kxJks0FatEVx)e>OLyR=m8G}feE??NM(e%JaZH_j{^vm0H&Cf-h!MnHRhRI zkrrR=)rT1_#TlbK9CJt@x_VV#?t6YzXHrw*2=7c@*YPXdvs&jqLgX}V`cUESu-k(| zW)QaskVmqVOr+nB4SYW7%bZ55IryR&oY^}K^5vM7q{>_edMbnM^Orf3h7oUY?Ye4 z;%rarI`@ysDo>FB=hc`hK3AHiTu=dVWDepGt^n|Zgdw5QoBV8=V4%HWQzbu&!RfDBhc09kOR61YIBw z_n89Ag;KplYqT`onM9d3nl{AMory)C3`e7yF&g2bkzvwsJBplkHjCtyi)L92AIqZ% zdenQNZ$%> z9d_ddm3h8L54s^L1hqkxRxm;OA$RIq7$)Xqc%4r2>46qZ?|~!zwkJ%IQZ+nSbS9gZ zG9%`U3b!(>x_SNqoVOR-mA<$LMLAup&wjSKEJ?p;Ou{(ne-hfa1hmnT-wSHdTn~#U z&>USXj;h;x1$O>x5d1EUhK7R&mfYwSf5-&qcVCHa89aY3?w+6L z`sqxXMrMqFpq-99g~tICZatfomc@(<0@aroUOc37{61^;7B7vS69(V~e0&TS5}Ieh zV(4H{lsu7!ECVALpv391xO~|i^$nwaej40}BIAq{=(Qzt&p`kxjiY0eXxQPx2>XY`IHdx&0*}vXbz_F-j<7Y%f*2 z8m8*B3bD0`$}Q>FH|(WOu*sgtGaXLtbP0a>JG9v52yx*$DJ(l zOuj(E%2`sVF-^X7I8v93!0qDJdQgi+pa}dW+MR)E$2ki}Bq%4d$}L^BKPI-P`XWML zkMGsvtE%ZRZOrdG*4c||&4mNvy8eRqaU&Nl%7m+1-USlHFBtY~-PqSB{wgw=M*>4G z5oj+PWz`FD0-YK$BU=RhZV#uf^2P`{FpZnUHAy%whM&2Cq7kBvS(Q5}nZEaO+s5*c z^31jPvb@u(X78X={lM&x^Y5=iTA-Ai-5>$sgzVKPP(2>g`cHb@21iOiP0iajuoink z3VekXrYf-K5S|QYE5M$3#wjl^{hr>0@Ercl#GpzjMSoh=5%I#Beax1DpPs)y$VeN) za@L9a-c0BL#^b9fV6#$44=4-+=1U3PE zjbF`K4%?Ua8FC;P@4XDfDT{}F-Dij%38TN-74vpzLacNtM3E(NLVDO%P-6P75(D}= zLjWVW0WAu8e*u^&uNPns;rq}BBhkn=OYeLC0W+^7b_)+J&6C03t7j&6z}tlT#SPBA zB6arBw`(E)DIO(DwuME4lLOdg!ls>cyF3W+itX01)}uw_RclCwt4&Vu7eZ;>m)ONm zxj%zyqtf;BNKZ!cwbk7j`i$DYZ|FZC^!zjeZ#ogwr&ghuxs_XViY}-@ta^$`W^892SE5wpbu3>VWHxB{0; z{sKf*u<*qAst*F@IeIx9LOKM`{sTa#G{CHqp9=7l1z_5F ze;D3jnmQ%{*&yAKl)*L;FlDKn-hs*s$_92((j$-?kUFOpRzbzG`!2|;Oa_TTM1Q3s zcxOqDD@i&78UT6rTc74&pIr`C`uXK{IO}l>Zf9{h=ve3Zfjf8*zOtTwYH@*r03%4L;xh2aP{Rgi8{p12u!K)NDWTC7<6?b)|M|ewFE_sGss=}Iu zX|@na_{!pv{(c8BrIb3nB|MHtOGa{&@C{MEgx=(Q1qKl`=Vsr_{xvEpB(+*8W&~1W zYJ_@44D%MgAiZ9l!O&2pR6`MoG@E}%n%l-wh2%4{;B{zcRRoA(jQNwm(inJCals0d zYrWI@cg+8JONU$_)KMTjZr(eimMm(d`EgGgc#*8Meckh)3G(Ojk=G)NqlZ&C!Uuss zS<&lC+h@}67h373NDdm6JyiX0+ zfBT(cZ~MT~+fXU-tA+OzY>uqadPzVk4}-EJw`DLQ-1Fnza=yQ(GCVXj_;OOZy@-4d zkm1!!ncko#3WKbDh=wEcfszWnRv%;Pm}_qif1^P(8Dy?t@CtUG zxYU_Taf+EU_3GGG$Kz_aeZYY0p)UY__Kdj_@B{MM1Ciccbq)(4*9^i^Qu)? 
zQT59z;1&Hp_(g|9d1g3b7t8Nm-u)2i^m*{?%Nw*#>6WjdU+IY z{=e{qcNxcohKYq2@#vC^%!^%;KBE1^N>Hx1HiP3vhQb>A-y{1^wss)(Au zw(T*3Z`%?m1@?8UAdY=VE9VfTHF6C!xVHs`;V@z{ss2s@9B~QIv6r#^Fkt5+Z;oAm z57^w$Wk8BF_>sRF5XL+MLd4yBbo>Nf48)X0aXQf{ug?`dC!r33#`dKS<~9)LT?Q~Z z3?g0S=V``kkZTZ#DfTfS&4(qR#davvgco+|;Slp4WWVGOOkze)L{cL8mogWR){Q(W zP_-UOQx#TN(Ljd82u-3dxZnFroSYEs09p+Qj4ijKPIovhV{Q(7eqThc|FjkBD$bH1xlf$=+D}I4raK3CSWl3p&@I2Kl;l`_6H!AFSd?K zb!rU1RNi#80Gyg?Ig<253!NdNj$Wy!zs7JJe$BOwm0u>en14bg`F#eF#Og z95s{OOz8W#Swt2J+A~Td{Nlp8!eqV4d<9QxL8}$=9%QH^$(q0Avm+5aXBGw#lhY#5 zqQ0P%p4NwBAXGf`0^;QPOi;-XES~~s&#GR251w@+H_Y{`ET1Fj{#o7^@ur4*c!Yjd zM*!pvc-ubEFIO~2iZ+0OXxIB)_(Ks6IM4vEY20!e0?Zq+QV}e=Yz?SHM5f|z2@eNf z$87>B0|d`);ooV^B>rdt&c`Iub$gCsVWnUT<@u4R`c4#s_XZe7p^9!JwAS6@2|}rB?*tN|zwf%OieMkO;x@ql%(Ig7?Yv;a z(IO5hy$etFX`$bXXs$rxCsf1a-2CZUQo;)RKn*(YCsb^P6a3;p|Eu^bkeqTMZ;psU zjRF~wu-MpYT^swtM1p#uPp>O-fR7y|at)BD>lQhEEEyFBv;YsvX6 zG|Bl2B4}o$)$Dw~Jjf-^lAZep+rLqI`&+WV(mD_q^oaKDlw<^^`=iR``_O28q&<7k z6?(}w`paSzV)jV#OKc))?!4Ni{&|4+Go1B~{{Ch1WIk57(^FY1aExE#vMavJIMNcO zDL#GL_+kwez0$8JI{vl_Xd~`yB7Qg-`^}g~OiQ1`EMXVO!^TYKx`4x)MN!-_&&JsE zF@iJ`ha~1do(`tPqXI$SmKG^LPAxQu8De)3F*<7t`3VZ7-iE0K5ER&{d0F zd4+qq4#dE964Hxk4fdkdp_n<9Mxxv`gmpVBufum%_9lpl4_yn9Q{`3p19+bV(!my% z2Q6YFTYLUN6z*5G1)#W`z}~ zViXPj6uTRuou3BtzuH__8>WCGrr7Rospo=nmE9(K$M#7B#81@4r~M$y9+AVpcYVpZ z)+LEhS&f4n?+Lf!Y4KdgEzjL40@}CfOw^qi)}KgTR0I0$VellqW^wkq^cmV{5Y1>% zG*Xn$t%K_ASh1pKCKunX$1>Pwz1YpP|@e9z!*w)EvW+WzztNM3J9E2 z&Cz^9Xuz^tvmnOki3HLC50eHGguCI4cJD1YUc83LWDUWMy$I;k7}_Q2yKq%*&q^A+5PN$fx$HHY=+Ul_Tol_7cr_(oQ-J z(jrcK;qv#<1X4y6NS7|eX#Y9^yq`e}B+l1jY^Z$~_nL-=t7ZK|`6VKhA8)0zB7kEj z6z%JfIGZL$1yn)^e6SCM!mdQ!W|(P=OZJ5?$bMTf-m0mXhKgSAy;aMv@QH`Y3qysl zUfGGBx9rW$a4G!cLQ+mGI!9iM*IgY-2UK)vboL9+t#DOiapZqrlYQG)t^a42)dATazNO1E_{Bbj!_KA>^Hiw*3anQ9`P>Loj<|sp!+UQ}YY3g66Crb>QsM9xq;?mPx)Vf|Z&8fp&)0A53zKJkoYzx^ zIigWi`x!i)_lrN7eYO?`yBEK6XdBfAY_)r%ElgSuIkr*#Yp_lu*~);uu6x(B#Ax$8= zw#=d?wS8G|qPsAQ<+@SfJSdTkd67bpTD`@!n$G6vFtM5XU-7DuFg(rlD_*F7jS1$q zP%NzI_w~b@YqRv=n2pdQcY$W!%T~hTaG2Pk8BwFKzh`-W(F1OLh>=?<=I#py)q8 zFkM=Wtqr6L;gse?>O&Ia_DmAhx)PKhMN-7ZVuJ0BH6{YCHI^55K5 z0NDhz0m_Qbs-Oeak*L2s2QqiDl<)&gfY+YyHsj*tb_98*H&Q!u=26&H#%3!&RgKfj zRORVu1#0~U+>hLlR_Od4pv|LRuQp*(+gxqSu)55N7rq}+Sn^7k*qGNbVh@n=UM$ku z+p$561mbJ2c9FAh6DHrp;*eRiPsloA-2%?!YTzTnwNNz&XP}sSqV+Gw6%rCG2=t$= zV5R_~3(O*#AYvF)(I65TK|(z_fRspII)Osb7S4^CcTa#t7x(}wOV{ewt7Z+bT38D8 z8XG~fvKP?_NWjVPqGbbV&Co=XXQiRRN46LeZna^1K>z!}7eSRs^t-D|O^_eyW1!_4 z8c%~4x>dX(kj*ZUQixRW581bsRD%r@0hk!(D7MMgCxE}yvp4F&^MeJIf^=6ht5KgF z0Vt$hU$ESW+)(3H_AoW+*l4ToiurR-QJu}+b476njO7!M0Xd=)M5t@-L(v?FwzUqn$ zqIydSCIj{L82`)L9P_XXGzo^naVUp$*>KII{)LB4gG7SrHzH;W#^U*M{Yb2Y)MX*c zg!!rn77%QhvvA&pDIA6ZH^iug@G7eAE4V+vShok%IME`de^NI75fLg}Dj|nI1(2Q* zjjE9*&>b9wd?%)J|I2-D`s$yLQiv!>g|^-Py>DGd5JHr%pj7&t1WV27{r_aDTRbWg zuYTiUlzsnzG*#C@U{~So6tHm&x0_4l=zlS92}1~xklZ!;{c6>gK2W(2}gX*F)$prQ2K%c|^<^2fPZzsF$Z2C28f8X8zD+Vw>ZNLRu zuD`AQkAKWLj0DJA57E9W{)MwW=K@XTZr&pGMUYde_ed`3{eFZJ3otS?iplSJkb54~ zhWp%GD-)o!95p9FM3%I}k4S~&5^|zzzE@8b3Jp*Fv}^5ea8Wx&0iNWm&y#W{|IaW! 
z7^Yi4!K>)EGt*xvj=VnHTLaw%uOEm}kh3q-%GP^d=F1OW$81zdlFe#u~_Q_d*wLp`7QtCW$QI-I2_+7eUn>K zvM+4+v$lTIH)%QPozz}y_-^s)vZ4wR2}1x885a>T3jVC;513<+*y2$jYaMoC-+qN5 zK;=ho#~IOQ3?Bac=T`q*CVK#vpReD9 z`wo9%jr@o|SKv=qE<=G*>)wBJ3; zfG$ZNu#oyQ-G6^)ppzcga0mrfY8~WO<(G%m1}a_8hV3)Wvp9G!%s5qTm#KyZW)DFf z<9)mK@9D+Ryh_HTi8GQ9b&wUT;#PQsdF&iJmLiBsKf|lbK~7ND93r3DG1rL#V;Hn| zL#|Nayr<7UadwZ~<(OdQ%$)nvqePPj!Frwn;QnEtjJti!gqF5gZm=~P9 z2oZI*kQFA8TS0@T(PPJ?hBsXy31md`%Dazu@maQ!r=P2Vw8^-MB7|b|>VF3OKM!tP zfB|mD^2zxE8+Be&>xgXA%nV)nVeSoacCQ^j7d8qK9)*SRw*pOctWiE=gVnMGQh)mY z4Bel#7pTt-#OE6SGTX?rSZw{m8oCrcfOb*P~|kJA4E5AE;FK6w*GTl ze_q#k3$BNsX^OI3Tbkx!yLS};YA5K9mwiQGg!^tGpPTBRZ~bQ)so)t_qutOqPQ zIHWGbrac0xMm~1H; zQ`pbLY_)P!L}2Rm2n$w&6(o6+ZK_sWzy^yFao-W4t2 zQLv+yE!N&K(v<34wjmVhmV$$r#B&EbM^+_McH$x9#m?vQO+?qn%@zAaHY z<9K2=OqsSk-8;g44G%6_@DbEGxDJy2J$s4B4qaDKqol_(iB8lf%P@*6@^s=k8!3jl z-)?&182(;{#Hh4D6#^xFXJbepDFuIhSN@3#mV zgFk;z*yQn;xLU%+(kIV@VUY(gTaf(y;y@xsZS7&zP1Ageo~z9f*%`qOM|knv3cPsp zKIR=S&C}zu4W-~tKJ1nuYE>#|y^sr=4-AL?JH@_>63}JvJDcN<11@%hlDBk@Ygur@ z7B-)9|E*24+B*VA(T*J{s%joZ5v8_J3%RBBnr_G3vlAep6jx~8Q|&o@#bdvTZmp3+ zsDnIL12_NCZ4G({15jmcB-x@;f-e-Qk)leG1)cT-! z@C`_AFTkvHy={13qTZ_9Y0to}M%QuxsGp~wX(^~2p;`m2_(&y3M%cEyQ(3|U%*EEO zd$i`#-kM{e%LVFV>6J*~dmTWOTlc+P4FCgxkF1C9Jc2TGPf(bpE%qLer5olh4he^h z>)o~Wd9CH7!OIP_p1|$cKlQq?YuobXB_{?zZSHKMS6nlp0TPjO!b}r=H#m<>!FnqO z`bT8J5|;UN`aK(6$-=Xoa_3ujR|Cf}cy{7lq08#1LNy0L{R}02ltqG^E8drrf|kP$ zfp|Rqq%CFtodYUt0`Mc6IOF?X>)8$4zRfT=UV5REx0#Mp-i~Q6G}IecQU*o*CD@LS zKfC&~RdesWH;VpX{nZZwI*n%3A>p4l`z46~{k-Q}d=P~cnR8zt1b+~jS5<51FLQiU zk?gl*IobEbUS9!X7v|QNeoXMr8}})<%@C&h5ZjO(k?raY5>$v#pX|-Qw7xP=r;~d; zUrkx|Vpn6v(c~GWEvU1YhLouSa_T-1=+jSrtgQxH0ypj$-GSXQPWMGEOB#C%Pq{#^ zutYdRTjEDtfuZ5g<{aLVIecsX!up|euR_FxIPHK75@S_Yk{=FXK4}o4d7YyV$_y_2 z{QgyM-oOv6A`D2vdo$mNI#BxpDpa-R@UDP!{#mW>azAkC&bRpuuJ~@k)XLL{WDXtc~IE+bX;E*OK5tfOy*=;ZPGfJ=zHd!wqjb^WiDyy^poNnHCoo01YYH z+*qp~iCbHqt)LdGe4hR)q77u$BfZJJBeyoaCgJAF;lyl#-}6;|Qt45B=18_T$0I_{J)LAX7ksC4X&C z9uk8rx*FuX(|}xgU3;-@#JFNPV2%dV?pI|+!x?fKDk&0wctYIsAy9u*dEfDZlYN{8 z3X`3W!xtytwQuxi9^Jn@1dJiivxDGFSDS5-On0ti0Ywj6>y^*TQ+>?yeey+mv2%3s zA5;9dHs1A@US#gqOMAeVG{!Yu0Ydm5PQ1tcn|AUkH}}fENtAU-`f|mA(16EAPXQ|J zN%2;jKTeb6X2|u@`%!3Jh4&EIG!DyG<{l4kWNlw31e1|+M)*S>ZMTdtgEIb`>vki- z8pO2%aaXSbq|pb39HU8h7=QJ>a2HaF(;q>Q;>0Nv&5zV|b165(`lC%^6-*ay%Gal1 zaVxO8Paf<>B8ag=Ki7d{DL(o1N_ta%OyIwp1WFKjLWh;I2o1n~oPT{+)a~Onz{u6& zXs!*{ZwZf`&UN)$Tk=@s?q+Ow0&^!Zsls_s{(*vbNi+FTevrwc7bZ=Le zER9(v(uT#)2*qK~L^};V^YesK#`lTRjvr>5qS?Jjf~{^|1EO)p#`4C#!&BgvP=Pcv zlRBkK2*!{uh^~&dJy73Q%0|fp>_=EML!1zk`j3gBZvCNG=2=GE@f@b#EEeU2V4wBbxV6}}9WH=->*hrZjagiGpk35f_r$EHpsT9KY#u(SX4!_3_o(uQdydjq z%)eWd%0J1QDl^*sJ%_~U03n}xNzNLU0`VWz^sBwcQ-8{T5iV0b6F^rl|4>BVI%9z> z^uBM#oOkH*q>A{1gJ(4bc~8}8<<1+``gokEyqsEX-bwBAjJ>L0jF@v>I$WPZnUnk+ zRlWF$^1LI0lSBnZK6Tm>?qf!>HW}^=N8P>QVV%5A-G^u@>Aph%RJCS3bi>gF9Ba;( ztSzYO5)khBjK^fJ$Fg>J$H|ZAMx!{6sFGL%IGnIvY3HFjJzx65oWH#GLyum|45?j| zB1tXEK#AX7>+v6>Wk<7MQ!bFSyH~pOmu9a?+rN1z=Fza0VWSoqf=(bqVKoGG)%k6) zL4jV{?GbY+EcC9g9>^B@W{?q4T0F6D_pH;iGs=mv-{U7v2&GWe!UST(q%!fA3em?; znoLWBmP=3eB;EkRQuSqF%!{=n9llxaoHW~`At6Q;9O`{FoLs?lYOA9kCClD+%dUJ( zHBX)_%1k6q`WYGyKd}odw_-5#zT(nAuU}fjV=smuH@%@``Qd49Qv>q!leoKyb1Cxl zQ7p0JDp|2O@na3ur#uAQ$L|$g)aht+tUNm6RdsW5x^&d& ze(f@yAP0{EkXYrH#2g4~GHXCkTqX%Uz&EVg-YK1+L4MG@F~pvUj)fC*TOzeI^z1>- zw4Q8@j8#+;Y8aj%_ek4#+gHvSKFt=h*DBJq@g`jJP0Tl0C{rU)bwbmsvo{X#5P}_! 
z30`k-iQ30N-~MGlvR&bA4|~g)@2~gct$2rL<7H1t4XXwv-hajWO3JbKKcF>BNpqgYX8Lj5Q|3Y5e;*@`t5bKk368D;g(MKQaJvb-|hVQYs1`K!$zUI_|VPq z{HYOvmw)WXEz;K3iK)Wg*BHsHKC~&q_k2xraO)~1N?v8+gjWLL$1!)4Y-ZTc%GdOb z8Eo8xw?0CE8D=6EKPzslPH`mxgL3a!q9*%Tdo57a;~;GJN?t}}W?l%L zQE5`}z?r|&&@{j_Ljk_EZb&0f&vT=Ya56G+=_c{2l?{e$ZG3O4dZl@|+T#BB4DJk@ zDox#!hq`(&{nhD<+F#~3`4#IwTOfOI9zIM{i6jS1b3Z{U*)+in6W%>@iDrU z11PmKoh`S^v7>UId~?;q|R~ zHZFd$vS{sO_@SOqBl(a6(r|WMFQIeI@n>j>PcCGODcMEMcK`d*F;i7(QoD<+==kp` z?u?dcW@IiihL7Y!s}8I@R=}6NHRG)Zi^@8Q=g%Q$t0~**0-^itVyKEu6^^p)=R}q zBTRm;hHjaWTYKi zY>eVf?6M;*T5NQc{)I&uP`pUY@|1I!JGYQ)kXI0!I3Qndw&p!2FP%fF- zb3Y+6sMg`(wAc`(_PAhv5L?KaK58cTtc%TV(=&16eaah(R}JRfL3qA}1MS_G>fu)s z95k|G@l(Kv{EA_dd-Bx?VOHTj8B4c?ON2~chJJm0FT5a~S3!`Kkb88#ndfGR{8gN6 zM!HJokcfldL?rD*Udk#Xnz^>_%m^iC0=L2iXKWJgl7@8Vv-)KR_GbG2YV&Ar*p`#B z#K7>$5rd4cT__9CT@0C)v;STf$SE54WUW7R`g7o?p(dH8#ZeioESdcvFB5k z5FnJCdwsH4wJe=wKh~PCZJT%`3$!YlHA!FBGg zg0M=_^tG|2X0_Fr4W*>jy?3JOSiL(}Ft%Eqbn+K`?=lXnF29mx((UF}%icROnlRdN zHR;?!t@h2i6fdpWSp7#fUt}XK!2aJW`C@*O1U`!byBQH3mF*DoE~DJ_^9vcj)de%Z zSJ;2k@xar}u#Ml}VnpAPR}dux&vXESMnR`Pf5~`Kn=^`EL@hx zt;@GNco}t&UrezSUlk#E?gew~`j7Kpe*IX_+?<2M>{obIvK4dmn-uw`1V?!JaxIGS zj$Vl$@dZLkvDDk92c-@i+uVbR-!mA{w@P$!=TfTOiA;?Zu_Rfyv8igId1|3l8D^wd z+B6o2f)5)7*46!xb~(Mg!6{~MGt`AtNToJ4iw9HCmmhW;z27D1zGAKsl9Ws8x4Hh) z)ICRo63aS9R1iVl6J#V$Kr~hk$$PFnp}Pl~`jR!x6GJCOtW<^gnApwEa@QCJ&)kF< zEmV9_A8RP6rqW5mL4fxy@s37R8pWkx$A0u& zmpmi-5GC);ZQ10nmj+5faHP$g(YObEyX2nP32d8<$e$&oaD!8y()CaB5Y+hzUyOHL z=VG`6H(;g_DGfJTyx!uxJr;pu+s*@?g5{ukLXA>LxIX!-&pWQGa27 zd?%st21a9KD!}3>06mT)trY)`0TL2$y@$&rqy9)z{BU~Pvq#X_2uZkexg%V?Z+R3N z6XkwvpD=J4B?FfOe(oRUK22v|z2jGn=)6vKhLoaZV?Eg0bJwj^K$j>y7o1%bx<^VKw)cUz)@-xcl>-D6xwybL#8D8qsjdcTe=@PlipuFKz(J z%Z}+FLi~MxdGqL%YK?G+&w6vr3!Op5atp!XtPzM_5#%h0O66%pwD7SUJ0?~%>0b+s z>^eNz=L$FG7>zzTkYL^)$Md?03zf@z2vg#J#`{bjus zE~l!&Nfs|=H9`QPj~!AHu{hBq(y{{nS5@xRk*FWv+P$*Thti#)s+-m@&!PtBdP{n2 z74h)xlHrhbc7lmHA9D4$<0eUNQp@R!*GzQe(DX)AY^#w1$lih>}MxL&PFi16f zknA=jNTi?=)2HaO31O7er{{r&V1W2gt}~JaoO#j>5~YOt6hYV?T6Xd6izs&S88Vi? zzzyIWHIY3!#7gFwfN=-~=fbgQm0f$wGP~0CPQV?Add+;0v1^Wv4F?VRC0{^$reJZ* z?DgxBUjcY_D%pS2Rhk*xxo>|-nW3Z?Zp^BpFt_%&liLQ^wsNHpCCJ^1Y#YL!Y7@6t zdCfW_Fz`qttj(z|?xmSQM-aOe@BI#1+^3kE!=rzR@?e{H)wGbnaT z)tEY`kE>oIj>Y@?7s~5HPe4$QwbOktr*#w;T;(x&ezxYv4Ueh!ooL^wgUj=vMRr6G zKD|!dXoCH%d>2roc>?)Z)qX7RkQH~U)4F|q*I!zNz|Y!5!^3ifwy2wiEW`C@A6U8S zL=xrXUjEX|__h*Uq2C%R%xjotqBu*BIT;59$ZO(up%sT!hrZ{c34jDnXP6`g%@}j* z^{93z+9;Ay?m+-7YpAF=o@vM_YtzIdUipb+4K^Rs7@W~+5QdlKdP){Wd(GG`#iMF! zm4LT;i>cK!?*_1c7B6bTj8ceGy|`C*C{`!nzLK90(?Npry;B`V*HMu&yLas93rw(@ z#fzPkl(Y%KR@y+syfnPrQ9t0q#AK#54AHn7Bh9MiD3B+Ku@{DAdu)uz0=lGm?F7Gb zNt@~hcO4<{aJb*ZNbt(7gSxG^$X4C_y>W&hoxku)@X~RHFussgnI&q zW0$(KjAzh2K^?Ot2#}3E7cu}+_HXzj>liKAp{<1@<+5Y#Dzg_e?_uzz(M){0c0VeT zzp8JGhKNY!xqw@tpKyIBDM$zo2?b-J#&ok982VH*GG(7grW{|y-8{iTvsq!-_Dge| zPM1*4T9W^!_g+Px(m^oQ(U~}`aU~m1$#N`(B^2gSCwNHm%j!={9dqi+eUS2o$)`Yn ze4n&que)-E%h1R88^f}Z^gs(<-j6iV@q|i}M5{ACacrPYG1F38WAEuCJ~s1w{|rwg;B^T$HepVtA>A)Ns8>Gk8+5SCPbb>Ng33m{eU>--E}DLM@vS z;cN7oRshJ)v#$2+eXK<|g5&mfFca?yb_5%u0;0~ihFbdSM z^a-Jek z9Gk1M%~}g}P+Hj)6fb`P7s}hCW)>9O%FGjA)meC|&hEr~)Nx(1xD-|M=(F=O%iAL? 
zuF7Ar*BXq=CI`I(k2h5G@%>o1D|!M+M+%-af$Vp<01%q)L{mnZi`nzB{kdhh`7@3^ zId@n^t-5=BUytd$#ku=zs84kbB#mV z;fqyJ&TUNqi3jx{Za%HXAAh6jt8uuknRFaWaH5k~Oj7 z-0Qo0zE{bcUoh}rpAYw#?(czPHU7(C8!8Q6Rh4lLVO}9y?8W^st=yI;4ojYImGH0D zpG}R*F%k}&u`In1HyO+QwmM}ai#kk$eAaDNDcw~&_5y(Dw7Vs^)P2A5|C`(XI(3mS zm;yo=qh(4~y*xOfj_`g1;N^?4Uqd13Gkvw75b|`|eG0e^j|8J5x>)(v2eRU3SG$vFiLNr!FOtuK(TA^ngJ&lioeCvWcGDBlS&^e!$5~vLBu&P>WZx zJvtP*FGnYpg(ly?1HGOx63@hwb3EUn@Y!k(tFT$K_3;Yo$UP2g2`1*T0UP5*e@n`^ zf|1cciB*5R3v4cx#6)ZX<^~hByUIK60X=IY_9$Q_t0$TE#2-&R6O$92G7kiJ8N`g? zAI1ST`oyI=ocH8S35b{~N$xO_uy9J8X+(j#;P@E2a5PBXJ}X$WGW%{2Mm(M>HMmB9 zoB__9Con~{uHNZBsIC@{XOYt6JjlZBibQUv88L7$x1?WOX>(l6ZfB`YS*!727d!br z!{EVrgk8OI^CJZ%8^HZ;SRr5sCN}z;3{B4YwRZ(PM6;_XY^Rs)lMZ z+8&!R*gr~q7=`JweW-QqJN$+-B1n5&JBCk{@$<%~vF#Zi6lYX#H+|g(4Buf!jYsnF zYFEHnPwQ$pOEy#q<{g$o747WC=d5SkHf;w1<1;3n6Ob$0m! z=oj~SHCj57giwTp=PhcMaQrp5xjNwmyj9^1OW||B`23_D$=WutA$)*Eo!gE~?|+qw zzmoM|BeU&U{g1-T#*f2L1r>+54V0g0q8g}hNk&}lj|0W(I8dIyCM!Kq=U)f%q#iJM zbX$W;s7nw!$eeva1M;hrB?D}tR(6O3W={KLM`|$ehoUX_|1VNADkq5Z0l9OpDDeI< z+6Sd`9E<23QG7i&%z%BTsvq$u&UI9Kxjc}t4Jvc!d9}JQemlL!8{$3RFL7E^a64X; zuOQoEg;j|tFevEJrBLYHng0_Se#BJH6xMC_SGkX$IZ^K9`pzRo+$<{=(u+5*;)QNc zp*c5^<`i(K|M!`h;Xj#KOH%f{r2QNBAS5?cf9JnCE)WuaV3Dn4_x9=Am<(tCx*x+^ zRuJ*hEd%8-4H=+UP(uJQX`b_7mGc#>;=)AFYb@$ZYro~(zw-*ysb;(!PBuwa3_3=qzlsn z6*qlun#h8E3HcGQNVqgV(j=BhaDnQvG=n6CzJIbcI%^Jj_bfze;{5b0n?TgqCWrzs z3L1QGh_*jUvfD^Pxt;_8r&kue*N{-=!x0Cem1!uTEd@UXq?Ac24Q<)?0X2e-8`Y0e$4w+iMbT|?Kr zhY6v_YjE>+E{REtpog)7h~##fHe&C!1U?#1QIyz&X+5RMAR(%54h5ZrX6>bs#z?U% z!yvdS)w#{jw|5Kpbc+Wvmp=5DQM5w|nC&hq78KTS9Y!8SZ+ubmuSmKcQ7K6b2}I4M z1V3|Ngx&+?94qt)d-iDKD0rpW6_hl|6G7DVW{AquW zE;VZxmZiD0KwF3j$+4Mb!4T(gHLQ2_b|^bqJGmO_aCqut?SR?CD-bz}ol_6e0Uea+ z(IkbIj~|fS-4*Lw06A@9qKDY>xgl7?0U5X~Zs+_ofNAE^9c=&B{6Q>Aoi~vEI#Q zV*{jEUZ-UtD$l5brk&-S>%v%P$E?p?yokc&ho?{U_!tFPZ_&uI*4K>Tqd&aVtrj%K zYcx(X&Bu0D1gvdOKNx5rvag?WyKoP358eYvHJVuZ$e`B{t)JVhSA|9-1gd@D7igce zhx><#D5&fbcpnj}k6H_N=Z}EXCryu=_N>SPq9p{y*BD;NtQ{1k!^(}hDrsGE>-_Vn z*l4yH9!hrbDJ@vD#78(NWIYR=F}+TKwaXd^aZnPpR$l_eV!wgx;FoL1IVsz2&c)i$ zA3|@}XfXUsX2M;TI}bu9lUH4DM-C>=ePXq%fo5OU#IF_-P4*&a0UQncW346IEHCMxT}?$)2dedFRgc}dZX-iH$>xdX8mxyPXbPB=FE8^e4o8@&Yi z_HX`Y@ljxLMpU6yYRwR^G8b2#<$VXa2Mb>+iWn=>`sj$Pwad*Hxk&_2Q+|n(_aQ`V zap&l{f^$#xVu4L)DfFtWSIHg|GA%do07S2r=HKlSj3jm{7Xj~F;S z!jJt*$Fy_DKNXCTr3YkT&*xi}VLndG^2V81?<$NAt2&i6s&1ukEmR{fnD3_fM+?Bl zbdLAh!t}s98~+WDd%qL3o0fIgY8@Gc88rsg#~k9=y0**3V7(HMRXX(u6*c#%aDEO> z&i5c3yW9E}PR(M@Bc7FS*q}HwtQx3DBS*t@;k3*b!T8poCp+6DJj6iDXZKk>--Seg378F9&~Yx zwkDi~AiWq-G`m^D31&@I|EWGWAIQO(Dgj7x!>Vjo3_SMJk z)((g`|2s(wzgjjGUxY8c31Z);FLly7l*|N8jcJBQDfyZB%rez7$}TGFev55Rd9&x5 zDMvUNWxoIO0AeAry~EszT2JY=MZ$mBN9JGoStJ#lKv%-QJUr|crjUaoiljS4Ny)?! 
zFOzcj_JnY%XMB|VeGn8zlf`-y)FA=*up63()%sRA9?mWXZH4<*j!K&TI3s|wUUB5| zE4~>d_vA|pPgJ}~*ASG(h?aX!Tjidrdv*dwAS)wyiEm!*kqA_QOqg)-R^l?cnbKXx zlEkd5D%WbdQLf`Xx%W7guDNko%bb%Wi8lft8VtiLR`S|GiE>!T#l zUn9p}{6P$UF)06J@t}J!3oxb_2L;{zMiS1By4490^9Gd+mu29%rwc&BWfdAi#A?{R z0>`u!q`c}bfdUFF;x->BsSwuzRzc|bk=31Q(@8um%~6v`y(vYmXvD~fY||tvp;W9U zes6glPJ&cA_29())OMD>l=Az?S(ksXN9Kus!XcT9UA!Uy@!zt428n6GMn)q^B&I)@ zj57UFGp6TC$65#Ep^%R~fbV|&>Q_cKC=lJQ(D42NK)Dl1^Fp;|ye&_k7x8t4lT zeDJoc%poq@EDUgn%6N))GnZiyoEp3pkpvaqJI%K)IZY|w{9r3V_z;1hEYolkZA0aa zg@|_R;xnPw(Te>&0q=nWiT+~5tshCZsZFuf)}OhOeIt0n=F6K$0}$oTnizE3^87bN zDgPus$xs`3zDmN%wWal8wEa#div%z-E?i%QR!Dd^CxG=u%Ba8A88K>~7aSokIbC}b ziH3oVbgYfb`)dzn&~}FxM9vHsZE!0fJI0+Il-TwfC-{Kg*?l|swEF)oe?!(25*BWM zR{vwQU$NH;!jB-rx_pab0$?xph<@`}wn-C0@N62i{SQ>yy{U3Iw);TJe`n$cqL&`5 z^3XeU9`-<)t1FiQ5DNrstPS?Pa{$d0K{96%OiqhYngz+L%(Zpg+mBG5;}}lISptIR zesH>SOni)+A1S72i`kw}1gPBQ1w}vK;FWc@I~NG*a4uXTgi!Y8921#*o7p~3dM{95 z|KTf*XzwqKz94fS90bbZ2_ckvV0CeF8n$askSAA34?*QOWAY~0Y@Yy0trH+BGpmpt zI>5$I0{gjT!l84w1|=bJgskml7oP&qG%rArJwrxIN#`>IG6yFJTxSjg^8Ey@=)>zh z3Y81{W&!+p+FmA*KMlFyp=rytctKZC@7f1&RBYekKn$q~;a;Z#$2nzSbKO?{Bybe- z#H8dF@Yn{wKtUF4qR}Ut6C^Xi2;Lqr#TH&_fWl`#4BN!*f=KR4Cu5Wv*k~}De0UE0 zj}sK9h)ty0wdJ>i)RK=L^a*_r2>?xot|A7qk$ukU#n0EVNhuJ_?k^6UVt z&0(O%`-viW+O*&~;uDQ>kBbfz*#~Cgbv;T&&!0&GQ}; zhW~GVhv9dDtv?<`FF%&cRu4*6^G`SaU;^LwYE z4xc+vMHb?KI1KjWU~q0h)hj9%!P7uj1h17VgRLi|;u7papAs-MfIf~kMgpVPhb0ws zP-Wb8C||_nRXv3I(~Z|5I)CF_x3Sdm0}cu1}FSd0nD(KLDHocWY~ zx{+M>811w8{OHNQH-Iw;S$)7?bI<@D!ADI~5iudgPtfhbV;8-&A$H%fA(clM_l~4P z=b|1#vNscu7hHM@*HN)99Ghje>rjt;hoZ1uwmhopWo|eZ_h)D&TmtSa3i0O|+8Coy zb99Rnn!`uk<}l<73O%P#6<~4Jjui~a)Ex6>qGnMowrgR!my=p6!l^dS0SymN+|tTs zNT)Ih(>jR@hLij=UcpY-U8s{k5uua{a+{!7W(=<>@mIP>1ZNvS>p{E<3W)TUE|84E zuXcv9jB9FZWmg04woSq4K|cv*WNv0u^vt^;g8qF7z*?5 zaTX5p?!>Y`y?N(&>Oxfa6u>&3`}nS96=gVBoc`;969IxlvP4IQ#l;p~hVqKa*e%HJ zWf$6NhX$GWfL}QeDz-=2=R;cc%D~IS?v^{%tdd7|N9yWdxn89+LsW*5B87i{hPY1u6(kMBHghWx=$Rd9?y92 zp*@Eu^{1pdj=?2TkV%mD{mNVGuGiu7f8DRjzY_%X+%O5N@R21*(GaAa(V#u935wZc zz-Q#34hn(;z)Bc0K;q|i>wsRq;9jWJ>vK{0}`RXk`mX__&X+l+7Xw82Ip-cXu*tuG1S^N5S^km+uApX2Jb)m5QLK%`oK9 zt%o+K0$rtHh`1Z8hIp78cElAacUwc(gSBRi#G}rrExh!8l?l${0Pid9^|679!dgI%#5^8@z^(;hZ+za-nPMWU z!n1O*l{%MW=eUY(C%Utb=a^-e9tc19_gDqMlOk*G>vw<$<$paKSnro$30q>9^s(ez zG@=~gOhn4aycFTN^*+=+rhODpfP-1NjX=(THooVSc8eL!cR1msJ>qvOU9Ef|@PU^l zt2el}E5l%~mt`Ea!_Vw_2VxFqrh`()g#`Yagm4nGr+Z2AL*ubHCI?x0@9%c+6iK%U z;t-69!4QFGW;Onhc5;q=CFzb{A|UVieg;BWU^ zbh>E>g4~1{b+2nj=UZ3zLS^P`Z}uv6&UlKSPECicKgjUKcButL+v|!NuZGRQKclPL z^{zAAv#Isz16O1NPU!`w_V~@>3!Ru;tFoNQ|D(O{j^}#)`!A8bSCp+XLPSIMN<^Q` zvMNz1Qbtmem6bwT6h4aVQW^-+I7)+1=82XS>6j7j=jANA&;9*<@84hd@AtURALnr% z51o8I?{QtP=e+zn8V*qIMq|DzK2li{EY+-2%k7w@(&g~8L&a!jP4bY$Q$cf5)sRel zl6+mqr-*GtaG_E_$vZIlhwQ~eUdu?*W_qSQIgR}urA_5e_+WD!lwq}*Na;fAOeNL>oKxQYZZXByy^tP*poNooq z-B(p7Nbf>Yk_@cN4u3|4QD^ghqZP4IY3hctl;#sF%;e}x**55NVsA6(k?S-R4a82SAlb)w#@5@!VVxoLfAEvnN$n8}f$ zM;~}KWP11SN}i7b;gR8KKa{ns=SIgG^;?*fT$|4O8&Eq=&}jfcKsZi`6J2fU3P_at80 zaEk&OI_0g2Z}@4vCa-I)`jR`EMEyHn`rA+SC$o*1P*BD%@0u0P0cQGN7bn)dKfvSc zYxOTpt0QbF>ks9ZqJD1#<5Xi=zyurvX1>*OcN>vk-i4X|;3GP0#xD`wbtd*R;%khN zMTk&kUtGuvczWryGZ^nt^R!vw)`<}0QRS2G=5k+@9z|Tbby}|;z59#JR zJ1S`YbgGIzkoa|68XBjH7>=016h`gk0utpWk7BA2NR^|!Sw+jL z#U)O#3AvH{<$0nb0Y%*;+U1|-%$w^Y*+>M_^14HtgMatZ>J-iL5)H4~ z2-tvbRElK!$-Ny+aP1CZ^a5Reh+pW=%r9RE$E@o7iv2KkmZ~7-9?8YU%ET-4QNA2t zb_9nt*g(HCsnkb2H@*BJ6yxtz{gQJ+o9ugcr)N~Z`UU!nBjU=wS-%KDS4PphvnME7 zW=oGiz_X7EeP>~<9qz5qnrRihC;Nq^ z2MnJ*D{rsr7(k~GrBfNI$NDPa7}b!0kq%0s{>`GHA4EL?DRQ}8ED6;H+|p{)x#jXzsRfe4$?TG_s2mA zw4g!RIh2~52EhRisB 
z^q-NU7SsQU`M8Du)c3yPQr|`YK?~|eE?iC4z4}(CmKNT~s7k^xk>-cR8NP`!tGgRa zD0>*y*R}bMkmHLZ1gVHLxk*_!ZjG5+Kk95iy z`)8T43w+VY;Tq{=F=kJ$ge^Mrw1xV@hCqrsBkwhfx2*bG(c1KQeDv#AJ;wAPDIrO@ z498f(>xJ~`npHg7RNsqAye$d$I6aJBQDjX6#WprXQmyMp@LswMYP^O&ioqMK> zm9Dp6*)D3B6>`Gb&`eHm>DnqaMNF=I@UoOU4Ia^#UD|9|lbWNSX!mH2cBmzKlE4Uu z$Ulvh-9E<$N^HZ&X32FtGjg5&%B$&5bltAMdH*;%4|^l3R<>C8c3eGUdhj7{?G5bL zcTX8UmYp6gpAOSwih(KRYRP8D(2*;*4-d=y7+ixK!EIX0_On7(a>|`VM-`2#LmK8`+K0Wo)Ll*Fj$lDfBm2MM@KNUHdA%Ob0OQT zc7DdQ++oZei{6|2hrLKDkbB7tIHML7;`jJJx*%9LSNe)1fv>px7f+Nz&7W_!jCD(4 z@V|=5c~mbcUG@tHVnm z)v@hIpP`)Hy&B?z!FshxsUS@d+~s>c#>j=q_XaS0mN0~1?`{DD-ytvOZ_rb8I9s<{sLm>{Z{^woon@BtR62kx3I8l}aSMTd$T5qr_ra|2;echgyOWqdg%y_2Yx}5lqTkNw% zpkoBZT}54N9nc*OTfX%eBSP0q;ZxXEZn+T497+{CQ#Z! zdh<(OTCCDRk`FeiP4T83ZP!e>z$f_%E&nmRM7)$QY8k~M$c)F~=r{ct`D`URa^&c* zd+7LuM;X2{lYN1~vI%^eUHC~ffrg#PTAIP{isFL~YPj%C^h4stCQX~%Iy#%2`a3{h z21KBmnd6eSDUK$)GnRlMKRN+Sk2==Jl@hZSeS(3p>qM4(NRO73%KO4PQ?IN=?~V?} z+}`{4rO!>nr&CiC`}fwSh2@BFO8ZLyfR(#A|9f$_Eq6WaJLQC)TMj77@d0}zgqm?% zOb|tFi%8ZuSN3$OKbMSYR{r9s8>C4!+Ada#s?5R26!!%Vko;SrZYv09&J)R*Y(!;X zx?P>{eZ4)XiDEWjW&e1ccr;07Mlb9}%phyb>E6ZxN`1tN^t7{E=C7P%v*ut>n^Dqe zprj$0-DR^o<6ni`o(iXXbNW!nzB#IU zj^hsoWJWWiza4k~%gyTe!~Srw+}ZUD+uJ6cK3QjCVEk-Bt83$?c6G!}&f$I|jMbl* z^kiZPATv_3vsbM5FyCu!Yx*K<=w!kscg@=(IQ!Rf?3ZkfP!euC(R|>U8mE5STAnyg z4+Ny|g2}Ua8o!jp;O9>m@0?pvptT#SeASBhrjP5uvSvO`7oQJKx4YSX+@zMKXQ~}g ztUN#Ml#$qg$P@JrF|+Z95b!Rq6f4m2a((#^NUUkseerX!*4-ivG&YAalv9#dAqg^g zw|eM_OtwzeBCcH|4?--?=ZEB7G>zI~tJ(wl+eRO8Z(ggN28L?&jt9q2?mK(rPbQmC zM%d=JXPQ%=>HYn{az9qC0jNj+*~eKQ`CiPwTFqzXUiPlJHFMxG%2EK+#Bms`4E(F9`bV;B*`YJ))m{A zE}zM)50_M%Pb7;`4n@(RLBD2>XA)?OHtY|y)y`yLbc5ne3`L~Uka{G6OVPbqdAY+s zh1rbxo~Fw46F{=qJljHg`N6ABON|n4pt*FNcT;qH4SkeDRf=YyOB#`X>BXrZM|x9%*5qm1;H zw1ASZZAY7~Bho(w+@@>Ant~Aw-3K1eJ2>Ra$1|Jnta`cjA!$rFv|TdCp|m(}d%1@F zUmnZ;WYC4hU8gav){*{X$h>!?%`>(VucX@G))AkR*H_=2o9N$0-ef+X=ihAjkXQ?& zPxn-Y21K}eC||chu-Jm8Gugrdx5O=+ZSh9#H%zjdmPwM3PyjL;s<47oe<8z&?DhMv zBiXaI;pW&s3Qm)q@iXgsA)PmV6r0kUo6g3gtpIj5#6GE_Z{^)n|CBVSpw%TYMnqG? z`&Gf0Ucb_>YI9G*h|t6x9h(r%kVQvM`M?0C?Ig2Bq+*&h<=v#)xCx~k?75(gS3^LT zd<(|y_ri*Y%nJ8+fZ%hiAKl;xHZ%QTu3eE*j~{F7y#X{vHNhh1jvSlO!B=22?Tf?G zRG?VxN0Rs8+tbe5U2S+m<}4vW`$*B2ZL!>kXj93KoydOmQ|u6oOj7l6Q<4H$BO$P& zVtS*!11-30_Iecv=bF$Dl6Bhy#j4h}+VsaRmf!n`2d)1zMOg<{4im4OWtG4geK-o? z5EwMafh0377|VB~e&jqiN1^G(;dzQ}!!dwQ-}LqM!bo$wyY5u$_ibmvYM*k=pp$6>n-tM}MeeUuY^o2+s z6XWu_vK#zX&R9cf>nn7Y)oQykfCTdn9|YKY7$CgSkD{&5IFAgvi)E8lk5?1+0fr=o z@A`4`xtN@g2rp)tsh42ni{c??BFBF}fEtK?t?QW!BPEiF3u&h71usz_Dipj__(uI>6|0YAuVS{d{<0F z-4V{8`#d+114>3lwijEU9@rsI;|T52s@U802#{AE_UTg}u=#brrXy*iW2Yt6GWp1k zl!u*EY=p!*N3#q<#8+5&Roudg(bBMMhFV5X^OniQ@wYBw^pS9Y8XdzsUI$5urdvl;_sZ+KlXy+{A<^d%FUV$e9^=lYERHDUn(HxcQarE{tvbMcDfTGa$S@4^G#`Jg z^49Ky#wkfPJ()Y11%$~WCQ0vzX*g4rJ}N{C`? 
zLCN5Z4K4}6Q+e%a5Z*ficyG8kg}dpx@Un*SqLJw~IHJZ_G2Nq4kuTF7Mi)~`PP;Jb|0fM?mr^L54fsr2liA4a5EYQscz zTO&{T)no?MWD`(+7kY?uG5zhHuwae`dy3o%Xf01#t?5Km(zGi!!oU97{YrZ(9D^Dr zx)RHjT6hlP_|xeE2qj~gtPNLdP^W2ABmqtBfG%6!XX+O20-N1mWpBNG>O1E;>}0}q zxG;Hcz>;3wZxL3q&E{)cB!n^ex#BtxA8Ud$U7_gW#f%%;J`(i`OiOsw9_>vuqE?=c zGu~?8QGyd<&@XO%E`&aBQvrGr6qK^E771EXYkA$i>F_k1uQ5xp*{QqJ)Vumwfb)|` zcMh$2^J)M1S9>L|sNX%$33;mQ=Ja;4EBnXj*_L$WY;xg^Ca!hs%&MLD)0c7!eM=o@ z@IOHE;2@5q%>%CRA2qpNlS$LyL~%c{N+s_kPJS>V3HVZk61!J7@UTwSp>xMvd!tIs zqnfY3eSG@iikPDcZ$y23stuEcgmyiuY2L@=yS&N0NWTKezYxRmwRQWfzRH?!-52`T zLxRVJKX+cmL%J59NP9mSI(C0Xb~*DRfR#G4^KmA|>OG4ms@laY%0*G3?Z9ypp(odo zNV{Qis{^E8zX?{qx{e&Ft5GU$k54jkFLP>K2Vw0N_QLN*Jm4WxNS^cs6JG%vhYg>3 z^={nZH)#Ig|Md%6zxA)V16((-G8N9bj&A!*f zRmKH0=2v$)+fILJD|x$Me#~x)NoZHQwy|0ElMszMt}{#uLnfu?0xZNH!-zWe&5@2h z-T3?+cr|Ph{FodY{|-x1S%7!U!Wb5cR%(VTo8Va8{jhz+u8sWH1;Oiw4|&u5yV;f_IY zq~<+HLX%tX@O#{$aZRm{ag)Mof8CE-%X!c|(+sCJ4QNbP2V4%@kfb|jIXO(0lVsoL zMa8)m%)*5%S9?|?9{bjM7sZo;2}mUF2CPydyc|MmRaPCr2KwNKD>tZi>D$!NTU^sN zXXnv-#3ZbouA?tJ9DJSsaLnpV3N?6h5+yIRDudr!ZcUb_BRUv5;y9=@=P!1D*lI3h zac_&lcfs4#6kZ0Gv&{BKFjMXlwt5Q|V3X~AzXJDnWo-+>!5?K5mUuSCh$rWE(Q9xIx%tXhI zjJ6%$h?DuBE6B`RBHBnp%Raw1FgW~?>ZNuu#Wf>3f3Gq4F9wLjEF5Z*Z#rn%BZ~_7 z|J(y+b^|Sh>-HRP7b{>}?q~QsrgZkpGH-rx_;K|G_`iJCR_HojzKuSb$(NTe{lF0zRsg_e|iHyrFk<=KC`C>|6i*i`OJnl=T+#& zuNN)Zym=tD?foV8IX`8%gP%SaPiYt1A@s6O{O{ci|FseCwc$Z}r_pXk;hVOX2F2$5 zv;se6*gDOvMw|Pv2&JjB7Gcq+xOyA=kAKlnuh8|4_R^U9)4#6mai6t!qvw2o$!)l9 zPlaOJt}XXldut4K8Yk?+Lorj6UjuCsk-b?Gy;fw-#f~+@?~-n~Ebxw{;^B90HE;oI zh=@c+TA{U`52EGIQ|<>+Oo>nB#>i!H{YkfwTX#e zeu$K9wtKJ} z7r>d+2)keJ+nkIz0i0=b@APZ<49>l{v~JaoKi)e}Ia%U?$#LW#8=+-?$}aq? z-v?GQ`1irIxW>e}Do$dB_+)gr`Ur~#wZS&yp~0O3cUg$6S< zWDsLcKnto&CKP!@r3xdGqZD^Mm?vUi3mtMXA~a*FJn}2-i|a$;5VDeIP0ZJyA5IgE zLB$qZiNZiRlf206SLsQ#U48H3iLTY2?OI158=6Sa;mDm! zpFg0^vjurWcihF9%20T4R6JVAi7C5rip{9IM6@v;#ka4%y^mIC1~1{EItuxs&wPoD zRc_an1^qBwDtFc-gp!&HT;Wf*m*_|^VPBoUDqMEnyhUF&>S zX? 
zM5PFBU7OQY%6lC3z47jLIBmYbUOdzrUG{ZwprA8;&RP&j)`CZypJXu#KPQrK&~b;) z4LquuZ-U)K$r-c`y4rG91F1GFi89n~wq~nj|M}L@`DZ!S8EMx^6W-%pK6begu&&<= z=_i-8k$O>cQ65PbRlVJb8QZ#&d7_TO>?lmhl$NTkXQqUoYOd_an@bEheP9BHkGw9eo&8DY1^JOh|Hx_v;pZE)T$+QTR+^%r)A|qv zDau!05Zq(pw?E0NX6Ie2U#6-DBB65gsml3%qhOrIGHGJu8Ez93<*~62dXXlM?7UPC zEft>kF@9T1eDiLD>S>b-^*fF7RohZZIt{nUWj~yO^Ds$@eQt5%ASw^Fq}5SHbeSddwo3jvNqB;+-pMU19Ad!6a6>KLr@?myepRq z%4BD5e}XMIdj}NC?&MI+5RJ$XeF63w+3|jp$*_<`Cl%pnl<@@Ey4qsH;T9DC>6hhHX`OB2s!TZlc~+NTis zJ(@q2MMbV6Ye08cLOj}11E+@Fae5lRf{xwh*e#SN9dp5MJINm0qW|~q-M$37?4@lJ4EU~!7h@VpTor}EWZ8C-F;2(B{*Kz7qX-?Ik@>7VDH&f5!53m zbutr&2-33&My6USyjSVV@#hxgrEZ}w9_iX4DO(W|Wt?5*WEC7|o;(xP( zns*3<)vfFDaV{isS2_|B9K6+d&y)4}xFUkT;DDAqEU9JNq(oiN*0@63?Bp`1SvuEodhUbXPNO#$FtZuM|HEh z(hWAabiV@M_(bh~)b4D*&rnP@nTysJW*cBT>_g4x>cjeXv0xvuk03c=Wg3b4k5^om zB%U*as^mR^r}6oMbJ#L$Kvr;<+KpJ*ugF*GQUxHy043KT>q28dgiMp)V@{W?V7 zU{w=c;xM&*(}mcY4tuMC6fN4#Bv$G4ZJ!=LAI@03L5?mvkt41TjtJqcOEizQ zUW>OtpRlG?V9stmWS2IH>1LUwzy5Nq-5^NM`$cq3X7tu-=YG^0U32X6zdf{PNRhkv zu6ML4FbG>}_3^qf&eh?P&xnn9VW%unLO zH*=*IjF$#y)I=X~8c5aXZ!EaNzaJ-^uP04dq$K;Rf8VwJ)59iZHo5KE*+k4Mr%39Yy#4MyMyA!NhGtpu(qe0 zymjcSL*T_XE(;r0Mz9GhhiO59yakTk-|xVeM#YqusX9@D>ZyFPel^u)Rw>ZZ%0;$o z%04oSmT%9Dn=cranh{4Qc$u?_M_y`Q%`m3Udd=60%S{qL&e!oa@2Wunk`(IE(2R zyb8+MIvz}xs5eRS&^$H=qcZ3*(XRI62(CO)jyZDYX;FMZJ~fw(8)K|4Vq`5p-uu|5 zn&PHciCri_#>9IdmCgEI>K4xIE2(j8{&8=r7B#q1;tc3G3xerQvPeA@uH}oueHjL} z7ARLI+%FP6hJ%6prKx-ULn|2@_Auo6b2b|qyRj`^{k&I4%BAM8<$7&8;SVbIqnNG( zJ~72yrPNgiVq*!A+yIq&SAVvruDO=7kY0sxvfANXunKRAefI8cy?3B>(A6Tel7%-p zA=0Y1#xNWg@16kb?3s$A%22eXLYL!VIAENT|ASX=Ix&~1m|UR@q^8Te&YBC!gm{sg z;phNI;XqKkm{;JUT3b#v$Zj|x)_DIyml$f1^`;vr+Wlyj21#)VxG3DUesv=wzmK1TBZ|EBQW-c(w40mg6ZW{<$4#~(EDi299b zz7P#%E4B;{zxBQIVZofqWWhf{KSt zQ-d9TbFUJO03)rcAA?Po7KqQ31t}79kRZb)_n-$0*|k~lcfTA-VgF_Jx{2F|n}(MT z#c$38PE1^V-^2S&HTiwKz~sh(|NKoD_6>;Yg|Wk^=JZVN;^7SX(DsgfQ{O#T{Pc-W z(weRIBSnSRt*BPWh#{-$wu#yO+N+S)_!zBq=+-$y`YG1D9y|l(FjY;kb@`mj=4m?f zH!Qc2_M`xwE%zfu(EC735bFTx&?g1NpiOo`?ifhfbsJ5yO|bX5BQZOI`LQXIdpS9Y z(-)JxEy81Y@RTWzny3B-=+S}=94l(Tx%G!Ua_ow3lIlvB`ndQ%0W1{)QYPjPgJ^^J zp-4*bb7A}@O@usS(d~S*Z(O?_GiH^&9fM!SXxTeA4BP-4n)3O|`R_Q-W!(4&iXqhu zrOd8Ilz%}{k;aS1GMD_IP^i;V%-0@(AF&zi-xs`v8mhWfvdPB&H({goJ?e+A2%28s z(wU5+Dv{`)7gIdjJ6_RO=>8TGPWmX|xN<@FL=ykn05rOU{Ov^z zICBV|Sngz-s~<+uqzNeFOe`=-9>vo4U!gHB7mJlIpg;>olS%&uo?~i=5oj1)0n!up zu9N*?s};6Nu#!^~TI6l- zJO}2w@Yz5)n~M2zssyWd3tII$HTQ#{I!&F|QMXx>*h#K6?MFwLMsXSt`|5)qs|NNk z@he}}t*Al+`}y4>G0$tfnd$g)tgqjJO!piA+zb1NcgG`;2B$2TgfabmP*!Fij=bP{aXYf`gGqRnX4nYa`8cq#JCy`6rzB7K;+*Ib@} zOY;t5oc;`{wev}UF44=ys1g~@c5^X{YBA-sR5XZBRsMQ?yJj!g;^JOmMaR%^Hr3=~ zY+;N9(Bl}}Mu43qgIReHqPxoqZK5lgZ^zcc1K-0}Yx0X5hKRQ(#%3U5_^I@N!iUEv zHy)V7Y{dr65G0(z^tZr_9uZ9qj&@@bve>Jw!l7*00kQn_Vn}u&8Ah#;y2|p@hH3Ko z>}9nj7AOm8oBR$Cr;GjpAS#RN)QQzA)9AcCf_jWcxjCtV?Ad+fq=1xDoY|$RsUMNA z9@+u>r&#yPrOdn;7Uq2nF(+$yVryhpCZ=(!8|k#5rS;YbP|mn|_pdHfxiGEnmPVAAY$=B?BoI( zK7eB*M_zY@k)voC^_A6iN3%XDs6>euXJv%u>zdTbE1%V^_CAr3MEud`z-D_-l>YWi zb~TJkbqUOE>~pvEh~+b!sYF-BLo8Q)pg2s|D2Wa&a#7XA^E|1!i;le@O3uwTPwyK^ zr(%rg14*>@gL!(^l0RVL7N!}P$izXq%oT>*66L9`+p9&TJQ>_ zt^zQ%xS&+%76#QaQO|%_ zR#12{U5ByZ%moa?DA-i&|E@fl7C1fS3(`30ep(AL>R}DkaMO`ksbivHv)ZE03>K3( zk8;qchaV^%{AjorkhOBO`qb=e6gv*$DC^xXuKg>|28UxkK5;&)-Yiz8W>HgN{gtwU z(=5U|!_3(Ey6MDjN9j66|E}lx)rnYmn<@0$*gK*vjZxiFh9}EXipMeYLAt?iy)|K+ zg3hGc$4`pGCQkfU#)fy<0@Sn8JW2^NmNG9gm^^q4_1@lRh$)V^ZTA_Yz)_hK`PRmP z1|qAdNqd&ho|X)^;T|URH8-vSydwf#z{g-FCAT8O(iMB39VhzSg#iNaM9+dc_W$;T zoTReM^wJ{vs?5ouzVfoC*3rCbU86ExnSDIomq0|rKEuzpSh-IOEuSsfBbAslav{zg zxVF7(kqzT6l-S~QJ_ChKMd%RO-XMNc+RhbQ!@Cb;1_b_oRbPiq-j`j=NsW 
z4-X5g$mT1n3%xUG61QA2*oI))=4Kv}4R}a;RP#0|!lU(7zrnIb6XVqI zc~|IhtRc|j%o&DQ%Qa9NnHkPcv{I1khyRx3Y=*USHh;ufN2jl+unVbvY}vr?Szk5J zB#Ba26-?OD!_BjxTpfXOPNa71R$hjK%d?Ok1sBx~_Y>Kumw6a?P;WVH9+1zm6Qkd; zz{E1@-Z#TpK8Tw>Oz2vT8b}z2vZrmX2zsl94epe@1$j?W`S+S=)GyFr!3}izU>OH6E#v*Ss&=Pk zA1$xnNsqXk-AQ(I z^)q1Y>bhgQ^XJO(q8A9I+B*ukb~9&U9P>!d1}VQ9v-8TcpHHoMLC?VnIlYv2z9a5u zm@E6wFlxsuj0%oII63LU;v=((wRtnV?Lt7bUQfWV)<)(zppkHotFR4j4d?zR1gQHU znOyIGsYO+J9C#T%htj0AB@f@^W@vqEoxYs2VO)?Trgq7q1vprnmc2>t<@Y(GXV4D( zls0#o5rSRETvAsw)(M|P@{pEqN^-kc$>Vi%r7n(rFaQ7m literal 0 HcmV?d00001 From 2797a94cf5b0210754edd3f829995656db9fe109 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 12:48:57 +0100 Subject: [PATCH 07/58] Improve contact point generation --- .../multi-region/active-active/setup-zeebe.py | 32 +++++++++++++------ .../active-active/teardown-zeebe.py | 4 +-- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/google/multi-region/active-active/setup-zeebe.py b/google/multi-region/active-active/setup-zeebe.py index 83c13545..2d59fe64 100755 --- a/google/multi-region/active-active/setup-zeebe.py +++ b/google/multi-region/active-active/setup-zeebe.py @@ -10,22 +10,32 @@ # Before running the script, fill in appropriate values for all the parameters # above the dashed line. -# Fill in the `contexts` map with the zones of your clusters and their +# Fill in the `contexts` map with the regions of your clusters and their # corresponding kubectl context names. # # To get the names of your kubectl "contexts" for each of your clusters, run: # kubectl config get-contexts # +# format: +# contexts = { +# region_name: context_name, +# } +# # example: # contexts = { -# 'europe-west4-b': 'gke_camunda-researchanddevelopment_europe-west4-b_cdame-region-0', -# 'us-east1-b': 'gke_camunda-researchanddevelopment_us-east1-b_cdame-region-1', +# 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0', +# 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1', # } +# TODO generate kubectl contexts via make using pattern: gke_$(project)_$(region)_$(clusterName) contexts = { - 'europe-west4-b': 'gke_camunda-researchanddevelopment_europe-west4-b_cdame-region-0', - 'us-east1-b': 'gke_camunda-researchanddevelopment_us-east1-b_cdame-region-1', + 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0', + 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1', } +# Fill in the number of Zeebe brokers per region, +# i.e. clusterSize/regions as defined in camunda-values.yaml +number_of_zeebe_brokers_per_region = 4 + # Path to directory generated YAML files. generated_files_dir = './generated' @@ -35,7 +45,6 @@ if len(contexts) == 0: exit("must provide at least one Kubernetes cluster in the `contexts` map at the top of the script") - for zone, context in contexts.items(): try: check_call(['kubectl', 'get', 'pods', '--context', context]) @@ -98,7 +107,12 @@ # Generate ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS join_addrs = [] for zone in contexts: - for i in range(3): - join_addrs.append('camunda-zeebe-%d.camunda-zeebe.%s' % (i, zone)) + for i in range(number_of_zeebe_brokers_per_region): + join_addrs.append('camunda-zeebe-%d.camunda-zeebe.%s.svc.cluster.local:26502' % (i, zone)) join_str = ','.join(join_addrs) -print(join_str) \ No newline at end of file +print(join_str) + +# Generate ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL e.g. 
+elastic_urls = {}
+for zone, context in contexts.items():
+    print('http://elasticsearch-master-headless.%s.svc.cluster.local:9200' % (zone))
diff --git a/google/multi-region/active-active/teardown-zeebe.py b/google/multi-region/active-active/teardown-zeebe.py
index 2b676959..e4f53b02 100755
--- a/google/multi-region/active-active/teardown-zeebe.py
+++ b/google/multi-region/active-active/teardown-zeebe.py
@@ -10,8 +10,8 @@
 # To get the names of your kubectl "contexts" for each of your clusters, run:
 #   kubectl config get-contexts
 contexts = {
-    'europe-west4-b': 'gke_camunda-researchanddevelopment_europe-west4-b_cdame-region-0',
-    'us-east1-b': 'gke_camunda-researchanddevelopment_us-east1-b_cdame-region-1',
+    'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
+    'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
 }
 
 certs_dir = './certs'

From 8a988bb4544c751eae380d58b9942b2a259c92ed Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Sat, 20 Jan 2024 12:49:39 +0100
Subject: [PATCH 08/58] Remove generated configmaps

---
 .../generated/dns-configmap-europe-west1.yaml           | 8 --------
 .../active-active/generated/dns-configmap-us-east1.yaml | 8 --------
 2 files changed, 16 deletions(-)
 delete mode 100644 google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
 delete mode 100644 google/multi-region/active-active/generated/dns-configmap-us-east1.yaml

diff --git a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
deleted file mode 100644
index 055bddd7..00000000
--- a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kube-dns
-  namespace: kube-system
-data:
-  stubDomains: |
-    {"us-east1.svc.cluster.local": ["10.142.0.113"], "us-east1-failover.svc.cluster.local": ["10.142.0.113"]}
diff --git a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml
deleted file mode 100644
index ec18a7ab..00000000
--- a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kube-dns
-  namespace: kube-system
-data:
-  stubDomains: |
-    {"europe-west1.svc.cluster.local": ["10.132.0.112"], "europe-west1-failover.svc.cluster.local": ["10.132.0.112"]}

From ec1fb19eaf85531e6e6cc6fc955caf03564af57c Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Sat, 20 Jan 2024 12:52:49 +0100
Subject: [PATCH 09/58] Sync and update configs

---
 .../active-active/region0/Makefile            |  2 +-
 .../active-active/region0/camunda-values.yaml |  6 ++--
 .../active-active/region1/Makefile            |  2 +-
 .../active-active/region1/camunda-values.yaml | 33 ++++++-------------
 4 files changed, 14 insertions(+), 29 deletions(-)

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index 2877aaef..ab9d733b 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -99,7 +99,7 @@ prepare-elastic-backup-key:
 
 .PHONY: prepare-elastic-backup-repo
 prepare-elastic-backup-repo:
-	kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type:
application/json' -d'{"type": "gcs","settings":{"bucket": "cdame-elasticsearch-backup", "base_path": "backups"}}' + kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}' .PHONY: operate-snapshot operate-snapshot: diff --git a/google/multi-region/active-active/region0/camunda-values.yaml b/google/multi-region/active-active/region0/camunda-values.yaml index f1d9978b..18d0ed96 100644 --- a/google/multi-region/active-active/region0/camunda-values.yaml +++ b/google/multi-region/active-active/region0/camunda-values.yaml @@ -16,8 +16,6 @@ global: regions: 2 # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. regionId: 0 - image: - tag: latest identity: auth: # Disable the Identity authentication @@ -68,11 +66,11 @@ zeebe: - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK value: "0.87" - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - value: "camunda-zeebe-0.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-1.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-2.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-3.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-0.camunda-zeebe.us-east1-b.svc.cluster.local:26502, camunda-zeebe-1.camunda-zeebe.us-east1-b.svc.cluster.local:26502, camunda-zeebe-2.camunda-zeebe.us-east1-b.svc.cluster.local:26502, camunda-zeebe-3.camunda-zeebe.us-east1-b.svc.cluster.local:26502" + value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME value: "io.camunda.zeebe.exporter.ElasticsearchExporter" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL - value: "http://elasticsearch-master-headless.us-east1-b.svc.cluster.local:9200" + value: "http://elasticsearch-master-headless.europe-west1.svc.cluster.local:9200" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE value: "1" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile index 3d2e8c6f..6f46ea5e 100644 --- a/google/multi-region/active-active/region1/Makefile +++ b/google/multi-region/active-active/region1/Makefile @@ -99,7 +99,7 @@ prepare-elastic-backup-key: .PHONY: prepare-elastic-backup-repo prepare-elastic-backup-repo: - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "cdame-elasticsearch-backup", "base_path": "backups"}}' + kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", 
"base_path": "backups"}}' .PHONY: operate-snapshot operate-snapshot: diff --git a/google/multi-region/active-active/region1/camunda-values.yaml b/google/multi-region/active-active/region1/camunda-values.yaml index c25615f6..dbb6f97b 100644 --- a/google/multi-region/active-active/region1/camunda-values.yaml +++ b/google/multi-region/active-active/region1/camunda-values.yaml @@ -16,29 +16,27 @@ global: regions: 2 # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. regionId: 1 - image: - tag: latest identity: auth: # Disable the Identity authentication # it will fall back to basic-auth: demo/demo as default user enabled: false -identity: - enabled: false - -optimize: - enabled: false - operate: env: - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME - value: camunda_backup + value: "camunda_backup" tasklist: env: - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME - value: camunda_backup + value: "camunda_backup" +identity: + enabled: false + +optimize: + enabled: false + connectors: enabled: true inbound: @@ -68,11 +66,11 @@ zeebe: - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK value: "0.87" - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - value: "camunda-zeebe-0.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-1.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-2.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-3.camunda-zeebe.europe-west4-b.svc.cluster.local:26502, camunda-zeebe-0.camunda-zeebe.us-east1-b.svc.cluster.local:26502, camunda-zeebe-1.camunda-zeebe.us-east1-b.svc.cluster.local:26502, camunda-zeebe-2.camunda-zeebe.us-east1-b.svc.cluster.local:26502, camunda-zeebe-3.camunda-zeebe.us-east1-b.svc.cluster.local:26502" + value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME value: "io.camunda.zeebe.exporter.ElasticsearchExporter" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL - value: "http://elasticsearch-master-headless.europe-west4-b.svc.cluster.local:9200" + value: "http://elasticsearch-master-headless.us-east1.svc.cluster.local:9200" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE value: "1" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX @@ -145,17 +143,6 @@ elasticsearch: # Allow no backup for single node setups clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "2Gi" - - # Allow no backup for single node setups - clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" - resources: requests: cpu: "100m" From 18b6680e5827dd1126b9c658a52c45422db70f42 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 13:03:33 +0100 Subject: [PATCH 10/58] Generate context entry with `make` --- google/multi-region/active-active/region0/Makefile | 2 ++ google/multi-region/active-active/region1/Makefile | 2 ++ 2 files changed, 4 insertions(+) diff --git a/google/multi-region/active-active/region0/Makefile 
b/google/multi-region/active-active/region0/Makefile index ab9d733b..1b8777e9 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -36,6 +36,8 @@ all: use-kube namespace prepare-elastic-backup-key camunda external-urls .PHONY: kube # Create Kubernetes cluster. (No aplication gateway required) kube: kube-gke + @echo "Please add the following line to the list of contexts in setup-zeebe.py & teardown-zeebe.py:" + @echo " '$(region)': 'gke_$(project)_$(region)_$(clusterName)'," .PHONY: external-urls # Show external URLs external-urls: external-urls-no-ingress diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile index 6f46ea5e..cfb4e564 100644 --- a/google/multi-region/active-active/region1/Makefile +++ b/google/multi-region/active-active/region1/Makefile @@ -36,6 +36,8 @@ all: use-kube namespace prepare-elastic-backup-key camunda external-urls .PHONY: kube # Create Kubernetes cluster. (No aplication gateway required) kube: kube-gke + @echo "Please add the following line to the list of contexts in setup-zeebe.py & teardown-zeebe.py:" + @echo " '$(region)': 'gke_$(project)_$(region)_$(clusterName)'," .PHONY: external-urls # Show external URLs external-urls: external-urls-no-ingress From cf46e4ed3002a63274096f34dd4016bb46d1acd8 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 13:10:53 +0100 Subject: [PATCH 11/58] Replace zone with region --- .../multi-region/active-active/setup-zeebe.py | 36 +++++++++---------- .../active-active/teardown-zeebe.py | 6 ++-- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/google/multi-region/active-active/setup-zeebe.py b/google/multi-region/active-active/setup-zeebe.py index 2d59fe64..12b5d36a 100755 --- a/google/multi-region/active-active/setup-zeebe.py +++ b/google/multi-region/active-active/setup-zeebe.py @@ -45,11 +45,11 @@ if len(contexts) == 0: exit("must provide at least one Kubernetes cluster in the `contexts` map at the top of the script") -for zone, context in contexts.items(): +for region, context in contexts.items(): try: check_call(['kubectl', 'get', 'pods', '--context', context]) except: - exit("unable to make basic API call using kubectl context '%s' for cluster in zone '%s'; please check if the context is correct and your Kubernetes cluster is working" % (context, zone)) + exit("unable to make basic API call using kubectl context '%s' for cluster in region '%s'; please check if the context is correct and your Kubernetes cluster is working" % (context, region)) # Set up the necessary directory. Ignore errors because they may already exist. try: @@ -59,37 +59,37 @@ # For each cluster, create a load balancer to its DNS pod. -for zone, context in contexts.items(): +for region, context in contexts.items(): check_call(['kubectl', 'apply', '-f', 'dns-lb.yaml', '--context', context]) -# Set up each cluster to forward DNS requests for zone-scoped namespaces to the +# Set up each cluster to forward DNS requests for region-scoped namespaces to the # relevant cluster's DNS server, using load balancers in order to create a # static IP for each cluster's DNS endpoint. 
dns_ips = dict() -for zone, context in contexts.items(): +for region, context in contexts.items(): external_ip = '' while True: external_ip = check_output(['kubectl', 'get', 'svc', 'kube-dns-lb', '--namespace', 'kube-system', '--context', context, '--template', '{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}']).decode("UTF-8") if external_ip: break - print('Waiting for DNS load balancer IP in %s...' % (zone)) + print('Waiting for DNS load balancer IP in %s...' % (region)) sleep(10) - print ('DNS endpoint for zone %s: %s' % (zone, external_ip)) - dns_ips[zone] = external_ip + print ('DNS endpoint for region %s: %s' % (region, external_ip)) + dns_ips[region] = external_ip # Update each cluster's DNS configuration with an appropriate configmap. Note # that we have to leave the local cluster out of its own configmap to avoid # infinite recursion through the load balancer IP. We then have to delete the # existing DNS pods in order for the new configuration to take effect. -for zone, context in contexts.items(): +for region, context in contexts.items(): remote_dns_ips = dict() - for z, ip in dns_ips.items(): - if z == zone: + for r, ip in dns_ips.items(): + if r == region: continue - remote_dns_ips[z+'.svc.cluster.local'] = [ip] - remote_dns_ips[z+'-failover.svc.cluster.local'] = [ip] + remote_dns_ips[r+'.svc.cluster.local'] = [ip] + remote_dns_ips[r+'-failover.svc.cluster.local'] = [ip] print(remote_dns_ips) - config_filename = '%s/dns-configmap-%s.yaml' % (generated_files_dir, zone) + config_filename = '%s/dns-configmap-%s.yaml' % (generated_files_dir, region) with open(config_filename, 'w') as f: f.write("""\ apiVersion: v1 @@ -106,13 +106,13 @@ # Generate ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS join_addrs = [] -for zone in contexts: +for region in contexts: for i in range(number_of_zeebe_brokers_per_region): - join_addrs.append('camunda-zeebe-%d.camunda-zeebe.%s.svc.cluster.local:26502' % (i, zone)) + join_addrs.append('camunda-zeebe-%d.camunda-zeebe.%s.svc.cluster.local:26502' % (i, region)) join_str = ','.join(join_addrs) print(join_str) # Generate ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL e.g. 
http://elasticsearch-master-headless.us-east1.svc.cluster.local:9200
 elastic_urls = {}
-for zone, context in contexts.items():
-    print('http://elasticsearch-master-headless.%s.svc.cluster.local:9200' % (zone))
+for region, context in contexts.items():
+    print('http://elasticsearch-master-headless.%s.svc.cluster.local:9200' % (region))
diff --git a/google/multi-region/active-active/teardown-zeebe.py b/google/multi-region/active-active/teardown-zeebe.py
index e4f53b02..0d2d09d2 100755
--- a/google/multi-region/active-active/teardown-zeebe.py
+++ b/google/multi-region/active-active/teardown-zeebe.py
@@ -20,11 +20,11 @@

 # ------------------------------------------------------------------------------

-# Delete each cluster's special zone-scoped namespace, which transitively
+# Delete each cluster's special region-scoped namespace, which transitively
 # deletes all resources that were created in the namespace, along with the few
 # other resources we created that weren't in that namespace
-for zone, context in contexts.items():
-    call(['kubectl', 'delete', 'namespace', zone, '--context', context])
+for region, context in contexts.items():
+    call(['kubectl', 'delete', 'namespace', region, '--context', context])
     # call(['kubectl', 'delete', 'secret', 'cockroachdb.client.root', '--context', context])
     # call(['kubectl', 'delete', '-f', 'external-name-svc.yaml', '--context', context])
     call(['kubectl', 'delete', '-f', 'dns-lb.yaml', '--context', context])

From 62622e576a600c519c1652a2ecc4c54a9edb491a Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Sat, 20 Jan 2024 13:14:56 +0100
Subject: [PATCH 12/58] Rename Python scripts for DNS Chaining

---
 google/multi-region/active-active/README.md | 18 +++++++++---------
 .../active-active/region0/Makefile | 2 +-
 .../active-active/region1/Makefile | 2 +-
 .../{setup-zeebe.py => setup-dns-chaining.py} | 0
 ...rdown-zeebe.py => teardown-dns-chaining.py} | 0
 5 files changed, 11 insertions(+), 11 deletions(-)
 rename google/multi-region/active-active/{setup-zeebe.py => setup-dns-chaining.py} (100%)
 rename google/multi-region/active-active/{teardown-zeebe.py => teardown-dns-chaining.py} (100%)

diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md
index c1114333..2e2c6f9c 100644
--- a/google/multi-region/active-active/README.md
+++ b/google/multi-region/active-active/README.md
@@ -15,7 +15,7 @@ Camunda reserves the right to restrict support if no review was performed prior
 ## Prerequisite: Kubernetes Cross-Cluster Communication

 A multi-region setup in Kubernetes really means a multi-cluster setup, and that comes with a networking challenge: how do you manage connectivity between pods across different Kubernetes clusters? You should set up proper firewall rules and correctly route traffic among the pods. For that, you have many options (in no particular order):
-* ["DNS Chaining" with kube-dns](https://youtu.be/az4BvMfYnLY?si=RmauCqchHwsmCDZZ&t=2004): That's the option we took in this example. 
We set up kube-dns automatically through a [python script](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/main/google/multi-region/active-active/setup-dns-chaining.py) to route traffic to the distant cluster based on the namespace. This requires having different namespaces in each cluster.
* [Istio](https://medium.com/@danielepolencic/scaling-kubernetes-to-multiple-clusters-and-regionss-491813c3c8cd) ([video](https://youtu.be/_8FNsvoECPU?si=dUOFwaaUxRroj8MP))
* [Skupper](https://medium.com/@shailendra14k/deploy-the-skupper-networks-89800323925c#:~:text=Skupper%20creates%20a%20service%20network,secure%20communication%20across%20Kubernetes%20clusters.)
* [Linkerd multi-cluster communication](https://linkerd.io/2.14/features/multicluster/)
@@ -37,7 +37,7 @@ We are basing our dual-region active-active setup on standard Kubernetes feature

 You should clone this repository locally.

-The installation configurations are available at the beginning of these makefiles (clustername, region, project, machine type, etc.). For this example, we decided to name our namespaces after our regions for easier readability. You may want to change this. In such a case, and if you want to use setup-zeebe.py to configure kube-dns, this script should be updated accordingly.
+The installation configurations are available at the beginning of these makefiles (clustername, region, project, machine type, etc.). For this example, we decided to name our namespaces after our regions for easier readability. You may want to change this. In such a case, and if you want to use setup-dns-chaining.py to configure kube-dns, this script should be updated accordingly.

 #### Prepare Kubernetes Clusters

@@ -170,7 +170,7 @@ $ cd ..
 #### Configure Kube-dns

 Note: this step should not be executed if you plan to use another solution for cross-cluster communication.
-Edit the Python script [setup-zeebe.py](./setup-zeebe.py)
+Edit the Python script [setup-dns-chaining.py](./setup-dns-chaining.py)
 and adjust the list of `contexts` and the `number_of_zeebe_brokers_per_region`.

 To get the names of your kubectl "contexts" for each of your clusters, run:
@@ -193,14 +193,14 @@ Then run that script to adjust the DNS configuration of both
 Kubernetes clusters so that they can resolve each other's service names.

 ```sh
-./setup-zeebe.py
+./setup-dns-chaining.py
 ```

<details>
  <summary>
Example Command Output ```sh -$ ./setup-zeebe.py +$ ./setup-dns-chaining.py No resources found in default namespace. No resources found in default namespace. service/kube-dns-lb created @@ -225,17 +225,17 @@ camunda-zeebe-0.camunda-zeebe.us-east1,camunda-zeebe-1.camunda-zeebe.us-east1,ca For troubleshooting, you can test the DNS connection as described in the [Kubernetes Documentation on Debugging DNS Resolution](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/) (you could also build [your own](https://github.com/wkruse/dnsutils-docker) [dnsutils image](https://github.com/docker-archive/dnsutils) if you can't pull one). -To roll back the changes made by the Python script [setup-zeebe.py](./setup-zeebe.py), you can adjust and run [teardown-zeebe.py](./teardown-zeebe.py): +To roll back the changes made by the Python script [setup-dns-chaining.py](./setup-dns-chaining.py), you can adjust and run [teardown-dns-chaining.py](./teardown-dns-chaining.py): ```sh -./teardown-zeebe.py +./teardown-dns-chaining.py ```
Example Command Output ```sh -$ ./teardown-zeebe.py +$ ./teardown-dns-chaining.py namespace "us-east1" deleted service "kube-dns-lb" deleted configmap "kube-dns" deleted @@ -272,7 +272,7 @@ Download the JSON API key and save it in each region as `gcs_backup_key.json` #### Installing Camunda -Adjust `ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS` and `ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL` in [region0/camunda-values.yaml](region0/camunda-values.yaml) and [region1/camunda-values.yaml](region1/camunda-values.yaml) with the values printed by the Python script [setup-zeebe.py](./setup-zeebe.py), e.g. +Adjust `ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS` and `ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL` in [region0/camunda-values.yaml](region0/camunda-values.yaml) and [region1/camunda-values.yaml](region1/camunda-values.yaml) with the values printed by the Python script [setup-dns-chaining.py](./setup-dns-chaining.py), e.g. ```yaml zeebe: diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index 1b8777e9..3206a13f 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -36,7 +36,7 @@ all: use-kube namespace prepare-elastic-backup-key camunda external-urls .PHONY: kube # Create Kubernetes cluster. (No aplication gateway required) kube: kube-gke - @echo "Please add the following line to the list of contexts in setup-zeebe.py & teardown-zeebe.py:" + @echo "Please add the following line to the list of contexts in setup-dns-chaining.py & teardown-dns-chaining.py:" @echo " '$(region)': 'gke_$(project)_$(region)_$(clusterName)'," .PHONY: external-urls # Show external URLs diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile index cfb4e564..36ae3f14 100644 --- a/google/multi-region/active-active/region1/Makefile +++ b/google/multi-region/active-active/region1/Makefile @@ -36,7 +36,7 @@ all: use-kube namespace prepare-elastic-backup-key camunda external-urls .PHONY: kube # Create Kubernetes cluster. 
(No aplication gateway required) kube: kube-gke - @echo "Please add the following line to the list of contexts in setup-zeebe.py & teardown-zeebe.py:" + @echo "Please add the following line to the list of contexts in setup-dns-chaining.py & teardown-dns-chaining.py:" @echo " '$(region)': 'gke_$(project)_$(region)_$(clusterName)'," .PHONY: external-urls # Show external URLs diff --git a/google/multi-region/active-active/setup-zeebe.py b/google/multi-region/active-active/setup-dns-chaining.py similarity index 100% rename from google/multi-region/active-active/setup-zeebe.py rename to google/multi-region/active-active/setup-dns-chaining.py diff --git a/google/multi-region/active-active/teardown-zeebe.py b/google/multi-region/active-active/teardown-dns-chaining.py similarity index 100% rename from google/multi-region/active-active/teardown-zeebe.py rename to google/multi-region/active-active/teardown-dns-chaining.py From 2b28d582a70132180d21c53d4826e008f11eb49a Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 13:37:17 +0100 Subject: [PATCH 13/58] Temporarily switch back to public DNS LB --- google/multi-region/active-active/dns-lb.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/multi-region/active-active/dns-lb.yaml b/google/multi-region/active-active/dns-lb.yaml index 8e56fc90..82754da2 100644 --- a/google/multi-region/active-active/dns-lb.yaml +++ b/google/multi-region/active-active/dns-lb.yaml @@ -9,7 +9,7 @@ metadata: # see: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer service.beta.kubernetes.io/aws-load-balancer-internal: "true" service.beta.kubernetes.io/azure-load-balancer-internal: "true" - networking.gke.io/load-balancer-type: "Internal" + # FIXME: find firewall configuration for using: networking.gke.io/load-balancer-type: "Internal" spec: type: LoadBalancer sessionAffinity: None From 89d88d89584dc377357a9bec8fa17deada026e58 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sat, 20 Jan 2024 14:30:45 +0100 Subject: [PATCH 14/58] Update sample output --- google/multi-region/active-active/README.md | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index 2e2c6f9c..f3f50cfa 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -367,18 +367,18 @@ WARNING: open /home/falko/.cache/helm/repository/prometheus-community-index.yaml WARNING: Repo "stable" is corrupt or missing. Try 'helm repo update'. WARNING: open /home/falko/.cache/helm/repository/stable-index.yaml: no such file or directory NAME CHART VERSION APP VERSION DESCRIPTION -camunda/camunda-platform 8.3.4 8.3.x Camunda 8 Self-Managed Helm charts. Camunda's p... +camunda/camunda-platform 9.0.2 8.4.x Camunda 8 Self-Managed Helm charts. Camunda's p... 
helm install --namespace us-east1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds -W1213 13:36:32.695588 47912 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" -W1213 13:36:32.695633 47912 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" -W1213 13:36:32.903716 47912 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" +W0120 13:30:40.390359 25577 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" +W0120 13:30:40.390415 25577 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" +W0120 13:30:40.554727 25577 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" NAME: camunda -LAST DEPLOYED: Wed Dec 13 13:36:24 2023 +LAST DEPLOYED: Sat Jan 20 13:30:31 2024 NAMESPACE: us-east1 STATUS: deployed REVISION: 1 NOTES: -# (camunda-platform - 8.3.4) +# (camunda-platform - 9.0.2) ###### ### ## ## ## ## ## ## ######## ### ## ## ## ## ### ### ## ## ### ## ## ## ## ## @@ -394,20 +394,20 @@ NOTES: - Zeebe: - Enabled: true - - Docker Image used for Zeebe: camunda/zeebe:8.3.4 + - Docker Image used for Zeebe: camunda/zeebe:8.4.0 - Zeebe Cluster Name: "camunda-zeebe" - Prometheus ServiceMonitor Enabled: false - Operate: - Enabled: true - - Docker Image used for Operate: camunda/operate:8.3.4 + - Docker Image used for Operate: camunda/operate:8.4.0 - Tasklist: - Enabled: true - - Docker Image used for Tasklist: camunda/tasklist:8.3.4 + - Docker Image used for Tasklist: camunda/tasklist:8.4.0 - Optimize: - Enabled: false - Connectors: - Enabled: true - - Docker Image used for Connectors: camunda/connectors-bundle:8.3.2 + - Docker Image used for Connectors: camunda/connectors-bundle:8.4.3 - Identity: - Enabled: false - Web Modeler: @@ -449,7 +449,7 @@ Default user and password: "demo/demo" ## Console config - name: camunda namespace: us-east1 - version: 8.3.4 + version: 9.0.2 components: @@ -496,18 +496,18 @@ WARNING: open /home/falko/.cache/helm/repository/prometheus-community-index.yaml WARNING: Repo "stable" is corrupt or missing. Try 'helm repo update'. WARNING: open /home/falko/.cache/helm/repository/stable-index.yaml: no such file or directory NAME CHART VERSION APP VERSION DESCRIPTION -camunda/camunda-platform 8.3.4 8.3.x Camunda 8 Self-Managed Helm charts. Camunda's p... +camunda/camunda-platform 9.0.2 8.4.x Camunda 8 Self-Managed Helm charts. Camunda's p... 
helm install --namespace europe-west1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds -W1213 13:41:59.312024 49320 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" -W1213 13:41:59.312065 49320 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" -W1213 13:41:59.428526 49320 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" +W0120 13:31:25.870663 26373 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" +W0120 13:31:25.870683 26373 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" +W0120 13:31:25.963735 26373 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" NAME: camunda -LAST DEPLOYED: Wed Dec 13 13:41:54 2023 +LAST DEPLOYED: Sat Jan 20 13:31:20 2024 NAMESPACE: europe-west1 STATUS: deployed REVISION: 1 NOTES: -# (camunda-platform - 8.3.4) +# (camunda-platform - 9.0.2) ###### ### ## ## ## ## ## ## ######## ### ## ## ## ## ### ### ## ## ### ## ## ## ## ## @@ -523,20 +523,20 @@ NOTES: - Zeebe: - Enabled: true - - Docker Image used for Zeebe: camunda/zeebe:8.3.4 + - Docker Image used for Zeebe: camunda/zeebe:8.4.0 - Zeebe Cluster Name: "camunda-zeebe" - Prometheus ServiceMonitor Enabled: false - Operate: - Enabled: true - - Docker Image used for Operate: camunda/operate:8.3.4 + - Docker Image used for Operate: camunda/operate:8.4.0 - Tasklist: - Enabled: true - - Docker Image used for Tasklist: camunda/tasklist:8.3.4 + - Docker Image used for Tasklist: camunda/tasklist:8.4.0 - Optimize: - Enabled: false - Connectors: - Enabled: true - - Docker Image used for Connectors: camunda/connectors-bundle:8.3.2 + - Docker Image used for Connectors: camunda/connectors-bundle:8.4.3 - Identity: - Enabled: false - Web Modeler: @@ -578,7 +578,7 @@ Default user and password: "demo/demo" ## Console config - name: camunda namespace: europe-west1 - version: 8.3.4 + version: 9.0.2 components: @@ -621,52 +621,52 @@ in which they are hosted.): Cluster size: 8 Partitions count: 8 Replication factor: 4 -Gateway version: 8.2.8 +Gateway version: 8.4.0 Brokers: Broker 0 - camunda-zeebe-0.camunda-zeebe.us-east1.svc:26501 - Version: 8.2.8 + Version: 8.4.0 Partition 1 : Leader, Healthy - Partition 6 : Leader, Healthy + Partition 6 : Follower, Healthy Partition 7 : Leader, Healthy Partition 8 : Leader, Healthy Broker 1 - camunda-zeebe-0.camunda-zeebe.europe-west1.svc:26501 - Version: 8.2.8 + Version: 8.4.0 Partition 1 : Follower, Healthy Partition 2 : Follower, Healthy Partition 7 : Follower, Healthy Partition 8 : Follower, Healthy Broker 2 - camunda-zeebe-1.camunda-zeebe.us-east1.svc:26501 - Version: 8.2.8 + Version: 8.4.0 Partition 1 : Follower, Healthy Partition 2 : Leader, Healthy Partition 3 : Leader, Healthy Partition 8 : Follower, Healthy Broker 3 - camunda-zeebe-1.camunda-zeebe.europe-west1.svc:26501 - Version: 8.2.8 + Version: 8.4.0 Partition 1 : Follower, Healthy Partition 2 : Follower, Healthy Partition 3 : Follower, Healthy - Partition 4 : Follower, Healthy + Partition 4 : Leader, Healthy Broker 4 - camunda-zeebe-2.camunda-zeebe.us-east1.svc:26501 - Version: 8.2.8 + Version: 8.4.0 Partition 2 : Follower, Healthy Partition 3 : Follower, Healthy - Partition 4 : 
Leader, Healthy + Partition 4 : Follower, Healthy Partition 5 : Leader, Healthy - Broker 5 - camunda-zeebe-1.camunda-zeebe.europe-west1.svc:26501 - Version: 8.2.8 + Broker 5 - camunda-zeebe-2.camunda-zeebe.europe-west1.svc:26501 + Version: 8.4.0 Partition 3 : Follower, Healthy Partition 4 : Follower, Healthy Partition 5 : Follower, Healthy - Partition 6 : Follower, Healthy + Partition 6 : Leader, Healthy Broker 6 - camunda-zeebe-3.camunda-zeebe.us-east1.svc:26501 - Version: 8.2.8 + Version: 8.4.0 Partition 4 : Follower, Healthy Partition 5 : Follower, Healthy Partition 6 : Follower, Healthy Partition 7 : Follower, Healthy Broker 7 - camunda-zeebe-3.camunda-zeebe.europe-west1.svc:26501 - Version: 8.2.8 + Version: 8.4.0 Partition 5 : Follower, Healthy Partition 6 : Follower, Healthy Partition 7 : Follower, Healthy From 6448fa402a185e27f0a97b7b3b331f21eb16d8f6 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 11:23:00 +0100 Subject: [PATCH 15/58] Add link to skupper.io --- google/multi-region/active-active/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index f3f50cfa..2c2c2a85 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -17,7 +17,7 @@ Camunda reserves the right to restrict support if no review was performed prior A multi-region setup in Kubernetes really means a multi-cluster setup and that comes with a networking challenge: How to manage connectivity between my pods across different Kubernetes clusters? You should setup proper firewall rules and correctly route traffic among the pods. For that you have many options (in nor particular order): * ["DNS Chainging" with kube-dns](https://youtu.be/az4BvMfYnLY?si=RmauCqchHwsmCDZZ&t=2004): That's the option we took in this example. We setup kube-dns automatically through a [python script](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/main/google/multi-region/active-active/setup-dns-chaining.py) to route traffic to the distant cluster based on the namespace. This requires to have different namespaces in each cluster. * [Istio](https://medium.com/@danielepolencic/scaling-kubernetes-to-multiple-clusters-and-regionss-491813c3c8cd) ([video](https://youtu.be/_8FNsvoECPU?si=dUOFwaaUxRroj8MP)) -* [Skupper](https://medium.com/@shailendra14k/deploy-the-skupper-networks-89800323925c#:~:text=Skupper%20creates%20a%20service%20network,secure%20communication%20across%20Kubernetes%20clusters.) +* [Skupper](https://medium.com/@shailendra14k/deploy-the-skupper-networks-89800323925c#:~:text=Skupper%20creates%20a%20service%20network,secure%20communication%20across%20Kubernetes%20clusters.) 
([skupper.io](https://skupper.io/)) * [Linkerd multi-cluster communication](https://linkerd.io/2.14/features/multicluster/) * [Submariner](https://submariner.io/) * [KubeStellar](https://kubestellar.io) From d0ac24feb75cf20072fe786cee5b70f59fb27621 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 15:30:11 +0100 Subject: [PATCH 16/58] Add example DNS configmaps --- .../generated/dns-configmap-europe-west1.yaml | 8 ++++++++ .../active-active/generated/dns-configmap-us-east1.yaml | 8 ++++++++ 2 files changed, 16 insertions(+) create mode 100644 google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml create mode 100644 google/multi-region/active-active/generated/dns-configmap-us-east1.yaml diff --git a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml new file mode 100644 index 00000000..bef7e091 --- /dev/null +++ b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system +data: + stubDomains: | + {"us-east1.svc.cluster.local": ["35.243.201.145"], "us-east1-failover.svc.cluster.local": ["35.243.201.145"]} diff --git a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml new file mode 100644 index 00000000..033a6936 --- /dev/null +++ b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system +data: + stubDomains: | + {"europe-west1.svc.cluster.local": ["8.34.209.179"], "europe-west1-failover.svc.cluster.local": ["8.34.209.179"]} From d309c97ee3736d4481bc60f31c4256b3d474470f Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 15:30:17 +0100 Subject: [PATCH 17/58] Add manual commands and example output for failover --- google/multi-region/active-active/README.md | 169 +++++++++++++++++++- 1 file changed, 165 insertions(+), 4 deletions(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index 2c2c2a85..2bf87715 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -33,7 +33,7 @@ We are basing our dual-region active-active setup on standard Kubernetes feature ### Initial Setup -### Prepare installation +#### Prepare installation You should clone this repository locally. @@ -703,9 +703,45 @@ make elastic-nodes ### Disaster -In case of disaster, if a region is lost, attempting to start a process instance would lead to an exception: +If a region is lost, attempting to start a process instance would lead to an exception: +``` io.grpc.StatusRuntimeException: RESOURCE_EXHAUSTED: Expected to execute the command on one of the partitions, but all failed; there are no more partitions available to retry. Please try again. 
If the error persists contact your zeebe operator
```

`zbctl` would show only the local half of the brokers and no partition would have a leader:

```
Cluster size: 8
Partitions count: 8
Replication factor: 4
Gateway version: 8.4.0
Brokers:
  Broker 0 - camunda-zeebe-0.camunda-zeebe.us-east1.svc:26501
    Version: 8.4.0
    Partition 1 : Follower, Healthy
    Partition 6 : Follower, Healthy
    Partition 7 : Follower, Healthy
    Partition 8 : Follower, Healthy
  Broker 2 - camunda-zeebe-1.camunda-zeebe.us-east1.svc:26501
    Version: 8.4.0
    Partition 1 : Follower, Healthy
    Partition 2 : Follower, Healthy
    Partition 3 : Follower, Healthy
    Partition 8 : Follower, Healthy
  Broker 4 - camunda-zeebe-2.camunda-zeebe.us-east1.svc:26501
    Version: 8.4.0
    Partition 2 : Follower, Healthy
    Partition 3 : Follower, Healthy
    Partition 4 : Follower, Healthy
    Partition 5 : Follower, Healthy
  Broker 6 - camunda-zeebe-3.camunda-zeebe.us-east1.svc:26501
    Version: 8.4.0
    Partition 4 : Follower, Healthy
    Partition 5 : Follower, Healthy
    Partition 6 : Follower, Healthy
    Partition 7 : Follower, Healthy
```

the procedure would be to:
* start temporary nodes that will restore the quorum in the surviving region
@@ -718,25 +754,150 @@ the procedure would be to:
* clean the temporary nodes from the surviving region
* restore the initial setup

-##### pause exporters
+#### pause exporters

TODO: write a makefile target to pause exporters in the surviving region

-##### start temporary nodes (failOver)
+#### Fail Over by starting Temporary Nodes

In the surviving region, use "make fail-over-regionX" to create the temporary nodes with the partitions needed to restore the quorum.

If region0 survived, the command would be

<table>
<tr>
<td>
Using GNU MakeManual Commands
+ ```sh cd region0 make fail-over-region1 ``` + + +```sh +kubectl create namespace us-east1-failover +kubectl config set-context --current --namespace=us-east1-failover +helm install --namespace us-east1-failover camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds \ + --set global.installationType=failOver \ + --set global.regionId=1 \ + --set elasticsearch.enabled=false \ + --set operate.enabled=false \ + --set tasklist.enabled=false \ + --set zeebe-gateway.enabled=false +``` +
Example Command Output ```sh +$ make fail-over-region1 +kubectl create namespace us-east1-failover +namespace/us-east1-failover created +kubectl config set-context --current --namespace=us-east1-failover +Context "gke_camunda-researchanddevelopment_us-east1_falko-region-0" modified. +helm install --namespace us-east1-failover camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds \ + --set global.installationType=failOver \ + --set global.regionId=1 \ + --set elasticsearch.enabled=false \ + --set operate.enabled=false \ + --set tasklist.enabled=false \ + --set zeebe-gateway.enabled=false +W0121 14:04:51.739034 55061 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" +W0121 14:04:51.739048 55061 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" +W0121 14:04:51.916650 55061 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" +NAME: camunda +LAST DEPLOYED: Sun Jan 21 14:04:45 2024 +NAMESPACE: us-east1-failover +STATUS: deployed +REVISION: 1 +NOTES: +# (camunda-platform - 9.0.2) + + ###### ### ## ## ## ## ## ## ######## ### +## ## ## ## ### ### ## ## ### ## ## ## ## ## +## ## ## #### #### ## ## #### ## ## ## ## ## +## ## ## ## ### ## ## ## ## ## ## ## ## ## ## +## ######### ## ## ## ## ## #### ## ## ######### +## ## ## ## ## ## ## ## ## ### ## ## ## ## + ###### ## ## ## ## ####### ## ## ######## ## ## + +################################################################### + +## Installed Services: + +- Zeebe: + - Enabled: true + - Docker Image used for Zeebe: camunda/zeebe:8.4.0 + - Zeebe Cluster Name: "camunda-zeebe" + - Prometheus ServiceMonitor Enabled: false +- Operate: + - Enabled: false +- Tasklist: + - Enabled: false +- Optimize: + - Enabled: false +- Connectors: + - Enabled: true + - Docker Image used for Connectors: camunda/connectors-bundle:8.4.3 +- Identity: + - Enabled: false +- Web Modeler: + - Enabled: false +- Elasticsearch: + - Enabled: false + +### Zeebe + +The Cluster itself is not exposed as a service which means that you can use `kubectl port-forward` to access the Zeebe cluster from outside Kubernetes: + +> kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 -n us-east1-failover + +Now you can connect your workers and clients to `localhost:26500` +### Connecting to Web apps + + +As part of the Helm charts, an ingress definition can be deployed, but you require to have an Ingress Controller for that Ingress to be Exposed. +In order to deploy the ingress manifest, set `.ingress.enabled` to `true`. Example: `operate.ingress.enabled=true` + +If you don't have an ingress controller you can use `kubectl port-forward` to access the deployed web application from outside the cluster: + + + + + +Connectors: +> kubectl port-forward svc/camunda-connectors 8088:8080 + + +Now you can point your browser to one of the service's login pages. Example: http://localhost:8081 for Operate. + +Default user and password: "demo/demo" + + +## Console config +- name: camunda + namespace: us-east1-failover + version: 9.0.2 + components: + + + + + + + + + - name: Zeebe Gateway + url: grpc:// + readiness: http://camunda-zeebe-gateway.us-east1-failover:9600/actuator/health/readiness ```
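Before looking at the cluster topology, it may help to confirm that the failover release actually came up. A minimal sketch, assuming the kubectl context of the surviving region and the `us-east1-failover` namespace used above:

```sh
# List the pods of the failover release; the temporary Zeebe broker pods
# should become Ready after a short while
kubectl get pods -n us-east1-failover
```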
From b647131f5d20bc6c1d0b8f4bf72b1e8ece5c778f Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 15:34:28 +0100 Subject: [PATCH 18/58] Update global.multiregion value names --- google/multi-region/active-active/README.md | 8 ++++---- google/multi-region/active-active/region0/Makefile | 6 +++--- google/multi-region/active-active/region1/Makefile | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index 2bf87715..104ba60d 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -785,8 +785,8 @@ make fail-over-region1 kubectl create namespace us-east1-failover kubectl config set-context --current --namespace=us-east1-failover helm install --namespace us-east1-failover camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds \ - --set global.installationType=failOver \ - --set global.regionId=1 \ + --set global.multiregion.installationType=failOver \ + --set global.multiregion.regionId=1 \ --set elasticsearch.enabled=false \ --set operate.enabled=false \ --set tasklist.enabled=false \ @@ -804,8 +804,8 @@ namespace/us-east1-failover created kubectl config set-context --current --namespace=us-east1-failover Context "gke_camunda-researchanddevelopment_us-east1_falko-region-0" modified. helm install --namespace us-east1-failover camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds \ - --set global.installationType=failOver \ - --set global.regionId=1 \ + --set global.multiregion.installationType=failOver \ + --set global.multiregion.regionId=1 \ --set elasticsearch.enabled=false \ --set operate.enabled=false \ --set tasklist.enabled=false \ diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index 3206a13f..9447a81b 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -49,8 +49,8 @@ fail-over-region1: -kubectl create namespace $(namespace)-failover -kubectl config set-context --current --namespace=$(namespace)-failover helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ - --set global.installationType=failOver \ - --set global.regionId=1 \ + --set global.multiregion.installationType=failOver \ + --set global.multiregion.regionId=1 \ --set elasticsearch.enabled=false \ --set operate.enabled=false \ --set tasklist.enabled=false \ @@ -60,7 +60,7 @@ fail-over-region1: fail-back: use-kube namespace prepare-elastic-backup-key helm install --namespace $(region) $(release) $(chart) -f $(chartValues) --skip-crds \ - --set global.installationType=failBack \ + --set global.multiregion.installationType=failBack \ --set operate.enabled=false \ --set tasklist.enabled=false diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile index 36ae3f14..c9376492 100644 --- a/google/multi-region/active-active/region1/Makefile +++ b/google/multi-region/active-active/region1/Makefile @@ -49,8 +49,8 @@ fail-over-region1: -kubectl create namespace $(namespace)-failover -kubectl config set-context --current --namespace=$(namespace)-failover helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ - --set global.installationType=failOver \ - --set global.regionId=0 \ + --set global.multiregion.installationType=failOver \ + --set 
global.multiregion.regionId=0 \ --set elasticsearch.enabled=false \ --set operate.enabled=false \ --set tasklist.enabled=false \ @@ -60,7 +60,7 @@ fail-over-region1: fail-back: use-kube namespace prepare-elastic-backup-key helm install --namespace $(region) $(release) $(chart) -f $(chartValues) --skip-crds \ - --set global.installationType=failBack \ + --set global.multiregion.installationType=failBack \ --set operate.enabled=false \ --set tasklist.enabled=false From e4211291c2287ca8d1c80555b0eb2e640d156772 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 17:47:44 +0100 Subject: [PATCH 19/58] Add manual commands and example output for fail back --- google/multi-region/active-active/README.md | 184 +++++++++++++++++++- 1 file changed, 180 insertions(+), 4 deletions(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index 104ba60d..06ea8af7 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -901,8 +901,53 @@ Default user and password: "demo/demo" ```
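The topology shown below can be queried through the gateway of the surviving region. A minimal sketch, assuming `zbctl` is installed locally and plaintext communication as in this example setup:

```sh
# Forward the gateway's gRPC port from the surviving region
kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 -n us-east1 &

# Print the cluster topology; --insecure skips TLS, which this setup does not enable
zbctl status --insecure --address localhost:26500
```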
+`zbctl` should now also show half of the brokers of the failed region, e.g. two of the odd-numbered ones: -If region1 survived, the command would be +``` +Cluster size: 8 +Partitions count: 8 +Replication factor: 4 +Gateway version: 8.4.0 +Brokers: + Broker 0 - camunda-zeebe-0.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 1 : Leader, Healthy + Partition 6 : Leader, Healthy + Partition 7 : Follower, Healthy + Partition 8 : Leader, Healthy + Broker 1 - camunda-zeebe-0.camunda-zeebe.us-east1-failover.svc:26501 + Version: 8.4.0 + Partition 1 : Follower, Healthy + Partition 2 : Follower, Healthy + Partition 7 : Follower, Healthy + Partition 8 : Follower, Healthy + Broker 2 - camunda-zeebe-1.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 1 : Follower, Healthy + Partition 2 : Leader, Healthy + Partition 3 : Leader, Healthy + Partition 8 : Follower, Healthy + Broker 4 - camunda-zeebe-2.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 2 : Follower, Healthy + Partition 3 : Follower, Healthy + Partition 4 : Leader, Healthy + Partition 5 : Leader, Healthy + Broker 5 - camunda-zeebe-1.camunda-zeebe.us-east1-failover.svc:26501 + Version: 8.4.0 + Partition 3 : Follower, Healthy + Partition 4 : Follower, Healthy + Partition 5 : Follower, Healthy + Partition 6 : Follower, Healthy + Broker 6 - camunda-zeebe-3.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 4 : Follower, Healthy + Partition 5 : Follower, Healthy + Partition 6 : Follower, Healthy + Partition 7 : Leader, Healthy +``` + +If region1 survived, the commands would be ```sh cd region1 @@ -918,18 +963,149 @@ make fail-over-region0 > :information_source: As a result, we have a working zeebe engine but the exporters are stuck because one ES target is not yet available. -##### restore missing nodes in the disastered region (failBack) +#### Fail Back by restore missing nodes in the disastered region + +Once you're able to restore the disaster region, you don't want to restart all nodes at once because you would end-up with some brokerIds duplicated (from the failOver). So instead, you want to restart only the missing brokerIds. + + + + + + + + + + +
Using GNU MakeManual Commands
-Once you're able to restore the disaster region, you don't want to restart all nodes. Else you will end-up with some brokerIds duplicated (from the failOver). So instead, you want to restart only missing brokerIds. ```sh -cd region0 +cd region1 make fail-back ``` + + +```sh +gcloud config set project camunda-researchanddevelopment +gcloud container clusters get-credentials falko-region-1 --region europe-west1 +kubectl create namespace europe-west1 +kubectl config set-context --current --namespace=europe-west1 +kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json +helm install --namespace europe-west1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds \ + --set global.multiregion.installationType=failBack \ + --set operate.enabled=false \ + --set tasklist.enabled=false +``` +
Example Command Output ```sh +$ make fail-back +gcloud config set project camunda-researchanddevelopment +Updated property [core/project]. +gcloud container clusters get-credentials falko-region-1 --region europe-west1 +Fetching cluster endpoint and auth data. +kubeconfig entry generated for falko-region-1. +kubectl create namespace europe-west1 +namespace/europe-west1 created +kubectl config set-context --current --namespace=europe-west1 +Context "gke_camunda-researchanddevelopment_europe-west1_falko-region-1" modified. +kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json +secret/gcs-backup-key created +helm install --namespace europe-west1 camunda camunda/camunda-platform -f camunda-values.yaml --skip-crds \ + --set global.multiregion.installationType=failBack \ + --set operate.enabled=false \ + --set tasklist.enabled=false +W0121 16:50:38.304149 68228 warnings.go:70] spec.template.spec.containers[0].env[6]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" +W0121 16:50:38.304198 68228 warnings.go:70] spec.template.spec.containers[0].env[7]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" +W0121 16:50:38.474040 68228 warnings.go:70] spec.template.spec.containers[0].env[25]: hides previous definition of "ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS" +NAME: camunda +LAST DEPLOYED: Sun Jan 21 16:50:34 2024 +NAMESPACE: europe-west1 +STATUS: deployed +REVISION: 1 +NOTES: +# (camunda-platform - 9.0.2) + + ###### ### ## ## ## ## ## ## ######## ### +## ## ## ## ### ### ## ## ### ## ## ## ## ## +## ## ## #### #### ## ## #### ## ## ## ## ## +## ## ## ## ### ## ## ## ## ## ## ## ## ## ## +## ######### ## ## ## ## ## #### ## ## ######### +## ## ## ## ## ## ## ## ## ### ## ## ## ## + ###### ## ## ## ## ####### ## ## ######## ## ## + +################################################################### + +## Installed Services: + +- Zeebe: + - Enabled: true + - Docker Image used for Zeebe: camunda/zeebe:8.4.0 + - Zeebe Cluster Name: "camunda-zeebe" + - Prometheus ServiceMonitor Enabled: false +- Operate: + - Enabled: false +- Tasklist: + - Enabled: false +- Optimize: + - Enabled: false +- Connectors: + - Enabled: true + - Docker Image used for Connectors: camunda/connectors-bundle:8.4.3 +- Identity: + - Enabled: false +- Web Modeler: + - Enabled: false +- Elasticsearch: + - Enabled: true + - Elasticsearch URL: http://camunda-elasticsearch:9200 + +### Zeebe + +The Cluster itself is not exposed as a service which means that you can use `kubectl port-forward` to access the Zeebe cluster from outside Kubernetes: + +> kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 -n europe-west1 + +Now you can connect your workers and clients to `localhost:26500` +### Connecting to Web apps + + +As part of the Helm charts, an ingress definition can be deployed, but you require to have an Ingress Controller for that Ingress to be Exposed. +In order to deploy the ingress manifest, set `.ingress.enabled` to `true`. Example: `operate.ingress.enabled=true` + +If you don't have an ingress controller you can use `kubectl port-forward` to access the deployed web application from outside the cluster: + + + + + +Connectors: +> kubectl port-forward svc/camunda-connectors 8088:8080 + + +Now you can point your browser to one of the service's login pages. Example: http://localhost:8081 for Operate. 
+ +Default user and password: "demo/demo" + + +## Console config +- name: camunda + namespace: europe-west1 + version: 9.0.2 + components: + + + + + + + + + - name: Zeebe Gateway + url: grpc:// + readiness: http://camunda-zeebe-gateway.europe-west1:9600/actuator/health/readiness ```
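Before continuing, it is worth double-checking that the failBack release only brought up the previously missing broker IDs. A minimal sketch, assuming the restored region's kubectl context and the namespace used above:

```sh
# In the restored region: all broker pods are created, but the ones
# already covered by the failover release in the other region are
# kept from starting by the altered start script
kubectl get pods -n europe-west1
```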
From dd59b85fa8ebd1ac9e343ccfd71b8f1a38cf5615 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 17:48:51 +0100 Subject: [PATCH 20/58] Fix typo --- google/multi-region/active-active/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index 06ea8af7..1831c4af 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -963,7 +963,7 @@ make fail-over-region0 > :information_source: As a result, we have a working zeebe engine but the exporters are stuck because one ES target is not yet available. -#### Fail Back by restore missing nodes in the disastered region +#### Fail Back by restoring missing nodes in the disastered region Once you're able to restore the disaster region, you don't want to restart all nodes at once because you would end-up with some brokerIds duplicated (from the failOver). So instead, you want to restart only the missing brokerIds. From cad5fde7d87faac45154b010244f6f76f23e51cb Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 19:18:55 +0100 Subject: [PATCH 21/58] Add makefile target to pause exporters and zbctl status after failBack --- google/multi-region/active-active/README.md | 65 ++++++++++++++++++++- 1 file changed, 63 insertions(+), 2 deletions(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index 1831c4af..e986252d 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -756,8 +756,10 @@ the procedure would be to: #### pause exporters -TODO: write a makefile target to pause exporters in the surviving region - +```sh +cd region0 +make pause-exporters +``` #### Fail Over by starting Temporary Nodes @@ -1111,6 +1113,65 @@ Default user and password: "demo/demo" > :information_source: This will indeed create all the brokers. But half of them (the ones in the failOver) will not be started (start script is altered in the configmap). Operate and tasklist are not restarted on purpose to avoid touching ES indices. +`zbctl status` should now show all brokers but with three different DNS suffixes, +e.g. 
four in `us-east1`, two in `us-east1-failover`, and two in `europe-west1`: + +``` +Cluster size: 8 +Partitions count: 8 +Replication factor: 4 +Gateway version: 8.4.0 +Brokers: + Broker 0 - camunda-zeebe-0.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 1 : Follower, Healthy + Partition 6 : Follower, Healthy + Partition 7 : Follower, Healthy + Partition 8 : Follower, Healthy + Broker 1 - camunda-zeebe-0.camunda-zeebe.us-east1-failover.svc:26501 + Version: 8.4.0 + Partition 1 : Leader, Healthy + Partition 2 : Follower, Healthy + Partition 7 : Follower, Healthy + Partition 8 : Leader, Healthy + Broker 2 - camunda-zeebe-1.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 1 : Follower, Healthy + Partition 2 : Follower, Healthy + Partition 3 : Follower, Healthy + Partition 8 : Follower, Healthy + Broker 3 - camunda-zeebe-1.camunda-zeebe.europe-west1.svc:26501 + Version: 8.4.0 + Partition 1 : Follower, Healthy + Partition 2 : Follower, Healthy + Partition 3 : Follower, Healthy + Partition 4 : Leader, Healthy + Broker 4 - camunda-zeebe-2.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 2 : Leader, Healthy + Partition 3 : Leader, Healthy + Partition 4 : Follower, Healthy + Partition 5 : Follower, Healthy + Broker 5 - camunda-zeebe-1.camunda-zeebe.us-east1-failover.svc:26501 + Version: 8.4.0 + Partition 3 : Follower, Healthy + Partition 4 : Follower, Healthy + Partition 5 : Leader, Healthy + Partition 6 : Follower, Healthy + Broker 6 - camunda-zeebe-3.camunda-zeebe.us-east1.svc:26501 + Version: 8.4.0 + Partition 4 : Follower, Healthy + Partition 5 : Follower, Healthy + Partition 6 : Leader, Healthy + Partition 7 : Leader, Healthy + Broker 7 - camunda-zeebe-3.camunda-zeebe.europe-west1.svc:26501 + Version: 8.4.0 + Partition 5 : Follower, Healthy + Partition 6 : Follower, Healthy + Partition 7 : Follower, Healthy + Partition 8 : Follower, Healthy +``` + ##### pause exporters You now have 2 active regions again and we want to have 2 consistent ES clusters. We will pause exporters, take snapshots in the surviving region, restore them into the restored region and resume exporters. 
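A minimal sketch of that sequence with plain `kubectl` and `curl`, assuming the pod and repository names used earlier in this guide and an arbitrary snapshot name (`failback_sync`); it also assumes `curl` is available in the images (otherwise port-forward the ports and run `curl` locally). Pausing exporting applies cluster-wide, so sending the request to one broker is enough:

```sh
# Pause exporting across the Zeebe cluster (management port 9600)
kubectl exec camunda-zeebe-0 -n us-east1 -- \
  curl -XPOST http://localhost:9600/actuator/exporting/pause

# Snapshot the surviving region's Elasticsearch into the shared backup repository
kubectl exec camunda-elasticsearch-master-0 -n us-east1 -c elasticsearch -- \
  curl -XPUT "http://localhost:9200/_snapshot/camunda_backup/failback_sync?wait_for_completion=true"

# Restore the snapshot in the restored region (run with the other cluster's kubectl context)
kubectl exec camunda-elasticsearch-master-0 -n europe-west1 -c elasticsearch -- \
  curl -XPOST "http://localhost:9200/_snapshot/camunda_backup/failback_sync/_restore?wait_for_completion=true"

# Resume exporting once both Elasticsearch clusters are consistent
kubectl exec camunda-zeebe-0 -n us-east1 -- \
  curl -XPOST http://localhost:9600/actuator/exporting/resume
```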
From 0f0e2d65b9b4cf4201353085cdab82a911050d5d Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Sun, 21 Jan 2024 19:39:30 +0100 Subject: [PATCH 22/58] Update elasticsearch pod name --- .../multi-region/active-active/region0/Makefile | 16 ++++++++-------- .../multi-region/active-active/region1/Makefile | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index 9447a81b..3d2b7c05 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -93,7 +93,7 @@ include $(root)/connectors/connectors.mk .PHONY: elastic-nodes elastic-nodes: - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -s http://localhost:9200/_nodes | python -m json.tool + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -s http://localhost:9200/_nodes | python -m json.tool .PHONY: prepare-elastic-backup-key prepare-elastic-backup-key: @@ -101,7 +101,7 @@ prepare-elastic-backup-key: .PHONY: prepare-elastic-backup-repo prepare-elastic-backup-repo: - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}' + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}' .PHONY: operate-snapshot operate-snapshot: @@ -109,9 +109,9 @@ operate-snapshot: .PHONY: restore-operate-snapshot restore-operate-snapshot: - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_1_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_2_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_4_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_5_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_1_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_2_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 
-n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_4_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_5_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile index c9376492..57c0c435 100644 --- a/google/multi-region/active-active/region1/Makefile +++ b/google/multi-region/active-active/region1/Makefile @@ -93,7 +93,7 @@ include $(root)/connectors/connectors.mk .PHONY: elastic-nodes elastic-nodes: - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -s http://localhost:9200/_nodes | python -m json.tool + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -s http://localhost:9200/_nodes | python -m json.tool .PHONY: prepare-elastic-backup-key prepare-elastic-backup-key: @@ -101,7 +101,7 @@ prepare-elastic-backup-key: .PHONY: prepare-elastic-backup-repo prepare-elastic-backup-repo: - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}' + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}' .PHONY: operate-snapshot operate-snapshot: @@ -109,9 +109,9 @@ operate-snapshot: .PHONY: restore-operate-snapshot restore-operate-snapshot: - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_1_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_2_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_4_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_5_of_6/_restore?wait_for_completion=true - kubectl exec elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true + kubectl exec 
camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_1_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_2_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_4_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_5_of_6/_restore?wait_for_completion=true + kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_6_of_6/_restore?wait_for_completion=true From 3ccfe4fbd3cc989b85e556277f22f02cefccddcd Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Tue, 23 Jan 2024 13:28:45 +0100 Subject: [PATCH 23/58] Enforce using the correct k8s context for all targets --- .../multi-region/active-active/region0/Makefile | 16 ++++++++-------- .../multi-region/active-active/region1/Makefile | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index 3d2b7c05..4b35a565 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -40,12 +40,12 @@ kube: kube-gke @echo " '$(region)': 'gke_$(project)_$(region)_$(clusterName)'," .PHONY: external-urls # Show external URLs -external-urls: external-urls-no-ingress +external-urls: use-kube external-urls-no-ingress ### <--- End of setup ---> .PHONY: fail-over-region1 # Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum -fail-over-region1: +fail-over-region1: use-kube -kubectl create namespace $(namespace)-failover -kubectl config set-context --current --namespace=$(namespace)-failover helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ @@ -66,7 +66,7 @@ fail-back: use-kube namespace prepare-elastic-backup-key # TODO what if something is running # require clean-camunda but without deleting PVCs or with because its dirty -fail-back-with-cluster-running: +fail-back-with-cluster-running: use-kube kubectl delete pod camunda-zeebe-0 -n $(namespace) kubectl delete pod camunda-zeebe-2 -n $(namespace) @@ -92,23 +92,23 @@ include $(root)/bpmn/deploy-models.mk include $(root)/connectors/connectors.mk .PHONY: elastic-nodes -elastic-nodes: +elastic-nodes: use-kube kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -s http://localhost:9200/_nodes | python -m json.tool .PHONY: prepare-elastic-backup-key -prepare-elastic-backup-key: +prepare-elastic-backup-key: use-kube kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json .PHONY: prepare-elastic-backup-repo -prepare-elastic-backup-repo: +prepare-elastic-backup-repo: use-kube 
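+# Registers a GCS snapshot repository named camunda_backup in this region's
+# Elasticsearch; the bucket in the curl call below is specific to this example.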
kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}' .PHONY: operate-snapshot -operate-snapshot: +operate-snapshot: use-kube kubectl exec $$(kubectl get pod --namespace $(namespace) --selector="app=camunda-platform,app.kubernetes.io/component=operate,app.kubernetes.io/instance=camunda,app.kubernetes.io/managed-by=Helm,app.kubernetes.io/name=operate,app.kubernetes.io/part-of=camunda-platform" --output jsonpath='{.items[0].metadata.name}') --namespace $(namespace) -c operate -- curl -i http://localhost:8080/actuator/backups -XPOST -H 'Content-Type: application/json' -d'{"backupId": 3}' .PHONY: restore-operate-snapshot -restore-operate-snapshot: +restore-operate-snapshot: use-kube kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_1_of_6/_restore?wait_for_completion=true kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_2_of_6/_restore?wait_for_completion=true kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile index 57c0c435..21cdf4c1 100644 --- a/google/multi-region/active-active/region1/Makefile +++ b/google/multi-region/active-active/region1/Makefile @@ -40,12 +40,12 @@ kube: kube-gke @echo " '$(region)': 'gke_$(project)_$(region)_$(clusterName)'," .PHONY: external-urls # Show external URLs -external-urls: external-urls-no-ingress +external-urls: use-kube external-urls-no-ingress ### <--- End of setup ---> .PHONY: fail-over-region0 # Create temporary brokers that impersonate half of the ones lost in region 0 to backfill and restore quorum -fail-over-region1: +fail-over-region1: use-kube -kubectl create namespace $(namespace)-failover -kubectl config set-context --current --namespace=$(namespace)-failover helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ @@ -66,7 +66,7 @@ fail-back: use-kube namespace prepare-elastic-backup-key # TODO what if something is running # require clean-camunda but without deleting PVCs or with because its dirty -fail-back-with-cluster-running: +fail-back-with-cluster-running: use-kube kubectl delete pod camunda-zeebe-0 -n $(namespace) kubectl delete pod camunda-zeebe-2 -n $(namespace) @@ -92,23 +92,23 @@ include $(root)/bpmn/deploy-models.mk include $(root)/connectors/connectors.mk .PHONY: elastic-nodes -elastic-nodes: +elastic-nodes: use-kube kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -s http://localhost:9200/_nodes | python -m json.tool .PHONY: prepare-elastic-backup-key -prepare-elastic-backup-key: +prepare-elastic-backup-key: use-kube kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=gcs_backup_key.json .PHONY: prepare-elastic-backup-repo -prepare-elastic-backup-repo: +prepare-elastic-backup-repo: use-kube kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPUT 
http://localhost:9200/_snapshot/camunda_backup -H 'Content-Type: application/json' -d'{"type": "gcs","settings":{"bucket": "falko-elasticsearch-backup", "base_path": "backups"}}' .PHONY: operate-snapshot -operate-snapshot: +operate-snapshot: use-kube kubectl exec $$(kubectl get pod --namespace $(namespace) --selector="app=camunda-platform,app.kubernetes.io/component=operate,app.kubernetes.io/instance=camunda,app.kubernetes.io/managed-by=Helm,app.kubernetes.io/name=operate,app.kubernetes.io/part-of=camunda-platform" --output jsonpath='{.items[0].metadata.name}') --namespace $(namespace) -c operate -- curl -i http://localhost:8080/actuator/backups -XPOST -H 'Content-Type: application/json' -d'{"backupId": 3}' .PHONY: restore-operate-snapshot -restore-operate-snapshot: +restore-operate-snapshot: use-kube kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_1_of_6/_restore?wait_for_completion=true kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_2_of_6/_restore?wait_for_completion=true kubectl exec camunda-elasticsearch-master-0 -n $(namespace) -c elasticsearch -- curl -XPOST http://localhost:9200/_snapshot/camunda_backup/camunda_operate_3_8.2.10_part_3_of_6/_restore?wait_for_completion=true From d411964424b66a8c6c04032946eaa490b7d8c88b Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Tue, 23 Jan 2024 14:17:26 +0100 Subject: [PATCH 24/58] Re-add TODO for disable exporters and output of make pause-exporters --- google/multi-region/active-active/README.md | 211 +++++++++++++++++++- 1 file changed, 209 insertions(+), 2 deletions(-) diff --git a/google/multi-region/active-active/README.md b/google/multi-region/active-active/README.md index e986252d..bd89f686 100644 --- a/google/multi-region/active-active/README.md +++ b/google/multi-region/active-active/README.md @@ -754,11 +754,11 @@ the procedure would be to: * clean the temporary nodes from the surviving region * restore the initial setup -#### pause exporters +#### Disable Exporters to the failed region ```sh cd region0 -make pause-exporters +# TODO make disable-exporter-to-region1 ``` #### Fail Over by starting Temporary Nodes @@ -1184,6 +1184,11 @@ make pause-exporters Example Command Output ```sh +$ make pause-exporters +kubectl exec camunda-elasticsearch-master-0 -n us-east1 -c elasticsearch -- curl -i camunda-zeebe-gateway:9600/actuator/exporting/pause -XPOST + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0HTTP/1.1 204 No Content ```
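+
+One way to sanity-check that the pause took effect is to watch the document count of the exported indices stop growing (a sketch; `zeebe-record` is the index prefix configured for the Elasticsearch exporters in this setup):
+
+```sh
+kubectl exec camunda-elasticsearch-master-0 -n us-east1 -c elasticsearch -- \
+  curl -s 'http://localhost:9200/zeebe-record*/_count'
+```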
@@ -1255,6 +1260,11 @@ make resume-exporters
 Example Command Output
 
 ```sh
+$ make resume-exporters
+kubectl exec camunda-elasticsearch-master-0 -n us-east1 -c elasticsearch -- curl -i camunda-zeebe-gateway:9600/actuator/exporting/resume -XPOST
+  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
+                                 Dload  Upload   Total   Spent    Left  Speed
+  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--HTTP/1.1 204 No Content
 ```
 
@@ -1271,9 +1281,67 @@ make clean-fail-over-region1
 Example Command Output
 
 ```sh
+$ make clean-fail-over-region1
+gcloud config set project camunda-researchanddevelopment
+Updated property [core/project].
+gcloud container clusters get-credentials falko-region-0 --region us-east1
+Fetching cluster endpoint and auth data.
+kubeconfig entry generated for falko-region-0.
+helm --namespace us-east1-failover uninstall camunda
+release "camunda" uninstalled
+kubectl delete -n us-east1-failover pvc -l app.kubernetes.io/instance=camunda
+persistentvolumeclaim "data-camunda-zeebe-0" deleted
+persistentvolumeclaim "data-camunda-zeebe-1" deleted
+kubectl delete namespace us-east1-failover
+namespace "us-east1-failover" deleted
 ```
 
+`zbctl status` should show that the temporary brokers are gone from the topology:
+
+```
+Cluster size: 8
+Partitions count: 8
+Replication factor: 4
+Gateway version: 8.4.0
+Brokers:
+  Broker 0 - camunda-zeebe-0.camunda-zeebe.us-east1.svc:26501
+    Version: 8.4.0
+    Partition 1 : Follower, Healthy
+    Partition 6 : Follower, Healthy
+    Partition 7 : Follower, Healthy
+    Partition 8 : Follower, Healthy
+  Broker 2 - camunda-zeebe-1.camunda-zeebe.us-east1.svc:26501
+    Version: 8.4.0
+    Partition 1 : Leader, Healthy
+    Partition 2 : Leader, Healthy
+    Partition 3 : Leader, Healthy
+    Partition 8 : Leader, Healthy
+  Broker 3 - camunda-zeebe-1.camunda-zeebe.europe-west1.svc:26501
+    Version: 8.4.0
+    Partition 1 : Follower, Healthy
+    Partition 2 : Follower, Healthy
+    Partition 3 : Follower, Healthy
+    Partition 4 : Leader, Healthy
+  Broker 4 - camunda-zeebe-2.camunda-zeebe.us-east1.svc:26501
+    Version: 8.4.0
+    Partition 2 : Follower, Healthy
+    Partition 3 : Follower, Healthy
+    Partition 4 : Follower, Healthy
+    Partition 5 : Leader, Healthy
+  Broker 6 - camunda-zeebe-3.camunda-zeebe.us-east1.svc:26501
+    Version: 8.4.0
+    Partition 4 : Follower, Healthy
+    Partition 5 : Follower, Healthy
+    Partition 6 : Leader, Healthy
+    Partition 7 : Leader, Healthy
+  Broker 7 - camunda-zeebe-3.camunda-zeebe.europe-west1.svc:26501
+    Version: 8.4.0
+    Partition 5 : Follower, Healthy
+    Partition 6 : Follower, Healthy
+    Partition 7 : Follower, Healthy
+```
+
 ##### restore the initial setup (back to normal)
 
 You now want to recreate the missing brokers in the disastered region.
@@ -1287,6 +1355,145 @@ make fail-back-to-normal
 Example Command Output
 
 ```sh
+$ make fail-back-to-normal
+gcloud config set project camunda-researchanddevelopment
+Updated property [core/project].
+gcloud container clusters get-credentials falko-region-1 --region europe-west1
+Fetching cluster endpoint and auth data.
+kubeconfig entry generated for falko-region-1.
+helm repo update camunda
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "camunda" chart repository
+Update Complete. ⎈Happy Helming!⎈
+helm search repo camunda/camunda-platform
+WARNING: Repo "prometheus-community" is corrupt or missing. Try 'helm repo update'.
+WARNING: open /home/falko/.cache/helm/repository/prometheus-community-index.yaml: no such file or directory +WARNING: Repo "stable" is corrupt or missing. Try 'helm repo update'. +WARNING: open /home/falko/.cache/helm/repository/stable-index.yaml: no such file or directory +NAME CHART VERSION APP VERSION DESCRIPTION +camunda/camunda-platform 9.0.2 8.4.x Camunda 8 Self-Managed Helm charts. Camunda's p... +OPERATE_SECRET=$(kubectl get secret --namespace europe-west1 "camunda-operate-identity-secret" -o jsonpath="{.data.operate-secret}" | base64 --decode); \ +TASKLIST_SECRET=$(kubectl get secret --namespace europe-west1 "camunda-tasklist-identity-secret" -o jsonpath="{.data.tasklist-secret}" | base64 --decode); \ +OPTIMIZE_SECRET=$(kubectl get secret --namespace europe-west1 "camunda-optimize-identity-secret" -o jsonpath="{.data.optimize-secret}" | base64 --decode); \ +KEYCLOAK_ADMIN_SECRET=$(kubectl get secret --namespace europe-west1 "camunda-keycloak" -o jsonpath="{.data.admin-password}" | base64 --decode) \ +KEYCLOAK_MANAGEMENT_SECRET=$(kubectl get secret --namespace europe-west1 "camunda-keycloak" -o jsonpath="{.data.management-password}" | base64 --decode) \ +POSTGRESQL_SECRET=$(kubectl get secret --namespace europe-west1 "camunda-postgresql" -o jsonpath="{.data.postgres-password}" | base64 --decode) \ + CONNECTORS_SECRET=$(kubectl get secret --namespace europe-west1 "camunda-connectors-auth-credentials" -o jsonpath="{.data.connectors-secret}" | base64 -d) \ +helm upgrade --namespace europe-west1 camunda camunda/camunda-platform -f camunda-values.yaml \ + --set global.identity.auth.operate.existingSecret=$OPERATE_SECRET \ + --set global.identity.auth.tasklist.existingSecret=$TASKLIST_SECRET \ + --set global.identity.auth.optimize.existingSecret=$OPTIMIZE_SECRET \ + --set identity.keycloak.auth.adminPassword=$KEYCLOAK_ADMIN_SECRET \ + --set identity.keycloak.auth.managementPassword=$KEYCLOAK_MANAGEMENT_SECRET \ + --set identity.keycloak.postgresql.auth.password=$POSTGRESQL_SECRET \ + --set connectors.inbound.auth.existingSecret=ONNECTORS_SECRET +Error from server (NotFound): secrets "camunda-operate-identity-secret" not found +Error from server (NotFound): secrets "camunda-tasklist-identity-secret" not found +Error from server (NotFound): secrets "camunda-optimize-identity-secret" not found +Error from server (NotFound): secrets "camunda-keycloak" not found +Error from server (NotFound): secrets "camunda-keycloak" not found +Error from server (NotFound): secrets "camunda-postgresql" not found +W0121 19:49:24.231459 92899 warnings.go:70] spec.template.spec.containers[0].env[3]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_USERNAME" +W0121 19:49:24.231507 92899 warnings.go:70] spec.template.spec.containers[0].env[5]: hides previous definition of "CAMUNDA_OPERATE_CLIENT_PASSWORD" +Release "camunda" has been upgraded. Happy Helming! 
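+# Note: the "NotFound" secret errors above are expected in this setup, since
+# Identity is disabled and those identity/keycloak secrets were never created.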
+NAME: camunda +LAST DEPLOYED: Sun Jan 21 19:49:19 2024 +NAMESPACE: europe-west1 +STATUS: deployed +REVISION: 2 +NOTES: +# (camunda-platform - 9.0.2) + + ###### ### ## ## ## ## ## ## ######## ### +## ## ## ## ### ### ## ## ### ## ## ## ## ## +## ## ## #### #### ## ## #### ## ## ## ## ## +## ## ## ## ### ## ## ## ## ## ## ## ## ## ## +## ######### ## ## ## ## ## #### ## ## ######### +## ## ## ## ## ## ## ## ## ### ## ## ## ## + ###### ## ## ## ## ####### ## ## ######## ## ## + +################################################################### + +## Installed Services: + +- Zeebe: + - Enabled: true + - Docker Image used for Zeebe: camunda/zeebe:8.4.0 + - Zeebe Cluster Name: "camunda-zeebe" + - Prometheus ServiceMonitor Enabled: false +- Operate: + - Enabled: true + - Docker Image used for Operate: camunda/operate:8.4.0 +- Tasklist: + - Enabled: true + - Docker Image used for Tasklist: camunda/tasklist:8.4.0 +- Optimize: + - Enabled: false +- Connectors: + - Enabled: true + - Docker Image used for Connectors: camunda/connectors-bundle:8.4.3 +- Identity: + - Enabled: false +- Web Modeler: + - Enabled: false +- Elasticsearch: + - Enabled: true + - Elasticsearch URL: http://camunda-elasticsearch:9200 + +### Zeebe + +The Cluster itself is not exposed as a service which means that you can use `kubectl port-forward` to access the Zeebe cluster from outside Kubernetes: + +> kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 -n europe-west1 + +Now you can connect your workers and clients to `localhost:26500` +### Connecting to Web apps + + +As part of the Helm charts, an ingress definition can be deployed, but you require to have an Ingress Controller for that Ingress to be Exposed. +In order to deploy the ingress manifest, set `.ingress.enabled` to `true`. Example: `operate.ingress.enabled=true` + +If you don't have an ingress controller you can use `kubectl port-forward` to access the deployed web application from outside the cluster: + + +Operate: +> kubectl port-forward svc/camunda-operate 8081:80 +Tasklist: +> kubectl port-forward svc/camunda-tasklist 8082:80 + +Connectors: +> kubectl port-forward svc/camunda-connectors 8088:8080 + + +Now you can point your browser to one of the service's login pages. Example: http://localhost:8081 for Operate. 
+ +Default user and password: "demo/demo" + + +## Console config +- name: camunda + namespace: europe-west1 + version: 9.0.2 + components: + + + - name: Operate + url: http:// + readiness: http://camunda-operate.europe-west1:80/actuator/health/readiness + + + + - name: Tasklist + url: http:// + readiness: http://camunda-tasklist.europe-west1:80/actuator/health/readiness + + - name: Zeebe Gateway + url: grpc:// + readiness: http://camunda-zeebe-gateway.europe-west1:9600/actuator/health/readiness +kubectl delete pod camunda-zeebe-0 -n europe-west1 +pod "camunda-zeebe-0" deleted +kubectl delete pod camunda-zeebe-2 -n europe-west1 +pod "camunda-zeebe-2" deleted ``` From f838872379a5d82a56664c4ee9bdb82091b11605 Mon Sep 17 00:00:00 2001 From: ManuelDittmar Date: Wed, 24 Jan 2024 10:14:11 +0100 Subject: [PATCH 25/58] automate gcp creation --- google/multi-region/active-active/Makefile | 101 +++++++++++++++++ .../camunda-values-region-0.yaml | 102 ++++++++++++++++++ .../camunda-values-region-1.yaml | 102 ++++++++++++++++++ .../camunda-values-template.yaml | 102 ++++++++++++++++++ .../generated/dns-configmap-us-east1.yaml | 2 +- .../active-active/setup-dns-chaining.py | 19 +++- 6 files changed, 424 insertions(+), 4 deletions(-) create mode 100644 google/multi-region/active-active/Makefile create mode 100644 google/multi-region/active-active/camunda-values-region-0.yaml create mode 100644 google/multi-region/active-active/camunda-values-region-1.yaml create mode 100644 google/multi-region/active-active/camunda-values-template.yaml diff --git a/google/multi-region/active-active/Makefile b/google/multi-region/active-active/Makefile new file mode 100644 index 00000000..e60bf193 --- /dev/null +++ b/google/multi-region/active-active/Makefile @@ -0,0 +1,101 @@ +# ------------------------------------ +# Set the following for your specific environment +# Already have a Cluster? Set these values to point to your existing environment +# Otherwise, these values will be used to create a new Cluster + +# GCP project +project ?= camunda-researchanddevelopment +# GCP region 0 +region0 ?=us-east1-c# region = zone for simplicity +# GCP region 1 +region1 ?= europe-west1-b +# GKE cluster name 0 +clusterName0 ?= manus-region-0 +# GKE cluster name 1 +clusterName1 ?= manus-region-1 +# Firewall rule name +firewallRule ?= zeebe-between-clusters-manu +# Brokers per Region +brokersPerRegion = 4 + + +# ------------------------------------ +# The following variables should not be changed except for advanced use cases +ifeq ($(OS),Windows_NT) + root ?= $(CURDIR)/../../../.. +else + root ?= $(shell pwd)/../../../.. 
+endif + + +.PHONY: gcp-create-cluster +gcp-create-cluster: + gcloud config set project $(project) + gcloud container clusters create $(clusterName0) \ + --region $(region0) \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest + gcloud container clusters create $(clusterName1) \ + --region $(region1) \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest + +.PHONY: gcp-firewall +gcp-firewall: + networkTag0=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region0)_$(clusterName0)) --zone $(region0) --format="get(tags.items)"); \ + networkTag1=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region1)_$(clusterName1)) --zone $(region1) --format="get(tags.items)"); \ + ipRange0=$$(gcloud container clusters describe $(clusterName0) --zone $(region0) --format='value(clusterIpv4Cidr)'); \ + ipRange1=$$(gcloud container clusters describe $(clusterName1) --zone $(region1) --format='value(clusterIpv4Cidr)'); \ + gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRange0,$$ipRange1 --target-tags=$$networkTag0,$$networkTag1 + +.PHONY: dns-chaining +dns-chaining: + python3 setup-dns-chaining.py $(project) $(region0) $(clusterName0) $(region1) $(clusterName1) $(brokersPerRegion) +## TODO teardown + +.PHONY: generate-camunda-values +generate-camunda-values: + @contactPoints=$$(/bin/bash -c ' \ + join_addrs=(); \ + for region in $(region0) $(region1); do \ + for i in `seq 0 $$(($(brokersPerRegion)-1))`; do \ + join_addrs+=("camunda-zeebe-$$i.camunda-zeebe.$$region.svc.cluster.local:26502"); \ + done; \ + done; \ + IFS=,; echo "$${join_addrs[*]}";'); \ + echo "Initial contact points: $$contactPoints"; \ + cp camunda-values-template.yaml camunda-values-region-0.yaml; \ + sed -i 's/\$$REGIONID\$$/0/' camunda-values-region-0.yaml; \ + sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region1).svc.cluster.local:9200@' camunda-values-region-0.yaml; \ + sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-0.yaml; \ + cp camunda-values-template.yaml camunda-values-region-1.yaml; \ + sed -i 's/\$$REGIONID\$$/1/' camunda-values-region-1.yaml; \ + sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region0).svc.cluster.local:9200@' camunda-values-region-1.yaml; \ + sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-1.yaml + +.PHONY: install-camunda +install-camunda: + kubectl config use-context gke_$(project)_$(region0)_$(clusterName0) + kubectl create namespace $(region0) + helm install camunda camunda/camunda-platform -f camunda-values-region-0.yaml -n $(region0) + kubectl config use-context gke_$(project)_$(region1)_$(clusterName1) + kubectl create namespace $(region1) + helm install camunda camunda/camunda-platform -f camunda-values-region-1.yaml -n $(region1) + +.PHONY: setup-mraa-gcp +setup-mraa-gcp: 
gcp-create-cluster gcp-firewall dns-chaining generate-camunda-values install-camunda + diff --git a/google/multi-region/active-active/camunda-values-region-0.yaml b/google/multi-region/active-active/camunda-values-region-0.yaml new file mode 100644 index 00000000..a3704edf --- /dev/null +++ b/google/multi-region/active-active/camunda-values-region-0.yaml @@ -0,0 +1,102 @@ +# Chart values for the Camunda Platform 8 Helm chart. +# This file deliberately contains only the values that differ from the defaults. +# For changes and documentation, use your favorite diff tool to compare it with: +# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml + +global: + # Multiregion options for Zeebe + # + ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. + # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. + # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. + # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. + # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. + multiregion: + # number of regions that this Camunda Platform instance is stretched across + regions: 2 + # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. + regionId: 0 + identity: + auth: + # Disable the Identity authentication + # it will fall back to basic-auth: demo/demo as default user + enabled: false + +operate: + env: + - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME + value: "camunda_backup" +tasklist: + env: + - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME + value: "camunda_backup" + +identity: + enabled: false + +optimize: + enabled: false + +connectors: + enabled: true + inbound: + mode: credentials + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "2Gi" + env: + - name: CAMUNDA_OPERATE_CLIENT_USERNAME + value: demo + - name: CAMUNDA_OPERATE_CLIENT_PASSWORD + value: demo + +zeebe: + clusterSize: 8 + partitionCount: 8 + replicationFactor: 4 + env: + - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD + value: "5m" + - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK + value: "0.85" + - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK + value: "0.87" + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL + value: "http://elasticsearch-master-headless.europe-west1-b.svc.cluster.local:9200" + - 
name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE + value: "1" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX + value: "zeebe-record" + pvcSize: 1Gi + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "512m" + memory: "2Gi" + +zeebe-gateway: + replicas: 1 + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "1Gi" + + logLevel: ERROR + +elasticsearch: + enabled: true \ No newline at end of file diff --git a/google/multi-region/active-active/camunda-values-region-1.yaml b/google/multi-region/active-active/camunda-values-region-1.yaml new file mode 100644 index 00000000..e3824268 --- /dev/null +++ b/google/multi-region/active-active/camunda-values-region-1.yaml @@ -0,0 +1,102 @@ +# Chart values for the Camunda Platform 8 Helm chart. +# This file deliberately contains only the values that differ from the defaults. +# For changes and documentation, use your favorite diff tool to compare it with: +# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml + +global: + # Multiregion options for Zeebe + # + ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. + # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. + # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. + # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. + # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. + multiregion: + # number of regions that this Camunda Platform instance is stretched across + regions: 2 + # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. 
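+    # In this setup the broker ids split across regions as 0,2,4,6 (region 0)
+    # and 1,3,5,7 (region 1); see the zbctl topology output in the README.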
+ regionId: 1 + identity: + auth: + # Disable the Identity authentication + # it will fall back to basic-auth: demo/demo as default user + enabled: false + +operate: + env: + - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME + value: "camunda_backup" +tasklist: + env: + - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME + value: "camunda_backup" + +identity: + enabled: false + +optimize: + enabled: false + +connectors: + enabled: true + inbound: + mode: credentials + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "2Gi" + env: + - name: CAMUNDA_OPERATE_CLIENT_USERNAME + value: demo + - name: CAMUNDA_OPERATE_CLIENT_PASSWORD + value: demo + +zeebe: + clusterSize: 8 + partitionCount: 8 + replicationFactor: 4 + env: + - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD + value: "5m" + - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK + value: "0.85" + - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK + value: "0.87" + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL + value: "http://elasticsearch-master-headless.us-east1-c.svc.cluster.local:9200" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE + value: "1" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX + value: "zeebe-record" + pvcSize: 1Gi + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "512m" + memory: "2Gi" + +zeebe-gateway: + replicas: 1 + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "1Gi" + + logLevel: ERROR + +elasticsearch: + enabled: true \ No newline at end of file diff --git a/google/multi-region/active-active/camunda-values-template.yaml b/google/multi-region/active-active/camunda-values-template.yaml new file mode 100644 index 00000000..cab38393 --- /dev/null +++ b/google/multi-region/active-active/camunda-values-template.yaml @@ -0,0 +1,102 @@ +# Chart values for the Camunda Platform 8 Helm chart. +# This file deliberately contains only the values that differ from the defaults. +# For changes and documentation, use your favorite diff tool to compare it with: +# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml + +global: + # Multiregion options for Zeebe + # + ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. + # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. + # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. + # If you can, consider three regions. 
Please, contact your customer success manager as soon as you start planning a multi-region setup. + # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. + multiregion: + # number of regions that this Camunda Platform instance is stretched across + regions: 2 + # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. + regionId: $REGIONID$ + identity: + auth: + # Disable the Identity authentication + # it will fall back to basic-auth: demo/demo as default user + enabled: false + +operate: + env: + - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME + value: "camunda_backup" +tasklist: + env: + - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME + value: "camunda_backup" + +identity: + enabled: false + +optimize: + enabled: false + +connectors: + enabled: true + inbound: + mode: credentials + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "2Gi" + env: + - name: CAMUNDA_OPERATE_CLIENT_USERNAME + value: demo + - name: CAMUNDA_OPERATE_CLIENT_PASSWORD + value: demo + +zeebe: + clusterSize: 8 + partitionCount: 8 + replicationFactor: 4 + env: + - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD + value: "5m" + - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK + value: "0.85" + - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK + value: "0.87" + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: "$CONTACTPOINTS$" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL + value: "$ELASTIC_URL_2$" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE + value: "1" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX + value: "zeebe-record" + pvcSize: 1Gi + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "512m" + memory: "2Gi" + +zeebe-gateway: + replicas: 1 + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "1Gi" + + logLevel: ERROR + +elasticsearch: + enabled: true \ No newline at end of file diff --git a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml index 033a6936..29295a70 100644 --- a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml +++ b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml @@ -5,4 +5,4 @@ metadata: namespace: kube-system data: stubDomains: | - {"europe-west1.svc.cluster.local": ["8.34.209.179"], "europe-west1-failover.svc.cluster.local": ["8.34.209.179"]} + {"europe-west1-b.svc.cluster.local": ["34.34.141.123"], "europe-west1-b-failover.svc.cluster.local": ["34.34.141.123"]} diff --git a/google/multi-region/active-active/setup-dns-chaining.py b/google/multi-region/active-active/setup-dns-chaining.py index 12b5d36a..5b3a11be 100755 --- a/google/multi-region/active-active/setup-dns-chaining.py +++ b/google/multi-region/active-active/setup-dns-chaining.py @@ -3,6 +3,7 @@ import distutils.spawn import json import os +import sys from subprocess import check_call,check_output from sys import exit from time import sleep @@ -26,12 +27,24 @@ # 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0', # 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1', # } -# TODO generate kubectl contexts via make using pattern: 
gke_$(project)_$(region)_$(clusterName)
+if len(sys.argv) != 7:
+    print("Usage: python your_script.py <project> <region0> <clusterName0> <region1> <clusterName1> <brokersPerRegion>")
+    sys.exit(1)
+
+project = sys.argv[1]
+region0 = sys.argv[2]
+clusterName0 = sys.argv[3]
+region1 = sys.argv[4]
+clusterName1 = sys.argv[5]
+brokersPerRegion = int(sys.argv[6])
+
 contexts = {
-    'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
-    'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
+    region0: f'gke_{project}_{region0}_{clusterName0}',
+    region1: f'gke_{project}_{region1}_{clusterName1}',
 }
 
+number_of_zeebe_brokers_per_region = brokersPerRegion
+
 # Fill in the number of Zeebe brokers per region,
 # i.e. clusterSize/regions as defined in camunda-values.yaml
 number_of_zeebe_brokers_per_region = 4

From b28109500c78c0945994c45ba394881a91e0e95 Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Wed, 24 Jan 2024 10:17:47 +0100
Subject: [PATCH 26/58] Revert "automate gcp creation"

This reverts commit f838872379a5d82a56664c4ee9bdb82091b11605.
---
 google/multi-region/active-active/Makefile   | 101 -----------------
 .../camunda-values-region-0.yaml             | 102 ------------------
 .../camunda-values-region-1.yaml             | 102 ------------------
 .../camunda-values-template.yaml             | 102 ------------------
 .../generated/dns-configmap-us-east1.yaml    |   2 +-
 .../active-active/setup-dns-chaining.py      |  19 +---
 6 files changed, 4 insertions(+), 424 deletions(-)
 delete mode 100644 google/multi-region/active-active/Makefile
 delete mode 100644 google/multi-region/active-active/camunda-values-region-0.yaml
 delete mode 100644 google/multi-region/active-active/camunda-values-region-1.yaml
 delete mode 100644 google/multi-region/active-active/camunda-values-template.yaml

diff --git a/google/multi-region/active-active/Makefile b/google/multi-region/active-active/Makefile
deleted file mode 100644
index e60bf193..00000000
--- a/google/multi-region/active-active/Makefile
+++ /dev/null
@@ -1,101 +0,0 @@
-# ------------------------------------
-# Set the following for your specific environment
-# Already have a Cluster? Set these values to point to your existing environment
-# Otherwise, these values will be used to create a new Cluster
-
-# GCP project
-project ?= camunda-researchanddevelopment
-# GCP region 0
-region0 ?=us-east1-c# region = zone for simplicity
-# GCP region 1
-region1 ?= europe-west1-b
-# GKE cluster name 0
-clusterName0 ?= manus-region-0
-# GKE cluster name 1
-clusterName1 ?= manus-region-1
-# Firewall rule name
-firewallRule ?= zeebe-between-clusters-manu
-# Brokers per Region
-brokersPerRegion = 4
-
-
-# ------------------------------------
-# The following variables should not be changed except for advanced use cases
-ifeq ($(OS),Windows_NT)
-	root ?= $(CURDIR)/../../../..
-else
-	root ?= $(shell pwd)/../../../..
-endif - - -.PHONY: gcp-create-cluster -gcp-create-cluster: - gcloud config set project $(project) - gcloud container clusters create $(clusterName0) \ - --region $(region0) \ - --num-nodes=1 \ - --enable-autoscaling --max-nodes=24 --min-nodes=1 \ - --enable-ip-alias \ - --machine-type=n2-standard-2 \ - --disk-type "pd-ssd" \ - --spot \ - --maintenance-window=4:00 \ - --release-channel=regular \ - --cluster-version=latest - gcloud container clusters create $(clusterName1) \ - --region $(region1) \ - --num-nodes=1 \ - --enable-autoscaling --max-nodes=24 --min-nodes=1 \ - --enable-ip-alias \ - --machine-type=n2-standard-2 \ - --disk-type "pd-ssd" \ - --spot \ - --maintenance-window=4:00 \ - --release-channel=regular \ - --cluster-version=latest - -.PHONY: gcp-firewall -gcp-firewall: - networkTag0=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region0)_$(clusterName0)) --zone $(region0) --format="get(tags.items)"); \ - networkTag1=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region1)_$(clusterName1)) --zone $(region1) --format="get(tags.items)"); \ - ipRange0=$$(gcloud container clusters describe $(clusterName0) --zone $(region0) --format='value(clusterIpv4Cidr)'); \ - ipRange1=$$(gcloud container clusters describe $(clusterName1) --zone $(region1) --format='value(clusterIpv4Cidr)'); \ - gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRange0,$$ipRange1 --target-tags=$$networkTag0,$$networkTag1 - -.PHONY: dns-chaining -dns-chaining: - python3 setup-dns-chaining.py $(project) $(region0) $(clusterName0) $(region1) $(clusterName1) $(brokersPerRegion) -## TODO teardown - -.PHONY: generate-camunda-values -generate-camunda-values: - @contactPoints=$$(/bin/bash -c ' \ - join_addrs=(); \ - for region in $(region0) $(region1); do \ - for i in `seq 0 $$(($(brokersPerRegion)-1))`; do \ - join_addrs+=("camunda-zeebe-$$i.camunda-zeebe.$$region.svc.cluster.local:26502"); \ - done; \ - done; \ - IFS=,; echo "$${join_addrs[*]}";'); \ - echo "Initial contact points: $$contactPoints"; \ - cp camunda-values-template.yaml camunda-values-region-0.yaml; \ - sed -i 's/\$$REGIONID\$$/0/' camunda-values-region-0.yaml; \ - sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region1).svc.cluster.local:9200@' camunda-values-region-0.yaml; \ - sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-0.yaml; \ - cp camunda-values-template.yaml camunda-values-region-1.yaml; \ - sed -i 's/\$$REGIONID\$$/1/' camunda-values-region-1.yaml; \ - sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region0).svc.cluster.local:9200@' camunda-values-region-1.yaml; \ - sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-1.yaml - -.PHONY: install-camunda -install-camunda: - kubectl config use-context gke_$(project)_$(region0)_$(clusterName0) - kubectl create namespace $(region0) - helm install camunda camunda/camunda-platform -f camunda-values-region-0.yaml -n $(region0) - kubectl config use-context gke_$(project)_$(region1)_$(clusterName1) - kubectl create namespace $(region1) - helm install camunda camunda/camunda-platform -f camunda-values-region-1.yaml -n $(region1) - -.PHONY: setup-mraa-gcp -setup-mraa-gcp: 
gcp-create-cluster gcp-firewall dns-chaining generate-camunda-values install-camunda - diff --git a/google/multi-region/active-active/camunda-values-region-0.yaml b/google/multi-region/active-active/camunda-values-region-0.yaml deleted file mode 100644 index a3704edf..00000000 --- a/google/multi-region/active-active/camunda-values-region-0.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# Chart values for the Camunda Platform 8 Helm chart. -# This file deliberately contains only the values that differ from the defaults. -# For changes and documentation, use your favorite diff tool to compare it with: -# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml - -global: - # Multiregion options for Zeebe - # - ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. - # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. - # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. - # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. - # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. - multiregion: - # number of regions that this Camunda Platform instance is stretched across - regions: 2 - # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. - regionId: 0 - identity: - auth: - # Disable the Identity authentication - # it will fall back to basic-auth: demo/demo as default user - enabled: false - -operate: - env: - - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME - value: "camunda_backup" -tasklist: - env: - - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME - value: "camunda_backup" - -identity: - enabled: false - -optimize: - enabled: false - -connectors: - enabled: true - inbound: - mode: credentials - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "2Gi" - env: - - name: CAMUNDA_OPERATE_CLIENT_USERNAME - value: demo - - name: CAMUNDA_OPERATE_CLIENT_PASSWORD - value: demo - -zeebe: - clusterSize: 8 - partitionCount: 8 - replicationFactor: 4 - env: - - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD - value: "5m" - - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK - value: "0.85" - - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK - value: "0.87" - - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME - value: "io.camunda.zeebe.exporter.ElasticsearchExporter" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL - value: "http://elasticsearch-master-headless.europe-west1-b.svc.cluster.local:9200" - - 
name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE - value: "1" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX - value: "zeebe-record" - pvcSize: 1Gi - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "512m" - memory: "2Gi" - -zeebe-gateway: - replicas: 1 - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "1Gi" - - logLevel: ERROR - -elasticsearch: - enabled: true \ No newline at end of file diff --git a/google/multi-region/active-active/camunda-values-region-1.yaml b/google/multi-region/active-active/camunda-values-region-1.yaml deleted file mode 100644 index e3824268..00000000 --- a/google/multi-region/active-active/camunda-values-region-1.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# Chart values for the Camunda Platform 8 Helm chart. -# This file deliberately contains only the values that differ from the defaults. -# For changes and documentation, use your favorite diff tool to compare it with: -# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml - -global: - # Multiregion options for Zeebe - # - ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. - # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. - # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. - # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. - # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. - multiregion: - # number of regions that this Camunda Platform instance is stretched across - regions: 2 - # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. 
- regionId: 1 - identity: - auth: - # Disable the Identity authentication - # it will fall back to basic-auth: demo/demo as default user - enabled: false - -operate: - env: - - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME - value: "camunda_backup" -tasklist: - env: - - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME - value: "camunda_backup" - -identity: - enabled: false - -optimize: - enabled: false - -connectors: - enabled: true - inbound: - mode: credentials - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "2Gi" - env: - - name: CAMUNDA_OPERATE_CLIENT_USERNAME - value: demo - - name: CAMUNDA_OPERATE_CLIENT_PASSWORD - value: demo - -zeebe: - clusterSize: 8 - partitionCount: 8 - replicationFactor: 4 - env: - - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD - value: "5m" - - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK - value: "0.85" - - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK - value: "0.87" - - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME - value: "io.camunda.zeebe.exporter.ElasticsearchExporter" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL - value: "http://elasticsearch-master-headless.us-east1-c.svc.cluster.local:9200" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE - value: "1" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX - value: "zeebe-record" - pvcSize: 1Gi - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "512m" - memory: "2Gi" - -zeebe-gateway: - replicas: 1 - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "1Gi" - - logLevel: ERROR - -elasticsearch: - enabled: true \ No newline at end of file diff --git a/google/multi-region/active-active/camunda-values-template.yaml b/google/multi-region/active-active/camunda-values-template.yaml deleted file mode 100644 index cab38393..00000000 --- a/google/multi-region/active-active/camunda-values-template.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# Chart values for the Camunda Platform 8 Helm chart. -# This file deliberately contains only the values that differ from the defaults. -# For changes and documentation, use your favorite diff tool to compare it with: -# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml - -global: - # Multiregion options for Zeebe - # - ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. - # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. - # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. - # If you can, consider three regions. 
Please, contact your customer success manager as soon as you start planning a multi-region setup.
-  # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks.
-  multiregion:
-    # number of regions that this Camunda Platform instance is stretched across
-    regions: 2
-    # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1.
-    regionId: $REGIONID$
-  identity:
-    auth:
-      # Disable the Identity authentication
-      # it will fall back to basic-auth: demo/demo as default user
-      enabled: false
-
-operate:
-  env:
-    - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-tasklist:
-  env:
-    - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-
-identity:
-  enabled: false
-
-optimize:
-  enabled: false
-
-connectors:
-  enabled: true
-  inbound:
-    mode: credentials
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "2Gi"
-  env:
-    - name: CAMUNDA_OPERATE_CLIENT_USERNAME
-      value: demo
-    - name: CAMUNDA_OPERATE_CLIENT_PASSWORD
-      value: demo
-
-zeebe:
-  clusterSize: 8
-  partitionCount: 8
-  replicationFactor: 4
-  env:
-    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
-      value: "5m"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
-      value: "0.85"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
-      value: "0.87"
-    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
-      value: "$CONTACTPOINTS$"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME
-      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL
-      value: "$ELASTIC_URL_2$"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE
-      value: "1"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX
-      value: "zeebe-record"
-  pvcSize: 1Gi
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "512m"
-      memory: "2Gi"
-
-zeebe-gateway:
-  replicas: 1
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "1Gi"
-
-  logLevel: ERROR
-
-elasticsearch:
-  enabled: true
\ No newline at end of file
diff --git a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml
index 29295a70..033a6936 100644
--- a/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml
+++ b/google/multi-region/active-active/generated/dns-configmap-us-east1.yaml
@@ -5,4 +5,4 @@ metadata:
   namespace: kube-system
 data:
   stubDomains: |
-    {"europe-west1-b.svc.cluster.local": ["34.34.141.123"], "europe-west1-b-failover.svc.cluster.local": ["34.34.141.123"]}
+    {"europe-west1.svc.cluster.local": ["8.34.209.179"], "europe-west1-failover.svc.cluster.local": ["8.34.209.179"]}
diff --git a/google/multi-region/active-active/setup-dns-chaining.py b/google/multi-region/active-active/setup-dns-chaining.py
index 5b3a11be..12b5d36a 100755
--- a/google/multi-region/active-active/setup-dns-chaining.py
+++ b/google/multi-region/active-active/setup-dns-chaining.py
@@ -3,7 +3,6 @@
 import distutils.spawn
 import json
 import os
-import sys
 from subprocess import check_call,check_output
 from sys import exit
 from time import sleep
@@ -27,24 +26,12 @@
 # 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
 # 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
 # }
-if len(sys.argv) != 7:
-    print("Usage: python your_script.py <project> <region0> <clusterName0> <region1> <clusterName1> <brokersPerRegion>")
-    sys.exit(1)
-
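For orientation: the regenerated dns-configmap-us-east1.yaml above is what this script writes into each cluster, a kube-dns stubDomains entry forwarding the other region's service domains to that cluster's internal DNS load balancer, so names like camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local resolve across clusters. A rough sketch of that generation step, where stub_domains_configmap is an illustrative stand-in for the script's own logic and the IP stands in for the load-balancer address it discovers at runtime:

    import json

    def stub_domains_configmap(remote_region, dns_lb_ip):
        # Forward <remote-region>.svc.cluster.local and its -failover
        # twin to the remote cluster's kube-dns load balancer.
        stub_domains = {
            f"{remote_region}.svc.cluster.local": [dns_lb_ip],
            f"{remote_region}-failover.svc.cluster.local": [dns_lb_ip],
        }
        return (
            "apiVersion: v1\n"
            "kind: ConfigMap\n"
            "metadata:\n"
            "  name: kube-dns\n"
            "  namespace: kube-system\n"
            "data:\n"
            "  stubDomains: |\n"
            "    " + json.dumps(stub_domains) + "\n"
        )

    print(stub_domains_configmap("europe-west1", "8.34.209.179"))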
-project = sys.argv[1] -region0 = sys.argv[2] -clusterName0 = sys.argv[3] -region1 = sys.argv[4] -clusterName1 = sys.argv[5] -brokersPerRegion = int(sys.argv[6]) - +# TODO generate kubectl contexts via make using pattern: gke_$(project)_$(region)_$(clusterName) contexts = { - region0: f'gke_{project}_{region0}_{clusterName0}', - region1: f'gke_{project}_{region1}_{clusterName1}', + 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0', + 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1', } -number_of_zeebe_brokers_per_region = brokersPerRegion - # Fill in the number of Zeebe brokers per region, # i.e. clusterSize/regions as defined in camunda-values.yaml number_of_zeebe_brokers_per_region = 4 From fccf8515b67601c9e5166194ff776c6b1c151622 Mon Sep 17 00:00:00 2001 From: ManuelDittmar Date: Wed, 24 Jan 2024 10:27:22 +0100 Subject: [PATCH 27/58] one makefile + values template --- google/multi-region/active-active/Makefile | 101 +++++++++++++++++ .../camunda-values-region-0.yaml | 102 ++++++++++++++++++ .../camunda-values-region-1.yaml | 102 ++++++++++++++++++ .../camunda-values-template.yaml | 102 ++++++++++++++++++ .../active-active/setup-dns-chaining.py | 19 +++- 5 files changed, 423 insertions(+), 3 deletions(-) create mode 100644 google/multi-region/active-active/Makefile create mode 100644 google/multi-region/active-active/camunda-values-region-0.yaml create mode 100644 google/multi-region/active-active/camunda-values-region-1.yaml create mode 100644 google/multi-region/active-active/camunda-values-template.yaml diff --git a/google/multi-region/active-active/Makefile b/google/multi-region/active-active/Makefile new file mode 100644 index 00000000..e60bf193 --- /dev/null +++ b/google/multi-region/active-active/Makefile @@ -0,0 +1,101 @@ +# ------------------------------------ +# Set the following for your specific environment +# Already have a Cluster? Set these values to point to your existing environment +# Otherwise, these values will be used to create a new Cluster + +# GCP project +project ?= camunda-researchanddevelopment +# GCP region 0 +region0 ?=us-east1-c# region = zone for simplicity +# GCP region 1 +region1 ?= europe-west1-b +# GKE cluster name 0 +clusterName0 ?= manus-region-0 +# GKE cluster name 1 +clusterName1 ?= manus-region-1 +# Firewall rule name +firewallRule ?= zeebe-between-clusters-manu +# Brokers per Region +brokersPerRegion = 4 + + +# ------------------------------------ +# The following variables should not be changed except for advanced use cases +ifeq ($(OS),Windows_NT) + root ?= $(CURDIR)/../../../.. +else + root ?= $(shell pwd)/../../../.. 
+endif + + +.PHONY: gcp-create-cluster +gcp-create-cluster: + gcloud config set project $(project) + gcloud container clusters create $(clusterName0) \ + --region $(region0) \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest + gcloud container clusters create $(clusterName1) \ + --region $(region1) \ + --num-nodes=1 \ + --enable-autoscaling --max-nodes=24 --min-nodes=1 \ + --enable-ip-alias \ + --machine-type=n2-standard-2 \ + --disk-type "pd-ssd" \ + --spot \ + --maintenance-window=4:00 \ + --release-channel=regular \ + --cluster-version=latest + +.PHONY: gcp-firewall +gcp-firewall: + networkTag0=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region0)_$(clusterName0)) --zone $(region0) --format="get(tags.items)"); \ + networkTag1=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region1)_$(clusterName1)) --zone $(region1) --format="get(tags.items)"); \ + ipRange0=$$(gcloud container clusters describe $(clusterName0) --zone $(region0) --format='value(clusterIpv4Cidr)'); \ + ipRange1=$$(gcloud container clusters describe $(clusterName1) --zone $(region1) --format='value(clusterIpv4Cidr)'); \ + gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRange0,$$ipRange1 --target-tags=$$networkTag0,$$networkTag1 + +.PHONY: dns-chaining +dns-chaining: + python3 setup-dns-chaining.py $(project) $(region0) $(clusterName0) $(region1) $(clusterName1) $(brokersPerRegion) +## TODO teardown + +.PHONY: generate-camunda-values +generate-camunda-values: + @contactPoints=$$(/bin/bash -c ' \ + join_addrs=(); \ + for region in $(region0) $(region1); do \ + for i in `seq 0 $$(($(brokersPerRegion)-1))`; do \ + join_addrs+=("camunda-zeebe-$$i.camunda-zeebe.$$region.svc.cluster.local:26502"); \ + done; \ + done; \ + IFS=,; echo "$${join_addrs[*]}";'); \ + echo "Initial contact points: $$contactPoints"; \ + cp camunda-values-template.yaml camunda-values-region-0.yaml; \ + sed -i 's/\$$REGIONID\$$/0/' camunda-values-region-0.yaml; \ + sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region1).svc.cluster.local:9200@' camunda-values-region-0.yaml; \ + sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-0.yaml; \ + cp camunda-values-template.yaml camunda-values-region-1.yaml; \ + sed -i 's/\$$REGIONID\$$/1/' camunda-values-region-1.yaml; \ + sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region0).svc.cluster.local:9200@' camunda-values-region-1.yaml; \ + sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-1.yaml + +.PHONY: install-camunda +install-camunda: + kubectl config use-context gke_$(project)_$(region0)_$(clusterName0) + kubectl create namespace $(region0) + helm install camunda camunda/camunda-platform -f camunda-values-region-0.yaml -n $(region0) + kubectl config use-context gke_$(project)_$(region1)_$(clusterName1) + kubectl create namespace $(region1) + helm install camunda camunda/camunda-platform -f camunda-values-region-1.yaml -n $(region1) + +.PHONY: setup-mraa-gcp +setup-mraa-gcp: 
gcp-create-cluster gcp-firewall dns-chaining generate-camunda-values install-camunda + diff --git a/google/multi-region/active-active/camunda-values-region-0.yaml b/google/multi-region/active-active/camunda-values-region-0.yaml new file mode 100644 index 00000000..a3704edf --- /dev/null +++ b/google/multi-region/active-active/camunda-values-region-0.yaml @@ -0,0 +1,102 @@ +# Chart values for the Camunda Platform 8 Helm chart. +# This file deliberately contains only the values that differ from the defaults. +# For changes and documentation, use your favorite diff tool to compare it with: +# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml + +global: + # Multiregion options for Zeebe + # + ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. + # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. + # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. + # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. + # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. + multiregion: + # number of regions that this Camunda Platform instance is stretched across + regions: 2 + # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. + regionId: 0 + identity: + auth: + # Disable the Identity authentication + # it will fall back to basic-auth: demo/demo as default user + enabled: false + +operate: + env: + - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME + value: "camunda_backup" +tasklist: + env: + - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME + value: "camunda_backup" + +identity: + enabled: false + +optimize: + enabled: false + +connectors: + enabled: true + inbound: + mode: credentials + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "2Gi" + env: + - name: CAMUNDA_OPERATE_CLIENT_USERNAME + value: demo + - name: CAMUNDA_OPERATE_CLIENT_PASSWORD + value: demo + +zeebe: + clusterSize: 8 + partitionCount: 8 + replicationFactor: 4 + env: + - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD + value: "5m" + - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK + value: "0.85" + - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK + value: "0.87" + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL + value: "http://elasticsearch-master-headless.europe-west1-b.svc.cluster.local:9200" + - 
name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE + value: "1" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX + value: "zeebe-record" + pvcSize: 1Gi + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "512m" + memory: "2Gi" + +zeebe-gateway: + replicas: 1 + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "1Gi" + + logLevel: ERROR + +elasticsearch: + enabled: true \ No newline at end of file diff --git a/google/multi-region/active-active/camunda-values-region-1.yaml b/google/multi-region/active-active/camunda-values-region-1.yaml new file mode 100644 index 00000000..e3824268 --- /dev/null +++ b/google/multi-region/active-active/camunda-values-region-1.yaml @@ -0,0 +1,102 @@ +# Chart values for the Camunda Platform 8 Helm chart. +# This file deliberately contains only the values that differ from the defaults. +# For changes and documentation, use your favorite diff tool to compare it with: +# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml + +global: + # Multiregion options for Zeebe + # + ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. + # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. + # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. + # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. + # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. + multiregion: + # number of regions that this Camunda Platform instance is stretched across + regions: 2 + # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. 
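The long ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS values in these generated files are mechanical: one entry per broker per region on port 26502, exactly what the shell loop in the generate-camunda-values target assembles. The same list in Python, as a sketch (initial_contact_points is an illustrative helper, not part of the repository):

    def initial_contact_points(regions, brokers_per_region, port=26502):
        # One camunda-zeebe-<i> pod per broker, addressed through the
        # region-scoped namespaces that the DNS chaining makes resolvable.
        return ",".join(
            f"camunda-zeebe-{i}.camunda-zeebe.{region}.svc.cluster.local:{port}"
            for region in regions
            for i in range(brokers_per_region)
        )

    print(initial_contact_points(["us-east1-c", "europe-west1-b"], 4))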
+ regionId: 1 + identity: + auth: + # Disable the Identity authentication + # it will fall back to basic-auth: demo/demo as default user + enabled: false + +operate: + env: + - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME + value: "camunda_backup" +tasklist: + env: + - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME + value: "camunda_backup" + +identity: + enabled: false + +optimize: + enabled: false + +connectors: + enabled: true + inbound: + mode: credentials + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "2Gi" + env: + - name: CAMUNDA_OPERATE_CLIENT_USERNAME + value: demo + - name: CAMUNDA_OPERATE_CLIENT_PASSWORD + value: demo + +zeebe: + clusterSize: 8 + partitionCount: 8 + replicationFactor: 4 + env: + - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD + value: "5m" + - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK + value: "0.85" + - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK + value: "0.87" + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL + value: "http://elasticsearch-master-headless.us-east1-c.svc.cluster.local:9200" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE + value: "1" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX + value: "zeebe-record" + pvcSize: 1Gi + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "512m" + memory: "2Gi" + +zeebe-gateway: + replicas: 1 + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "1Gi" + + logLevel: ERROR + +elasticsearch: + enabled: true \ No newline at end of file diff --git a/google/multi-region/active-active/camunda-values-template.yaml b/google/multi-region/active-active/camunda-values-template.yaml new file mode 100644 index 00000000..cab38393 --- /dev/null +++ b/google/multi-region/active-active/camunda-values-template.yaml @@ -0,0 +1,102 @@ +# Chart values for the Camunda Platform 8 Helm chart. +# This file deliberately contains only the values that differ from the defaults. +# For changes and documentation, use your favorite diff tool to compare it with: +# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml + +global: + # Multiregion options for Zeebe + # + ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. + # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. + # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. + # If you can, consider three regions. 
Please, contact your customer success manager as soon as you start planning a multi-region setup.
+  # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks.
+  multiregion:
+    # number of regions that this Camunda Platform instance is stretched across
+    regions: 2
+    # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1.
+    regionId: $REGIONID$
+  identity:
+    auth:
+      # Disable the Identity authentication
+      # it will fall back to basic-auth: demo/demo as default user
+      enabled: false
+
+operate:
+  env:
+    - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME
+      value: "camunda_backup"
+tasklist:
+  env:
+    - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME
+      value: "camunda_backup"
+
+identity:
+  enabled: false
+
+optimize:
+  enabled: false
+
+connectors:
+  enabled: true
+  inbound:
+    mode: credentials
+  resources:
+    requests:
+      cpu: "100m"
+      memory: "512M"
+    limits:
+      cpu: "1000m"
+      memory: "2Gi"
+  env:
+    - name: CAMUNDA_OPERATE_CLIENT_USERNAME
+      value: demo
+    - name: CAMUNDA_OPERATE_CLIENT_PASSWORD
+      value: demo
+
+zeebe:
+  clusterSize: 8
+  partitionCount: 8
+  replicationFactor: 4
+  env:
+    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
+      value: "5m"
+    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
+      value: "0.85"
+    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
+      value: "0.87"
+    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
+      value: "$CONTACTPOINTS$"
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME
+      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL
+      value: "$ELASTIC_URL_2$"
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE
+      value: "1"
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX
+      value: "zeebe-record"
+  pvcSize: 1Gi
+
+  resources:
+    requests:
+      cpu: "100m"
+      memory: "512M"
+    limits:
+      cpu: "512m"
+      memory: "2Gi"
+
+zeebe-gateway:
+  replicas: 1
+
+  resources:
+    requests:
+      cpu: "100m"
+      memory: "512M"
+    limits:
+      cpu: "1000m"
+      memory: "1Gi"
+
+  logLevel: ERROR
+
+elasticsearch:
+  enabled: true
\ No newline at end of file
diff --git a/google/multi-region/active-active/setup-dns-chaining.py b/google/multi-region/active-active/setup-dns-chaining.py
index 12b5d36a..5b3a11be 100755
--- a/google/multi-region/active-active/setup-dns-chaining.py
+++ b/google/multi-region/active-active/setup-dns-chaining.py
@@ -3,6 +3,7 @@
 import distutils.spawn
 import json
 import os
+import sys
 from subprocess import check_call,check_output
 from sys import exit
 from time import sleep
@@ -26,12 +27,24 @@
 # 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
 # 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
 # }
-# TODO generate kubectl contexts via make using pattern: gke_$(project)_$(region)_$(clusterName)
+if len(sys.argv) != 7:
+    print("Usage: python your_script.py <project> <region0> <clusterName0> <region1> <clusterName1> <brokersPerRegion>")
+    sys.exit(1)
+
+project = sys.argv[1]
+region0 = sys.argv[2]
+clusterName0 = sys.argv[3]
+region1 = sys.argv[4]
+clusterName1 = sys.argv[5]
+brokersPerRegion = int(sys.argv[6])
+
 contexts = {
-    'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
-    'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
+    region0: f'gke_{project}_{region0}_{clusterName0}',
+    region1: f'gke_{project}_{region1}_{clusterName1}',
 }
 
+number_of_zeebe_brokers_per_region = brokersPerRegion
+
 # Fill in the number of Zeebe brokers per region,
 # i.e.
clusterSize/regions as defined in camunda-values.yaml number_of_zeebe_brokers_per_region = 4 From 2b946ca10a6413b49ed8045133ad846a6849bf18 Mon Sep 17 00:00:00 2001 From: ManuelDittmar Date: Wed, 24 Jan 2024 15:54:42 +0100 Subject: [PATCH 28/58] reuse make targets and allow > 2 regions --- .../active-active/gcp-setup/Makefile | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 google/multi-region/active-active/gcp-setup/Makefile diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile new file mode 100644 index 00000000..e2f30d3b --- /dev/null +++ b/google/multi-region/active-active/gcp-setup/Makefile @@ -0,0 +1,127 @@ +# ------------------------------------ +# The following variables should not be changed except for advanced use cases +ifeq ($(OS),Windows_NT) + root ?= $(CURDIR)/../../../.. +else + root ?= $(shell pwd)/../../../.. +endif +# ------------------------------------ +# Set the following for your specific environment +# Already have a Cluster? Set these values to point to your existing environment +# Otherwise, these values will be used to create a new Cluster +include $(root)/google/include/kubernetes-gke.mk +include $(root)/include/camunda.mk + +# GCP project +project ?= camunda-researchanddevelopment +# GCP region 0 +region0 ?=us-east1-c# region = zone for simplicity +# GCP region 1 +region1 ?= europe-west1-b + +machineType ?= n2-standard-2 +minSize ?= 1 +maxSize ?= 24 + +regions ?= us-east1-c europe-west1-b +clusters ?= manus-region-0 manus-region-1 +regions_clusters ?= "us-east1-c;manus-region-0" "europe-west1-b;manus-region-1" +# GKE cluster name 0 +clusterName0 ?= manus-region-0 +# GKE cluster name 1 +clusterName1 ?= manus-region-1 +# Firewall rule name +firewallRule ?= zeebe-between-clusters-manu +# Brokers per Region +brokersPerRegion = 4 +# Bucket Name for GCP +bucketName ?= manus-backup + +.PHONY: kube +kube: + for region_cluster in $(regions_clusters); do \ + region=$$(echo $$region_cluster | cut -d';' -f1); \ + clusterName=$$(echo $$region_cluster | cut -d';' -f2); \ + echo "Setting up region: $$region with cluster name: $$clusterName"; \ + $(MAKE) kube-gke region=$$region clusterName=$$clusterName; \ + done + + +.PHONY: gcp-firewall +gcp-firewall: + set -e; \ + ipRanges=""; \ + networkTags=""; \ + for region_cluster in $(regions_clusters); do \ + region=$$(echo $$region_cluster | cut -d';' -f1); \ + clusterName=$$(echo $$region_cluster | cut -d';' -f2); \ + networkTag=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_"$$region"_"$$clusterName") --zone $$region --format="get(tags.items)"); \ + ipRange=$$(gcloud container clusters describe $$clusterName --zone $$region --format='value(clusterIpv4Cidr)'); \ + if [ -z "$$networkTags" ]; then \ + networkTags=$$networkTag; \ + else \ + networkTags=$$networkTags,$$networkTag; \ + fi; \ + if [ -z "$$ipRanges" ]; then \ + ipRanges=$$ipRange; \ + else \ + ipRanges=$$ipRanges,$$ipRange; \ + fi; \ + done; \ + gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRanges --target-tags=$$networkTags + +.PHONY: dns-chaining +dns-chaining: + python3 setup-dns-chaining.py $(project) $(region0) $(clusterName0) $(region1) $(clusterName1) $(brokersPerRegion) +## TODO teardown + +.PHONY: 
generate-camunda-values +generate-camunda-values: + @contactPoints=$$(/bin/bash -c ' \ + join_addrs=(); \ + for region in $(region0) $(region1); do \ + for i in `seq 0 $$(($(brokersPerRegion)-1))`; do \ + join_addrs+=("camunda-zeebe-$$i.camunda-zeebe.$$region.svc.cluster.local:26502"); \ + done; \ + done; \ + IFS=,; echo "$${join_addrs[*]}";'); \ + echo "Initial contact points: $$contactPoints"; \ + cp camunda-values-template.yaml camunda-values-region-0.yaml; \ + sed -i 's/\$$REGIONID\$$/0/' camunda-values-region-0.yaml; \ + sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region1).svc.cluster.local:9200@' camunda-values-region-0.yaml; \ + sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-0.yaml; \ + cp camunda-values-template.yaml camunda-values-region-1.yaml; \ + sed -i 's/\$$REGIONID\$$/1/' camunda-values-region-1.yaml; \ + sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region0).svc.cluster.local:9200@' camunda-values-region-1.yaml; \ + sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-1.yaml + +.PHONY: gcp-bucket +gcp-bucket: + gcloud storage buckets create gs://$(bucketName) --project $(project) + gcloud iam service-accounts create $(bucketName)-sa \ + --description="Service account for $(bucketName) bucket" \ + --project $(project) + gcloud projects add-iam-policy-binding $(project) \ + --member="serviceAccount:$(bucketName)-sa@$(project).iam.gserviceaccount.com" \ + --role="roles/storage.admin" + gcloud iam service-accounts keys create ~/Downloads/camunda-hackweek-es-backup-key.json \ + --iam-account=$(bucketName)-sa@$(project).iam.gserviceaccount.com + +#.PHONY register-repository-es +#register-repository-es: +# sed -i 's/\$$BUCKETNAME\$$/$(bucketName)/' elasticsearch-repo-setup.yaml; \ + + + +.PHONY: install-camunda +install-camunda: + kubectl config use-context gke_$(project)_$(region0)_$(clusterName0) + kubectl create namespace $(region0) + helm install camunda camunda/camunda-platform -f camunda-values-region-0.yaml -n $(region0) + kubectl config use-context gke_$(project)_$(region1)_$(clusterName1) + kubectl create namespace $(region1) + helm install camunda camunda/camunda-platform -f camunda-values-region-1.yaml -n $(region1) + +.PHONY: setup-mraa-gcp +setup-mraa-gcp: gcp-create-cluster gcp-firewall dns-chaining generate-camunda-values install-camunda + From 1994a663ab579ea4648841370a90bf9fe5ffd420 Mon Sep 17 00:00:00 2001 From: HamzaMasood1 Date: Wed, 24 Jan 2024 15:59:45 +0100 Subject: [PATCH 29/58] changed layout of values.yaml files and fixed conflicting exporter names for elasticsearch --- .../active-active/camunda-values.yaml | 169 ++++++++++++++++++ .../active-active/region0/Makefile | 8 +- .../active-active/region0/camunda-values.yaml | 156 ---------------- .../active-active/region1/Makefile | 8 +- .../active-active/region1/camunda-values.yaml | 158 +--------------- 5 files changed, 180 insertions(+), 319 deletions(-) create mode 100644 google/multi-region/active-active/camunda-values.yaml diff --git a/google/multi-region/active-active/camunda-values.yaml b/google/multi-region/active-active/camunda-values.yaml new file mode 100644 index 00000000..6e698156 --- /dev/null +++ b/google/multi-region/active-active/camunda-values.yaml @@ -0,0 +1,169 @@ +# Chart values for the Camunda Platform 8 Helm chart. +# This file deliberately contains only the values that differ from the defaults. 
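+# Unlike the per-region files that this shared file replaces, the Zeebe
+# exporters below carry region-scoped names (ELASTICSEARCHREGION0 and
+# ELASTICSEARCHREGION1) instead of a single ELASTICSEARCH2, so both regions'
+# Elasticsearch targets can be declared side by side without clashing, and
+# global.elasticsearch.disableExporter turns off the chart's built-in exporter.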
+# For changes and documentation, use your favorite diff tool to compare it with: +# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml + +global: + # Multiregion options for Zeebe + # + ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. + # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. + # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. + # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. + # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. + multiregion: + # number of regions that this Camunda Platform instance is stretched across + regions: 2 + identity: + auth: + # Disable the Identity authentication + # it will fall back to basic-auth: demo/demo as default user + enabled: false + elasticsearch: + disableExporter: true + +operate: + env: + - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME + value: "camunda_backup" +tasklist: + env: + - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME + value: "camunda_backup" + +identity: + enabled: false + +optimize: + enabled: false + +connectors: + enabled: true + inbound: + mode: credentials + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "2Gi" + env: + - name: CAMUNDA_OPERATE_CLIENT_USERNAME + value: demo + - name: CAMUNDA_OPERATE_CLIENT_PASSWORD + value: demo + +zeebe: + clusterSize: 8 + partitionCount: 8 + replicationFactor: 4 + env: + - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD + value: "5m" + - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK + value: "0.85" + - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK + value: "0.87" + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_ARGS_URL + value: "http://elasticsearch-master-headless.us-east1.svc.cluster.local:9200" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_ARGS_INDEX_PREFIX + value: "zeebe-record" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL + value: "http://elasticsearch-master-headless.europe-west1.svc.cluster.local:9200" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_INDEX_PREFIX + value: "zeebe-record" + pvcSize: 1Gi + + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "512m" + memory: "2Gi" + +zeebe-gateway: + replicas: 1 + + 
resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "1Gi" + + logLevel: ERROR + +elasticsearch: + enabled: true + # imageTag: 7.17.3 + master: + replicaCount: 2 + resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "2Gi" + persistence: + size: 15Gi + + initContainers: + - name: install-gcs-plugin + image: elasticsearch:7.17.10 + securityContext: + privileged: true + command: + - sh + args: + - -c + - | + ./bin/elasticsearch-plugin install --batch repository-gcs + ./bin/elasticsearch-keystore add-file -f gcs.client.default.credentials_file ./key/gcs_backup_key.json + cp -a ./config/elasticsearch.keystore /tmp/keystore + volumeMounts: + - name: plugins + mountPath: /usr/share/elasticsearch/plugins + - name: gcs-backup-key + mountPath: /usr/share/elasticsearch/key + - name: keystore + mountPath: /tmp/keystore + extraVolumes: + - name: plugins + emptyDir: {} + - name: keystore + emptyDir: {} + - name: gcs-backup-key + secret: + secretName: gcs-backup-key + extraVolumeMounts: + - name: plugins + mountPath: /usr/share/elasticsearch/plugins + readOnly: false + - mountPath: /usr/share/elasticsearch/key + name: gcs-backup-key + - name: keystore + mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore + subPath: elasticsearch.keystore + # Allow no backup for single node setups ??? Not included in es values.yaml + # clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" + + + + + # # Request smaller persistent volumes. + # volumeClaimTemplate: + # accessModes: [ "ReadWriteOnce" ] + # storageClassName: "standard" + # resources: + # requests: + # storage: 15Gi diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index 4b35a565..6a61f76e 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -29,7 +29,9 @@ release ?= camunda # Helm chart coordinates for Camunda chart ?= camunda/camunda-platform # Helm chart values -chartValues ?= camunda-values.yaml +globalChartValues ?= ../camunda-values.yaml +regionalChartValues ?= camunda-values.yaml +chartValues ?= $(globalChartValues) -f $(regionalChartValues) .PHONY: all all: use-kube namespace prepare-elastic-backup-key camunda external-urls @@ -48,7 +50,7 @@ external-urls: use-kube external-urls-no-ingress fail-over-region1: use-kube -kubectl create namespace $(namespace)-failover -kubectl config set-context --current --namespace=$(namespace)-failover - helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ + helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ --set global.multiregion.installationType=failOver \ --set global.multiregion.regionId=1 \ --set elasticsearch.enabled=false \ @@ -59,7 +61,7 @@ fail-over-region1: use-kube # TODO importers fail-back: use-kube namespace prepare-elastic-backup-key - helm install --namespace $(region) $(release) $(chart) -f $(chartValues) --skip-crds \ + helm install --namespace $(region) $(release) $(chart) -f $(chartValues) --skip-crds \ --set global.multiregion.installationType=failBack \ --set operate.enabled=false \ --set tasklist.enabled=false diff --git a/google/multi-region/active-active/region0/camunda-values.yaml b/google/multi-region/active-active/region0/camunda-values.yaml index 18d0ed96..6de61cfa 100644 --- a/google/multi-region/active-active/region0/camunda-values.yaml +++ 
b/google/multi-region/active-active/region0/camunda-values.yaml @@ -1,160 +1,4 @@ -# Chart values for the Camunda Platform 8 Helm chart. -# This file deliberately contains only the values that differ from the defaults. -# For changes and documentation, use your favorite diff tool to compare it with: -# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml - global: - # Multiregion options for Zeebe - # - ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. - # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. - # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. - # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. - # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. multiregion: - # number of regions that this Camunda Platform instance is stretched across - regions: 2 # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. regionId: 0 - identity: - auth: - # Disable the Identity authentication - # it will fall back to basic-auth: demo/demo as default user - enabled: false - -operate: - env: - - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME - value: "camunda_backup" -tasklist: - env: - - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME - value: "camunda_backup" - -identity: - enabled: false - -optimize: - enabled: false - -connectors: - enabled: true - inbound: - mode: credentials - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "2Gi" - env: - - name: CAMUNDA_OPERATE_CLIENT_USERNAME - value: demo - - name: CAMUNDA_OPERATE_CLIENT_PASSWORD - value: demo - -zeebe: - clusterSize: 8 - partitionCount: 8 - replicationFactor: 4 - env: - - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD - value: "5m" - - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK - value: "0.85" - - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK - value: "0.87" - - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME - value: "io.camunda.zeebe.exporter.ElasticsearchExporter" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL - value: "http://elasticsearch-master-headless.europe-west1.svc.cluster.local:9200" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE - value: "1" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX - value: "zeebe-record" - pvcSize: 1Gi - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "512m" - memory: "2Gi" - -zeebe-gateway: - replicas: 1 - - resources: 
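The Makefile change above is the heart of this commit: each region now layers two values files, helm install -f ../camunda-values.yaml -f camunda-values.yaml, so shared settings live once at the top level and the per-region file shrinks to little more than its regionId, as the deletions here show. Helm merges later -f files over earlier ones; a minimal illustration of that precedence, with deep_merge as a simplified stand-in for Helm's merge (nested maps merge recursively, scalars from the later file win):

    def deep_merge(base, override):
        # Later values files win key by key; nested maps merge recursively.
        merged = dict(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = deep_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    shared = {"global": {"multiregion": {"regions": 2}}}
    regional = {"global": {"multiregion": {"regionId": 0}}}
    print(deep_merge(shared, regional))
    # {'global': {'multiregion': {'regions': 2, 'regionId': 0}}}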
- requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "1Gi" - - logLevel: ERROR - -elasticsearch: - enabled: true -# imageTag: 7.17.3 - replicas: 1 - - extraInitContainers: - - name: install-gcs-plugin - image: elasticsearch:7.17.10 - securityContext: - privileged: true - command: - - sh - args: - - -c - - | - ./bin/elasticsearch-plugin install --batch repository-gcs - ./bin/elasticsearch-keystore add-file -f gcs.client.default.credentials_file ./key/gcs_backup_key.json - cp -a ./config/elasticsearch.keystore /tmp/keystore - volumeMounts: - - name: plugins - mountPath: /usr/share/elasticsearch/plugins - - name: gcs-backup-key - mountPath: /usr/share/elasticsearch/key - - name: keystore - mountPath: /tmp/keystore - extraVolumes: - - name: plugins - emptyDir: {} - - name: keystore - emptyDir: {} - - name: gcs-backup-key - secret: - secretName: gcs-backup-key - extraVolumeMounts: - - name: plugins - mountPath: /usr/share/elasticsearch/plugins - readOnly: false - - mountPath: /usr/share/elasticsearch/key - name: gcs-backup-key - - name: keystore - mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore - subPath: elasticsearch.keystore - # Allow no backup for single node setups - clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "2Gi" - - # Request smaller persistent volumes. - volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "standard" - resources: - requests: - storage: 15Gi diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile index 21cdf4c1..11834e55 100644 --- a/google/multi-region/active-active/region1/Makefile +++ b/google/multi-region/active-active/region1/Makefile @@ -29,7 +29,9 @@ release ?= camunda # Helm chart coordinates for Camunda chart ?= camunda/camunda-platform # Helm chart values -chartValues ?= camunda-values.yaml +globalChartValues ?= ../camunda-values.yaml +regionalChartValues ?= camunda-values.yaml +chartValues ?= $(globalChartValues) -f $(regionalChartValues) .PHONY: all all: use-kube namespace prepare-elastic-backup-key camunda external-urls @@ -48,7 +50,7 @@ external-urls: use-kube external-urls-no-ingress fail-over-region1: use-kube -kubectl create namespace $(namespace)-failover -kubectl config set-context --current --namespace=$(namespace)-failover - helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ + helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \ --set global.multiregion.installationType=failOver \ --set global.multiregion.regionId=0 \ --set elasticsearch.enabled=false \ @@ -59,7 +61,7 @@ fail-over-region1: use-kube # TODO importers fail-back: use-kube namespace prepare-elastic-backup-key - helm install --namespace $(region) $(release) $(chart) -f $(chartValues) --skip-crds \ + helm install --namespace $(region) $(release) $(chart) -f $(chartValues) --skip-crds \ --set global.multiregion.installationType=failBack \ --set operate.enabled=false \ --set tasklist.enabled=false diff --git a/google/multi-region/active-active/region1/camunda-values.yaml b/google/multi-region/active-active/region1/camunda-values.yaml index dbb6f97b..672b6f8a 100644 --- a/google/multi-region/active-active/region1/camunda-values.yaml +++ b/google/multi-region/active-active/region1/camunda-values.yaml @@ -1,160 +1,4 @@ -# Chart values for the Camunda Platform 8 Helm 
chart. -# This file deliberately contains only the values that differ from the defaults. -# For changes and documentation, use your favorite diff tool to compare it with: -# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml - global: - # Multiregion options for Zeebe - # - ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production. - # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites. - # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case. - # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup. - # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks. multiregion: - # number of regions that this Camunda Platform instance is stretched across - regions: 2 # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. - regionId: 1 - identity: - auth: - # Disable the Identity authentication - # it will fall back to basic-auth: demo/demo as default user - enabled: false - -operate: - env: - - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME - value: "camunda_backup" -tasklist: - env: - - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME - value: "camunda_backup" - -identity: - enabled: false - -optimize: - enabled: false - -connectors: - enabled: true - inbound: - mode: credentials - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "2Gi" - env: - - name: CAMUNDA_OPERATE_CLIENT_USERNAME - value: demo - - name: CAMUNDA_OPERATE_CLIENT_PASSWORD - value: demo - -zeebe: - clusterSize: 8 - partitionCount: 8 - replicationFactor: 4 - env: - - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD - value: "5m" - - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK - value: "0.85" - - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK - value: "0.87" - - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME - value: "io.camunda.zeebe.exporter.ElasticsearchExporter" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL - value: "http://elasticsearch-master-headless.us-east1.svc.cluster.local:9200" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE - value: "1" - - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX - value: "zeebe-record" - pvcSize: 1Gi - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "512m" - memory: "2Gi" - -zeebe-gateway: - replicas: 1 - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "1Gi" - - logLevel: ERROR - -elasticsearch: - 
enabled: true -# imageTag: 7.17.3 - replicas: 1 - - extraInitContainers: - - name: install-gcs-plugin - image: elasticsearch:7.17.10 - securityContext: - privileged: true - command: - - sh - args: - - -c - - | - ./bin/elasticsearch-plugin install --batch repository-gcs - ./bin/elasticsearch-keystore add-file -f gcs.client.default.credentials_file ./key/gcs_backup_key.json - cp -a ./config/elasticsearch.keystore /tmp/keystore - volumeMounts: - - name: plugins - mountPath: /usr/share/elasticsearch/plugins - - name: gcs-backup-key - mountPath: /usr/share/elasticsearch/key - - name: keystore - mountPath: /tmp/keystore - extraVolumes: - - name: plugins - emptyDir: {} - - name: keystore - emptyDir: {} - - name: gcs-backup-key - secret: - secretName: gcs-backup-key - extraVolumeMounts: - - name: plugins - mountPath: /usr/share/elasticsearch/plugins - readOnly: false - - mountPath: /usr/share/elasticsearch/key - name: gcs-backup-key - - name: keystore - mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore - subPath: elasticsearch.keystore - # Allow no backup for single node setups - clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" - - resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "2Gi" - - # Request smaller persistent volumes. - volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "standard" - resources: - requests: - storage: 15Gi + regionId: 1 \ No newline at end of file From 8b3206ed02d59afeaffc872a437623405f9be881 Mon Sep 17 00:00:00 2001 From: ManuelDittmar Date: Wed, 24 Jan 2024 23:55:35 +0100 Subject: [PATCH 30/58] only gcp setup and reuse make targets --- .../active-active/gcp-setup/Makefile | 81 ++++++++----------- .../active-active/gcp-setup/values-gcp.yaml | 17 ++++ .../generated/dns-configmap-europe-west1.yaml | 8 -- .../active-active/setup-dns-chaining.py | 32 ++++---- 4 files changed, 65 insertions(+), 73 deletions(-) create mode 100644 google/multi-region/active-active/gcp-setup/values-gcp.yaml delete mode 100644 google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile index e2f30d3b..dbbe3b0f 100644 --- a/google/multi-region/active-active/gcp-setup/Makefile +++ b/google/multi-region/active-active/gcp-setup/Makefile @@ -14,10 +14,6 @@ include $(root)/include/camunda.mk # GCP project project ?= camunda-researchanddevelopment -# GCP region 0 -region0 ?=us-east1-c# region = zone for simplicity -# GCP region 1 -region1 ?= europe-west1-b machineType ?= n2-standard-2 minSize ?= 1 @@ -26,10 +22,6 @@ maxSize ?= 24 regions ?= us-east1-c europe-west1-b clusters ?= manus-region-0 manus-region-1 regions_clusters ?= "us-east1-c;manus-region-0" "europe-west1-b;manus-region-1" -# GKE cluster name 0 -clusterName0 ?= manus-region-0 -# GKE cluster name 1 -clusterName1 ?= manus-region-1 # Firewall rule name firewallRule ?= zeebe-between-clusters-manu # Brokers per Region @@ -70,31 +62,11 @@ gcp-firewall: done; \ gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRanges --target-tags=$$networkTags -.PHONY: dns-chaining -dns-chaining: - python3 setup-dns-chaining.py $(project) $(region0) $(clusterName0) $(region1) $(clusterName1) $(brokersPerRegion) +.PHONY: gcp-dns-chaining +gcp-dns-chaining: + python3 
../setup-dns-chaining.py $(project) '$(regions_clusters)' $(brokersPerRegion) ## TODO teardown -.PHONY: generate-camunda-values -generate-camunda-values: - @contactPoints=$$(/bin/bash -c ' \ - join_addrs=(); \ - for region in $(region0) $(region1); do \ - for i in `seq 0 $$(($(brokersPerRegion)-1))`; do \ - join_addrs+=("camunda-zeebe-$$i.camunda-zeebe.$$region.svc.cluster.local:26502"); \ - done; \ - done; \ - IFS=,; echo "$${join_addrs[*]}";'); \ - echo "Initial contact points: $$contactPoints"; \ - cp camunda-values-template.yaml camunda-values-region-0.yaml; \ - sed -i 's/\$$REGIONID\$$/0/' camunda-values-region-0.yaml; \ - sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region1).svc.cluster.local:9200@' camunda-values-region-0.yaml; \ - sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-0.yaml; \ - cp camunda-values-template.yaml camunda-values-region-1.yaml; \ - sed -i 's/\$$REGIONID\$$/1/' camunda-values-region-1.yaml; \ - sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region0).svc.cluster.local:9200@' camunda-values-region-1.yaml; \ - sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-1.yaml - .PHONY: gcp-bucket gcp-bucket: gcloud storage buckets create gs://$(bucketName) --project $(project) @@ -104,24 +76,37 @@ gcp-bucket: gcloud projects add-iam-policy-binding $(project) \ --member="serviceAccount:$(bucketName)-sa@$(project).iam.gserviceaccount.com" \ --role="roles/storage.admin" - gcloud iam service-accounts keys create ~/Downloads/camunda-hackweek-es-backup-key.json \ - --iam-account=$(bucketName)-sa@$(project).iam.gserviceaccount.com - -#.PHONY register-repository-es -#register-repository-es: -# sed -i 's/\$$BUCKETNAME\$$/$(bucketName)/' elasticsearch-repo-setup.yaml; \ - + gcloud iam service-accounts keys create ../generated/$(bucketName)-backup-key.json \ + --iam-account=$(bucketName)-sa@$(project).iam.gserviceaccount.com +gcp-gcs-secret: + for region_cluster in $(regions_clusters); do \ + region=$$(echo $$region_cluster | cut -d';' -f1); \ + clusterName=$$(echo $$region_cluster | cut -d';' -f2); \ + kubectl create secret generic gcs-backup-key --from-file=gcs_backup_key.json=../generated/$(bucketName)-backup-key.json --context gke_$(project)_"$$region"_"$$clusterName"; \ + done; -.PHONY: install-camunda -install-camunda: - kubectl config use-context gke_$(project)_$(region0)_$(clusterName0) - kubectl create namespace $(region0) - helm install camunda camunda/camunda-platform -f camunda-values-region-0.yaml -n $(region0) - kubectl config use-context gke_$(project)_$(region1)_$(clusterName1) - kubectl create namespace $(region1) - helm install camunda camunda/camunda-platform -f camunda-values-region-1.yaml -n $(region1) +.PHONY: gcp-register-backup-repo +gcp-register-backup-repo: + @echo "Starting port-forwarding to svc/camunda-elasticsearch..." + @kubectl port-forward svc/camunda-elasticsearch 9200:9200 & \ + PID=$$!; \ + echo "Waiting for port-forwarding to be ready..."; \ + while ! 
curl -s -o /dev/null -w "%{http_code}" localhost:9200 | grep -q "200"; do sleep 1; done; \
+	echo "Executing backup command..."; \
+	status_code=$$(curl --silent --location --request PUT "http://localhost:9200/_snapshot/camunda_backup" \
+	--header "Content-Type: application/json" \
+	--data-raw "{\"type\": \"gcs\", \"settings\": {\"client\": \"default\", \"bucket\": \"$(bucketName)\", \"compress\": true}}" \
+	-o /dev/null -w "%{http_code}"); \
+	if [ $$status_code -eq 200 ]; then \
+	  echo "Backup repository registered successfully."; \
+	else \
+	  echo "Failed to register backup repository. Status code: $$status_code"; \
+	  exit 1; \
+	fi; \
+	echo "Stopping port-forwarding..."; \
+	kill $$PID

.PHONY: all
all: kube gcp-firewall gcp-dns-chaining gcp-bucket gcp-gcs-secret

diff --git a/google/multi-region/active-active/gcp-setup/values-gcp.yaml b/google/multi-region/active-active/gcp-setup/values-gcp.yaml
new file mode 100644
index 00000000..73ab85b7
--- /dev/null
+++ b/google/multi-region/active-active/gcp-setup/values-gcp.yaml
@@ -0,0 +1,17 @@
+elasticsearch:
+  initScripts:
+    init-keystore.sh: |
+      #!/bin/bash
+      set -e
+      echo "Adding credentials file to Elasticsearch keystore..."
+      elasticsearch-keystore add-file gcs.client.default.credentials_file /gcp/key/gcs_backup_key.json
+      echo "..done adding credentials file to Elasticsearch keystore"
+
+  extraVolumes:
+    - name: gcs-backup-key
+      secret:
+        secretName: gcs-backup-key
+
+  extraVolumeMounts:
+    - mountPath: /gcp/key
+      name: gcs-backup-key
\ No newline at end of file
diff --git a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
deleted file mode 100644
index bef7e091..00000000
--- a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kube-dns
-  namespace: kube-system
-data:
-  stubDomains: |
-    {"us-east1.svc.cluster.local": ["35.243.201.145"], "us-east1-failover.svc.cluster.local": ["35.243.201.145"]}
diff --git a/google/multi-region/active-active/setup-dns-chaining.py b/google/multi-region/active-active/setup-dns-chaining.py
index 5b3a11be..ea130461 100755
--- a/google/multi-region/active-active/setup-dns-chaining.py
+++ b/google/multi-region/active-active/setup-dns-chaining.py
@@ -27,30 +27,28 @@
 # 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
 # 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
 # }
-if len(sys.argv) != 7:
-    print("Usage: python your_script.py <project> <region0> <clusterName0> <region1> <clusterName1> <brokersPerRegion>")
+if len(sys.argv) != 4:
+    print("Usage: python your_script.py <project> <regions_clusters> <brokersPerRegion>")
     sys.exit(1)
 
 project = sys.argv[1]
-region0 = sys.argv[2]
-clusterName0 = sys.argv[3]
-region1 = sys.argv[4]
-clusterName1 = sys.argv[5]
-brokersPerRegion = int(sys.argv[6])
+regions_clusters = sys.argv[2]
+brokersPerRegion = int(sys.argv[3])
 
-contexts = {
-    region0: f'gke_{project}_{region0}_{clusterName0}',
-    region1: f'gke_{project}_{region1}_{clusterName1}',
-}
+regions_clusters_list = regions_clusters.split()
 
-number_of_zeebe_brokers_per_region = brokersPerRegion
+contexts = {}
+
+for region_cluster in regions_clusters_list:
+    region_cluster = region_cluster.replace('"', '')
+    region = region_cluster.split(";")[0]
+    clusterName = region_cluster.split(";")[1]
+    contexts[region] = f'gke_{project}_{region}_{clusterName}'
 
 number_of_zeebe_brokers_per_region = brokersPerRegion
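To make the new calling convention concrete: the Makefile passes regions_clusters as one quoted argument of space-separated "region;clusterName" pairs, which the loop above unpacks into kubectl context names following the gke_<project>_<location>_<clusterName> pattern that gcloud uses. A small illustration with the default values from the gcp-setup Makefile:

    project = "camunda-researchanddevelopment"
    regions_clusters = '"us-east1-c;manus-region-0" "europe-west1-b;manus-region-1"'

    contexts = {}
    for region_cluster in regions_clusters.split():
        # Strip the protective quotes, then split region from cluster name.
        region, cluster_name = region_cluster.replace('"', '').split(";")
        contexts[region] = f"gke_{project}_{region}_{cluster_name}"

    print(contexts)
    # {'us-east1-c': 'gke_camunda-researchanddevelopment_us-east1-c_manus-region-0',
    #  'europe-west1-b': 'gke_camunda-researchanddevelopment_europe-west1-b_manus-region-1'}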
diff --git a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
deleted file mode 100644
index bef7e091..00000000
--- a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kube-dns
-  namespace: kube-system
-data:
-  stubDomains: |
-    {"us-east1.svc.cluster.local": ["35.243.201.145"], "us-east1-failover.svc.cluster.local": ["35.243.201.145"]}
diff --git a/google/multi-region/active-active/setup-dns-chaining.py b/google/multi-region/active-active/setup-dns-chaining.py
index 5b3a11be..ea130461 100755
--- a/google/multi-region/active-active/setup-dns-chaining.py
+++ b/google/multi-region/active-active/setup-dns-chaining.py
@@ -27,30 +27,28 @@
 # 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
 # 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
 # }
-if len(sys.argv) != 7:
-    print("Usage: python your_script.py <project> <region0> <clusterName0> <region1> <clusterName1> <brokersPerRegion>")
+if len(sys.argv) != 4:
+    print("Usage: python your_script.py <project> <regions_clusters> <brokersPerRegion>")
     sys.exit(1)
 
 project = sys.argv[1]
-region0 = sys.argv[2]
-clusterName0 = sys.argv[3]
-region1 = sys.argv[4]
-clusterName1 = sys.argv[5]
-brokersPerRegion = int(sys.argv[6])
+regions_clusters = sys.argv[2]
+brokersPerRegion = int(sys.argv[3])
 
-contexts = {
-    region0: f'gke_{project}_{region0}_{clusterName0}',
-    region1: f'gke_{project}_{region1}_{clusterName1}',
-}
+regions_clusters_list = regions_clusters.split()
 
-number_of_zeebe_brokers_per_region = brokersPerRegion
+contexts = {}
+
+for region_cluster in regions_clusters_list:
+    region_cluster = region_cluster.replace('"', '')
+    region = region_cluster.split(";")[0]
+    clusterName = region_cluster.split(";")[1]
+    contexts[region] = f'gke_{project}_{region}_{clusterName}'
 
-# Fill in the number of Zeebe brokers per region,
-# i.e. clusterSize/regions as defined in camunda-values.yaml
-number_of_zeebe_brokers_per_region = 4
+number_of_zeebe_brokers_per_region = brokersPerRegion
 
 # Path to directory generated YAML files.
-generated_files_dir = './generated'
+generated_files_dir = '../generated'
 
 # ------------------------------------------------------------------------------
@@ -73,7 +71,7 @@
 
 # For each cluster, create a load balancer to its DNS pod.
 for region, context in contexts.items():
-    check_call(['kubectl', 'apply', '-f', 'dns-lb.yaml', '--context', context])
+    check_call(['kubectl', 'apply', '-f', '../dns-lb.yaml', '--context', context])
 
 # Set up each cluster to forward DNS requests for region-scoped namespaces to the
 # relevant cluster's DNS server, using load balancers in order to create a
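With the parameterized version in place, a typical invocation passes the project, the quoted region;clusterName pairs, and the broker count. The project and cluster names below are placeholders, not values from this repo:

    python3 setup-dns-chaining.py my-gcp-project \
        '"us-east1;my-cluster-0" "europe-west1;my-cluster-1"' 4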
From 5c9a8d22d36a9daa9e1a5b3dc7e7897a6fba420f Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Thu, 25 Jan 2024 11:22:41 +0100
Subject: [PATCH 31/58] Add metrics to k8s

---
 google/multi-region/active-active/region0/Makefile | 3 ++-
 google/multi-region/active-active/region1/Makefile | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index 6a61f76e..0251115f 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -37,7 +37,7 @@ chartValues ?= $(globalChartValues) -f $(regionalChartValues)
 all: use-kube namespace prepare-elastic-backup-key camunda external-urls
 
 .PHONY: kube # Create Kubernetes cluster. (No application gateway required)
-kube: kube-gke
+kube: kube-gke metrics
 	@echo "Please add the following line to the list of contexts in setup-dns-chaining.py & teardown-dns-chaining.py:"
 	@echo "    '$(region)': 'gke_$(project)_$(region)_$(clusterName)',"
 
@@ -92,6 +92,7 @@ include $(root)/google/include/kubernetes-gke.mk
 include $(root)/include/camunda.mk
 include $(root)/bpmn/deploy-models.mk
 include $(root)/connectors/connectors.mk
+include $(root)/metrics/metrics.mk
 
 .PHONY: elastic-nodes
 elastic-nodes: use-kube
diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile
index 11834e55..ee101a13 100644
--- a/google/multi-region/active-active/region1/Makefile
+++ b/google/multi-region/active-active/region1/Makefile
@@ -37,7 +37,7 @@ chartValues ?= $(globalChartValues) -f $(regionalChartValues)
 all: use-kube namespace prepare-elastic-backup-key camunda external-urls
 
 .PHONY: kube # Create Kubernetes cluster. (No application gateway required)
-kube: kube-gke
+kube: kube-gke metrics
 	@echo "Please add the following line to the list of contexts in setup-dns-chaining.py & teardown-dns-chaining.py:"
 	@echo "    '$(region)': 'gke_$(project)_$(region)_$(clusterName)',"
 
@@ -92,6 +92,7 @@ include $(root)/google/include/kubernetes-gke.mk
 include $(root)/include/camunda.mk
 include $(root)/bpmn/deploy-models.mk
 include $(root)/connectors/connectors.mk
+include $(root)/metrics/metrics.mk
 
 .PHONY: elastic-nodes
 elastic-nodes: use-kube

From d497e3c85b1c8b2d57eef36248511349f4ef909a Mon Sep 17 00:00:00 2001
From: HamzaMasood1
Date: Thu, 25 Jan 2024 13:52:33 +0100
Subject: [PATCH 32/58] region 0 failover namespace configuration for
 secondary elasticsearch

---
 .../multi-region/active-active/region0/Makefile | 16 +++++++++-------
 .../region0/camunda-failover-to-region0.yaml    | 14 ++++++++++++++
 2 files changed, 23 insertions(+), 7 deletions(-)
 create mode 100644 google/multi-region/active-active/region0/camunda-failover-to-region0.yaml

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index 0251115f..e908786f 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -31,6 +31,7 @@ chart ?= camunda/camunda-platform
 # Helm chart values
 globalChartValues ?= ../camunda-values.yaml
 regionalChartValues ?= camunda-values.yaml
+failoverChartValues ?= camunda-failover-to-region0.yaml
 chartValues ?= $(globalChartValues) -f $(regionalChartValues)
 
 .PHONY: all
@@ -48,15 +49,16 @@ external-urls: use-kube external-urls-no-ingress
 
 .PHONY: fail-over-region1 # Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum
 fail-over-region1: use-kube
+	chartValues = $(globalChartValues) -f $(regionalChartValues) -f $(failoverChartValues)
 	-kubectl create namespace $(namespace)-failover
 	-kubectl config set-context --current --namespace=$(namespace)-failover
-	helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \
-	--set global.multiregion.installationType=failOver \
-	--set global.multiregion.regionId=1 \
-	--set elasticsearch.enabled=false \
-	--set operate.enabled=false \
-	--set tasklist.enabled=false \
-	--set zeebe-gateway.enabled=false
+	helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds
+	# --set global.multiregion.installationType=failOver \
+	# --set global.multiregion.regionId=1 \
+	# --set elasticsearch.enabled=false \
+	# --set operate.enabled=false \
+	# --set tasklist.enabled=false \
+	# --set zeebe-gateway.enabled=false
 	# TODO connect to existing elastic in current region
 	# TODO importers
 
diff --git a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml
new file mode 100644
index 00000000..e41f8cdc
--- /dev/null
+++ b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml
@@ -0,0 +1,14 @@
+global:
+  multiregion:
+    installationType: failOver
+    regionId: 1
+operate:
+  enabled: false
+tasklist:
+  enabled: false
+zeebe-gateway:
+  enabled: false
+zeebe:
+  env:
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL
+      value: "http://elasticsearch-master-headless.us-east1-failover.svc.cluster.local:9200"

From b237b8a0884da61043ec31f3bfebb362c1e5dbba Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Thu, 25 Jan 2024 14:14:17 +0100
Subject: [PATCH 33/58] Add Stackdriver logging configuration for Zeebe and
 Zeebe Gateway

---
 google/include/log-format-stackdriver.yaml | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 google/include/log-format-stackdriver.yaml

diff --git a/google/include/log-format-stackdriver.yaml b/google/include/log-format-stackdriver.yaml
new file mode 100644
index 00000000..65b9f149
--- /dev/null
+++ b/google/include/log-format-stackdriver.yaml
@@ -0,0 +1,22 @@
+zeebe:
+  env:
+    # Enable JSON logging for Google Cloud Stackdriver
+    - name: ZEEBE_LOG_APPENDER
+      value: Stackdriver
+    - name: ZEEBE_LOG_STACKDRIVER_SERVICENAME
+      value: zeebe
+    - name: ZEEBE_LOG_STACKDRIVER_SERVICEVERSION
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.namespace
+zeebe-gateway:
+  env:
+    # Enable JSON logging for Google Cloud Stackdriver
+    - name: ZEEBE_LOG_APPENDER
+      value: Stackdriver
+    - name: ZEEBE_LOG_STACKDRIVER_SERVICENAME
+      value: zeebe
+    - name: ZEEBE_LOG_STACKDRIVER_SERVICEVERSION
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.namespace
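Once the brokers run with the Stackdriver appender, their JSON logs land in Cloud Logging and can be pulled from the CLI. A rough sketch, assuming an authenticated gcloud and the default k8s_container resource type; the project id and container name here are placeholders:

    gcloud logging read \
      'resource.type="k8s_container" AND resource.labels.container_name="zeebe"' \
      --project my-gcp-project --limit 10 --format json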
From dc480d0f78c8301018a48e4f898f62a10bba56fc Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Thu, 25 Jan 2024 14:38:36 +0100
Subject: [PATCH 34/58] remove generated values

---
 .../camunda-values-region-0.yaml | 102 ------------------
 .../camunda-values-region-1.yaml | 102 ------------------
 2 files changed, 204 deletions(-)
 delete mode 100644 google/multi-region/active-active/camunda-values-region-0.yaml
 delete mode 100644 google/multi-region/active-active/camunda-values-region-1.yaml

diff --git a/google/multi-region/active-active/camunda-values-region-0.yaml b/google/multi-region/active-active/camunda-values-region-0.yaml
deleted file mode 100644
index a3704edf..00000000
--- a/google/multi-region/active-active/camunda-values-region-0.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# Chart values for the Camunda Platform 8 Helm chart.
-# This file deliberately contains only the values that differ from the defaults.
-# For changes and documentation, use your favorite diff tool to compare it with:
-# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml
-
-global:
-  # Multiregion options for Zeebe
-  #
-  ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production.
-  # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites.
-  # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case.
-  # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup.
-  # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks.
-  multiregion:
-    # number of regions that this Camunda Platform instance is stretched across
-    regions: 2
-    # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1.
-    regionId: 0
-  identity:
-    auth:
-      # Disable the Identity authentication
-      # it will fall back to basic-auth: demo/demo as default user
-      enabled: false
-
-operate:
-  env:
-    - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-tasklist:
-  env:
-    - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-
-identity:
-  enabled: false
-
-optimize:
-  enabled: false
-
-connectors:
-  enabled: true
-  inbound:
-    mode: credentials
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "2Gi"
-  env:
-    - name: CAMUNDA_OPERATE_CLIENT_USERNAME
-      value: demo
-    - name: CAMUNDA_OPERATE_CLIENT_PASSWORD
-      value: demo
-
-zeebe:
-  clusterSize: 8
-  partitionCount: 8
-  replicationFactor: 4
-  env:
-    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
-      value: "5m"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
-      value: "0.85"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
-      value: "0.87"
-    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
-      value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME
-      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL
-      value: "http://elasticsearch-master-headless.europe-west1-b.svc.cluster.local:9200"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE
-      value: "1"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX
-      value: "zeebe-record"
-  pvcSize: 1Gi
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "512m"
-      memory: "2Gi"
-
-zeebe-gateway:
-  replicas: 1
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "1Gi"
-
-  logLevel: ERROR
-
-elasticsearch:
-  enabled: true
\ No newline at end of file
diff --git a/google/multi-region/active-active/camunda-values-region-1.yaml b/google/multi-region/active-active/camunda-values-region-1.yaml
deleted file mode 100644
index e3824268..00000000
--- a/google/multi-region/active-active/camunda-values-region-1.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# Chart values for the Camunda Platform 8 Helm chart.
-# This file deliberately contains only the values that differ from the defaults.
-# For changes and documentation, use your favorite diff tool to compare it with:
-# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml
-
-global:
-  # Multiregion options for Zeebe
-  #
-  ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production.
-  # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites.
-  # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case.
-  # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup.
-  # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks.
-  multiregion:
-    # number of regions that this Camunda Platform instance is stretched across
-    regions: 2
-    # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1.
-    regionId: 1
-  identity:
-    auth:
-      # Disable the Identity authentication
-      # it will fall back to basic-auth: demo/demo as default user
-      enabled: false
-
-operate:
-  env:
-    - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-tasklist:
-  env:
-    - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-
-identity:
-  enabled: false
-
-optimize:
-  enabled: false
-
-connectors:
-  enabled: true
-  inbound:
-    mode: credentials
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "2Gi"
-  env:
-    - name: CAMUNDA_OPERATE_CLIENT_USERNAME
-      value: demo
-    - name: CAMUNDA_OPERATE_CLIENT_PASSWORD
-      value: demo
-
-zeebe:
-  clusterSize: 8
-  partitionCount: 8
-  replicationFactor: 4
-  env:
-    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
-      value: "5m"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
-      value: "0.85"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
-      value: "0.87"
-    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
-      value: "camunda-zeebe-0.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1-c.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1-b.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1-b.svc.cluster.local:26502"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME
-      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL
-      value: "http://elasticsearch-master-headless.us-east1-c.svc.cluster.local:9200"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE
-      value: "1"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX
-      value: "zeebe-record"
-  pvcSize: 1Gi
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "512m"
-      memory: "2Gi"
-
-zeebe-gateway:
-  replicas: 1
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "1Gi"
-
-  logLevel: ERROR
-
-elasticsearch:
-  enabled: true
\ No newline at end of file

From c9eafd489e412f69cfb2b37c3727ea9f68454b6c Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Thu, 25 Jan 2024 14:38:55 +0100
Subject: [PATCH 35/58] Delete Makefile

---
 google/multi-region/active-active/Makefile | 101 ---------------------
 1 file changed, 101 deletions(-)
 delete mode 100644 google/multi-region/active-active/Makefile

diff --git a/google/multi-region/active-active/Makefile b/google/multi-region/active-active/Makefile
deleted file mode 100644
index e60bf193..00000000
--- a/google/multi-region/active-active/Makefile
+++ /dev/null
@@ -1,101 +0,0 @@
-# ------------------------------------
-# Set the following for your specific environment
-# Already have a Cluster? Set these values to point to your existing environment
-# Otherwise, these values will be used to create a new Cluster
-
-# GCP project
-project ?= camunda-researchanddevelopment
-# GCP region 0
-region0 ?=us-east1-c# region = zone for simplicity
-# GCP region 1
-region1 ?= europe-west1-b
-# GKE cluster name 0
-clusterName0 ?= manus-region-0
-# GKE cluster name 1
-clusterName1 ?= manus-region-1
-# Firewall rule name
-firewallRule ?= zeebe-between-clusters-manu
-# Brokers per Region
-brokersPerRegion = 4
-
-
-# ------------------------------------
-# The following variables should not be changed except for advanced use cases
-ifeq ($(OS),Windows_NT)
-	root ?= $(CURDIR)/../../../..
-else
-	root ?= $(shell pwd)/../../../..
-endif
-
-
-.PHONY: gcp-create-cluster
-gcp-create-cluster:
-	gcloud config set project $(project)
-	gcloud container clusters create $(clusterName0) \
-	--region $(region0) \
-	--num-nodes=1 \
-	--enable-autoscaling --max-nodes=24 --min-nodes=1 \
-	--enable-ip-alias \
-	--machine-type=n2-standard-2 \
-	--disk-type "pd-ssd" \
-	--spot \
-	--maintenance-window=4:00 \
-	--release-channel=regular \
-	--cluster-version=latest
-	gcloud container clusters create $(clusterName1) \
-	--region $(region1) \
-	--num-nodes=1 \
-	--enable-autoscaling --max-nodes=24 --min-nodes=1 \
-	--enable-ip-alias \
-	--machine-type=n2-standard-2 \
-	--disk-type "pd-ssd" \
-	--spot \
-	--maintenance-window=4:00 \
-	--release-channel=regular \
-	--cluster-version=latest
-
-.PHONY: gcp-firewall
-gcp-firewall:
-	networkTag0=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region0)_$(clusterName0)) --zone $(region0) --format="get(tags.items)"); \
-	networkTag1=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_$(region1)_$(clusterName1)) --zone $(region1) --format="get(tags.items)"); \
-	ipRange0=$$(gcloud container clusters describe $(clusterName0) --zone $(region0) --format='value(clusterIpv4Cidr)'); \
-	ipRange1=$$(gcloud container clusters describe $(clusterName1) --zone $(region1) --format='value(clusterIpv4Cidr)'); \
-	gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRange0,$$ipRange1 --target-tags=$$networkTag0,$$networkTag1
-
-.PHONY: dns-chaining
-dns-chaining:
-	python3 setup-dns-chaining.py $(project) $(region0) $(clusterName0) $(region1) $(clusterName1) $(brokersPerRegion)
-## TODO teardown
-
-.PHONY: generate-camunda-values
-generate-camunda-values:
-	@contactPoints=$$(/bin/bash -c ' \
-	join_addrs=(); \
-	for region in $(region0) $(region1); do \
-	for i in `seq 0 $$(($(brokersPerRegion)-1))`; do \
-	join_addrs+=("camunda-zeebe-$$i.camunda-zeebe.$$region.svc.cluster.local:26502"); \
-	done; \
-	done; \
-	IFS=,; echo "$${join_addrs[*]}";'); \
-	echo "Initial contact points: $$contactPoints"; \
-	cp camunda-values-template.yaml camunda-values-region-0.yaml; \
-	sed -i 's/\$$REGIONID\$$/0/' camunda-values-region-0.yaml; \
-	sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region1).svc.cluster.local:9200@' camunda-values-region-0.yaml; \
-	sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-0.yaml; \
-	cp camunda-values-template.yaml camunda-values-region-1.yaml; \
-	sed -i 's/\$$REGIONID\$$/1/' camunda-values-region-1.yaml; \
-	sed -i 's@\$$ELASTIC_URL_2\$$@http://elasticsearch-master-headless.$(region0).svc.cluster.local:9200@' camunda-values-region-1.yaml; \
-	sed -i 's@\$$CONTACTPOINTS\$$@'"$$contactPoints"'@' camunda-values-region-1.yaml
-
-.PHONY: install-camunda
-install-camunda:
-	kubectl config use-context gke_$(project)_$(region0)_$(clusterName0)
-	kubectl create namespace $(region0)
-	helm install camunda camunda/camunda-platform -f camunda-values-region-0.yaml -n $(region0)
-	kubectl config use-context gke_$(project)_$(region1)_$(clusterName1)
-	kubectl create namespace $(region1)
-	helm install camunda camunda/camunda-platform -f camunda-values-region-1.yaml -n $(region1)
-
-.PHONY: setup-mraa-gcp
-setup-mraa-gcp: gcp-create-cluster gcp-firewall dns-chaining generate-camunda-values install-camunda
-

From 248689c9d08037f08980e50c40aea8d7aa4560c7 Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Thu, 25 Jan 2024 14:40:57 +0100
Subject: [PATCH 36/58] allow both standalone use and use via arguments

---
 .../active-active/setup-dns-chaining.py | 47 ++++++++++++-------
 1 file changed, 29 insertions(+), 18 deletions(-)

diff --git a/google/multi-region/active-active/setup-dns-chaining.py b/google/multi-region/active-active/setup-dns-chaining.py
index ea130461..8800ac6e 100755
--- a/google/multi-region/active-active/setup-dns-chaining.py
+++ b/google/multi-region/active-active/setup-dns-chaining.py
@@ -27,29 +27,40 @@
 # 'us-east1': 'gke_camunda-researchanddevelopment_us-east1_falko-region-0',
 # 'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1_falko-region-1',
 # }
-if len(sys.argv) != 4:
-    print("Usage: python your_script.py <project> <regions_clusters> <brokersPerRegion>")
-    sys.exit(1)
 
-project = sys.argv[1]
-regions_clusters = sys.argv[2]
-brokersPerRegion = int(sys.argv[3])
+contexts = {
+    'us-east1': 'gke_camunda-researchanddevelopment_us-east1-c_manus-region-0',
+    'europe-west1': 'gke_camunda-researchanddevelopment_europe-west1-b_manus-region-1',
+}
 
-regions_clusters_list = regions_clusters.split()
-
-contexts = {}
-
-for region_cluster in regions_clusters_list:
-    region_cluster = region_cluster.replace('"', '')
-    region = region_cluster.split(";")[0]
-    clusterName = region_cluster.split(";")[1]
-    contexts[region] = f'gke_{project}_{region}_{clusterName}'
-
-number_of_zeebe_brokers_per_region = brokersPerRegion
+# Fill in the number of Zeebe brokers per region,
+# i.e. clusterSize/regions as defined in camunda-values.yaml
+number_of_zeebe_brokers_per_region = 4
 
 # Path to directory generated YAML files.
 generated_files_dir = '../generated'
+## Path to dns lb file.
+dns_lb_file = './dns-lb.yaml'
+
+project = "researchanddevelopment"
+if len(sys.argv) > 1:
+    if len(sys.argv) != 4:
+        print("Usage: python your_script.py <project> <regions_clusters> <brokersPerRegion>")
+        sys.exit(1)
+
+    project = sys.argv[1]
+    regions_clusters = sys.argv[2]
+    number_of_zeebe_brokers_per_region = int(sys.argv[3])
+    regions_clusters_list = regions_clusters.split()
+    dns_lb_file = '../dns-lb.yaml'
+    contexts = {}
+    for region_cluster in regions_clusters_list:
+        region_cluster = region_cluster.replace('"', '')
+        region = region_cluster.split(";")[0]
+        clusterName = region_cluster.split(";")[1]
+        contexts[region] = f'gke_{project}_{region}_{clusterName}'
+
 # ------------------------------------------------------------------------------
 
 # First, do some basic input validation.
@@ -71,7 +82,7 @@
 
 # For each cluster, create a load balancer to its DNS pod.
 for region, context in contexts.items():
-    check_call(['kubectl', 'apply', '-f', '../dns-lb.yaml', '--context', context])
+    check_call(['kubectl', 'apply', '-f', dns_lb_file , '--context', context])
 
 # Set up each cluster to forward DNS requests for region-scoped namespaces to the
 # relevant cluster's DNS server, using load balancers in order to create a

From 4cda3e2c09b58bf4f126220c689cc11da65272ae Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Thu, 25 Jan 2024 14:42:34 +0100
Subject: [PATCH 37/58] Refactor fail-over-region1 target in Makefile

---
 google/multi-region/active-active/region0/Makefile | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index e908786f..425b0e45 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -48,17 +48,9 @@ external-urls: use-kube external-urls-no-ingress
 
 ### <--- End of setup --->
 
 .PHONY: fail-over-region1 # Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum
-fail-over-region1: use-kube
-	chartValues = $(globalChartValues) -f $(regionalChartValues) -f $(failoverChartValues)
-	-kubectl create namespace $(namespace)-failover
-	-kubectl config set-context --current --namespace=$(namespace)-failover
-	helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds
-	# --set global.multiregion.installationType=failOver \
-	# --set global.multiregion.regionId=1 \
-	# --set elasticsearch.enabled=false \
-	# --set operate.enabled=false \
-	# --set tasklist.enabled=false \
-	# --set zeebe-gateway.enabled=false
+fail-over-region1: chartValues = $(globalChartValues) -f $(failoverChartValues)
+fail-over-region1: namespace = $(region)-failover
+fail-over-region1: use-kube namespace camunda
 	# TODO connect to existing elastic in current region
 	# TODO importers
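The refactoring above leans on make's target-specific variables: an assignment attached to a target name applies while that target and its prerequisites are being built, which is why the variable set on fail-over-region1 reaches the reused camunda target. A minimal standalone sketch of the pattern; the file names and targets here are illustrative only, not from this repo:

    values = base.yaml

    deploy:
    	@echo helm install demo ./chart -f "$(values)"

    # Override the variable only for the failover flavor of the target;
    # the prerequisite `deploy` then sees the overridden value.
    deploy-failover: values = base.yaml -f failover.yaml
    deploy-failover: deploy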
From 63a47efd7b33998ad8790b68efba347123bff57b Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Thu, 25 Jan 2024 14:44:16 +0100
Subject: [PATCH 38/58] Delete camunda-values-template.yaml

---
 .../camunda-values-template.yaml | 102 ------------------
 1 file changed, 102 deletions(-)
 delete mode 100644 google/multi-region/active-active/camunda-values-template.yaml

diff --git a/google/multi-region/active-active/camunda-values-template.yaml b/google/multi-region/active-active/camunda-values-template.yaml
deleted file mode 100644
index cab38393..00000000
--- a/google/multi-region/active-active/camunda-values-template.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# Chart values for the Camunda Platform 8 Helm chart.
-# This file deliberately contains only the values that differ from the defaults.
-# For changes and documentation, use your favorite diff tool to compare it with:
-# https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml
-
-global:
-  # Multiregion options for Zeebe
-  #
-  ## WARNING: In order to get your multi-region setup covered by Camunda enterprise support you MUST get your configuration and run books reviewed by Camunda before going to production.
-  # This is necessary for us to be able to help you in case of outages, due to the complexity of operating multi-region setups and the dependencies to the underlying Kubernetes prerequisites.
-  # If you operate this in the wrong way you risk corruption and complete loss of all data especially in the dual-region case.
-  # If you can, consider three regions. Please, contact your customer success manager as soon as you start planning a multi-region setup.
-  # Camunda reserves the right to limit support if no review was done prior to launch or the review showed significant risks.
-  multiregion:
-    # number of regions that this Camunda Platform instance is stretched across
-    regions: 2
-    # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1.
-    regionId: $REGIONID$
-  identity:
-    auth:
-      # Disable the Identity authentication
-      # it will fall back to basic-auth: demo/demo as default user
-      enabled: false
-
-operate:
-  env:
-    - name: CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-tasklist:
-  env:
-    - name: CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME
-      value: "camunda_backup"
-
-identity:
-  enabled: false
-
-optimize:
-  enabled: false
-
-connectors:
-  enabled: true
-  inbound:
-    mode: credentials
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "2Gi"
-  env:
-    - name: CAMUNDA_OPERATE_CLIENT_USERNAME
-      value: demo
-    - name: CAMUNDA_OPERATE_CLIENT_PASSWORD
-      value: demo
-
-zeebe:
-  clusterSize: 8
-  partitionCount: 8
-  replicationFactor: 4
-  env:
-    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
-      value: "5m"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
-      value: "0.85"
-    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
-      value: "0.87"
-    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
-      value: "$CONTACTPOINTS$"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_CLASSNAME
-      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_URL
-      value: "$ELASTIC_URL_2$"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_BULK_SIZE
-      value: "1"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH2_ARGS_INDEX_PREFIX
-      value: "zeebe-record"
-  pvcSize: 1Gi
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "512m"
-      memory: "2Gi"
-
-zeebe-gateway:
-  replicas: 1
-
-  resources:
-    requests:
-      cpu: "100m"
-      memory: "512M"
-    limits:
-      cpu: "1000m"
-      memory: "1Gi"
-
-  logLevel: ERROR
-
-elasticsearch:
-  enabled: true
\ No newline at end of file

From 3d697b17c330c3621f1fce129900212b07712c352 Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Thu, 25 Jan 2024 14:47:18 +0100
Subject: [PATCH 39/58] Remove Elasticsearch index prefix configuration

---
 google/multi-region/active-active/camunda-values.yaml | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/google/multi-region/active-active/camunda-values.yaml b/google/multi-region/active-active/camunda-values.yaml
index 6e698156..6c259b0b 100644
--- a/google/multi-region/active-active/camunda-values.yaml
+++ b/google/multi-region/active-active/camunda-values.yaml
@@ -71,14 +71,10 @@ zeebe:
       value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_ARGS_URL
       value: "http://elasticsearch-master-headless.us-east1.svc.cluster.local:9200"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_ARGS_INDEX_PREFIX
-      value: "zeebe-record"
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_CLASSNAME
       value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL
       value: "http://elasticsearch-master-headless.europe-west1.svc.cluster.local:9200"
-    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_INDEX_PREFIX
-      value: "zeebe-record"
   pvcSize: 1Gi
 
   resources:

From bc0ddf7861c6c3a121441b8e7236439685ee6a34 Mon Sep 17 00:00:00 2001
From: HamzaMasood1
Date: Thu, 25 Jan 2024 14:47:25 +0100
Subject: [PATCH 40/58] adding json logging env vars to region 0

---
 google/multi-region/active-active/region0/Makefile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index 425b0e45..0133fd80 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -32,6 +32,7 @@ chart ?= camunda/camunda-platform
 globalChartValues ?= ../camunda-values.yaml
 regionalChartValues ?= camunda-values.yaml
 failoverChartValues ?= camunda-failover-to-region0.yaml
+jsonLoggingValues ?= ../../../include/log-format-stackdriver.yaml
 chartValues ?= $(globalChartValues) -f $(regionalChartValues)
 
 .PHONY: all
@@ -48,7 +49,7 @@ external-urls: use-kube external-urls-no-ingress
 ### <--- End of setup --->
 
 .PHONY: fail-over-region1 # Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum
-fail-over-region1: chartValues = $(globalChartValues) -f $(failoverChartValues)
+fail-over-region1: chartValues = $(globalChartValues) -f $(failoverChartValues) -f $(jsonLoggingValues)
 fail-over-region1: namespace = $(region)-failover
 fail-over-region1: use-kube namespace camunda
 	# TODO connect to existing elastic in current region

From 4289cdff223d3f6b5cbacfb97e7183e5e0de8342 Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Thu, 25 Jan 2024 14:47:36 +0100
Subject: [PATCH 41/58] Create dns-configmap-europe-west1.yaml

---
 .../generated/dns-configmap-europe-west1.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)
 create mode 100644 google/multi-region/generated/dns-configmap-europe-west1.yaml

diff --git a/google/multi-region/generated/dns-configmap-europe-west1.yaml b/google/multi-region/generated/dns-configmap-europe-west1.yaml
new file mode 100644
index 00000000..250a9ddc
--- /dev/null
+++ b/google/multi-region/generated/dns-configmap-europe-west1.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-dns
+  namespace: kube-system
+data:
+  stubDomains: |
+    {"us-east1.svc.cluster.local": ["34.74.0.61"], "us-east1-failover.svc.cluster.local": ["34.74.0.61"]}
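To see which stub domains a cluster's kube-dns is actually using, for example when checking whether a generated ConfigMap like the one above was applied, something along these lines works; the context name is a placeholder:

    kubectl get configmap kube-dns -n kube-system \
      -o jsonpath='{.data.stubDomains}' --context <your-kube-context>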
From 6cd0e8dcc5bad8150f48baef8f18ed89c97e4706 Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Thu, 25 Jan 2024 14:49:59 +0100
Subject: [PATCH 42/58] Revert "Create dns-configmap-europe-west1.yaml"

This reverts commit 4289cdff223d3f6b5cbacfb97e7183e5e0de8342.
---
 .../generated/dns-configmap-europe-west1.yaml | 8 --------
 1 file changed, 8 deletions(-)
 delete mode 100644 google/multi-region/generated/dns-configmap-europe-west1.yaml

diff --git a/google/multi-region/generated/dns-configmap-europe-west1.yaml b/google/multi-region/generated/dns-configmap-europe-west1.yaml
deleted file mode 100644
index 250a9ddc..00000000
--- a/google/multi-region/generated/dns-configmap-europe-west1.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kube-dns
-  namespace: kube-system
-data:
-  stubDomains: |
-    {"us-east1.svc.cluster.local": ["34.74.0.61"], "us-east1-failover.svc.cluster.local": ["34.74.0.61"]}

From ef294a34ef3742ce20c0a910db4285e9c483b3bd Mon Sep 17 00:00:00 2001
From: HamzaMasood1
Date: Thu, 25 Jan 2024 15:24:20 +0100
Subject: [PATCH 43/58] removing disabling the gateway

---
 .../active-active/region0/camunda-failover-to-region0.yaml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml
index e41f8cdc..f1706e95 100644
--- a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml
+++ b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml
@@ -6,8 +6,6 @@ operate:
   enabled: false
 tasklist:
   enabled: false
-zeebe-gateway:
-  enabled: false
 zeebe:
   env:
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL

From 3fcb33e183b6d3017e11a0c0c695ec0eb3252283 Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Thu, 25 Jan 2024 15:48:51 +0100
Subject: [PATCH 44/58] example cm

---
 .../generated/dns-configmap-europe-west1.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)
 create mode 100644 google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml

diff --git a/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
new file mode 100644
index 00000000..bef7e091
--- /dev/null
+++ b/google/multi-region/active-active/generated/dns-configmap-europe-west1.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-dns
+  namespace: kube-system
+data:
+  stubDomains: |
+    {"us-east1.svc.cluster.local": ["35.243.201.145"], "us-east1-failover.svc.cluster.local": ["35.243.201.145"]}

From 8fce54dd377a735813ba054b847c13f98523be05 Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Thu, 25 Jan 2024 15:51:45 +0100
Subject: [PATCH 45/58] Reuse existing make targets (chart value merging needs
 fix)

---
 .../multi-region/active-active/region0/Makefile | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index 0133fd80..81894f2d 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -29,11 +29,11 @@ release ?= camunda
 # Helm chart coordinates for Camunda
 chart ?= camunda/camunda-platform
 # Helm chart values
-globalChartValues ?= ../camunda-values.yaml
+chartValues ?= $(globalChartValues) -f $(regionalChartValues) -f $(jsonLoggingValues)
+jsonLoggingValues ?= $(root)/google/include/log-format-stackdriver.yaml
+globalChartValues ?= $(root)/google/multi-region/active-active/camunda-values.yaml
 regionalChartValues ?= camunda-values.yaml
 failoverChartValues ?= camunda-failover-to-region0.yaml
-jsonLoggingValues ?= ../../../include/log-format-stackdriver.yaml
-chartValues ?= $(globalChartValues) -f $(regionalChartValues)
 
 .PHONY: all
 all: use-kube namespace prepare-elastic-backup-key camunda external-urls
@@ -49,8 +49,8 @@ external-urls: use-kube external-urls-no-ingress
 ### <--- End of setup --->
 
 .PHONY: fail-over-region1 # Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum
-fail-over-region1: chartValues = $(globalChartValues) -f $(failoverChartValues) -f $(jsonLoggingValues)
-fail-over-region1: namespace = $(region)-failover
+fail-over-region1: chartValues += -f $(failoverChartValues)
+fail-over-region1: namespace = $(region)-failover
 fail-over-region1: use-kube namespace camunda
 	# TODO connect to existing elastic in current region
 	# TODO importers
@@ -78,10 +78,8 @@ clean: use-kube clean-camunda
 
 .PHONY: clean-kube
 clean-kube: clean-kube-gke
 
 #: Delete temporary brokers that impersonated half of the ones lost in region 1
-clean-fail-over-region1: use-kube
-	-helm --namespace $(namespace)-failover uninstall $(release)
-	-kubectl delete -n $(namespace)-failover pvc -l app.kubernetes.io/instance=$(release)
-	-kubectl delete namespace $(namespace)-failover
+clean-fail-over-region1: namespace = $(region)-failover
+clean-fail-over-region1: clean
 
 include $(root)/google/include/kubernetes-gke.mk
 include $(root)/include/camunda.mk

From dc87f71c330c3621f1fce129900212b07712c352 Mon Sep 17 00:00:00 2001
From: ManuelDittmar
Date: Fri, 26 Jan 2024 10:28:28 +0100
Subject: [PATCH 46/58] remove --zone

---
 google/multi-region/active-active/gcp-setup/Makefile | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile
index dbbe3b0f..ca9d9937 100644
--- a/google/multi-region/active-active/gcp-setup/Makefile
+++ b/google/multi-region/active-active/gcp-setup/Makefile
@@ -23,7 +23,7 @@ regions ?= us-east1-c europe-west1-b
 clusters ?= manus-region-0 manus-region-1
 regions_clusters ?= "us-east1-c;manus-region-0" "europe-west1-b;manus-region-1"
 # Firewall rule name
-firewallRule ?= zeebe-between-clusters-manu
+firewallRule ?= zeebe-between-clusters-manu2
 # Brokers per Region
 brokersPerRegion = 4
 # Bucket Name for GCP
@@ -47,8 +47,8 @@ gcp-firewall:
 	for region_cluster in $(regions_clusters); do \
 	region=$$(echo $$region_cluster | cut -d';' -f1); \
 	clusterName=$$(echo $$region_cluster | cut -d';' -f2); \
-	networkTag=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_"$$region"_"$$clusterName") --zone $$region --format="get(tags.items)"); \
-	ipRange=$$(gcloud container clusters describe $$clusterName --zone $$region --format='value(clusterIpv4Cidr)'); \
+	networkTag=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_"$$region"_"$$clusterName") --format="get(tags.items)"); \
+	ipRange=$$(gcloud container clusters describe $$clusterName --region $$region --format='value(clusterIpv4Cidr)'); \
 	if [ -z "$$networkTags" ]; then \
 	networkTags=$$networkTag; \
 	else \
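After either variant of the target runs, the resulting rule can be inspected to confirm the source ranges and target tags it picked up. A sketch; the rule and project names are placeholders for the firewallRule and project variables above:

    gcloud compute firewall-rules describe <firewallRule> --project <gcp-project> \
      --format="yaml(sourceRanges, targetTags, allowed)"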
From 83619ba2eab332dde9170c7e8ffebd46d03ec4c4 Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Fri, 26 Jan 2024 11:16:08 +0100
Subject: [PATCH 47/58] Add missing cleanup for metrics to ensure PVCs are
 getting deleted

---
 google/multi-region/active-active/region0/Makefile | 2 +-
 google/multi-region/active-active/region1/Makefile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index 81894f2d..b6226f2a 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -75,7 +75,7 @@ fail-back-to-normal: use-kube update
 clean: use-kube clean-camunda
 
 .PHONY: clean-kube
-clean-kube: clean-kube-gke
+clean-kube: clean-metrics clean-kube-gke
 
 #: Delete temporary brokers that impersonated half of the ones lost in region 1
 clean-fail-over-region1: namespace = $(region)-failover
diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile
index ee101a13..fad77ccb 100644
--- a/google/multi-region/active-active/region1/Makefile
+++ b/google/multi-region/active-active/region1/Makefile
@@ -80,7 +80,7 @@ fail-back-to-normal: use-kube update
 clean: use-kube clean-camunda
 
 .PHONY: clean-kube
-clean-kube: clean-kube-gke
+clean-kube: clean-metrics clean-kube-gke
 
 #: Delete temporary brokers that impersonated half of the ones lost in region 0
 clean-fail-over-region0: use-kube

From 46dddd9ad87f134e7ee43f0978f98dad2840d1af Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Fri, 26 Jan 2024 11:17:08 +0100
Subject: [PATCH 48/58] Remove regionId from target names and some other
 cleanups

---
 .../active-active/region0/Makefile | 18 +++++++++---------
 .../active-active/region1/Makefile | 16 +++++++--------
 2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index b6226f2a..607dc3bb 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -48,10 +48,10 @@ external-urls: use-kube external-urls-no-ingress
 
 ### <--- End of setup --->
 
-.PHONY: fail-over-region1 # Create temporary brokers that impersonate half of the ones lost in region 1 to backfill and restore quorum
-fail-over-region1: chartValues += -f $(failoverChartValues)
-fail-over-region1: namespace = $(region)-failover
-fail-over-region1: use-kube namespace camunda
+.PHONY: fail-over # Create temporary brokers that impersonate half of the ones lost from the other region to backfill and restore quorum
+fail-over: chartValues += -f $(failoverChartValues)
+fail-over: namespace = $(region)-failover
+fail-over: use-kube namespace camunda
 	# TODO connect to existing elastic in current region
 	# TODO importers
 
@@ -60,9 +60,9 @@ fail-back: use-kube namespace prepare-elastic-backup-key
 	--set global.multiregion.installationType=failBack \
 	--set operate.enabled=false \
 	--set tasklist.enabled=false
-	# TODO what if something is running
 	# require clean-camunda but without deleting PVCs or with because its dirty
+
 fail-back-with-cluster-running: use-kube
 	kubectl delete pod camunda-zeebe-0 -n $(namespace)
 	kubectl delete pod camunda-zeebe-2 -n $(namespace)
@@ -71,15 +71,15 @@ fail-back-to-normal: use-kube update
 	kubectl delete pod camunda-zeebe-0 -n $(namespace)
 	kubectl delete pod camunda-zeebe-2 -n $(namespace)
 
-#: Remove Camunda from cluster
+.PHONY: clean # Uninstall Camunda from cluster and delete its disks
 clean: use-kube clean-camunda
 
 .PHONY: clean-kube
 clean-kube: clean-metrics clean-kube-gke
 
-#: Delete temporary brokers that impersonated half of the ones lost in region 1
-clean-fail-over-region1: namespace = $(region)-failover
-clean-fail-over-region1: clean
+.PHONY: clean-fail-over # Delete temporary brokers that impersonated half of the ones lost in region 1
+clean-fail-over: namespace = $(region)-failover
+clean-fail-over: clean
 
 include $(root)/google/include/kubernetes-gke.mk
 include $(root)/include/camunda.mk
diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile
index fad77ccb..ef3e5e26 100644
--- a/google/multi-region/active-active/region1/Makefile
+++ b/google/multi-region/active-active/region1/Makefile
@@ -46,8 +46,8 @@ external-urls: use-kube external-urls-no-ingress
 
 ### <--- End of setup --->
 
-.PHONY: fail-over-region0 # Create temporary brokers that impersonate half of the ones lost in region 0 to backfill and restore quorum
-fail-over-region1: use-kube
+.PHONY: fail-over # Create temporary brokers that impersonate half of the ones lost in region 0 to backfill and restore quorum
+fail-over: use-kube
 	-kubectl create namespace $(namespace)-failover
 	-kubectl config set-context --current --namespace=$(namespace)-failover
 	helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \
@@ -65,9 +65,9 @@ fail-back: use-kube namespace prepare-elastic-backup-key
 	--set global.multiregion.installationType=failBack \
 	--set operate.enabled=false \
 	--set tasklist.enabled=false
-	# TODO what if something is running
 	# require clean-camunda but without deleting PVCs or with because its dirty
+
 fail-back-with-cluster-running: use-kube
 	kubectl delete pod camunda-zeebe-0 -n $(namespace)
 	kubectl delete pod camunda-zeebe-2 -n $(namespace)
@@ -76,17 +76,15 @@ fail-back-to-normal: use-kube update
 	kubectl delete pod camunda-zeebe-0 -n $(namespace)
 	kubectl delete pod camunda-zeebe-2 -n $(namespace)
 
-#: Remove Camunda from cluster
+.PHONY: clean # Uninstall Camunda from cluster and delete its disks
 clean: use-kube clean-camunda
 
 .PHONY: clean-kube
 clean-kube: clean-metrics clean-kube-gke
 
-#: Delete temporary brokers that impersonated half of the ones lost in region 0
-clean-fail-over-region0: use-kube
-	-helm --namespace $(namespace)-failover uninstall $(release)
-	-kubectl delete -n $(namespace)-failover pvc -l app.kubernetes.io/instance=$(release)
-	-kubectl delete namespace $(namespace)-failover
+.PHONY: clean-fail-over # Delete temporary brokers that impersonated half of the ones lost in region 1
+clean-fail-over: namespace = $(region)-failover
+clean-fail-over: clean
 
 include $(root)/google/include/kubernetes-gke.mk
 include $(root)/include/camunda.mk
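With the region-agnostic target names, the drill is the same from either region's directory. Roughly, assuming the targets above and a kubectl context pointing at the surviving region:

    cd google/multi-region/active-active/region0
    make fail-over        # spin up impersonating brokers in the <region>-failover namespace
    # ... once the lost region is back and failed back ...
    make clean-fail-over  # uninstall them and delete their PVCs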
"io.camunda.zeebe.exporter.ElasticsearchExporter" - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL - value: "http://elasticsearch-master-headless.europe-west1.svc.cluster.local:9200" + value: "http://elasticsearch-master-hl.europe-west1.svc.cluster.local:9200" + # Enable JSON logging for Google Cloud Stackdriver + - name: ZEEBE_LOG_APPENDER + value: Stackdriver + - name: ZEEBE_LOG_STACKDRIVER_SERVICENAME + value: zeebe + - name: ZEEBE_LOG_STACKDRIVER_SERVICEVERSION + valueFrom: + fieldRef: + fieldPath: metadata.namespace pvcSize: 1Gi resources: From e902929682b354fe657d339c1c5d821336c45ad6 Mon Sep 17 00:00:00 2001 From: HamzaMasood1 Date: Fri, 26 Jan 2024 11:34:54 +0100 Subject: [PATCH 50/58] updated es link in failover file in region0 --- .../active-active/region0/camunda-failover-to-region0.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml index f1706e95..5ff8080a 100644 --- a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml +++ b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml @@ -9,4 +9,4 @@ tasklist: zeebe: env: - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL - value: "http://elasticsearch-master-headless.us-east1-failover.svc.cluster.local:9200" + value: "http://elasticsearch-master-hl.us-east1-failover.svc.cluster.local:9200" From 6413c4e5ebeb094f40c7b573d4a3dec75a36f6aa Mon Sep 17 00:00:00 2001 From: HamzaMasood1 Date: Fri, 26 Jan 2024 11:39:02 +0100 Subject: [PATCH 51/58] removed merging of json env vars in make file in region0 --- google/multi-region/active-active/region0/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index 607dc3bb..c12ddcc0 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -29,8 +29,8 @@ release ?= camunda # Helm chart coordinates for Camunda chart ?= camunda/camunda-platform # Helm chart values -chartValues ?= $(globalChartValues) -f $(regionalChartValues) -f $(jsonLoggingValues) -jsonLoggingValues ?= $(root)/google/include/log-format-stackdriver.yaml + +chartValues ?= $(globalChartValues) -f $(regionalChartValues) globalChartValues ?= $(root)/google/multi-region/active-active/camunda-values.yaml regionalChartValues ?= camunda-values.yaml failoverChartValues ?= camunda-failover-to-region0.yaml From e921927d8971237b6fc10fd9e11c20b1b659ceb6 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Fri, 26 Jan 2024 11:51:24 +0100 Subject: [PATCH 52/58] Fix order of variables, targets, and includes --- .../active-active/gcp-setup/Makefile | 40 ++++++++++--------- .../active-active/region0/Makefile | 1 + .../active-active/region1/Makefile | 1 + 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile index ca9d9937..1b2bfbdb 100644 --- a/google/multi-region/active-active/gcp-setup/Makefile +++ b/google/multi-region/active-active/gcp-setup/Makefile @@ -1,33 +1,36 @@ # ------------------------------------ -# The following variables should not be changed except for advanced use cases -ifeq ($(OS),Windows_NT) - root ?= $(CURDIR)/../../../.. -else - root ?= $(shell pwd)/../../../.. 
From e921927d8971237b6fc10fd9e11c20b1b659ceb6 Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Fri, 26 Jan 2024 11:51:24 +0100
Subject: [PATCH 52/58] Fix order of variables, targets, and includes

---
 .../active-active/gcp-setup/Makefile | 40 ++++++++++---------
 .../active-active/region0/Makefile   |  1 +
 .../active-active/region1/Makefile   |  1 +
 3 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile
index ca9d9937..1b2bfbdb 100644
--- a/google/multi-region/active-active/gcp-setup/Makefile
+++ b/google/multi-region/active-active/gcp-setup/Makefile
@@ -1,33 +1,36 @@
 # ------------------------------------
-# The following variables should not be changed except for advanced use cases
-ifeq ($(OS),Windows_NT)
-	root ?= $(CURDIR)/../../../..
-else
-	root ?= $(shell pwd)/../../../..
-endif
-# ------------------------------------
 # Set the following for your specific environment
 # Already have a Cluster? Set these values to point to your existing environment
 # Otherwise, these values will be used to create a new Cluster
-include $(root)/google/include/kubernetes-gke.mk
-include $(root)/include/camunda.mk
 
 # GCP project
 project ?= camunda-researchanddevelopment
-
+# GCP regions (see: https://cloud.withgoogle.com/region-picker/)
+# and GKE cluster names
+regions_clusters ?= "us-east1;falko-region-0" "europe-west1;falko-region-1"
+# GCP machine type
 machineType ?= n2-standard-2
 minSize ?= 1
 maxSize ?= 24
-regions ?= us-east1-c europe-west1-b
-clusters ?= manus-region-0 manus-region-1
-regions_clusters ?= "us-east1-c;manus-region-0" "europe-west1-b;manus-region-1"
 # Firewall rule name
-firewallRule ?= zeebe-between-clusters-manu2
+firewallRule ?= falko-camunda-multi-region
 # Brokers per Region
 brokersPerRegion = 4
 # Bucket Name for GCP
-bucketName ?= manus-backup
+bucketName ?= falko-elasticsearch-backup
+
+# ------------------------------------
+# The following variables should not be changed except for advanced use cases
+ifeq ($(OS),Windows_NT)
+	root ?= $(CURDIR)/../../../..
+else
+	root ?= $(shell pwd)/../../../..
+endif
+
+# The all target MUST be the first target in the file so that it is invoked when `make` is called without a goal
+.PHONY: all
+all: kube gcp-firewall gcp-dns-chaining gcp-bucket gcp-gcs-secret
 
 .PHONY: kube
 kube:
@@ -37,7 +40,7 @@ kube:
 	echo "Setting up region: $$region with cluster name: $$clusterName"; \
 	$(MAKE) kube-gke region=$$region clusterName=$$clusterName; \
 	done
-
+#FIXME: run the sub-make directly on the gke makefile to avoid variable clashes
 
 .PHONY: gcp-firewall
 gcp-firewall:
@@ -107,6 +110,5 @@ gcp-register-backup-repo:
 	echo "Stopping port-forwarding..."; \
 	kill $$PID
 
-.PHONY: all
-all: kube gcp-firewall gcp-dns-chaining gcp-bucket gcp-gcs-secret
-
+include $(root)/google/include/kubernetes-gke.mk
+include $(root)/include/camunda.mk
diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile
index c12ddcc0..509f6f5a 100644
--- a/google/multi-region/active-active/region0/Makefile
+++ b/google/multi-region/active-active/region0/Makefile
@@ -35,6 +35,7 @@ globalChartValues ?= $(root)/google/multi-region/active-active/camunda-values.ya
 regionalChartValues ?= camunda-values.yaml
 failoverChartValues ?= camunda-failover-to-region0.yaml
 
+# The all target MUST be the first target in the file so that it is invoked when `make` is called without a goal
 .PHONY: all
 all: use-kube namespace prepare-elastic-backup-key camunda external-urls
 
diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile
index ef3e5e26..cec0d4f6 100644
--- a/google/multi-region/active-active/region1/Makefile
+++ b/google/multi-region/active-active/region1/Makefile
@@ -33,6 +33,7 @@ globalChartValues ?= ../camunda-values.yaml
 regionalChartValues ?= camunda-values.yaml
 chartValues ?= $(globalChartValues) -f $(regionalChartValues)
 
+# The all target MUST be the first target in the file so that it is invoked when `make` is called without a goal
 .PHONY: all
 all: use-kube namespace prepare-elastic-backup-key camunda external-urls
 
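The reordering works because make's default goal is simply the first target it parses. A tiny sketch of the explicit GNU make alternative, in case keeping `all` physically first ever becomes awkward; the include and target names here are illustrative:

    # Equivalent to placing `all` first in the file:
    .DEFAULT_GOAL := all

    include helpers.mk   # may define targets without stealing the default goal

    .PHONY: all
    all: build test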
google/multi-region/active-active/gcp-setup/Makefile | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile index 1b2bfbdb..a9db308b 100644 --- a/google/multi-region/active-active/gcp-setup/Makefile +++ b/google/multi-region/active-active/gcp-setup/Makefile @@ -50,8 +50,14 @@ gcp-firewall: for region_cluster in $(regions_clusters); do \ region=$$(echo $$region_cluster | cut -d';' -f1); \ clusterName=$$(echo $$region_cluster | cut -d';' -f2); \ - networkTag=$$(gcloud compute instances describe $$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_"$$region"_"$$clusterName") --format="get(tags.items)"); \ - ipRange=$$(gcloud container clusters describe $$clusterName --region $$region --format='value(clusterIpv4Cidr)'); \ + nodeName=$$(kubectl get nodes -o name --output jsonpath={.items[0].metadata.name} --context gke_$(project)_"$$region"_"$$clusterName"); \ + echo Nodename $$nodeName; \ + zone=$$(kubectl get node $$nodeName -o jsonpath="{.metadata.labels['topology\.gke\.io/zone']}" --context gke_$(project)_"$$region"_"$$clusterName"); \ + echo Zone $$zone;\ + networkTag=$$(gcloud compute instances describe $$nodeName --zone $$zone --project $(project) --format="get(tags.items)" ); \ + echo NetworkTag $$networkTag; \ + ipRange=$$(gcloud container clusters describe $$clusterName --region $$region --project $(project) --format='value(clusterIpv4Cidr)'); \ + echo IPrange $$ipRange;\ if [ -z "$$networkTags" ]; then \ networkTags=$$networkTag; \ else \ @@ -63,7 +69,7 @@ gcp-firewall: ipRanges=$$ipRanges,$$ipRange; \ fi; \ done; \ - gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRanges --target-tags=$$networkTags + gcloud compute firewall-rules create $(firewallRule) --direction=INGRESS --priority=999 --network=default --action=ALLOW --rules=tcp:9600,tcp:26501,tcp:26502,tcp:9300,tcp:9200,udp:26502,udp:9300,udp:9200 --source-ranges=$$ipRanges --target-tags=$$networkTags --project $(project) .PHONY: gcp-dns-chaining gcp-dns-chaining: From c66984675d25b9d5732905d72d6d62ba27cfbf0f Mon Sep 17 00:00:00 2001 From: Hamza Masood <47217263+hamza-m-masood@users.noreply.github.com> Date: Fri, 26 Jan 2024 15:04:13 +0100 Subject: [PATCH 54/58] Updated the failover values.yaml in region 0 in order to not have any helm merging issues --- .../region0/camunda-failover-to-region0.yaml | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml index 5ff8080a..cd34d287 100644 --- a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml +++ b/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml @@ -8,5 +8,29 @@ tasklist: enabled: false zeebe: env: + - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD + value: "5m" + - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK + value: "0.85" + - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK + value: "0.87" + - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS + value: 
"camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_ARGS_URL + value: "http://elasticsearch-master-hl.us-east1.svc.cluster.local:9200" + - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_CLASSNAME + value: "io.camunda.zeebe.exporter.ElasticsearchExporter" + # Changing the second exporter to the secondary elasticsearch - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL value: "http://elasticsearch-master-hl.us-east1-failover.svc.cluster.local:9200" + # Enable JSON logging for Google Cloud Stackdriver + - name: ZEEBE_LOG_APPENDER + value: Stackdriver + - name: ZEEBE_LOG_STACKDRIVER_SERVICENAME + value: zeebe + - name: ZEEBE_LOG_STACKDRIVER_SERVICEVERSION + valueFrom: + fieldRef: + fieldPath: metadata.namespace From 8ac93724f50868a39acdef973462344bc31eb338 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Fri, 26 Jan 2024 17:34:12 +0100 Subject: [PATCH 55/58] Add new failover config to second region --- .../active-active/gcp-setup/Makefile | 1 + .../active-active/region0/Makefile | 3 +- ...ion0.yaml => camunda-values-failover.yaml} | 6 ++- .../active-active/region1/Makefile | 20 ++++------ .../region1/camunda-values-failover.yaml | 40 +++++++++++++++++++ 5 files changed, 54 insertions(+), 16 deletions(-) rename google/multi-region/active-active/region0/{camunda-failover-to-region0.yaml => camunda-values-failover.yaml} (83%) create mode 100644 google/multi-region/active-active/region1/camunda-values-failover.yaml diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile index a9db308b..5e30f23a 100644 --- a/google/multi-region/active-active/gcp-setup/Makefile +++ b/google/multi-region/active-active/gcp-setup/Makefile @@ -41,6 +41,7 @@ kube: $(MAKE) kube-gke region=$$region clusterName=$$clusterName; \ done #FIXME: run the sub-make directly on the gke makefile to avoid variable clashes +#TODO: add metrics & clean-metrics .PHONY: gcp-firewall gcp-firewall: diff --git a/google/multi-region/active-active/region0/Makefile b/google/multi-region/active-active/region0/Makefile index 509f6f5a..aa523e59 100644 --- a/google/multi-region/active-active/region0/Makefile +++ b/google/multi-region/active-active/region0/Makefile @@ -29,11 +29,10 @@ release ?= camunda # Helm chart coordinates for Camunda chart ?= camunda/camunda-platform # Helm chart values - chartValues ?= $(globalChartValues) -f $(regionalChartValues) globalChartValues ?= $(root)/google/multi-region/active-active/camunda-values.yaml regionalChartValues ?= camunda-values.yaml -failoverChartValues ?= camunda-failover-to-region0.yaml +failoverChartValues ?= camunda-values-failover.yaml # The all target MUST be the first target in the file so that it is invoked when `make` is called without a goal .PHONY: all diff --git a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml 
b/google/multi-region/active-active/region0/camunda-values-failover.yaml
similarity index 83%
rename from google/multi-region/active-active/region0/camunda-failover-to-region0.yaml
rename to google/multi-region/active-active/region0/camunda-values-failover.yaml
index cd34d287..7294dfb1 100644
--- a/google/multi-region/active-active/region0/camunda-failover-to-region0.yaml
+++ b/google/multi-region/active-active/region0/camunda-values-failover.yaml
@@ -8,6 +8,8 @@ tasklist:
   enabled: false
 zeebe:
   env:
+    # the entire env array is copied from camunda-values.yaml
+    # because Helm cannot merge arrays from multiple value files
     - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
       value: "5m"
     - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
@@ -18,11 +20,13 @@ zeebe:
       value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502"
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_CLASSNAME
       value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_ARGS_URL
       value: "http://elasticsearch-master-hl.us-east1.svc.cluster.local:9200"
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_CLASSNAME
       value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
-      # Changing the second exporter to the secondary elasticsearch
+    # Changing the exporter for the lost ES instance to a throw-away ES instance
+    # to allow the other exporter to continue exporting to the surviving ES
+    # and keep counting sequences in preparation for ES snapshot restore
     - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL
       value: "http://elasticsearch-master-hl.us-east1-failover.svc.cluster.local:9200"
     # Enable JSON logging for Google Cloud Stackdriver
diff --git a/google/multi-region/active-active/region1/Makefile b/google/multi-region/active-active/region1/Makefile
index cec0d4f6..2f86f489 100644
--- a/google/multi-region/active-active/region1/Makefile
+++ b/google/multi-region/active-active/region1/Makefile
@@ -29,9 +29,10 @@ release ?= camunda
 # Helm chart coordinates for Camunda
 chart ?= camunda/camunda-platform
 # Helm chart values
-globalChartValues ?= ../camunda-values.yaml
-regionalChartValues ?= camunda-values.yaml
 chartValues ?= $(globalChartValues) -f $(regionalChartValues)
+globalChartValues ?= $(root)/google/multi-region/active-active/camunda-values.yaml
+regionalChartValues ?= camunda-values.yaml
+failoverChartValues ?= camunda-values-failover.yaml

 # The all target MUST be the first target in the file so that it is invoked when `make` is called without a goal
 .PHONY: all
@@ -47,17 +48,10 @@ external-urls: use-kube external-urls-no-ingress

 ### <--- End of setup --->

-.PHONY: fail-over # Create temporary brokers that impersonate half of the ones lost in region 0 to backfill and restore quorum
-fail-over: use-kube
-	-kubectl create namespace $(namespace)-failover
-	-kubectl config set-context --current --namespace=$(namespace)-failover
-	helm install --namespace $(namespace)-failover $(release) $(chart) -f $(chartValues) --skip-crds \
-		--set global.multiregion.installationType=failOver \
-		--set global.multiregion.regionId=0 \
-		--set elasticsearch.enabled=false \
-		--set operate.enabled=false \
-		--set tasklist.enabled=false \
-		--set zeebe-gateway.enabled=false
+.PHONY: fail-over # Create temporary brokers that impersonate half of the brokers lost in the other region to backfill and restore quorum
+fail-over: chartValues += -f $(failoverChartValues)
+fail-over: namespace = $(region)-failover
+fail-over: use-kube namespace camunda

 # TODO connect to existing elastic in current region
 # TODO importers

diff --git a/google/multi-region/active-active/region1/camunda-values-failover.yaml b/google/multi-region/active-active/region1/camunda-values-failover.yaml
new file mode 100644
index 00000000..21acab7f
--- /dev/null
+++ b/google/multi-region/active-active/region1/camunda-values-failover.yaml
@@ -0,0 +1,40 @@
+global:
+  multiregion:
+    installationType: failOver
+    regionId: 0
+operate:
+  enabled: false
+tasklist:
+  enabled: false
+zeebe:
+  env:
+    # the entire env array is copied from camunda-values.yaml
+    # because Helm cannot merge arrays from multiple value files
+    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
+      value: "5m"
+    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
+      value: "0.85"
+    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
+      value: "0.87"
+    - name: ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS
+      value: "camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.us-east1.svc.cluster.local:26502,camunda-zeebe-0.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-1.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-2.camunda-zeebe.europe-west1.svc.cluster.local:26502,camunda-zeebe-3.camunda-zeebe.europe-west1.svc.cluster.local:26502"
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_CLASSNAME
+      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
+    # Changing the exporter for the lost ES instance to a throw-away ES instance
+    # to allow the other exporter to continue exporting to the surviving ES
+    # and keep counting sequences in preparation for ES snapshot restore
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION0_ARGS_URL
+      value: "http://elasticsearch-master-hl.europe-west1-failover.svc.cluster.local:9200"
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_CLASSNAME
+      value: "io.camunda.zeebe.exporter.ElasticsearchExporter"
+    - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCHREGION1_ARGS_URL
+      value: "http://elasticsearch-master-hl.europe-west1.svc.cluster.local:9200"
+    # Enable JSON logging for Google Cloud Stackdriver
+    - name: ZEEBE_LOG_APPENDER
+      value: Stackdriver
+    - name: ZEEBE_LOG_STACKDRIVER_SERVICENAME
+      value: zeebe
+    - name: ZEEBE_LOG_STACKDRIVER_SERVICEVERSION
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.namespace

From 48a545db4d97bbccb368e225a3830abf46860805 Mon Sep 17 00:00:00 2001
From: Falko Menge
Date: Fri, 26 Jan 2024 17:35:03 +0100
Subject: [PATCH 56/58] Add Makefile for keeping similar files in sync using
 `meld`

---
 google/multi-region/active-active/Makefile | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 google/multi-region/active-active/Makefile

diff --git a/google/multi-region/active-active/Makefile b/google/multi-region/active-active/Makefile
new file mode 100644
index 00000000..ab121453
--- /dev/null
+++ b/google/multi-region/active-active/Makefile
@@ -0,0 +1,12 @@
+
+meld-gcp-setup:
+	meld region0/Makefile gcp-setup/Makefile
+
+meld-makefiles:
+	meld 
region0/Makefile region1/Makefile + +meld-regions: + meld region0 region1 + +meld-failover: + meld camunda-values.yaml region0/camunda-values-failover.yaml \ No newline at end of file From bc67ce202ac6e7fb77f5331d578ab70f29c97b67 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Fri, 26 Jan 2024 17:42:03 +0100 Subject: [PATCH 57/58] Improve chart value documentation --- .../active-active/region0/camunda-values-failover.yaml | 1 + google/multi-region/active-active/region0/camunda-values.yaml | 2 +- .../active-active/region1/camunda-values-failover.yaml | 1 + google/multi-region/active-active/region1/camunda-values.yaml | 4 ++-- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/google/multi-region/active-active/region0/camunda-values-failover.yaml b/google/multi-region/active-active/region0/camunda-values-failover.yaml index 7294dfb1..60104d5f 100644 --- a/google/multi-region/active-active/region0/camunda-values-failover.yaml +++ b/google/multi-region/active-active/region0/camunda-values-failover.yaml @@ -1,6 +1,7 @@ global: multiregion: installationType: failOver + # id of the region that has failed and should be impersonated regionId: 1 operate: enabled: false diff --git a/google/multi-region/active-active/region0/camunda-values.yaml b/google/multi-region/active-active/region0/camunda-values.yaml index 6de61cfa..b738dc41 100644 --- a/google/multi-region/active-active/region0/camunda-values.yaml +++ b/google/multi-region/active-active/region0/camunda-values.yaml @@ -1,4 +1,4 @@ global: multiregion: - # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. + # unique id of the region. MUST be an integer starting at 0 for computation. With 2 regions, you would have region ids 0 and 1. regionId: 0 diff --git a/google/multi-region/active-active/region1/camunda-values-failover.yaml b/google/multi-region/active-active/region1/camunda-values-failover.yaml index 21acab7f..ae925c22 100644 --- a/google/multi-region/active-active/region1/camunda-values-failover.yaml +++ b/google/multi-region/active-active/region1/camunda-values-failover.yaml @@ -1,6 +1,7 @@ global: multiregion: installationType: failOver + # id of the region that has failed and should be impersonated regionId: 0 operate: enabled: false diff --git a/google/multi-region/active-active/region1/camunda-values.yaml b/google/multi-region/active-active/region1/camunda-values.yaml index 672b6f8a..ea570760 100644 --- a/google/multi-region/active-active/region1/camunda-values.yaml +++ b/google/multi-region/active-active/region1/camunda-values.yaml @@ -1,4 +1,4 @@ global: multiregion: - # unique id of the region. Should start at 0 for easy computation. With 2 regions, you would have region 0 and 1. - regionId: 1 \ No newline at end of file + # unique id of the region. MUST be an integer starting at 0 for computation. With 2 regions, you would have region ids 0 and 1. 
+ regionId: 1 From 0429d5d50c6dc29ac8df4f098b2e11d77d0cd270 Mon Sep 17 00:00:00 2001 From: Falko Menge Date: Wed, 3 Jul 2024 01:46:06 +0200 Subject: [PATCH 58/58] Add metrics --- google/multi-region/active-active/gcp-setup/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/google/multi-region/active-active/gcp-setup/Makefile b/google/multi-region/active-active/gcp-setup/Makefile index 5e30f23a..d0c24176 100644 --- a/google/multi-region/active-active/gcp-setup/Makefile +++ b/google/multi-region/active-active/gcp-setup/Makefile @@ -38,7 +38,7 @@ kube: region=$$(echo $$region_cluster | cut -d';' -f1); \ clusterName=$$(echo $$region_cluster | cut -d';' -f2); \ echo "Setting up region: $$region with cluster name: $$clusterName"; \ - $(MAKE) kube-gke region=$$region clusterName=$$clusterName; \ + $(MAKE) kube-gke metrics region=$$region clusterName=$$clusterName; \ done #FIXME: run the sub-make directly on the gke makefile to avoid variable clashes #TODO: add metrics & clean-metrics @@ -119,3 +119,4 @@ gcp-register-backup-repo: include $(root)/google/include/kubernetes-gke.mk include $(root)/include/camunda.mk +include $(root)/metrics/metrics.mk
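
------------------------------------------------------------------------------
The setup assembled by the patches above can be sanity-checked with a few
read-only commands. The following is a minimal sketch, assuming the gcp-setup
Makefile defaults shown earlier (project camunda-researchanddevelopment,
firewall rule falko-camunda-multi-region, clusters falko-region-0 and
falko-region-1); the dns-check pod name and busybox image are illustrative
choices, not part of the repository:

#!/usr/bin/env bash
set -euo pipefail

project="camunda-researchanddevelopment"
firewallRule="falko-camunda-multi-region"

# The rule created by `make gcp-firewall` should list one source range
# (a cluster pod CIDR) and one target tag per cluster.
gcloud compute firewall-rules describe "$firewallRule" \
  --project "$project" \
  --format="yaml(sourceRanges, targetTags, allowed)"

# With DNS chaining in place, either cluster should resolve the region-scoped
# broker names used in ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS.
for context in "gke_${project}_us-east1_falko-region-0" \
               "gke_${project}_europe-west1_falko-region-1"; do
  kubectl --context "$context" run dns-check --rm -i --restart=Never \
    --image=busybox:1.36 -- \
    nslookup camunda-zeebe-0.camunda-zeebe.us-east1.svc.cluster.local
done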
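
The `fail-over` target added to region1's Makefile in PATCH 55 works entirely
through target-specific variable overrides. A rough expansion of
`make fail-over` is sketched below, under the assumption that the `use-kube`,
`namespace`, and `camunda` recipes from the included makefiles switch the
kubectl context, create the namespace, and run `helm install` with
$(chartValues), as the removed inline recipe did; those recipes are not shown
in this series:

# use-kube: point kubectl at the region1 cluster
kubectl config use-context "gke_camunda-researchanddevelopment_europe-west1_falko-region-1"
# namespace: fail-over overrides it to $(region)-failover
kubectl create namespace "europe-west1-failover"
# camunda: install the chart; fail-over appends the failover values file,
# which sets installationType=failOver and points the lost region's
# Elasticsearch exporter at the throw-away instance
helm install camunda camunda/camunda-platform \
  --namespace "europe-west1-failover" \
  -f ../camunda-values.yaml \
  -f camunda-values.yaml \
  -f camunda-values-failover.yaml

Because Helm replaces lists wholesale rather than merging them, the failover
values file repeats the full zeebe.env array instead of overriding single
entries — hence the "entire env array is copied" comments in both regions'
camunda-values-failover.yaml.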