From 7b9c5f16c7750cb477a2eeec54cb77f51935b235 Mon Sep 17 00:00:00 2001 From: Frikky Date: Fri, 13 Dec 2024 11:46:23 +0100 Subject: [PATCH] Update configuration.md --- docs/configuration.md | 35 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index a247e6e..9ba3573 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -256,26 +256,19 @@ Docker Compose Installation Guide: https://docs.docker.com/compose/install/ 1. Add and change the following environment variables for Orborus in the docker-compose.yml file. `BASE_URL` is the external URL of the server you're running Shuffle on (the one you visit Shuffle with in your browser): ``` # Required: -## Free since Shuffle 2.0.0-beta -- SHUFFLE_SWARM_CONFIG=run - -## Ensures apps are not halting due to memory overloads -- SHUFFLE_LOGS_DISABLED=true - -## Your Shuffle URL NEEDS to be replaced by the backend's public IP. Otherwise Docker DNS: https://dockerlabs.collabnix.com/intermediate/networking/Configuring_DNS.html -- BASE_URL=http://YOUR-BACKEND-URL:3001 - -## Worker image choice -- SHUFFLE_WORKER_IMAGE=ghcr.io/shuffle/shuffle-worker:nightly - -## Optional configuration: -SHUFFLE_AUTO_IMAGE_DOWNLOAD=false # This should be set to false IF images are already downloaded -SHUFFLE_WORKER_SERVER_URL=http://shuffle-workers # Internal Docker Worker URL (don't modify if not necessary) -SHUFFLE_SWARM_NETWORK_NAME=shuffle_swarm_executions # If you want a special network name in the executions -SHUFFLE_SCALE_REPLICAS=1 # The amount of worker container replicas PER NODE (since 1.2.0) -SHUFFLE_APP_REPLICAS=1 # The amount of app container replicas PER NODE (since 1.2.1) -SHUFFLE_MAX_SWARM_NODES=1 # The max amount of swarm nodes shuffle can use (since 1.3.2) -SHUFFLE_SKIPSSL_VERIFY=true # Stops Shuffle's internal services from validating TLS/SSL certificates. Good to use if BASE_URL is a domain. 
+- SHUFFLE_SWARM_CONFIG=run # Enables SWARM scaling +- SHUFFLE_LOGS_DISABLED=true # Ensures we don't have memory issues +- BASE_URL=http://YOUR-BACKEND-IP:3001 # replaced by the backend's public IP +- SHUFFLE_WORKER_IMAGE=ghcr.io/shuffle/shuffle-worker:latest + +# Optional configuration: +- SHUFFLE_AUTO_IMAGE_DOWNLOAD=false # This should be set to false IF images are already downloaded +- SHUFFLE_WORKER_SERVER_URL=http://shuffle-workers # Internal Docker Worker URL (don't modify if not necessary) +- SHUFFLE_SWARM_NETWORK_NAME=shuffle_swarm_executions # If you want a special network name in the executions +- SHUFFLE_SCALE_REPLICAS=1 # The amount of worker container replicas PER NODE (since 1.2.0) +- SHUFFLE_APP_REPLICAS=1 # The amount of app container replicas PER NODE (since 1.2.1) +- SHUFFLE_MAX_SWARM_NODES=1 # The max amount of swarm nodes shuffle can use (since 1.3.2) +- SHUFFLE_SKIPSSL_VERIFY=true # Stops Shuffle's internal services from validating TLS/SSL certificates. Good to use if BASE_URL is a domain. ``` @@ -1354,4 +1347,4 @@ sudo podman-compose -f docker-compose.yml up ### Marketplace Setup -Using cloud marketplaces ([AWS Marketplace](https://aws.amazon.com/marketplace/), [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), [Azure Marketplace](https://azuremarketplace.microsoft.com/)), you should be able to deploy Shuffle onprem with a few clicks. This is a great way to get started with Shuffle, as it's a fully managed service and test it out in your own environment without worrying about the setup. We are working with our cloud partners to get this up and running as soon as possible. \ No newline at end of file +Using cloud marketplaces ([AWS Marketplace](https://aws.amazon.com/marketplace/), [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), [Azure Marketplace](https://azuremarketplace.microsoft.com/)), you should be able to deploy Shuffle onprem with a few clicks. 
This is a great way to get started with Shuffle, as it's a fully managed service that you can test out in your own environment without worrying about the setup. We are working with our cloud partners to get this up and running as soon as possible.