diff --git a/d9a4c8ef.39c9fc3f.js b/d9a4c8ef.39c9fc3f.js
new file mode 100644
index 0000000000..ed930980c0
--- /dev/null
+++ b/d9a4c8ef.39c9fc3f.js
@@ -0,0 +1 @@
+(window.webpackJsonp=window.webpackJsonp||[]).push([[264],{416:function(e,n,t){"use strict";t.r(n),t.d(n,"frontMatter",(function(){return s})),t.d(n,"metadata",(function(){return b})),t.d(n,"rightToc",(function(){return p})),t.d(n,"default",(function(){return d}));var a=t(1),o=t(9),l=(t(0),t(465)),r=t(477),c=t(478),i=t(464),s={last_modified_on:"2025-01-02",title:"CLI",description:"How to use the Qovery CLI (Command Line Interface)"},b={id:"using-qovery/interface/cli",title:"CLI",description:"How to use the Qovery CLI (Command Line Interface)",source:"@site/docs/using-qovery/interface/cli.md",permalink:"/docs/using-qovery/interface/cli",sidebar:"docs",previous:{title:"Web interface",permalink:"/docs/using-qovery/interface/web-interface"},next:{title:"REST API",permalink:"/docs/using-qovery/interface/rest-api"}},p=[{value:"First usage",id:"first-usage",children:[{value:"Install",id:"install",children:[]},{value:"Sign up",id:"sign-up",children:[]},{value:"Help",id:"help",children:[]}]},{value:"Context",id:"context",children:[{value:"Set New Context",id:"set-new-context",children:[]},{value:"Print Current Context",id:"print-current-context",children:[]}]},{value:"Log",id:"log",children:[{value:"Follow Logs",id:"follow-logs",children:[]}]},{value:"Status",id:"status",children:[]},{value:"Console",id:"console",children:[]},{value:"Shell",id:"shell",children:[{value:"Pass a command",id:"pass-a-command",children:[]},{value:"Shell in a dedicated pod",id:"shell-in-a-dedicated-pod",children:[]},{value:"Shell in a dedicated container",id:"shell-in-a-dedicated-container",children:[]}]},{value:"Port-forward",id:"port-forward",children:[{value:"Port-forward a dedicated pod",id:"port-forward-a-dedicated-pod",children:[]}]},{value:"Generate API token",id:"generate-api-token",children:[]},{value:"Managing services, environments and projects",id:"managing-services-environments-and-projects",children:[{value:"Environment",id:"environment",children:[]},{value:"Projects",id:"projects",children:[]}]},{value:"Access your Qovery-managed cluster",id:"access-your-qovery-managed-cluster",children:[]},{value:"Lock cluster updates",id:"lock-cluster-updates",children:[]},{value:"Managing the Deployment Pipeline",id:"managing-the-deployment-pipeline",children:[{value:"List stages",id:"list-stages",children:[]},{value:"Add a stage",id:"add-a-stage",children:[]},{value:"Modify a stage",id:"modify-a-stage",children:[]},{value:"Delete a stage",id:"delete-a-stage",children:[]},{value:"Change stage for a service",id:"change-stage-for-a-service",children:[]}]},{value:"Static token",id:"static-token",children:[]},{value:"Support",id:"support",children:[]}],u={rightToc:p};function d(e){var n=e.components,t=Object(o.a)(e,["components"]);return Object(l.b)("wrapper",Object(a.a)({},u,t,{components:n,mdxType:"MDXLayout"}),Object(l.b)(i.a,{type:"success",mdxType:"Alert"},Object(l.b)("p",null,"Use Infrastructure as Code (IaC) with our ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"/docs/using-qovery/integration/terraform-provider/"}),"Terraform Provider")," and our ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"/docs/using-qovery/interface/rest-api/"}),"REST API")," to manage Qovery and deploy your apps.")),Object(l.b)("p",null,"Qovery provides a very easy to use CLI (Command Line Interface) designed to fit the developer workflow perfectly."),Object(l.b)("hr",null),Object(l.b)("p",null,"The purpose of the CLI is to integrate seamlessly with your development 
workflow:"),Object(l.b)("ol",null,Object(l.b)("li",{parentName:"ol"},"Write code"),Object(l.b)("li",{parentName:"ol"},"Commit"),Object(l.b)("li",{parentName:"ol"},Object(l.b)("strong",{parentName:"li"},"Qovery")," - deploy a new version of your application"),Object(l.b)("li",{parentName:"ol"},Object(l.b)("strong",{parentName:"li"},"Qovery CLI")," - check the status of your application"),Object(l.b)("li",{parentName:"ol"},Object(l.b)("strong",{parentName:"li"},"Qovery CLI")," - debug your application"),Object(l.b)("li",{parentName:"ol"},"Repeat")),Object(l.b)("h2",{id:"first-usage"},"First usage"),Object(l.b)("h3",{id:"install"},"Install"),Object(l.b)(r.a,{centered:!0,className:"rounded",defaultValue:"linux",placeholder:"Select your OS",select:!1,size:null,values:[{group:"Platforms",label:"Linux",value:"linux"},{group:"Platforms",label:"MacOS",value:"macos"},{group:"Platforms",label:"Windows",value:"windows"},{group:"Platforms",label:"Docker",value:"docker"}],mdxType:"Tabs"},Object(l.b)(c.a,{value:"linux",mdxType:"TabItem"},Object(l.b)(r.a,{centered:!0,className:"rounded",defaultValue:"universal",values:[{label:"*nix",value:"universal"},{label:"Arch Linux",value:"arch"},{label:"Manual",value:"manual"}],mdxType:"Tabs"},Object(l.b)(c.a,{value:"universal",mdxType:"TabItem"},Object(l.b)("p",null,"To download and install Qovery CLI on any Linux distribution:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ curl -s https://get.qovery.com | bash\n"))),Object(l.b)(c.a,{value:"arch",mdxType:"TabItem"},Object(l.b)("p",null,"Qovery is part of ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://aur.archlinux.org/packages"}),"AUR")," packages, so you can install it with ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Jguer/yay"}),"yay"),":"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ yay qovery-cli\n"))),Object(l.b)(c.a,{value:"manual",mdxType:"TabItem"},Object(l.b)("p",null,"Install the Qovery CLI on Linux manually by downloading the ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/releases"}),"latest release"),", and uncompress its content to a folder into your shell ",Object(l.b)("inlineCode",{parentName:"p"},"PATH"),".")))),Object(l.b)(c.a,{value:"macos",mdxType:"TabItem"},Object(l.b)(r.a,{centered:!0,className:"rounded",defaultValue:"homebrew",values:[{label:"Homebrew",value:"homebrew"},{label:"Script",value:"script"},{label:"Manual",value:"manual"}],mdxType:"Tabs"},Object(l.b)(c.a,{value:"homebrew",mdxType:"TabItem"},Object(l.b)("p",null,"The common solution to install a command line binary on the MacOS is to use ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://brew.sh/"}),"Homebrew"),"."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Add Qovery brew repository\n$ brew tap Qovery/qovery-cli\n\n# Install the CLI\n$ brew install qovery-cli\n"))),Object(l.b)(c.a,{value:"script",mdxType:"TabItem"},Object(l.b)("p",null,"To download and install Qovery CLI from the command line:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ curl -s https://get.qovery.com | bash\n"))),Object(l.b)(c.a,{value:"manual",mdxType:"TabItem"},Object(l.b)("p",null,"Install the Qovery CLI on Mac OS manually by downloading the 
",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/releases"}),"latest release"),", and uncompress its content to a folder into your shell ",Object(l.b)("inlineCode",{parentName:"p"},"PATH"),".")))),Object(l.b)(c.a,{value:"windows",mdxType:"TabItem"},Object(l.b)(r.a,{centered:!0,className:"rounded",defaultValue:"scoop",values:[{label:"Scoop",value:"scoop"},{label:"Manual",value:"manual"}],mdxType:"Tabs"},Object(l.b)(c.a,{value:"scoop",mdxType:"TabItem"},Object(l.b)("p",null,"The classic way to install binaries on Windows is to use ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://scoop.sh/"}),"Scoop"),"."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Add Qovery bucket\n$ scoop bucket add qovery https://github.com/Qovery/scoop-qovery-cli\n\n# Install the CLI\n$ scoop install qovery-cli\n"))),Object(l.b)(c.a,{value:"manual",mdxType:"TabItem"},Object(l.b)("p",null,"Install the Qovery CLI on Windows manually by downloading the ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/releases"}),"latest release"),", and uncompress its content to\n",Object(l.b)("inlineCode",{parentName:"p"},"C:\\Windows"),".")))),Object(l.b)(c.a,{value:"docker",mdxType:"TabItem"},Object(l.b)("p",null,"Install Docker on your local machine and run the following command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Pull and Run the latest Qovery CLI\n$ docker run ghcr.io/qovery/qovery-cli:latest help\n")),Object(l.b)("p",null,"Change ",Object(l.b)("inlineCode",{parentName:"p"},"latest")," by the version you want to use. For example, to use the version 0.58.4, run:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ docker run ghcr.io/qovery/qovery-cli:0.58.4 help\n")),Object(l.b)("p",null,"Note: ",Object(l.b)("inlineCode",{parentName:"p"},"ghcr.io")," is the ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/pkgs/container/qovery-cli"}),"GitHub Container Registry"),"."))),Object(l.b)("h3",{id:"sign-up"},"Sign up"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Sign up and sign in command\n$ qovery auth\n")),Object(l.b)(i.a,{type:"info",mdxType:"Alert"},Object(l.b)("p",null,"If you are using a headless (without GUI) environment, you can use the following command to sign up and sign in:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Sign up and sign in command\n$ qovery auth --headless\n"))),Object(l.b)("p",null,"Your browser window with Qovery sign-up page will open. 
Follow the instructions to sign up and sign in."),Object(l.b)("h3",{id:"help"},"Help"),Object(l.b)("p",null,"You can see all the commands available by executing:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery help\n")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash",metastring:'title="Help output"',title:'"Help','output"':!0}),'$ qovery help\nA Command-line Interface of the Qovery platform\n\nUsage:\n qovery [command]\n\nAvailable Commands:\n application Manage applications\n auth Log in to Qovery\n cluster Manage clusters\n completion Generate the autocompletion script for the specified shell\n console Opens the application in Qovery Console in your browser\n container Manage containers\n context Manage CLI context\n cronjob Manage cronjobs\n database Manage databases\n env Manage Environment Variables and Secrets\n environment Manage environments\n helm Manage helms\n help Help about any command\n lifecycle Manage lifecycle jobs\n list-pods List the pods of a service with their pods\n log Print your application logs\n port-forward Port forward a port to an application container\n project Manage Project\n service Manage services\n shell Connect to an application container\n status Print the status of your application\n token Generate an API token\n upgrade Upgrade Qovery CLI to latest version\n version Print installed version of the Qovery CLI\n\nFlags:\n -h, --help help for qovery\n --verbose Verbose output\n\nUse "qovery [command] --help" for more information about a command.\n')),Object(l.b)("h2",{id:"context"},"Context"),Object(l.b)("p",null,"Context command lets you configure the CLI to work with your chosen application. Before executing other commands, you need first to set up the context.\nThe context is then remembered and used by the CLI. 
You can configure a new context anytime by running the ",Object(l.b)("inlineCode",{parentName:"p"},"qovery context set")," command."),Object(l.b)("p",null,"Most of the commands support an inline context set allowing you to directly pass the URL of the application you want to interact with."),Object(l.b)("p",null,"Example:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell https://console.qovery.com/organization/51927012-8377-4e0f-84cf-7f5f38a0154b/project/a6545d50-69a3-4966-89cc-4c0bfb6d3448/environment/c9ac549b-a855-4d3b-b652-d68d5f1fea11/application/820ca0a3-08bf-42c1-8ad2-540714ad657f/general\n# this is the url of my back-end application\n\nOrganization | My orga\nProject | R&D / Backend\nEnvironment | prod\nServiceLevel | back-end\nServiceType | application\n\n$ ls\n...\n")),Object(l.b)("h3",{id:"set-new-context"},"Set New Context"),Object(l.b)("p",null,"To set a new context, type ",Object(l.b)("inlineCode",{parentName:"p"},"qovery context set"),":"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery context set\nQovery: Current context:\nOrganization | Qovery\nProject | test\nEnvironment | development\nApplication | website\n\nQovery: Select new context\nOrganization:\n\u2714 Qovery\nProject:\n\u2714 admin\nEnvironment:\n\u2714 main\nApplication:\n\u2714 app\n\nQovery: New context:\nOrganization | Qovery\nProject | admin\nEnvironment | main\nApplication | app\n")),Object(l.b)("h3",{id:"print-current-context"},"Print Current Context"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery context\nQovery: Current context:\nOrganization | Qovery\nProject | admin\nEnvironment | main\nApplication | app\n\nQovery: You can set a new context using 'qovery context set'.\n")),Object(l.b)("h2",{id:"log"},"Log"),Object(l.b)("p",null,"Log command allows you to display the application logs."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery log\n TIME MESSAGE\n Jul 15 08:46:13.019717 at /usr/src/app/autoFunctions/levels.js:17:16\n Jul 15 08:46:13.019721 at Array.forEach ()\n Jul 15 08:46:13.019724 at Timeout._onTimeout (/usr/src/app/autoFunctions/levels.js:15:14)\n Jul 15 08:46:13.019728 at listOnTimeout (internal/timers.js:557:17)\n # ... the rest of logs\n")),Object(l.b)("p",null,"By default, the last 1000 logs are displayed."),Object(l.b)("h3",{id:"follow-logs"},"Follow Logs"),Object(l.b)("p",null,"To make the CLI follow your logs, use the ",Object(l.b)("inlineCode",{parentName:"p"},"-f")," flag:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery log -f\n TIME MESSAGE\n Jul 15 08:46:13.019717 at /usr/src/app/autoFunctions/levels.js:17:16\n Jul 15 08:46:13.019721 at Array.forEach ()\n Jul 15 08:46:13.019724 at Timeout._onTimeout (/usr/src/app/autoFunctions/levels.js:15:14)\n Jul 15 08:46:13.019728 at listOnTimeout (internal/timers.js:557:17)\n # ... 
the rest of logs\n")),Object(l.b)("p",null,"This will make the CLI follow your application logs and append any new logs until you press ",Object(l.b)("inlineCode",{parentName:"p"},"CTRL+C"),"."),Object(l.b)("h2",{id:"status"},"Status"),Object(l.b)("p",null,"Status command lets you print the basic status of your application."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery status\n15 Jul 21 10:55 CEST\nApplication | Backend\nStatus | RUNNING\n")),Object(l.b)("h2",{id:"console"},"Console"),Object(l.b)("p",null,"Console command quickly opens the Qovery Console in your browser to let you see more information about your application."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery console\nQovery: Opening https://console.qovery.com/platform/organization/your-org/projects/your-proj/environments/your-env/applications/your-app/summary\n")),Object(l.b)("h2",{id:"shell"},"Shell"),Object(l.b)("p",null,"Shell command allows you to open a connection and execute commands directly in the container running your application."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell\n/ # ls\nbin media srv\ndev mnt sys\ndocker-entrypoint.d opt tmp\ndocker-entrypoint.sh proc usr\netc root var\nhome run www\nlib sbin\n")),Object(l.b)(i.a,{type:"info",mdxType:"Alert"},"Keep in mind these limitations when using this feature:",Object(l.b)("ul",null,Object(l.b)("li",null,"Install a process reaper as PID 1 in your container (e.g. dumb-init), as you may leave zombie processes in your container if your shell terminates improperly (e.g. Ctrl+C, connection restart). This is a known issue with Kubernetes exec, which leaves processes alive after the attach is closed;"),Object(l.b)("li",null,"the shell is force-closed after 1 hour or 1GB transmitted;"),Object(l.b)("li",null,"we use sh by default. To get auto-completion, start bash."))),Object(l.b)(i.a,{type:"info",mdxType:"Alert"},"The width of the terminal is limited to 80 characters. 
But you can resize it once you are inside the application with one of these commands:",Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"COLUMNS=200 tput init\nstty cols 200\n"))),Object(l.b)("h3",{id:"pass-a-command"},"Pass a command"),Object(l.b)("p",null,"To pass a command, you can use the ",Object(l.b)("inlineCode",{parentName:"p"},"--command")," or ",Object(l.b)("inlineCode",{parentName:"p"},"-c")," argument followed by your command."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --command ls\nbin media srv\ndev mnt sys\ndocker-entrypoint.d opt tmp\ndocker-entrypoint.sh proc usr\netc root var\nhome run www\nlib sbin\n")),Object(l.b)("p",null,"To pass several arguments, you can separate them with a comma or send different ",Object(l.b)("inlineCode",{parentName:"p"},"--command"),"."),Object(l.b)("p",null,Object(l.b)("inlineCode",{parentName:"p"},"qovery shell --command ls --command -l"),"\n",Object(l.b)("inlineCode",{parentName:"p"},"qovery shell --command ls,-l"),"\n",Object(l.b)("inlineCode",{parentName:"p"},"qovery shell -c ls,-l")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --command ls --command -l\ndrwxr-xr-x 2 root root 4096 Nov 30 09:32 bin\ndrwxr-xr-x 5 root root 360 Dec 21 09:46 dev\ndrwxr-xr-x 1 root root 41 Dec 20 20:13 docker-entrypoint.d\n-rwxr-xr-x 1 root root 1620 Dec 20 20:13 docker-entrypoint.sh\ndrwxr-xr-x 1 root root 25 Dec 21 09:46 etc\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 home\ndrwxr-xr-x 1 root root 61 Dec 20 22:11 lib\ndrwxr-xr-x 5 root root 44 Nov 30 09:32 media\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 mnt\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 opt\ndr-xr-xr-x 209 root root 0 Dec 21 09:46 proc\ndrwx------ 1 root root 26 Dec 21 10:38 root\ndrwxr-xr-x 1 root root 23 Dec 21 09:46 run\ndrwxr-xr-x 2 root root 4096 Nov 30 09:32 sbin\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 srv\ndr-xr-xr-x 13 root root 0 Dec 21 09:46 sys\ndrwxrwxrwt 2 root root 6 Nov 30 09:32 tmp\ndrwxr-xr-x 1 root root 66 Nov 30 09:32 usr\ndrwxr-xr-x 1 root root 19 Nov 30 09:32 var\ndrwxr-xr-x 2 root root 59 Dec 21 09:45 www\n")),Object(l.b)("h3",{id:"shell-in-a-dedicated-pod"},"Shell in a dedicated pod"),Object(l.b)("p",null,"If your application is running on several pods, you can shell directly in a dedicated one by using the ",Object(l.b)("inlineCode",{parentName:"p"},"--pod")," or ",Object(l.b)("inlineCode",{parentName:"p"},"-p")," argument followed by your pod name."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --pod app-5f65fb5c4-frontend-5f65db5c4b-q4w11\n")),Object(l.b)("p",null,"NOTE: you can get the list of pods by running the ",Object(l.b)("inlineCode",{parentName:"p"},"qovery list-pods")," command."),Object(l.b)("h3",{id:"shell-in-a-dedicated-container"},"Shell in a dedicated container"),Object(l.b)("p",null,"If you have several containers in your pod, you can shell directly in a dedicated one by using the ",Object(l.b)("inlineCode",{parentName:"p"},"--container")," argument followed by your container name."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --container app-5f65fb5c4-frontend\n")),Object(l.b)("h2",{id:"port-forward"},"Port-forward"),Object(l.b)("p",null,"Port-forward command allows you to port-forward all the traffic on your local machine 
to a remote resource available on a Qovery environment. This mechanism allows developers to create a secure, encrypted tunnel from their local machine to the applications or databases hosted in the cloud."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),'$ qovery port-forward -p 8000:80 #your_local_port:your_remote_port\nInfo: Current context:\nOrganization | Qovery Prod\nProject | R&D / Frontend\nEnvironment | prod\nService | console\nType | application\n\nInfo: Continue with port-forward command using this context ?\nPlease type "yes" to validate context: yes\n\nListening on 127.0.0.1:8000 => 80\n')),Object(l.b)("p",null,"The port-forward feature works with any ",Object(l.b)("inlineCode",{parentName:"p"},"application"),", ",Object(l.b)("inlineCode",{parentName:"p"},"Cronjob"),", ",Object(l.b)("inlineCode",{parentName:"p"},"Lifecycle job")," or ",Object(l.b)("inlineCode",{parentName:"p"},"database")," (Container or Managed) deployed with Qovery. For ",Object(l.b)("inlineCode",{parentName:"p"},"Managed database")," instances on AWS, once the port-forward is activated, you must specify ",Object(l.b)("inlineCode",{parentName:"p"},"--tls")," and ",Object(l.b)("inlineCode",{parentName:"p"},"--tls-insecure")," in your database connection command since localhost is not a valid hostname."),Object(l.b)("h3",{id:"port-forward-a-dedicated-pod"},"Port-forward a dedicated pod"),Object(l.b)("p",null,"If your application is running on several pods, you can port-forward to a dedicated one by using the ",Object(l.b)("inlineCode",{parentName:"p"},"--pod")," argument followed by your pod name."),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery port-forward -p 8000:80 --pod app-5f65fb5c4-frontend-5f65db5c4b-q4w11\n")),Object(l.b)("p",null,"NOTE: you can get the list of pods by running the ",Object(l.b)("inlineCode",{parentName:"p"},"qovery list-pods")," command."),Object(l.b)("h2",{id:"generate-api-token"},"Generate API token"),Object(l.b)("p",null,"To use the Qovery API you will need to generate an authentication token. 
To generate an API token, you can install the CLI and type:"),Object(l.b)(i.a,{type:"warning",mdxType:"Alert"},Object(l.b)("p",null,"Never share your API token with anyone.")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery token\n\nQovery: Select organization\nOrganization:\n\u2714 My Organization\nChoose a token name\nToken name: Romaric\nChoose a token description\nToken description: used for Github Actions\nQovery: ---- Never share this authentication token and keep it secure ----\nQovery: qov_4LnEg2wFxxxxxHObGSQ22rjBZZyyyySgyR6Y_2500882691\nQovery: ---- Never share this authentication token and keep it secure ----\n")),Object(l.b)("p",null,"To use your token and list your organizations:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-shell"}),"curl -X GET -H 'Authorization: Token qov_4LnEg2wFxxxxxHObGSQ22rjBZZyyyySgyR6Y_2500882691' https://api.qovery.com/organization\n")),Object(l.b)(i.a,{type:"info",mdxType:"Alert"},Object(l.b)("p",null,"The token can be used to interact programmatically with our API (directly, via our Terraform Provider, etc.).\nIf you get a 424 error while trying to create new applications from one of your git repositories, please make sure that the Organization Owner has access to the repository you are configuring for your app.")),Object(l.b)("p",null,"Check out our ",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://api-doc.qovery.com"}),"API documentation"),"."),Object(l.b)("h2",{id:"managing-services-environments-and-projects"},"Managing services, environments and projects"),Object(l.b)("p",null,"The CLI allows you to manage and deploy the environments and services within your organization."),Object(l.b)("h3",{id:"application-container-lifecycle-cronjob"},"application, container, lifecycle, cronjob"),Object(l.b)("p",null,"These commands allow you to manage all these services via the CLI. 
You can run the following actions on these services:"),Object(l.b)("ul",null,Object(l.b)("li",{parentName:"ul"},"cancel: Cancel the service deployment"),Object(l.b)("li",{parentName:"ul"},"delete: Delete a service"),Object(l.b)("li",{parentName:"ul"},"deploy: Deploy a service"),Object(l.b)("li",{parentName:"ul"},"list: List the services of the specified type"),Object(l.b)("li",{parentName:"ul"},"redeploy: Redeploy a service (already deployed before)"),Object(l.b)("li",{parentName:"ul"},"stop: Stop a service"),Object(l.b)("li",{parentName:"ul"},"update: Update a service (service name, git branch, auto-deploy, ...)")),Object(l.b)("p",null,"Each action allows you to specify additional parameters to define the service you want to modify (you can find them via the --help command)."),Object(l.b)("p",null,"Example: Listing applications and triggering a deployment"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),'$ qovery application list\nName | Type | Status | Last Update \nbackend | Application | STOPPED | 2023-02-02 14:48:05.339652 +0000 UTC\nfront-end | Application | STOPPED | 2023-02-09 14:04:38.079792 +0000 UTC\n\n$ qovery application deploy -n "backend"\nDeploying application backend in progress..\n\n$ qovery application list\nName | Type | Status | Last Update \nbackend | Application | RUNNING | 2023-02-13 12:59:23.228231 +0000 UTC\nfront-end | Application | STOPPED | 2023-02-09 14:04:38.079792 +0000 UTC\n')),Object(l.b)("p",null,"Example: Enable the auto-deploy feature for an application"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery application update --application backend --auto-deploy true\nApplication backend updated!\n")),Object(l.b)("h3",{id:"environment"},"Environment"),Object(l.b)("p",null,"The command ",Object(l.b)("inlineCode",{parentName:"p"},"environment")," allows you to manage a specific environment via the CLI. 
You can run the following actions on environments:"),Object(l.b)("ul",null,Object(l.b)("li",{parentName:"ul"},"cancel: Cancel an environment deployment"),Object(l.b)("li",{parentName:"ul"},"clone: Clone an environment"),Object(l.b)("li",{parentName:"ul"},"delete: Delete an environment"),Object(l.b)("li",{parentName:"ul"},"deploy: Deploy an environment"),Object(l.b)("li",{parentName:"ul"},"list: List environments"),Object(l.b)("li",{parentName:"ul"},"redeploy: Redeploy an environment"),Object(l.b)("li",{parentName:"ul"},"stage: Manage deployment stages"),Object(l.b)("li",{parentName:"ul"},"stop: Stop an environment")),Object(l.b)("p",null,"Each action allows you to specify additional parameters to define the service you want to modify (you can find them via the --help command)"),Object(l.b)("p",null,"Example: Manage deployment stages and triggering deployment"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),'~ $ qovery environment stage list\n\n# deployment stage 1: "DATABASE DEFAULT"\nRename me to avoid default/legacy ordering\n\nType | Name\nDATABASE | Redis\nDATABASE | DB\n\n\n# deployment stage 2: "JOB DEFAULT"\nRename me to avoid default/legacy ordering\n\n\n\n\n# deployment stage 3: "CONTAINER DEFAULT"\nRename me to avoid default/legacy ordering\n\nType | Name\nCONTAINER | Rabbitmq\n\n\n# deployment stage 4: "APPLICATION DEFAULT"\nRename me to avoid default/legacy ordering\n\nType | Name\nAPPLICATION | Backend\nAPPLICATION | Frontend\nAPPLICATION | Pablo Backend App\nAPPLICATION | API gateway\n\n~ $ qovery environment deploy\nEnvironment is deploying!\n')),Object(l.b)("h3",{id:"projects"},"Projects"),Object(l.b)("p",null,"You can list the organization's projects by using the following command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery project list\n")),Object(l.b)("h2",{id:"access-your-qovery-managed-cluster"},"Access your Qovery-managed cluster"),Object(l.b)("p",null,"To access your Qovery managed cluster, follow these steps:"),Object(l.b)("ol",null,Object(l.b)("li",{parentName:"ol"},Object(l.b)("strong",{parentName:"li"},"Export your kubeconfig file")," by running the command below, replacing ",Object(l.b)("inlineCode",{parentName:"li"},"")," with your actual cluster ID:")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery cluster kubeconfig --cluster-id \n")),Object(l.b)("ol",{start:2},Object(l.b)("li",{parentName:"ol"},Object(l.b)("strong",{parentName:"li"},"Set the KUBECONFIG environment variable")," to the path of the kubeconfig file obtained from the previous command:")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"export KUBECONFIG=\n")),Object(l.b)("ol",{start:3},Object(l.b)("li",{parentName:"ol"},"You can now use tools like ",Object(l.b)("inlineCode",{parentName:"li"},"k9s")," or ",Object(l.b)("inlineCode",{parentName:"li"},"kubectl")," to access and manage your cluster:")),Object(l.b)(i.a,{type:"info",mdxType:"Alert"},Object(l.b)("p",null,"Some cloud providers like GCP or AWS require additional configuration to access the cluster. 
Make sure you have CLI binaries installed (gcloud CLI/AWS CLI/...), and the right permissions and credentials set up (environment variables or profile file).")),Object(l.b)("p",null,Object(l.b)("strong",{parentName:"p"},"Using k9s")),Object(l.b)("p",null,"Launch the k9s terminal UI to interact with your Kubernetes cluster:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"k9s\n")),Object(l.b)("p",null,Object(l.b)("strong",{parentName:"p"},"Using kubectl")),Object(l.b)("p",null,"Here are a few examples of common kubectl commands:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"kubectl get pods #List all pods in the default namespace\n")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"kubectl describe pod #Get detailed information about a specific pod\n")),Object(l.b)("h2",{id:"lock-cluster-updates"},"Lock cluster updates"),Object(l.b)("p",null,"The lock cluster command prevents any update or deployment from being initiated on a cluster while it is locked. Once a cluster is locked, no new updates can be processed until it is unlocked."),Object(l.b)("p",null,Object(l.b)("strong",{parentName:"p"},"Lock cluster")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery cluster lock --cluster-id --reason --ttl-in-days \n")),Object(l.b)("p",null,"Note that the TTL cannot be greater than 5 days."),Object(l.b)("p",null,Object(l.b)("strong",{parentName:"p"},"Unlock cluster")),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery cluster unlock --cluster-id \n")),Object(l.b)("p",null,Object(l.b)("strong",{parentName:"p"},"List locked clusters")),Object(l.b)("p",null,"You can list all the locked clusters within an organization by running this command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery cluster locked --organization-id \n")),Object(l.b)("h2",{id:"managing-the-deployment-pipeline"},"Managing the Deployment Pipeline"),Object(l.b)("p",null,"In the following sections, we describe how to modify the Deployment Pipeline.
"),Object(l.b)("h3",{id:"list-stages"},"List stages"),Object(l.b)("p",null,"You can list all the stages of your environment by using the following command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage list\n")),Object(l.b)("h3",{id:"add-a-stage"},"Add a stage"),Object(l.b)("p",null,"You can add a new stage by using the following command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage create -n -d \n")),Object(l.b)("p",null,"Note that the stage will be added at the end of the pipeline (the highest number)"),Object(l.b)("h3",{id:"modify-a-stage"},"Modify a stage"),Object(l.b)("p",null,"You can modify a stage by using the following command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage edit -n --new-name --new-description \n")),Object(l.b)("h3",{id:"delete-a-stage"},"Delete a stage"),Object(l.b)("p",null,"You can modify a stage by using the following command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage delete -n \n")),Object(l.b)("h3",{id:"change-stage-for-a-service"},"Change stage for a service"),Object(l.b)("p",null,"You can modify the stage associated to a service by using the following command:"),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage move -n --stage \n")),Object(l.b)("h2",{id:"static-token"},"Static token"),Object(l.b)("p",null,"You can use a static token to authenticate to Qovery CLI. Which is useful for CI/CD pipelines."),Object(l.b)("p",null,"Environment variables available to set static token:"),Object(l.b)("ul",null,Object(l.b)("li",{parentName:"ul"},Object(l.b)("inlineCode",{parentName:"li"},"QOVERY_CLI_ACCESS_TOKEN")),Object(l.b)("li",{parentName:"ul"},Object(l.b)("inlineCode",{parentName:"li"},"Q_CLI_ACCESS_TOKEN"))),Object(l.b)("pre",null,Object(l.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"export QOVERY_CLI_ACCESS_TOKEN=xxx\n\nqovery log --organization MyOrg --project MyProject --environment MyEnv --application MyApp\n# you will see the log output\n")),Object(l.b)("h2",{id:"support"},"Support"),Object(l.b)("p",null,"Do you have any issues with Qovery CLI? 
",Object(l.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/issues"}),"Open an issue"),"."))}d.isMDXComponent=!0},464:function(e,n,t){"use strict";t(466);var a=t(0),o=t.n(a),l=t(463),r=t.n(l);t(132);n.a=function(e){var n=e.children,t=e.classNames,a=e.fill,l=e.icon,c=e.type,i=null;switch(c){case"danger":i="alert-triangle";break;case"success":i="check-circle";break;case"warning":i="alert-triangle";break;default:i="info"}return o.a.createElement("div",{className:r()(t,"alert","alert--"+c,{"alert--fill":a,"alert--icon":!1!==l}),role:"alert"},!1!==l&&o.a.createElement("i",{className:r()("feather","icon-"+(l||i))}),n)}},477:function(e,n,t){"use strict";var a=t(1),o=(t(480),t(479),t(52),t(29),t(22),t(21),t(0)),l=t.n(o),r=t(483),c=t(463),i=t.n(c),s=t(471),b=t.n(s),p=t(482),u=37,d=39;function m(e){var n=e.block,t=e.centered,a=e.changeSelectedValue,o=e.className,r=e.handleKeydown,c=e.style,s=e.values,b=e.selectedValue,p=e.tabRefs;return l.a.createElement("div",{className:t?"tabs--centered":null},l.a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:i()("tabs",o,{"tabs--block":n}),style:c},s.map((function(e){var n=e.value,t=e.label;return l.a.createElement("li",{role:"tab",tabIndex:"0","aria-selected":b===n,className:i()("tab-item",{"tab-item--active":b===n}),key:n,ref:function(e){return p.push(e)},onKeyDown:function(e){return r(p,e.target,e)},onFocus:function(){return a(n)},onClick:function(){return a(n)}},t)}))))}function h(e){var n=e.placeholder,t=e.selectedValue,a=e.changeSelectedValue,o=e.size,c=e.values,i=c;if(i[0].group){var s=_.groupBy(i,"group");i=Object.keys(s).map((function(e){return{label:e,options:s[e]}}))}return l.a.createElement(r.a,{className:"react-select-container react-select--"+o,classNamePrefix:"react-select",options:i,isClearable:t,placeholder:n,value:c.find((function(e){return e.value==t})),onChange:function(e){return a(e?e.value:null)}})}n.a=function(e){e.block,e.centered;var n=e.children,t=e.defaultValue,r=e.groupId,c=e.label,i=e.placeholder,s=e.select,y=e.size,O=(e.style,e.values),j=e.urlKey,g=Object(p.a)(),v=g.tabGroupChoices,f=g.setTabGroupChoices,N=Object(o.useState)(t),w=N[0],x=N[1];if(null!=r){var C=v[r];null!=C&&C!==w&&x(C)}var T=function(e){x(e),null!=r&&f(r,e)},k=[],I=function(e,n,t){switch(t.keyCode){case d:!function(e,n){var t=e.indexOf(n)+1;e[t]?e[t].focus():e[0].focus()}(e,n);break;case u:!function(e,n){var t=e.indexOf(n)-1;e[t]?e[t].focus():e[e.length-1].focus()}(e,n)}};return Object(o.useEffect)((function(){if("undefined"!=typeof window&&window.location&&j){var e=b.a.parse(window.location.search);e[j]&&x(e[j])}}),[]),l.a.createElement(l.a.Fragment,null,l.a.createElement("div",{className:"margin-bottom--"+(y||"md")},c&&l.a.createElement("div",{className:"margin-vert--sm"},c),O.length>1&&(s?l.a.createElement(h,Object(a.a)({changeSelectedValue:T,handleKeydown:I,placeholder:i,selectedValue:w,size:y,tabRefs:k},e)):l.a.createElement(m,Object(a.a)({changeSelectedValue:T,handleKeydown:I,selectedValue:w,tabRefs:k},e)))),o.Children.toArray(n).filter((function(e){return e.props.value===w}))[0])}},478:function(e,n,t){"use strict";var a=t(0),o=t.n(a);n.a=function(e){return o.a.createElement(o.a.Fragment,null,e.children)}}}]);
\ No newline at end of file
diff --git a/d9a4c8ef.e82657d8.js b/d9a4c8ef.e82657d8.js
deleted file mode 100644
index 314cd73679..0000000000
--- a/d9a4c8ef.e82657d8.js
+++ /dev/null
@@ -1 +0,0 @@
-(window.webpackJsonp=window.webpackJsonp||[]).push([[264],{416:function(e,n,t){"use strict";t.r(n),t.d(n,"frontMatter",(function(){return s})),t.d(n,"metadata",(function(){return b})),t.d(n,"rightToc",(function(){return p})),t.d(n,"default",(function(){return d}));var a=t(1),o=t(9),r=(t(0),t(465)),l=t(477),c=t(478),i=t(464),s={last_modified_on:"2024-11-01",title:"CLI",description:"How to use the Qovery CLI (Command Line Interface)"},b={id:"using-qovery/interface/cli",title:"CLI",description:"How to use the Qovery CLI (Command Line Interface)",source:"@site/docs/using-qovery/interface/cli.md",permalink:"/docs/using-qovery/interface/cli",sidebar:"docs",previous:{title:"Web interface",permalink:"/docs/using-qovery/interface/web-interface"},next:{title:"REST API",permalink:"/docs/using-qovery/interface/rest-api"}},p=[{value:"First usage",id:"first-usage",children:[{value:"Install",id:"install",children:[]},{value:"Sign up",id:"sign-up",children:[]},{value:"Help",id:"help",children:[]}]},{value:"Context",id:"context",children:[{value:"Set New Context",id:"set-new-context",children:[]},{value:"Print Current Context",id:"print-current-context",children:[]}]},{value:"Log",id:"log",children:[{value:"Follow Logs",id:"follow-logs",children:[]}]},{value:"Status",id:"status",children:[]},{value:"Console",id:"console",children:[]},{value:"Shell",id:"shell",children:[{value:"Pass a command",id:"pass-a-command",children:[]},{value:"Shell in a dedicated pod",id:"shell-in-a-dedicated-pod",children:[]},{value:"Shell in a dedicated container",id:"shell-in-a-dedicated-container",children:[]}]},{value:"Port-forward",id:"port-forward",children:[{value:"Port-forward a dedicated pod",id:"port-forward-a-dedicated-pod",children:[]}]},{value:"Generate API token",id:"generate-api-token",children:[]},{value:"Managing services, environments and projects",id:"managing-services-environments-and-projects",children:[{value:"Environment",id:"environment",children:[]},{value:"Projects",id:"projects",children:[]}]},{value:"Access your Qovery-managed cluster",id:"access-your-qovery-managed-cluster",children:[]},{value:"Managing the Deployment Pipeline",id:"managing-the-deployment-pipeline",children:[{value:"List stages",id:"list-stages",children:[]},{value:"Add a stage",id:"add-a-stage",children:[]},{value:"Modify a stage",id:"modify-a-stage",children:[]},{value:"Delete a stage",id:"delete-a-stage",children:[]},{value:"Change stage for a service",id:"change-stage-for-a-service",children:[]}]},{value:"Static token",id:"static-token",children:[]},{value:"Support",id:"support",children:[]}],u={rightToc:p};function d(e){var n=e.components,t=Object(o.a)(e,["components"]);return Object(r.b)("wrapper",Object(a.a)({},u,t,{components:n,mdxType:"MDXLayout"}),Object(r.b)(i.a,{type:"success",mdxType:"Alert"},Object(r.b)("p",null,"Use Infrastructure as Code (IaC) with our ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"/docs/using-qovery/integration/terraform-provider/"}),"Terraform Provider")," and our ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"/docs/using-qovery/interface/rest-api/"}),"REST API")," to manage Qovery and deploy your apps.")),Object(r.b)("p",null,"Qovery provides a very easy to use CLI (Command Line Interface) designed to fit the developer workflow perfectly."),Object(r.b)("hr",null),Object(r.b)("p",null,"The purpose of the CLI is to integrate seamlessly with your development workflow:"),Object(r.b)("ol",null,Object(r.b)("li",{parentName:"ol"},"Write 
code"),Object(r.b)("li",{parentName:"ol"},"Commit"),Object(r.b)("li",{parentName:"ol"},Object(r.b)("strong",{parentName:"li"},"Qovery")," - deploy a new version of your application"),Object(r.b)("li",{parentName:"ol"},Object(r.b)("strong",{parentName:"li"},"Qovery CLI")," - check the status of your application"),Object(r.b)("li",{parentName:"ol"},Object(r.b)("strong",{parentName:"li"},"Qovery CLI")," - debug your application"),Object(r.b)("li",{parentName:"ol"},"Repeat")),Object(r.b)("h2",{id:"first-usage"},"First usage"),Object(r.b)("h3",{id:"install"},"Install"),Object(r.b)(l.a,{centered:!0,className:"rounded",defaultValue:"linux",placeholder:"Select your OS",select:!1,size:null,values:[{group:"Platforms",label:"Linux",value:"linux"},{group:"Platforms",label:"MacOS",value:"macos"},{group:"Platforms",label:"Windows",value:"windows"},{group:"Platforms",label:"Docker",value:"docker"}],mdxType:"Tabs"},Object(r.b)(c.a,{value:"linux",mdxType:"TabItem"},Object(r.b)(l.a,{centered:!0,className:"rounded",defaultValue:"universal",values:[{label:"*nix",value:"universal"},{label:"Arch Linux",value:"arch"},{label:"Manual",value:"manual"}],mdxType:"Tabs"},Object(r.b)(c.a,{value:"universal",mdxType:"TabItem"},Object(r.b)("p",null,"To download and install Qovery CLI on any Linux distribution:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ curl -s https://get.qovery.com | bash\n"))),Object(r.b)(c.a,{value:"arch",mdxType:"TabItem"},Object(r.b)("p",null,"Qovery is part of ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://aur.archlinux.org/packages"}),"AUR")," packages, so you can install it with ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Jguer/yay"}),"yay"),":"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ yay qovery-cli\n"))),Object(r.b)(c.a,{value:"manual",mdxType:"TabItem"},Object(r.b)("p",null,"Install the Qovery CLI on Linux manually by downloading the ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/releases"}),"latest release"),", and uncompress its content to a folder into your shell ",Object(r.b)("inlineCode",{parentName:"p"},"PATH"),".")))),Object(r.b)(c.a,{value:"macos",mdxType:"TabItem"},Object(r.b)(l.a,{centered:!0,className:"rounded",defaultValue:"homebrew",values:[{label:"Homebrew",value:"homebrew"},{label:"Script",value:"script"},{label:"Manual",value:"manual"}],mdxType:"Tabs"},Object(r.b)(c.a,{value:"homebrew",mdxType:"TabItem"},Object(r.b)("p",null,"The common solution to install a command line binary on the MacOS is to use ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://brew.sh/"}),"Homebrew"),"."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Add Qovery brew repository\n$ brew tap Qovery/qovery-cli\n\n# Install the CLI\n$ brew install qovery-cli\n"))),Object(r.b)(c.a,{value:"script",mdxType:"TabItem"},Object(r.b)("p",null,"To download and install Qovery CLI from the command line:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ curl -s https://get.qovery.com | bash\n"))),Object(r.b)(c.a,{value:"manual",mdxType:"TabItem"},Object(r.b)("p",null,"Install the Qovery CLI on Mac OS manually by downloading the ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/releases"}),"latest release"),", and uncompress its content to a 
folder into your shell ",Object(r.b)("inlineCode",{parentName:"p"},"PATH"),".")))),Object(r.b)(c.a,{value:"windows",mdxType:"TabItem"},Object(r.b)(l.a,{centered:!0,className:"rounded",defaultValue:"scoop",values:[{label:"Scoop",value:"scoop"},{label:"Manual",value:"manual"}],mdxType:"Tabs"},Object(r.b)(c.a,{value:"scoop",mdxType:"TabItem"},Object(r.b)("p",null,"The classic way to install binaries on Windows is to use ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://scoop.sh/"}),"Scoop"),"."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Add Qovery bucket\n$ scoop bucket add qovery https://github.com/Qovery/scoop-qovery-cli\n\n# Install the CLI\n$ scoop install qovery-cli\n"))),Object(r.b)(c.a,{value:"manual",mdxType:"TabItem"},Object(r.b)("p",null,"Install the Qovery CLI on Windows manually by downloading the ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/releases"}),"latest release"),", and uncompress its content to\n",Object(r.b)("inlineCode",{parentName:"p"},"C:\\Windows"),".")))),Object(r.b)(c.a,{value:"docker",mdxType:"TabItem"},Object(r.b)("p",null,"Install Docker on your local machine and run the following command:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Pull and Run the latest Qovery CLI\n$ docker run ghcr.io/qovery/qovery-cli:latest help\n")),Object(r.b)("p",null,"Change ",Object(r.b)("inlineCode",{parentName:"p"},"latest")," by the version you want to use. For example, to use the version 0.58.4, run:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ docker run ghcr.io/qovery/qovery-cli:0.58.4 help\n")),Object(r.b)("p",null,"Note: ",Object(r.b)("inlineCode",{parentName:"p"},"ghcr.io")," is the ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/pkgs/container/qovery-cli"}),"GitHub Container Registry"),"."))),Object(r.b)("h3",{id:"sign-up"},"Sign up"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Sign up and sign in command\n$ qovery auth\n")),Object(r.b)(i.a,{type:"info",mdxType:"Alert"},Object(r.b)("p",null,"If you are using a headless (without GUI) environment, you can use the following command to sign up and sign in:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"# Sign up and sign in command\n$ qovery auth --headless\n"))),Object(r.b)("p",null,"Your browser window with Qovery sign-up page will open. 
Follow the instructions to sign up and sign in."),Object(r.b)("h3",{id:"help"},"Help"),Object(r.b)("p",null,"You can see all the commands available by executing:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery help\n")),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash",metastring:'title="Help output"',title:'"Help','output"':!0}),'$ qovery help\nA Command-line Interface of the Qovery platform\n\nUsage:\n qovery [command]\n\nAvailable Commands:\n application Manage applications\n auth Log in to Qovery\n cluster Manage clusters\n completion Generate the autocompletion script for the specified shell\n console Opens the application in Qovery Console in your browser\n container Manage containers\n context Manage CLI context\n cronjob Manage cronjobs\n database Manage databases\n env Manage Environment Variables and Secrets\n environment Manage environments\n helm Manage helms\n help Help about any command\n lifecycle Manage lifecycle jobs\n list-pods List the pods of a service with their pods\n log Print your application logs\n port-forward Port forward a port to an application container\n project Manage Project\n service Manage services\n shell Connect to an application container\n status Print the status of your application\n token Generate an API token\n upgrade Upgrade Qovery CLI to latest version\n version Print installed version of the Qovery CLI\n\nFlags:\n -h, --help help for qovery\n --verbose Verbose output\n\nUse "qovery [command] --help" for more information about a command.\n')),Object(r.b)("h2",{id:"context"},"Context"),Object(r.b)("p",null,"Context command lets you configure the CLI to work with your chosen application. Before executing other commands, you need first to set up the context.\nThe context is then remembered and used by the CLI. 
You can configure a new context anytime by running the ",Object(r.b)("inlineCode",{parentName:"p"},"qovery context set")," command."),Object(r.b)("p",null,"Most of the commands support an inline context set allowing you to directly pass the URL of the application you wants to interact with."),Object(r.b)("p",null,"Example:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell https://console.qovery.com/organization/51927012-8377-4e0f-84cf-7f5f38a0154b/project/a6545d50-69a3-4966-89cc-4c0bfb6d3448/environment/c9ac549b-a855-4d3b-b652-d68d5f1fea11/application/820ca0a3-08bf-42c1-8ad2-540714ad657f/general\n# this is the url of my back-end application\n\nOrganization | My orga\nProject | R&D / Backend\nEnvironment | prod\nServiceLevel | back-end\nServiceType | application\n\n$ ls\n...\n")),Object(r.b)("h3",{id:"set-new-context"},"Set New Context"),Object(r.b)("p",null,"To set a new context, type ",Object(r.b)("inlineCode",{parentName:"p"},"qovery context set"),":"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery context set\nQovery: Current context:\nOrganization | Qovery\nProject | test\nEnvironment | development\nApplication | website\n\nQovery: Select new context\nOrganization:\n\u2714 Qovery\nProject:\n\u2714 admin\nEnvironment:\n\u2714 main\nApplication:\n\u2714 app\n\nQovery: New context:\nOrganization | Qovery\nProject | admin\nEnvironment | main\nApplication | app\n")),Object(r.b)("h3",{id:"print-current-context"},"Print Current Context"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery context\nQovery: Current context:\nOrganization | Qovery\nProject | admin\nEnvironment | main\nApplication | app\n\nQovery: You can set a new context using 'qovery context set'.\n")),Object(r.b)("h2",{id:"log"},"Log"),Object(r.b)("p",null,"Log command allows you to display the application logs."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery log\n TIME MESSAGE\n Jul 15 08:46:13.019717 at /usr/src/app/autoFunctions/levels.js:17:16\n Jul 15 08:46:13.019721 at Array.forEach ()\n Jul 15 08:46:13.019724 at Timeout._onTimeout (/usr/src/app/autoFunctions/levels.js:15:14)\n Jul 15 08:46:13.019728 at listOnTimeout (internal/timers.js:557:17)\n # ... the rest of logs\n")),Object(r.b)("p",null,"By default, the last 1000 logs is displayed."),Object(r.b)("h3",{id:"follow-logs"},"Follow Logs"),Object(r.b)("p",null,"To make the CLI follow your logs, use ",Object(r.b)("inlineCode",{parentName:"p"},"-f")," flag:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery log -f\n TIME MESSAGE\n Jul 15 08:46:13.019717 at /usr/src/app/autoFunctions/levels.js:17:16\n Jul 15 08:46:13.019721 at Array.forEach ()\n Jul 15 08:46:13.019724 at Timeout._onTimeout (/usr/src/app/autoFunctions/levels.js:15:14)\n Jul 15 08:46:13.019728 at listOnTimeout (internal/timers.js:557:17)\n # ... 
the rest of logs\n")),Object(r.b)("p",null,"This will make the CLI follow your application logs and append any new logs till you use ",Object(r.b)("inlineCode",{parentName:"p"},"CTRL+C"),"."),Object(r.b)("h2",{id:"status"},"Status"),Object(r.b)("p",null,"Status command lets you print the basic status of your application."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery status\n15 Jul 21 10:55 CEST\nApplication | Backend\nStatus | RUNNING\n")),Object(r.b)("h2",{id:"console"},"Console"),Object(r.b)("p",null,"Console command quickly opens the Qovery Console in your browser to let you display more information about your application."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery console\nQovery: Opening https://console.qovery.com/platform/organization/your-org/projects/your-proj/environments/your-env/applications/your-app/summary\n")),Object(r.b)("h2",{id:"shell"},"Shell"),Object(r.b)("p",null,"Shell command allows you to open a connection and execute commands directly on the container running application."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell\n/ # ls\nbin media srv\ndev mnt sys\ndocker-entrypoint.d opt tmp\ndocker-entrypoint.sh proc usr\netc root var\nhome run www\nlib sbin\n")),Object(r.b)(i.a,{type:"info",mdxType:"Alert"},"Keep in mind these limitations when using this feature:",Object(r.b)("ul",null,Object(r.b)("li",null,"Install a process reaper as pid one in your container (i.e: dumb-init), as you may leave zoombie process in your container if your shell terminate unproperly (i.e: ctrl+c, cnx restart). This is a known issue with kubernetes exec to leave process alive after attach is closed;"),Object(r.b)("li",null,"shell is force closed after [1 hour, 1GB transmitted];"),Object(r.b)("li",null,"we use SH by default. To have auto-completion, start bash."))),Object(r.b)(i.a,{type:"info",mdxType:"Alert"},"The width of the terminal is limited to 80 characters. 
But you can resize it once you are inside the application with one of these commands:",Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"COLUMNS=200 tput init\nstty cols 200\n"))),Object(r.b)("h3",{id:"pass-a-command"},"Pass a command"),Object(r.b)("p",null,"To pass a command, you can use the ",Object(r.b)("inlineCode",{parentName:"p"},"--command")," or ",Object(r.b)("inlineCode",{parentName:"p"},"-c")," argument followed by your command."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --command ls\nbin media srv\ndev mnt sys\ndocker-entrypoint.d opt tmp\ndocker-entrypoint.sh proc usr\netc root var\nhome run www\nlib sbin\n")),Object(r.b)("p",null,"To pass several arguments, you can separate them with a comma or send different ",Object(r.b)("inlineCode",{parentName:"p"},"--command"),"."),Object(r.b)("p",null,Object(r.b)("inlineCode",{parentName:"p"},"qovery shell --command ls --command -l"),"\n",Object(r.b)("inlineCode",{parentName:"p"},"qovery shell --command ls,-l"),"\n",Object(r.b)("inlineCode",{parentName:"p"},"qovery shell -c ls,-l")),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --command ls --command -l\ndrwxr-xr-x 2 root root 4096 Nov 30 09:32 bin\ndrwxr-xr-x 5 root root 360 Dec 21 09:46 dev\ndrwxr-xr-x 1 root root 41 Dec 20 20:13 docker-entrypoint.d\n-rwxr-xr-x 1 root root 1620 Dec 20 20:13 docker-entrypoint.sh\ndrwxr-xr-x 1 root root 25 Dec 21 09:46 etc\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 home\ndrwxr-xr-x 1 root root 61 Dec 20 22:11 lib\ndrwxr-xr-x 5 root root 44 Nov 30 09:32 media\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 mnt\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 opt\ndr-xr-xr-x 209 root root 0 Dec 21 09:46 proc\ndrwx------ 1 root root 26 Dec 21 10:38 root\ndrwxr-xr-x 1 root root 23 Dec 21 09:46 run\ndrwxr-xr-x 2 root root 4096 Nov 30 09:32 sbin\ndrwxr-xr-x 2 root root 6 Nov 30 09:32 srv\ndr-xr-xr-x 13 root root 0 Dec 21 09:46 sys\ndrwxrwxrwt 2 root root 6 Nov 30 09:32 tmp\ndrwxr-xr-x 1 root root 66 Nov 30 09:32 usr\ndrwxr-xr-x 1 root root 19 Nov 30 09:32 var\ndrwxr-xr-x 2 root root 59 Dec 21 09:45 www\n")),Object(r.b)("h3",{id:"shell-in-a-dedicated-pod"},"Shell in a dedicated pod"),Object(r.b)("p",null,"If your application is running on several pods, you can shell directly in a dedicated one by using the ",Object(r.b)("inlineCode",{parentName:"p"},"--pod")," or ",Object(r.b)("inlineCode",{parentName:"p"},"-p")," argument followed by your pod name."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --pod app-5f65fb5c4-frontend-5f65db5c4b-q4w11\n")),Object(r.b)("p",null,"NOTE: you can get the list of pods by running the ",Object(r.b)("inlineCode",{parentName:"p"},"qovery list-pods")," command."),Object(r.b)("h3",{id:"shell-in-a-dedicated-container"},"Shell in a dedicated container"),Object(r.b)("p",null,"If you have several containers in your pod, you can shell directly in a dedicated one by using the ",Object(r.b)("inlineCode",{parentName:"p"},"--container")," argument followed by your container name."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery shell --container app-5f65fb5c4-frontend\n")),Object(r.b)("h2",{id:"port-forward"},"Port-forward"),Object(r.b)("p",null,"Port-forward command allows you to port-forward all the traffic on your local machine 
to a remote resource available on a Qovery environment. This mechanism allows developers to create a secure, encrypted tunnel from their local machine to the application or databases hosted in the cloud. "),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),'$ qovery port-forward -p 8000:80 #your_local_port:your_remote_port\nInfo: Current context:\nOrganization | Qovery Prod\nProject | R&D / Frontend\nEnvironment | prod\nService | console\nType | application\n\nInfo: Continue with port-forward command using this context ?\nPlease type "yes" to validate context: yes\n\nListening on 127.0.0.1:8000 => 80\n')),Object(r.b)("p",null,"The port-forward feature works with any ",Object(r.b)("inlineCode",{parentName:"p"},"application"),", ",Object(r.b)("inlineCode",{parentName:"p"},"Cronjob"),", ",Object(r.b)("inlineCode",{parentName:"p"},"Lifecycle job")," or ",Object(r.b)("inlineCode",{parentName:"p"},"database")," (Container or Managed) deployed with Qovery. For ",Object(r.b)("inlineCode",{parentName:"p"},"Managed database")," instances on AWS, once the port-forwarded is activated, you must specify ~ ",Object(r.b)("inlineCode",{parentName:"p"},"--tls")," and ",Object(r.b)("inlineCode",{parentName:"p"},"--tls-insecure")," in your database connection command since localhost is not the valid hostname."),Object(r.b)("h3",{id:"port-forward-a-dedicated-pod"},"Port-forward a dedicated pod"),Object(r.b)("p",null,"If your application is running on several pods, you can port-forward to a dedicated one by using the ",Object(r.b)("inlineCode",{parentName:"p"},"--pod")," argument followed by your pod name."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery port-forward -p 8000:80 -pod app-5f65fb5c4-frontend-5f65db5c4b-q4w11\n")),Object(r.b)("p",null,"NOTE: you can get the list of pods by running the ",Object(r.b)("inlineCode",{parentName:"p"},"qovery list-pods")," command."),Object(r.b)("h2",{id:"generate-api-token"},"Generate API token"),Object(r.b)("p",null,"To use the Qovery API you will need to generate an authentication token. 
To generate an API token you can install the CLI and type"),Object(r.b)(i.a,{type:"warning",mdxType:"Alert"},Object(r.b)("p",null,"Never share your API token with anyone.")),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery token\n\nQovery: Select organization\nOrganization:\n\u2714 My Organization\nChoose a token name\nToken name: Romaric\nChoose a token description\nToken description: used for Github Actions\nQovery: ---- Never share this authentication token and keep it secure ----\nQovery: qov_4LnEg2wFxxxxxHObGSQ22rjBZZyyyySgyR6Y_2500882691\nQovery: ---- Never share this authentication token and keep it secure ----\n")),Object(r.b)("p",null,"To use your token and list your organizations."),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-shell"}),"curl -X GET -H 'Authorization: Token qov_4LnEg2wFxxxxxHObGSQ22rjBZZyyyySgyR6Y_2500882691' https://api.qovery.com/organization\n")),Object(r.b)(i.a,{type:"info",mdxType:"Alert"},Object(r.b)("p",null,"The token can be used to interact programmatically with our API (directly, via our Terraform Provider etc..).\nIf you get a 424 error while trying to create new applications from one of your git repository, please make sure that the Organization Owner has access to the repository you are configuring for your app.")),Object(r.b)("p",null,"Check out our ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://api-doc.qovery.com"}),"API documentation")),Object(r.b)("h2",{id:"managing-services-environments-and-projects"},"Managing services, environments and projects"),Object(r.b)("p",null,"The CLI allows you to manage and deploy the environment and services within your organization"),Object(r.b)("p",null,"###\xa0application, container, lifecycle, cronjob\nThese commands allow you to manage all these services via the CLI. 
You can run the following actions on these services:"),Object(r.b)("ul",null,Object(r.b)("li",{parentName:"ul"},"cancel: Cancel the service deployment"),Object(r.b)("li",{parentName:"ul"},"delete: Delete a service"),Object(r.b)("li",{parentName:"ul"},"deploy: Deploy a service"),Object(r.b)("li",{parentName:"ul"},"list: List the service of the specified type"),Object(r.b)("li",{parentName:"ul"},"redeploy: Redeploy a service (already deployed before)"),Object(r.b)("li",{parentName:"ul"},"stop: Stop a service"),Object(r.b)("li",{parentName:"ul"},"update: Update a service (service name, git branch, auto-deploy, ...)")),Object(r.b)("p",null,"Each action allows you to specify additional parameters to define the service you want to modify (you can find them via the --help command) "),Object(r.b)("p",null,"Example: Listing applications and triggering a deployment"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),'$ qovery application list\nName | Type | Status | Last Update \nbackend | Application | STOPPED | 2023-02-02 14:48:05.339652 +0000 UTC\nfront-end | Application | STOPPED | 2023-02-09 14:04:38.079792 +0000 UTC\n\n$ qovery application deploy -n "backend"\nDeploying application backend in progress..\n\n$ qovery application list\nName | Type | Status | Last Update \nbackend | Application | RUNNING | 2023-02-13 12:59:23.228231 +0000 UTC\nfront-end | Application | STOPPED | 2023-02-09 14:04:38.079792 +0000 UTC\n')),Object(r.b)("p",null,"Example: Enable the auto-deploy feature for an application"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"$ qovery application update --application backend --auto-deploy true\nApplication backend updated!\n")),Object(r.b)("h3",{id:"environment"},"Environment"),Object(r.b)("p",null,"The command ",Object(r.b)("inlineCode",{parentName:"p"},"environment")," allow you to manage a specific environment via the CLI. 
You can run the following actions on environments:"),Object(r.b)("ul",null,Object(r.b)("li",{parentName:"ul"},"cancel: Cancel an environment deployment"),Object(r.b)("li",{parentName:"ul"},"clone: Clone an environment"),Object(r.b)("li",{parentName:"ul"},"delete: Delete an environment"),Object(r.b)("li",{parentName:"ul"},"deploy: Deploy an environment"),Object(r.b)("li",{parentName:"ul"},"list: List environments"),Object(r.b)("li",{parentName:"ul"},"redeploy: Redeploy an environment"),Object(r.b)("li",{parentName:"ul"},"stage: Manage deployment stages"),Object(r.b)("li",{parentName:"ul"},"stop: Stop an environment")),Object(r.b)("p",null,"Each action allows you to specify additional parameters to define the service you want to modify (you can find them via the --help command)"),Object(r.b)("p",null,"Example: Manage deployment stages and triggering deployment"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),'~ $ qovery environment stage list\n\n# deployment stage 1: "DATABASE DEFAULT"\nRename me to avoid default/legacy ordering\n\nType | Name\nDATABASE | Redis\nDATABASE | DB\n\n\n# deployment stage 2: "JOB DEFAULT"\nRename me to avoid default/legacy ordering\n\n\n\n\n# deployment stage 3: "CONTAINER DEFAULT"\nRename me to avoid default/legacy ordering\n\nType | Name\nCONTAINER | Rabbitmq\n\n\n# deployment stage 4: "APPLICATION DEFAULT"\nRename me to avoid default/legacy ordering\n\nType | Name\nAPPLICATION | Backend\nAPPLICATION | Frontend\nAPPLICATION | Pablo Backend App\nAPPLICATION | API gateway\n\n~ $ qovery environment deploy\nEnvironment is deploying!\n')),Object(r.b)("h3",{id:"projects"},"Projects"),Object(r.b)("p",null,"You can list the organization's projects by using the following command:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery project list\n")),Object(r.b)("h2",{id:"access-your-qovery-managed-cluster"},"Access your Qovery-managed cluster"),Object(r.b)("p",null,"To access your Qovery managed cluster, follow these steps:"),Object(r.b)("ol",null,Object(r.b)("li",{parentName:"ol"},Object(r.b)("strong",{parentName:"li"},"Export your kubeconfig file")," by running the command below, replacing ",Object(r.b)("inlineCode",{parentName:"li"},"")," with your actual cluster ID:")),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery cluster kubeconfig --cluster-id \n")),Object(r.b)("ol",{start:2},Object(r.b)("li",{parentName:"ol"},Object(r.b)("strong",{parentName:"li"},"Set the KUBECONFIG environment variable")," to the path of the kubeconfig file obtained from the previous command:")),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"export KUBECONFIG=\n")),Object(r.b)("ol",{start:3},Object(r.b)("li",{parentName:"ol"},"You can now use tools like ",Object(r.b)("inlineCode",{parentName:"li"},"k9s")," or ",Object(r.b)("inlineCode",{parentName:"li"},"kubectl")," to access and manage your cluster:")),Object(r.b)(i.a,{type:"info",mdxType:"Alert"},Object(r.b)("p",null,"Some cloud providers like GCP or AWS require additional configuration to access the cluster. 
Make sure you have CLI binaries installed (gcloud CLI/AWS CLI/...), and the right permissions and credentials set up (environment variables or profile file).")),Object(r.b)("p",null,Object(r.b)("strong",{parentName:"p"},"Using k9s")),Object(r.b)("p",null,"Launch the k9s terminal UI to interact with your Kubernetes cluster:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"k9s\n")),Object(r.b)("p",null,Object(r.b)("strong",{parentName:"p"},"Using kubectl")),Object(r.b)("p",null,"Here are a few examples of common kubectl commands:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"kubectl get pods #List all pods in the default namespace\n")),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"kubectl describe pod #Get detailed information about a specific pod\n")),Object(r.b)("h2",{id:"managing-the-deployment-pipeline"},"Managing the Deployment Pipeline"),Object(r.b)("p",null,"In the following sections we will describe how to modify the Deployment Pipeline. "),Object(r.b)("h3",{id:"list-stages"},"List stages"),Object(r.b)("p",null,"You can list all the stages of your environment by using the following command:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage list\n")),Object(r.b)("h3",{id:"add-a-stage"},"Add a stage"),Object(r.b)("p",null,"You can add a new stage by using the following command:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage create -n -d \n")),Object(r.b)("p",null,"Note that the stage will be added at the end of the pipeline (the highest number)"),Object(r.b)("h3",{id:"modify-a-stage"},"Modify a stage"),Object(r.b)("p",null,"You can modify a stage by using the following command:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage edit -n --new-name --new-description \n")),Object(r.b)("h3",{id:"delete-a-stage"},"Delete a stage"),Object(r.b)("p",null,"You can modify a stage by using the following command:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage delete -n \n")),Object(r.b)("h3",{id:"change-stage-for-a-service"},"Change stage for a service"),Object(r.b)("p",null,"You can modify the stage associated to a service by using the following command:"),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"qovery environment stage move -n --stage \n")),Object(r.b)("h2",{id:"static-token"},"Static token"),Object(r.b)("p",null,"You can use a static token to authenticate to Qovery CLI. 
Which is useful for CI/CD pipelines."),Object(r.b)("p",null,"Environment variables available to set static token:"),Object(r.b)("ul",null,Object(r.b)("li",{parentName:"ul"},Object(r.b)("inlineCode",{parentName:"li"},"QOVERY_CLI_ACCESS_TOKEN")),Object(r.b)("li",{parentName:"ul"},Object(r.b)("inlineCode",{parentName:"li"},"Q_CLI_ACCESS_TOKEN"))),Object(r.b)("pre",null,Object(r.b)("code",Object(a.a)({parentName:"pre"},{className:"language-bash"}),"export QOVERY_CLI_ACCESS_TOKEN=xxx\n\nqovery log --organization MyOrg --project MyProject --environment MyEnv --application MyApp\n# you will see the log output\n")),Object(r.b)("h2",{id:"support"},"Support"),Object(r.b)("p",null,"Do you have any issues with Qovery CLI? ",Object(r.b)("a",Object(a.a)({parentName:"p"},{href:"https://github.com/Qovery/qovery-cli/issues"}),"Open an issue"),"."))}d.isMDXComponent=!0},464:function(e,n,t){"use strict";t(466);var a=t(0),o=t.n(a),r=t(463),l=t.n(r);t(132);n.a=function(e){var n=e.children,t=e.classNames,a=e.fill,r=e.icon,c=e.type,i=null;switch(c){case"danger":i="alert-triangle";break;case"success":i="check-circle";break;case"warning":i="alert-triangle";break;default:i="info"}return o.a.createElement("div",{className:l()(t,"alert","alert--"+c,{"alert--fill":a,"alert--icon":!1!==r}),role:"alert"},!1!==r&&o.a.createElement("i",{className:l()("feather","icon-"+(r||i))}),n)}},477:function(e,n,t){"use strict";var a=t(1),o=(t(480),t(479),t(52),t(29),t(22),t(21),t(0)),r=t.n(o),l=t(483),c=t(463),i=t.n(c),s=t(471),b=t.n(s),p=t(482),u=37,d=39;function m(e){var n=e.block,t=e.centered,a=e.changeSelectedValue,o=e.className,l=e.handleKeydown,c=e.style,s=e.values,b=e.selectedValue,p=e.tabRefs;return r.a.createElement("div",{className:t?"tabs--centered":null},r.a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:i()("tabs",o,{"tabs--block":n}),style:c},s.map((function(e){var n=e.value,t=e.label;return r.a.createElement("li",{role:"tab",tabIndex:"0","aria-selected":b===n,className:i()("tab-item",{"tab-item--active":b===n}),key:n,ref:function(e){return p.push(e)},onKeyDown:function(e){return l(p,e.target,e)},onFocus:function(){return a(n)},onClick:function(){return a(n)}},t)}))))}function h(e){var n=e.placeholder,t=e.selectedValue,a=e.changeSelectedValue,o=e.size,c=e.values,i=c;if(i[0].group){var s=_.groupBy(i,"group");i=Object.keys(s).map((function(e){return{label:e,options:s[e]}}))}return r.a.createElement(l.a,{className:"react-select-container react-select--"+o,classNamePrefix:"react-select",options:i,isClearable:t,placeholder:n,value:c.find((function(e){return e.value==t})),onChange:function(e){return a(e?e.value:null)}})}n.a=function(e){e.block,e.centered;var n=e.children,t=e.defaultValue,l=e.groupId,c=e.label,i=e.placeholder,s=e.select,y=e.size,O=(e.style,e.values),g=e.urlKey,j=Object(p.a)(),v=j.tabGroupChoices,f=j.setTabGroupChoices,N=Object(o.useState)(t),w=N[0],x=N[1];if(null!=l){var C=v[l];null!=C&&C!==w&&x(C)}var T=function(e){x(e),null!=l&&f(l,e)},I=[],k=function(e,n,t){switch(t.keyCode){case d:!function(e,n){var t=e.indexOf(n)+1;e[t]?e[t].focus():e[0].focus()}(e,n);break;case u:!function(e,n){var t=e.indexOf(n)-1;e[t]?e[t].focus():e[e.length-1].focus()}(e,n)}};return Object(o.useEffect)((function(){if("undefined"!=typeof window&&window.location&&g){var 
e=b.a.parse(window.location.search);e[g]&&x(e[g])}}),[]),r.a.createElement(r.a.Fragment,null,r.a.createElement("div",{className:"margin-bottom--"+(y||"md")},c&&r.a.createElement("div",{className:"margin-vert--sm"},c),O.length>1&&(s?r.a.createElement(h,Object(a.a)({changeSelectedValue:T,handleKeydown:k,placeholder:i,selectedValue:w,size:y,tabRefs:I},e)):r.a.createElement(m,Object(a.a)({changeSelectedValue:T,handleKeydown:k,selectedValue:w,tabRefs:I},e)))),o.Children.toArray(n).filter((function(e){return e.props.value===w}))[0])}},478:function(e,n,t){"use strict";var a=t(0),o=t.n(a);n.a=function(e){return o.a.createElement(o.a.Fragment,null,e.children)}}}]);
\ No newline at end of file
diff --git a/docs/getting-started/basic-concepts/index.html b/docs/getting-started/basic-concepts/index.html
index 626ddd8f53..a696d586e0 100644
--- a/docs/getting-started/basic-concepts/index.html
+++ b/docs/getting-started/basic-concepts/index.html
@@ -26,9 +26,9 @@
-
+
-
+
@@ -50,14 +50,14 @@
An Organization is the workspace where devops and developers can collaborate across many projects at once and it usually corresponds to your company. A user can have access to one or more organizations and have different roles & permissions assigned within it thanks to our RBAC system.
At Qovery, when we refer to Cluster, we mean Kubernetes cluster. A Kubernetes cluster is a collection of node machines that allows you to run containerized applications.
A Managed Cluster is a Kubernetes cluster managed by Qovery. It means that Qovery will create the cluster for you and will take care of the cluster lifecycle (creation, upgrade, deletion etc..). Zero maintenance for you.
A Self-Managed Cluster is a Kubernetes cluster managed by you. It means that you have to create the cluster yourself and you have to take care of the cluster lifecycle (creation, upgrade, deletion etc..). You can install Qovery on your cluster to let Qovery manage the deployment of your applications on your cluster.
A Project allows you to group together a set of services interacting with each other to serve a common purpose. For example, you can have one project to run your main application (composed of a front-end, a back-end and a db) and another project to manage your internal tools.
Services can then be organized into environments so that you can have different versions of the same service running within your project (production, staging, a fix for issue X etc..)
One organization can have more than one project and you can customize the access to your project thanks to our RBAC system.
An Environment allows you to group together a set of services having a specific version, usually based on a branch of your repository. For example, you can have one Production environment (all the services pointing to the main branch), one Staging environment (all services pointing to the staging branch) etc..
Your production environment runs 24/7 while your other environments may not need to run all day long. By setting a Deployment Rule on your environment you can automatically start/stop your non-production environments and thus reduce your cloud provider bill.
Environments let you choose which cluster your services should be deployed on.
A Preview Environment is an ephemeral environment allowing you to get early feedback on your application changes before the changes are merged into production. A dedicated preview environment can be automatically created at each new PR on your repository to validate the change. The environment is automatically deleted once the PR is merged or closed.
A Service is the basic unit that you can add to an environment, representing one of the elements of your tech stack (front-end, database etc..).
Qovery provides five "basic" services that can be combined together to deploy any tech stack:
Application: it allows you to run your long-running workloads on your Kubernetes cluster. It can be deployed from a git repository or as a container from an image registry. More information about Applications here
Database: it allows you to deploy a database. Qovery allows you to deploy a database as a container on your Kubernetes cluster (for test/dev) and as a cloud provider managed version (RDS AWS etc..). More information about Databases here
CronJob: it allows you to deploy a cronjob on your Kubernetes cluster and execute it based on the selected schedule. More information about Cronjob here
Lifecycle: it allows you to execute code based on the events happening on your environment (Deploy, Stop, Delete etc..). It is flexible enough to cover multiple use cases: seed your database when the environment is created, manage the lifecycle of any external resource (via a terraform file, pulumi code etc..). More information about Lifecycle here
Helm: it allows you to deploy a helm chart on your Kubernetes cluster. More information about Helm here
On top of these basic services, Qovery provides a pre-set of configuration to simplify the deployment of your tech stack.
A Deployment is the operation allowing you to gather your code and make it run on your cluster. Qovery can pull your repository, generate a Docker image and spawn the necessary resources on your clusters to make your application run. You can find more information within this section.
You can monitor the execution of the deployment via the Deployment Logs while you can monitor the execution of your application thanks to the streamed Live Logs directly from the Qovery interface.
This guide will help you to create your Amazon Web Services (AWS) credentials for Qovery. Those credentials will be used to create a Kubernetes cluster, a dedicated VPC and a few services on your AWS account. Refer to our Infrastructure page to learn more about the infrastructure created by Qovery.
Before you begin, this page assumes the following:
The default name required by Qovery is Admins. If you want to use another name, you have to change the cluster advanced settings aws.iam.admin_group BEFORE launching the cluster installation process
Well done!! You now have your AWS access key id and secret access key and your permissions are set up; it is time to connect Qovery to your AWS account.
You will be able to use the credentials you just generated when creating a cluster via the Qovery console. This cluster will be linked to your Qovery organization.
-Follow this documentation to create a new cluster on your organization.
Qovery is an abstraction layer on top of AWS and Kubernetes. Qovery manages the configuration of your AWS account and helps you to deploy production-ready apps in seconds.
-To make this work, Qovery relies on Kubernetes for stateless apps (containers) and AWS for stateful apps (databases, storage...).
The first time you set up your AWS account, Qovery creates a Kubernetes cluster in your chosen region. Qovery manages it for you - no action required. It takes ~15 minutes to configure and bootstrap a Kubernetes cluster. Once bootstrapped, your Kubernetes cluster runs the Qovery app and is ready to deploy your applications.
AWS provides managed services for PostgreSQL, MySQL, Redis, MongoDB. Qovery gives you access to those services when you set the environment mode to Production. In Development mode, Qovery provides container equivalents, which are cheaper and faster to start.
Different datacenters are located in different geographic areas, and you may want to keep your site physically close to the bulk of your user base for reduced latency.
-
+
-
+
diff --git a/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/infrastructure/index.html b/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/infrastructure/index.html
index 0c02a52744..d1d2a64e5e 100644
--- a/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/infrastructure/index.html
+++ b/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/infrastructure/index.html
@@ -26,9 +26,9 @@
-
+
-
+
@@ -54,14 +54,14 @@
by yourself by following the guide "I don't have Qovery access anymore, how could I delete Qovery deployed resources on my AWS account?"
in this section.
Qovery requires IAM permissions to create, update and manage the infrastructure.
IAM is used to create IAM roles
S3 is used to store our generated configuration files
CloudWatch, for creating a log group stream for each Kubernetes cluster
Autoscaling for RDS and autoscaling rules for the Kubernetes cluster
Elastic load-balancing for ELB / ALB / NLB.
DynamoDB to have a distributed lock on infrastructure deployment.
ECR for managing the container registry (create/update/delete repositories).
KMS to load and store keys (RDS, SSH, …)
EKS to create and update the Kubernetes cluster.
Minimum IAM permission set
Last update: 2023-06-08
This is purely informative, and we strongly recommend that you do NOT use this configuration within your IAM permissions, since it might not reflect the latest product updates. Please use the one provided in the section above.
Below you can find the minimum permission set required by Qovery to run and deploy your applications.
Policy lengths are limited depending on the object they're attached to, but the one Qovery needs represents more than the maximum (~6,000 characters).
In order to set it up, you need to create two IAM groups, each with one of the following policies.
Then create a user and add it to each of the previously created groups.
Once it’s done, the user’s access key and secret key can be used in Qovery.
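As a rough sketch of those steps with the AWS CLI (every group, policy and user name below is hypothetical, and policy-1.json / policy-2.json stand in for the two policy documents):
$ aws iam create-policy --policy-name qovery-policy-1 --policy-document file://policy-1.json
$ aws iam create-policy --policy-name qovery-policy-2 --policy-document file://policy-2.json
$ aws iam create-group --group-name qovery-group-1
$ aws iam create-group --group-name qovery-group-2
$ aws iam attach-group-policy --group-name qovery-group-1 --policy-arn arn:aws:iam::<account-id>:policy/qovery-policy-1
$ aws iam attach-group-policy --group-name qovery-group-2 --policy-arn arn:aws:iam::<account-id>:policy/qovery-policy-2
$ aws iam create-user --user-name qovery-user
$ aws iam add-user-to-group --group-name qovery-group-1 --user-name qovery-user
$ aws iam add-user-to-group --group-name qovery-group-2 --user-name qovery-user
$ aws iam create-access-key --user-name qovery-user   # returns the access key id and secret access key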
Install Qovery on your AWS account in less than 30 minutes. Qovery will create a Kubernetes cluster for you and manage it for you. To install Qovery on an existing Kubernetes cluster, please refer to the dedicated documentation.
Before you begin, this page assumes the following:
Now you can create your Kubernetes cluster. Follow this guide to create your Kubernetes cluster.
Note that you can create multiple clusters on the same AWS account with different VPCs. You can also create multiple clusters on different AWS accounts. Qovery will manage them for you.
Qovery offers multiple options that you can select to customize your installation. You can also change some of them later. Refer to this guide for more details on the available configurations.
If you are not familiar with Kubernetes, we recommend using Qovery on a Managed Kubernetes cluster on AWS, GCP, Scaleway, or Azure, or contacting us.
Qovery Self-Managed (or BYOK: Bring Your Own Kubernetes) is a self-hosted version of Qovery. It allows you to install Qovery on your own Kubernetes cluster.
Read this article to better understand the difference from the Kubernetes clusters managed by Qovery. In a nutshell, Qovery BYOK is for Kubernetes experts who want to manage their own Kubernetes cluster. In this version, Qovery does not manage the Kubernetes cluster for you.
Qovery automatically updates ONLY the Qovery applications (agent, shell-agent etc..) via the Qovery Helm chart. With the self-managed offer, it is up to you to manage any dependency components (ingress, DNS, logging...), making sure they run with the right versions over time.
The dependencies provided with the Qovery Helm chart are there to help you with the bootstrap and are not maintained by Qovery. If you want to simplify the maintenance of your cluster, please look at the Qovery managed Kubernetes offer.
Now that you have Qovery installed on your Kubernetes cluster, you can check that Qovery is properly installed by following the Validate Installation guide.
Install Qovery on your GCP account in less than 20 minutes. Qovery will create a Kubernetes cluster for you and manage it for you. To install Qovery on an existing Kubernetes cluster, please refer to the dedicated documentation.
Before you begin, this page assumes the following:
Before creating your cluster, ensure that you have at least 4 CPUs and 8 GB of memory available in your GCP quotas.
You can edit these quotas yourself or by contacting GCP support.
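If you prefer checking those quotas from the terminal, a quick way is the gcloud CLI (the region name below is only an example):
$ gcloud compute regions describe europe-west1 --format="yaml(quotas)"   # look for the CPUS metric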
Now you can create your Kubernetes cluster. Follow this guide to create your Kubernetes cluster.
Note that you can create multiple clusters on the same GCP account with different VPCs. You can also create multiple clusters on different GCP accounts. Qovery will manage them for you.
Qovery offers multiple options that you can select to customize your installation. You can also change some of them later. Refer to this guide for more details on the available configurations.
#Managed Cluster by Qovery vs. Self-Managed - What to choose?
Qovery offers two distinct approaches to cluster management: Cluster Managed by Qovery and Self-managed Cluster.
Choose Cluster Managed by Qovery if you are not familiar with Kubernetes, or if you don't want to bother with it and prefer to delegate infrastructure management to Qovery. Choose Self-Managed otherwise.
Here is a table to help you choose between the two:

| Feature/Aspect | Cluster Managed by Qovery (recommended) | Self-Managed Cluster (advanced) |
| --- | --- | --- |
| Management | Fully managed by Qovery | Self-managed by the organization |
| Control | Limited control over Kubernetes infrastructure | Full control over Kubernetes setup |
| Supported Cloud Service Providers | AWS, GCP, Scaleway | All |
| Customization | Standard Qovery configuration | High customization and configuration freedom |
| Expertise Required | None | Requires Kubernetes expertise |
| Responsibility | Qovery is responsible for maintenance | Organization is responsible for maintenance |
| Developer Experience | Streamlined and simplified | Streamlined and simplified (no difference) |
| Setup Complexity | Just an AWS, GCP or Scaleway account | Requires infrastructure and Kubernetes knowledge |
| Flexibility in Usage | Standardized to Qovery's environment | Flexible to meet specific organizational needs |
| Ideal Use Case | Organizations preferring a hands-off approach | Organizations with specific Kubernetes needs |
| Managed Services | Cf. list below | N/A |
Self-Managed Cluster is also known as Bring Your Own Kubernetes (BYOK).
Managed Services
Here is the list of managed services provided by Qovery with the Kubernetes Managed by Qovery approach:
Vertical Pod Autoscaler
Cluster Autoscaler
CoreDNS
Cert-manager
Cert-manager Qovery Webhook
Nginx Ingress
Metrics Server
External DNS
Promtail
Loki
AWS
AWS EBS Driver
AWS Kubeproxy
AWS CNI
IAM EKS User Mapper
Karpenter
AWS Node Term Handler
A more detailed comparison is available on our blog
Qovery BYOK is a set of Kubernetes components that you can configure to fit your needs. It is used to connect your Kubernetes cluster to the Qovery control plane.
Qovery Control Plane: the Qovery Control Plane is the brain of Qovery. It is responsible for managing your applications and providing the API to interact with Qovery.
Qovery Cluster Agent (mandatory): the Qovery Cluster Agent is responsible for securely forwarding logs and metrics from your Kubernetes cluster to Qovery control plane.
Qovery Shell Agent (mandatory): the Qovery Shell Agent is responsible for giving you secure remote shell access to your Kubernetes pods if you need it, e.g. when using the qovery shell command.
Qovery Engine (optional): the Qovery Engine is responsible for managing your applications' deployment on your Kubernetes cluster. It can either run on the Qovery side or be installed on your Kubernetes cluster.
Third-party components:
NGINX Ingress Controller (optional)
External DNS (optional)
Loki (optional)
Promtail (optional)
Cert Manager (optional)
...
Within the values.yaml file of the Helm chart, you can choose what you want to install and manage; it includes a description of which services are used and what they are responsible for. You can disable the ones you don't want to use, and you can even install other components if you want to.
This is the configuration of Qovery itself and it is used by all Qovery components. This configuration is automatically generated when creating the cluster on the Qovery console (see [docs.getting-started.install-qovery.kubernetes.quickstart#install-qovery])
Do not share the jwtToken! Keep it in a safe place. It is used to authenticate the cluster.
| Key | Required | Description |
| --- | --- | --- |
| qovery.clusterId | Yes | The cluster ID. It is used to identify your cluster. |
| qovery.clusterShortId | Yes | The short cluster ID. It is used to identify your cluster. |
| qovery.organizationId | Yes | The organization ID. It is used to identify your organization. |
| qovery.jwtToken | Yes | The JWT token. It is used to authenticate your cluster. |
| qovery.domain | Yes | The domain name used by Qovery. |
| qovery.domainWildcard | Yes | The wildcard domain name used by Qovery. |
| qovery.qoveryDnsUrl | Yes | Qovery DNS URL, in case you want to use the Qovery-provided DNS. |
| qovery.lokiUrl | No | Local Loki URL (required if Loki is set). |
| qovery.promtailLokiUrl | No | Promtail Loki URL (required if Promtail and Loki are set). |
| qovery.acmeEmailAddr | No | Email address used for Let's Encrypt TLS requests. |
| qovery.externalDnsPrefix | No | ExternalDNS TXT record prefix (required if ExternalDNS is set). |
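For illustration only, here is a minimal sketch of a values.yaml carrying the required keys above and an install via Helm. Every value is a placeholder (use the configuration generated by the Qovery console), and the chart repository URL is an assumption:
$ cat > values.yaml <<'EOF'
qovery:
  clusterId: "<cluster-id>"
  clusterShortId: "<cluster-short-id>"
  organizationId: "<organization-id>"
  jwtToken: "<jwt-token>"          # never share this token
  domain: "<cluster-domain>"
  domainWildcard: "*.<cluster-domain>"
  qoveryDnsUrl: "<qovery-dns-url>"
EOF
$ helm repo add qovery https://helm.qovery.com   # repo URL assumed
$ helm upgrade --install qovery qovery/qovery --namespace qovery --create-namespace -f values.yaml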
Used to easily reach your applications with DNS records, even on a private network.
If missing: you won't have easy access to your services via DNS names; you'll have to use IPs.
Qovery uses External DNS to automatically configure DNS records for your applications.
If you don't want to or can't add your own DNS provider, Qovery offers its own managed sub-domain DNS provider for free.
You'll then be able to add your custom DNS record later (no matter the provider) to point to your Qovery DNS sub-domain.
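For example, after creating a CNAME record at your DNS provider (all names below are purely illustrative), you can verify that your custom domain points to the Qovery sub-domain:
$ # record created at your provider: app.mycompany.com CNAME <your-app>.<qovery-sub-domain>
$ dig +short app.mycompany.com CNAME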
Open the Qovery console and access the "Environment" section.
Add a new environment and select as target the cluster that was created in the previous step.
Create an application
Within this environment, create a new service of type Application.
Fill the fields this way:
Name: test
Application source: Container Registry
Registry: Dockerhub public
image name: stefanprodan/podinfo
image tag: 6.5.2
Click on Continue until the installation recap is displayed. Now click on Create and deploy.
Follow the deployment
The application will start deploying; you can follow it by opening the Log button or by pressing the Deployment status.
After a few seconds, the deployment should end and the message Deployment of Container succeeded should be displayed in the deployment logs.
You should now see at least one pod running on your cluster with the specified container.
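If you want to double-check from a terminal (assuming you have kubeconfig access to the cluster), a simple sketch is to list the pods and look for the namespace that was created for your environment:
$ kubectl get pods --all-namespaces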
Verify Qovery functionalities
Click on the log button to access the Live logs section.
You should be able to:
1) access the log of the deployed application
-2) retrieve the running status of the application from the element next to the Live logs tab
#Step 2: verify application public exposure and TLS
This step should be run only if you have enabled the services external-dns, cert-manager, cert-manager-config and qovery-cert-manager-webhook in your values.yaml file during the installation.
Expose container publicly
Open the settings of the container created in step 1 and open the Port section.
Add one port with:
Application port: 9898
Protocol: HTTP
Publicly exposed: true
Add the port and then click on the Re-deploy now banner.
Follow the deployment
The application will start deploying; you can follow it by opening the Log button or by pressing the Deployment status.
After a few seconds, the deployment should end and the message Deployment of Container succeeded should be displayed in the deployment logs.
Check the accessibility
Click on the "Link" button and select one of the URLs of the list.
You should be able to access the podinfo homepage with a valid certificate.
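You can also verify the certificate from a terminal; the URL below is a placeholder for the one shown by the Link button:
$ curl -vI https://<your-app-url> 2>&1 | grep -E "subject:|issuer:|expire"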
This step should be run only if you have enabled the services q-storageclass-aws and aws-ebs-csi-driver in your values.yaml file during the installation (or you already have the CSI plugin activated on your AWS cluster).
Create a database
Go back to the environment page and create a new service of type Database.
Fill the fields this way:
Name: test-db
Database Mode: Container
Database type: Mysql
version: select one from the list
accessibility: private
Click on Continue until the installation recap is displayed. Now click on Create and deploy.
Follow the deployment
The database will start deploying; you can follow it by opening the Log button or by pressing the Deployment status.
After a few seconds, the deployment should end and the message Deployment of Database succeeded should be displayed in the deployment logs.
You should now see at least one pod running on your cluster with the specified container, and you should be able to access your database from within your cluster (you can retrieve the connection string via the Connection URI button available in the database overview).
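As a sketch, you can verify connectivity from inside the cluster with a throwaway client pod; every <value> below comes from the Connection URI, and the image tag is only an example:
$ kubectl run mysql-client --rm -it --restart=Never --image=mysql:8 -- mysql -h <db-host> -P <db-port> -u <db-user> -p<db-password> -e "SELECT 1;"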
Here is how to install Qovery on your local machine. This is the fastest way to get started with Qovery and start deploying your applications.
It's important to note that this local setup of Qovery using the qovery demo up command is designed for demonstration and testing purposes only. It is not intended for production use. Please refer to other guides for production-grade installations.
To install Qovery on your local machine, follow these steps:
Install Qovery CLI by running the following command:
Linux
MacOS
Windows
Docker
*nix
Arch Linux
Manual
To download and install Qovery CLI on any Linux distribution:
$ curl -s https://get.qovery.com | bash
Authenticate with Qovery by running the following command:
# Sign up and sign in command
$ qovery auth
If you are using a headless (without GUI) environment, you can use the following command to sign up and sign in:
# Sign up and sign in command
$ qovery auth --headless
A browser window with the Qovery sign-up page will open. Follow the instructions to sign up and sign in.
Set Qovery context:
qovery context set
Start the Qovery demo by running the following command:
Ensure you have Docker running and that you have installed jq, curl, sed, grep, and git.
qovery demo up
A k3s Kubernetes cluster will be installed on your local machine and Qovery will be installed on top of it.
Note that if you are on MacOS or Windows, you might be prompted for your admin password - which is necessary to properly route the traffic from your host to your k3s apps.
...
""""""""""""""""""""""""""""""""""""""""""""
Configure network
""""""""""""""""""""""""""""""""""""""""""""
+ sudo ifconfig lo0 alias 172.42.0.3/32 up
Password:
...
At the end of the installation, you will see the following message:
...
""""""""""""""""""""""""""""""""""""""""""""
Qovery demo cluster is now installed !!!!
The kubeconfig is correctly set, so you can connect to it directly with kubectl or k9s from your local machine
To delete/stop/start your cluster, use k3d cluster xxxx
-
Go to https://console.qovery.com to create your first environment on this cluster 'hello-local-cluster'
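For reference, the day-to-day k3d commands hinted at above look like this (the cluster name is taken from the message and may differ on your machine):
$ kubectl get nodes              # the kubeconfig already points at the demo cluster
$ k3d cluster list
$ k3d cluster stop <cluster-name>
$ k3d cluster start <cluster-name>
$ k3d cluster delete <cluster-name>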
Well done, you have successfully installed Qovery on your local machine. You can now start deploying your applications and experience Qovery for yourself.
Well done!! You now have your Scaleway access key ID, secret access key, organization ID, and project ID; it is time to connect Qovery to your Scaleway account.
Qovery is an abstraction layer on top of Scaleway and Kubernetes. Qovery manages the configuration of your Scaleway account and helps you deploy production-ready apps in seconds.
To make this work, Qovery relies on Kubernetes for stateless apps (containers) and on Scaleway for stateful apps (databases, storage...).
The first time you set up your Scaleway account, Qovery creates a Kubernetes cluster in your chosen region. Qovery manages it for you - no action required. It takes ~15 minutes to configure and bootstrap a Kubernetes cluster. Once bootstrapped, your Kubernetes cluster runs the Qovery app and is ready to deploy your applications.
Scaleway provides managed services for PostgreSQL, MySQL, Redis, and MongoDB. Qovery gives you access to those services when you set the environment mode to Production. In Development mode, Qovery provides container equivalents, which are cheaper and faster to start.
Different datacenters are located in different geographic areas, and you may want to keep your site physically close to the bulk of your user base for reduced latency.
#I can't find a region provided by Scaleway
We are probably testing support for this region; please contact us to find out its status.
Install Qovery on your Scaleway account in less than 20 minutes. Qovery will create a Kubernetes cluster and manage it for you. To install Qovery on an existing Kubernetes cluster, please refer to the dedicated documentation.
Before you begin, this page assumes the following:
Now you can create your Kubernetes cluster. Follow this guide to create your Kubernetes cluster.
Note that you can create multiple clusters on the same Scaleway account with different VPCs. You can also create multiple clusters on different Scaleway accounts. Qovery will manage them for you.
Qovery offers multiple options that you can select to customize your installation. You can also change some of them later. Refer to this guide for more details on the available configurations.
If you are not familiar with Kubernetes, we recommend using Qovery on a Managed Kubernetes cluster on AWS, GCP, Scaleway, or Azure, or contacting us.
Qovery Self-Managed (or BYOK: Bring Your Own Kubernetes) is a self-hosted version of Qovery. It allows you to install Qovery on your own Kubernetes cluster.
Read this article to better understand the difference with the Managed Kubernetes offering by Qovery. In a nutshell, Qovery BYOK is for Kubernetes experts who want to manage their own Kubernetes cluster. In this version, Qovery does not manage the Kubernetes cluster for you.
Qovery automatically updates ONLY the Qovery applications (agent, shell-agent, etc.) via the Qovery Helm chart. With the self-managed offer, it is up to you to manage any dependency components (ingress, DNS, logging...), making sure they run with the right version over time.
The dependencies provided with the Qovery Helm chart are there to help you bootstrap, and are not maintained by Qovery. If you want to simplify the maintenance of your cluster, please look at the Qovery Managed Kubernetes offer.
Now that you have Qovery installed on your Kubernetes cluster, you can check that Qovery is properly installed by following the Validate Installation guide.
Qovery is a DevOps Automation Platform that eliminates your DevOps hiring needs. Provision and maintain a secure and compliant infrastructure in hours - not months!
By using Qovery, Platform Engineering teams can provide an outstanding platform to their developers in less than an hour. Platform Engineering teams can then tailor the Qovery experience and even build on top of it to fit their own golden path. They keep control and can audit what developers do.
By using Qovery, developers are autonomous in deploying, debugging, and scaling their applications. They don't need any infrastructure knowledge. They can connect their git repository and push to deploy their apps.
Qovery focuses on providing an outstanding Developer Experience and never assumes that developers know how the underlying infrastructure works.
Backups and restores are frequently a nightmare to set up, especially for databases. Qovery ensures this part is always automatically managed by the cloud provider.
Data in transit between the world and Qovery is always encrypted, as is data for all of the services Qovery provides. These services include the Qovery CLI, management console, documentation, landing page, and back office.
Data in transit between the World and customer applications is encrypted. By default, HTTPS connections use an automatically generated Let's Encrypt certificate, or users may provide their own TLS certificate (Enterprise only).
Data in transit on Qovery-controlled networks (e.g., between the application and a database) uses end-to-end encryption and private networking rules.
All application data is encrypted by using encrypted storage (typically using an AES-256 block cipher). If you have specific audit requirements surrounding data at rest encryption, please contact us.
We've implemented company-wide policies to ensure all of our employees follow the necessary training and protocols around security. In addition, privacy protection is part of every project from its inception.
We've confirmed that all of our customer communication, both business-related and marketing-related, is opt-in, and no information is shared with us without a customer's consent.
The GDPR provides rights to individuals, such as the right to portability, right of rectification, and the right to be forgotten. We've made sure we comply with these rights. Nearly all information can be edited through a user's account, and we can delete accounts upon request.
Qovery infrastructure and processes comply with SOC2 (Systems and Organizations Controls 2) best practices. By default, Qovery integrates numerous security features into your applications, clusters, and databases, ensuring alignment with SOC2's stringent security standards. For more information, visit the Qovery trust page.
All customers using Qovery benefit from a SOC2-compliant infrastructure, significantly reducing the time required for compliance readiness.
This documentation outlines configuration settings for achieving SOC2 compliance and additional recommended security measures.
To meet SOC2 retention requirements and store application/container logs in the object storage used by Loki, set loki.log_retention_in_week so that it covers at least 365 days (i.e., at least 53 weeks).
Qovery allows databases to be publicly accessible for convenience; however, to comply with SOC2, it's recommended to restrict this access and secure your databases by changing the value of these settings (a combined example follows the Kubernetes API settings below):
database.<database type>.deny_public_access: set to true to deny any public access to your databases.
database.<database type>.allowed_cidrs: limit access to only your VPC CIDR or other specified IP ranges.
By default, cloud providers allow public access to the Kubernetes API, which is secured by TLS certificates. AWS and GCP provide an added layer of security by requiring account-based dual authentication.
SOC2 compliance, however, mandates restricted access to the Kubernetes API. To achieve this, use the following settings (see the sketch after this list):
qovery.static_ip_mode: limit access to Qovery’s designated IPs. Qovery needs this access to perform infrastructure maintenance and application deployment.
k8s.api.allowed_public_access_cidrs: optional, define any additional CIDRs that require access to the Kubernetes API, thus limiting external access further.
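Both the database and Kubernetes API restrictions above are cluster advanced settings. A hedged sketch of applying them through the Qovery REST API (the endpoint path, authorization scheme, and payload keys are assumptions here; check the API reference for your account):

# Update cluster advanced settings for SOC2 hardening (CIDRs and retention are example values)
$ curl -X PUT \
    -H "Authorization: Token $QOVERY_API_TOKEN" \
    -H "Content-Type: application/json" \
    -d '{
          "loki.log_retention_in_week": 53,
          "database.postgresql.deny_public_access": true,
          "database.postgresql.allowed_cidrs": ["10.0.0.0/16"],
          "qovery.static_ip_mode": true,
          "k8s.api.allowed_public_access_cidrs": ["203.0.113.0/24"]
        }' \
    "https://api.qovery.com/organization/$ORGANIZATION_ID/cluster/$CLUSTER_ID/advancedSettings"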
Please refer to the dedicated documentation section: you have to create a Dockerhub account and link it to Qovery to avoid rate limits.
#What is the difference between a Project, an Application, and an Environment?
A project is the site that you're working on. Each project can contain multiple applications and be deployed in multiple environments. An environment is a standalone copy of your site, including apps, databases, storage, data, and all other services. By default, the main branch is the production environment, while all other branches can be set up as identical copies of the prod environment for testing purposes.
Qovery provides managed and container modes for your databases. Basically, managed mode relies on the managed database provided by the cloud provider. E.g., if you choose Postgres with the managed mode while your environment is running on AWS, then Qovery provisions an AWS RDS instance. Please check out our database section for further details.
Behind the scenes, Qovery uses Kubernetes. Qovery extends Kubernetes to make it accessible to any developer team.
Important: Qovery does not modify Kubernetes. It only deploys its services in a qovery Kubernetes namespace.
#Which IP address does my cluster use to communicate externally over the Internet?
There isn't just one public cluster IP address dedicated to external communication. However, worker nodes inside your cluster each have a public IP automatically attached to them. You can view those default public IPs in the details of your worker nodes (EC2 instances for AWS users) which belong to the node group in your cluster.
For improved security and control, the Static IP feature allows you to ensure that outbound traffic from your cluster uses specific IP addresses. For more information on the Static IP feature and how to enable it at cluster creation, see Static IP on AWS or Static IP on GCP.
#If I have N custom domains under the same root domain, do I need to create N CNAME records, or is creating one for the root domain enough?
The Qovery team manages your Kubernetes cluster's upgrade, and you don't have to think about it. Upgrades from one minor Kubernetes version to another require a good amount of tests to make sure everything goes smoothly with zero interruptions for your app. This is why Qovery always provides 1 or 2 minor versions below the last one offered by the cloud provider. Our goal is to guarantee you the maximum uptime.
Under the hood, Qovery uses containers and Kubernetes to run applications. With us, your applications scale accordingly to your traffic and needs. We rely on major cloud providers to provide reliable infrastructure to make your applications highly available.
What's more, we take on our shoulders the complexity of providing and managing other infrastructure requirements you need (like databases or message brokers), so you can focus solely on developing business features.
With Qovery, the cloud is simple again. Get all the benefits of using cloud and Kubernetes without dealing with its complexity. You don't need to hire infrastructure experts - configuring continuous integration, deployment, databases, message brokers, storage, DNS, SSL/TLS, VPCs, and many others - we do it all for you. On Qovery, you can spin up a set of microservices, databases, and other cloud services in minutes with a single Git push!
Qovery is designed by developers for developers. Our goal is to make your life easier and allow you to move faster. Developer experience is at our heart. Building cloud-native applications has never been this fast and simple!
#Fully customizable for advanced business use cases
Create teams, split responsibilities, manage privileges, enforce company-wide rules, deploy to multiple clouds, plug in your own CI solutions. Qovery Business allows you to bring your organization to the next level with ease.
Qovery is responsible for the elements deployed and maintained by Qovery on your cloud provider. We are not responsible for the cloud provider itself.
Qovery strongly advises you to take out a support plan with your cloud provider. When outages occur, Qovery is limited to the elements given by the cloud provider and sometimes does not have enough information to diagnose a service failure.
In those cases, you will need to contact your cloud provider support for investigation.
This feature is available in public beta. Access and functionalities might change in the future based on your Qovery Plan.
Qovery allows you to monitor any action that happens within your organization thanks to the audit logs section. This section provides you with a complete view of any change to your organization's configuration, answering the question "who did what, where, and when?".
This is extremely useful when debugging complex issues, trying to understand what happened in a specific timeframe, or monitoring the actions done by your users within your organization.
You can access this section by opening the Audit logs section from the nav bar on the left.
Once you enter this section, you will find the list of events that happened within your organization over the past 30 days (the maximum retention time).
From a technical point of view, Qovery tracks in the audit logs any call happening on our API for your organization. Example: if you modify the configuration of an application via the Qovery console, Qovery will track the call to the API endpoint /application and log an UPDATE event.
Each event in the list is composed of the following information:
Timestamp: it tells you when the event happened
Event Type: it describes the type of event (Create, Update, Delete, Trigger Deployment, etc.)
Target Type: it defines the type of object that has been modified (Environment, Cluster, Role, Image registry, etc.)
Target: it defines the object that has been modified. You can get additional information on the target by hovering over it.
Change: it describes what has been modified (high-level information: its config, a deployment rule, etc.)
User: it describes who modified the object. If the change was made via API, you will find the name of the API token that made it.
Tool: it describes how the object was changed (via the console, the Qovery Terraform provider, a git push, etc.)
Since the audit logs are based on the calls made to our API, Qovery provides you, for each event, with the JSON sent in the corresponding API response. This JSON represents the status of the target object after the event happened. You can access the JSON by clicking on the event; it can be useful to get more granular information on what changed between two events of the same type by comparing their JSONs.
Example: if an update happened on the configuration of an application, the stored UPDATE event will give you access to the JSON returned by the API when the /application endpoint was called. This JSON will thus contain the configuration of the application after the update.
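Since the audit log is API-backed, you can also retrieve events programmatically. A hedged sketch (the events endpoint path and its query parameters are assumptions; verify them against the REST API reference):

# Fetch recent audit events for your organization
$ curl -H "Authorization: Token $QOVERY_API_TOKEN" \
    "https://api.qovery.com/organization/$ORGANIZATION_ID/events?pageSize=50"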
To simplify searching within the audit logs, you can filter the events by:
Time range
Target: you will have to specify a target type (cluster, environment, service, etc.) and then the name of the target. For example, if you want to look for the changes that happened on the cluster Production, select Cluster as the Target type and then select Production from the cluster list.
While navigating within the console, a few quick filters allow you to jump to the audit logs and get the events that happened on a specific object. For example, you can quickly get the events for a specific environment by clicking on the See Events button available within the 3-dots sub-menu.
Decide the maximum time, in seconds, the application is supposed to take to stop. After this time, the application is forced to stop (killed).
An application requiring several tasks to stop properly should have a higher grace period. If the application finishes early, it will not wait until the end of the grace period.
Define how you want pod affinity to behave:
• Preferred: allows, but does not require, that pods of a given service are not co-located (or co-hosted) on a single node
• Required: ensures that the pods of a given service are not co-located (or co-hosted) on a single node (safer in terms of availability but can be expensive depending on the number of replicas)
Set the deployment strategy type (RollingUpdate or Recreate)
The RollingUpdate strategy will gracefully roll out new versions, while Recreate will stop all current versions and create new ones once all old ones have been shut down (more info)
Allows you to run a command before the application is stopped. The command should be a shell command or script. Qovery requires the sh shell by default and sets a sleep of 15 seconds to let Nginx update its config, avoiding error codes returned during a rolling update.
(For CORS users) Allows you to specify which set of headers can be present in the client request.
For security purposes, you can indicate which HTTP headers can be used during a CORS preflight request which includes the Access-Control-Request-Headers request header. For more information, see CORS HTTP Response Headers.
(For CORS users) Allows you to specify which set of methods can be used for the client request.
For security purposes, you can indicate which HTTP methods are permitted while accessing a resource in response to cross-origin requests. For more information, see CORS HTTP Response Headers.
(For CORS users) Allows you to specify which origin(s) (domain, scheme, port) can access a resource.
For security purposes, you can allow only one or a short list of origins to access your resources. For more information, see CORS HTTP Response Headers.
Allows you to enable Cross-Origin Resource Sharing (CORS).
The CORS mechanism supports secure cross-origin requests and data transfers between browsers and servers. For more information on CORS and when to enable it, see Cross-Origin Resources Sharing.
Enable the load balancer to bind a user's session to a specific target. This ensures that all requests from the user during the session are sent to the same target
Limits the maximum time (in seconds) during which requests can be processed through one keepalive connection. After this time is reached, the connection is closed following the subsequent request processing.
Allows you to set, in megabytes, a maximum size for resources that can be downloaded from your server.
By default, users can download resources (files, images, videos...) of up to 100 MB. You can use this advanced setting to lower or increase this limitation.
Defines a timeout (in seconds) for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds.
E.g. you can use it to define the maximum time to wait for your application to establish the connection.
Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed.
E.g. You can use it to fine-tune your WebSocket application.
Sets a timeout (in seconds) for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed.
E.g. You can use it to fine-tune your WebSocket application.
Sets a timeout (in seconds) for transmitting a response to the client. The timeout is set only between two successive write operations, not for the transmission of the whole response. If the client does not receive anything within this time, the connection is closed.
Useful to define the maximum timeout to wait for client connection.
Allows you to specify which IP ranges are allowed to access your application. The value is a comma-separated list of CIDRs, e.g. 10.0.0.0/24,172.10.0.1
By default, any IP can access your application if it's exposed publicly and the users know the URL. You can limit access by specifying the IPs allowed to reach the app (e.g. the IP of your office).
Allows you to specify which IP ranges are not allowed to access your application. The value is a comma-separated list of CIDRs, e.g. 10.0.0.0/24,172.10.0.1
Set the name of an environment variable to use as basic authentication (login:crypted_password) generated by the htpasswd command.
Here is an example where you can create a secret environment variable on Qovery and set a name like BASIC_AUTH_CREDENTIALS. The content should be the result of the htpasswd command:
$ htpasswd -n <username>
New password:
Re-type new password:
username:$apr1$jpwW4vG9$fwbzWBgRqARzNX93plDq20
The content of the BASIC_AUTH_CREDENTIALS environment variable should be: username:$apr1$jpwW4vG9$fwbzWBgRqARzNX93plDq20. To finish, set the network.ingress.basic_auth_env_var advanced settings to BASIC_AUTH_CREDENTIALS.
You can set several credentials by separating them with a comma. For example: username1:$apr1$jpwW4vG9$fwbzWBgRqARzNX93plDq20,username2:$apr1$jpwW4vG9$fwbzWBgRqARzNX93plDq20. However, the total length of the environment variable should not exceed 1MB.
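To generate several credentials non-interactively and join them with a comma, htpasswd's batch mode can help (the usernames and passwords below are placeholders):

# Print two login:crypted_password pairs separated by a comma
$ printf '%s,%s\n' "$(htpasswd -nb user1 's3cret1')" "$(htpasswd -nb user2 's3cret2')"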
Allows you to specify additional headers for the outgoing response. The header values are separated by commas (e.g. {"X-Frame-Options":"DENY","X-Content-Type-Options":"nosniff"}).
Allows you to specify additional headers to the incoming requests. The header values are separated by comma (e.g. {"X-Frame-Options":"DENY","X-Content-Type-Options":"nosniff"}).
Auto-scaling is triggered when a specific memory utilization metric is reached (for instance, 40%). This advanced setting allows you to set this metric.
By default, terminated jobs in a completed or failed state are not deleted. If this parameter is set, Kubernetes will automatically clean up completed jobs after the TTL.
Using overcommit on pod resources can lead to instability on your cluster and we strongly discourage it. Be careful when using this feature.
Type: integer
Description: Define the CPU overcommit (pod CPU limit) of the service.
Use Case: A service may require more CPU at startup than during the running phase. You can reduce the configured CPU for the service and simply increase resources.override.limit.cpu_in_milli to lower the resources used by the service at runtime.
Using overcommit on pod resources can lead to instability on your cluster and we strongly discourage it. Be careful when using this feature.
Type: integer
Description: Define the memory overcommit (pod memory limit) of the service.
Use Case: A service may require more memory at startup than during the running phase. You can reduce the configured memory for the service and simply increase resources.override.limit.ram_in_mib to lower the resources used by the service at runtime.
You have a liveness probe configured on port 80 of your application. If during the deployment of your application the probes can't connect to port 80 and we reach a timeout, the deployment fails.
Qovery allows you to manage these probes directly from within the Qovery console during the setup of your application, letting you decide their activation, configuration and check frequency.
Allows you to specify the type of probe you want to run against your application:
NONE: if NONE is selected, the probe is disabled and thus Kubernetes won't be able to verify the state of your application and take the right corrective actions.
We strongly advise against disabling the liveness probe.
HTTP probes are the most common probe type. You can use them if your application is an HTTP server, or if you create a lightweight HTTP server inside your application specifically to respond to such probes. When using an HTTP probe, you need to configure:
a port
a path
Once configured, Kubernetes pings a path (for example: /healthz) on a given port. If it gets a response in the 200 or 300 range, the check passes. Otherwise, it is considered failed and Kubernetes takes the necessary corrective actions.
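You can reproduce what the probe does by hand from a shell inside the container (the path and port here are examples, not your actual configuration):

# Prints the HTTP status code; any response in the 200/300 range passes the check
$ curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8080/healthz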
TCP probes are most often used when HTTP or command probes aren't an option. When using a TCP Liveness probe, Kubernetes tries to establish a connection on the specified port. If the connection is successful, the application is considered healthy. Otherwise, it is considered dead and the container is restarted.
gRPC probes
When using a gRPC Liveness probe, Kubernetes tries to establish a connection on the specified port and service. If the connection is successful, the application is considered healthy. Otherwise, it is considered dead and the container is restarted.
EXEC probes
Exec probes allow you to define a command to be executed within your container. If the command execution fails, the probe is considered failed.
Allows you to specify an interval, in seconds, between the application container start and the first liveness check.
Allowing additional time for the application to start can be useful when boot time usually takes too long (due to long boot operations), or when the application opens the port before being ready to receive traffic on it (due to a still ongoing boot operation).
Allows you to specify how many consecutive successes are needed, as a minimum, for the probe to be considered successful after having failed previously.
If your application has a long boot operation to run, your deployment might be marked as failed since the probe can't verify the state of your application within the specified time frame. In this case, you will find in your deployment logs a warning message Liveness probe failed: dial tcp xx.xx.xx.xx:xx: connect: connection refused, telling you that the probe is failing.
If your application needs more time to boot, increase the Initial Delay in seconds of the probes to match the application boot time.
You can adjust the minimum and maximum of instances you need in your application settings. Qovery runs your application on Kubernetes and relies on metrics-server service to auto-scale your app.
The default filesystem for applications running on Qovery is ephemeral. Application data isn’t persisted across deploys and restarts, which works just fine for most apps because they use managed databases to persist data.
However, many applications need persistent disk storage that isn’t ephemeral. These include:
Blogging platforms and CMSs like WordPress, Ghost, and Strapi.
Collaboration apps like Mattermost, GitLab, and Discourse.
This is where Qovery Block Storage comes in. Qovery applications can use storage to persist data across deploys and restarts, making it easy to deploy stateful applications.
For most use cases, it is better to use Object Storage instead of Block Storage.
Within this section you can define the port exposed by your application to the other services or even over the internet.
You can edit the existing ports or declare new ones by specifying:
Application port: this is the port exposed internally by your application for the other services.
Protocol: you can select the protocol used by your application: HTTP (for both standard HTTP and websocket communications), gRPC, TCP, UDP.
Publicly exposed: allows you to expose your service over the public network. A public domain will be assigned to your application during the deployment (see the Connecting from the internet section).
If Publicly Exposed is selected:
External port: the port that can be used to access this service over the internet (when exposed publicly). Note that for HTTP and gRPC the port is set to 443 by default.
Port Name: the name assigned to the port. When multiple ports are exposed publicly, its value is used to route the traffic to the right port based on the called subdomain (which will contain the port name value). Since every port is exposed on port 443, having a different subdomain is the only way to expose multiple ports over the internet. If not set, the default value is p<portNumber> (see the Qovery Provided Domain section for more information).
Most of the Kubernetes Health Checks are based on the port declared in this section. Make sure you declare the right port and that you configure the health checks properly.
Connections on public ports are automatically closed after 60 seconds. If you want to implement long-living connections (like for websockets), please make sure to use the right ingress timeouts in the advanced settings section.
Exposing TCP/UDP ports publicly requires creating a dedicated load balancer, and it takes a few minutes (~15) before it is ready. Note also that this has a direct impact on your cloud provider bill.
You can configure your application to use the PORT environment variable by adding PORT on your application's env variables page.
A note on listening IPs: it's best for your application to listen on 0.0.0.0:$PORT. While most things work with 127.0.0.1 and localhost, some do not (NodeJS, for example).
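As a quick local sanity check of the recommended binding, here is a one-liner using Python's built-in HTTP server purely as a stand-in for your application:

# Serve on all interfaces using the port from the environment
$ export PORT=8080
$ python3 -m http.server "$PORT" --bind 0.0.0.0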
This section allows you to specify which changes on your repository should trigger an auto-deploy (if enabled). To learn more about how to configure your Deployment Restrictions, have a look at the deployment restrictions section.
Your application can be reached from the internet by publicly exposing at least one of its ports (See the Ports section to know more). Once this is done, Qovery will generate and assign a domain to your application (See this section to know more). You can customize the domain assigned to your application via the Domain section in the settings (see this section to know more).
For each port publicly exposed, a domain is automatically assigned by Qovery to your application. Qovery will manage for you the networking and the TLS configuration for these domains.
Example: p80-zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh or <service_name>-p80-zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh for helm services.
Note:
each service deployed on the same cluster will have the same root domain assigned (example: za8ad0657.bool.sh)
the first characters of the domain (before the -) are based on the portName given to the port associated with this domain (see the port section)
a default domain (without the portName) is assigned to the default port (see the port section). Example: zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh
Special Case - Preview Environment
For each port exposed publicly, an additional domain will be created with the following pattern portName-prId-srvName-envSourceName.cluster_domain:
portName: is the port name, as explained above
prID: is the id of the PR that has generated the preview environment
srvName: is the name of the service
envSourceName: is the name of the blueprint environment that has created the current preview environment
If you prefer to assign your own domain to the application, you can customize it from the "Domain" section within the application settings.
You can customize the domain of your application in different ways, depending on what you want to achieve:
You want to use your own domain for your application
You want to modify the subdomain assigned to your application by Qovery (i.e. change p80-zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh into my-app-domain.za8ad0657.bool.sh). See this section to know more about these domains.
In both cases, you can assign the new custom domain by pressing the Add Domain button.
This configuration will be automatically removed on every cloned environment or preview environment in order to avoid domain collision.
For each custom domain, a green checkmark will appear if the domain is correctly configured. You can perform another check by clicking on the checkmark. If you're behind a CDN, we will only check if your custom domain resolves to an IP address.
If there's an issue with a domain, a global error message will be displayed, and you can view the error details by hovering over the red cross. After correcting your configuration, you can perform another check by clicking on the red cross.
Once the domain is added within the Qovery console (example: mydomain.com), you need to configure two CNAME records within your DNS pointing to the domain provided by Qovery, as shown in the UI (example: mydomain.com CNAME za7cc1b71-z4b8474b3-gtw.zc531a994.rustrocks.cloud and *.mydomain.com CNAME za7cc1b71-z4b8474b3-gtw.zc531a994.rustrocks.cloud).
Having a wildcard domain entry (example: *.mydomain.com) configured on your DNS saves you from modifying the Qovery setup every time you want to add a new subdomain. If wildcards are not supported by your DNS provider, you will have to configure each subdomain manually.
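You can verify that the records resolve as expected with dig (the domains below are the documentation's examples):

# Check the root and a subdomain covered by the wildcard entry
$ dig +short mydomain.com CNAME
$ dig +short www.mydomain.com CNAME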
If a service needs to expose more than one port publicly, you can define a dedicated subdomain to redirect the traffic on the right port by setting the “Port Name” value within the port settings.
After re-deploying the service, Qovery will automatically handle the TLS/SSL certificate creation and renewal for the configured domain.
If your service is behind a CDN using a proxy mode (i.e. the traffic is routed through the CDN to Qovery), make sure to enable the option Domain behind a CDN and disable the option Generate certificate in the domain setup. Since the certificate of your domain is directly managed by the CDN, Qovery won't be able to do that for you and will raise warnings on your application status.
If you are using Cloudflare to manage your CDN, we can also manage your custom domain configuration automatically via a wildcard domain setup for the whole cluster. Check our documentation here.
You can specify a different sub-domain for your application as long as it belongs to the assigned cluster domain (see Qovery provided domains).
Example:
your current domain is zdf72de71-z709e1a85-gtw.za8ad0659.bool.sh (so your assigned cluster domain is za8ad0659.bool.sh)
you can enter a new custom domain myfrontend.za8ad0659.bool.sh (since it is a subdomain of the cluster domain)
The application will now be accessible from both the default and the new custom domain.
Qovery does not check for collisions in the domain declaration. Make sure you assign a unique subdomain within your cluster.
You can create a clone of the service via the clone feature. A new service with the same configuration (see below for exceptions) will be created into the target environment.
The target environment can be the same as the current environment or even another one in a completely different project.
Important information
Not every configuration parameter will be copied within the new service for consistency reasons. The configuration is fully or partially copied depending on the target environment:
same environment:
custom domain: this setup is not copied into the new service (to avoid collision)
another environment:
custom domain: this setup is not copied into the new service (to avoid collision)
environment variable: aliases defined on environment variables are not copied (since the aliased env var might not exist)
deployment pipeline: stage setup is not copied (since the target stage might not exist)
number of instances: if the target environment runs on a Qovery EC2 cluster, the max number of instances is set to 1 (Qovery EC2 constraint)
Please check the configuration of the new service before deploying it.
To further fine-tune your Qovery infrastructure, you can set advanced settings through the Qovery API endpoint.
Cluster advanced settings are not available in the Qovery console yet.
All clusters have access to advanced settings. In the documentation below, badges indicate which cloud providers each setting is available for:
You will also find badges indicating which components each setting applies to:
Enabling this feature will cause up to 10 minutes of downtime on your application's public access (the time to delete, replace, and propagate DNS for the new load balancer).
Type: boolean
Description: Enable the AWS ALB controller to manage the load balancer for the cluster.
Default value: true
Requirements for customers using custom VPCs (Qovery Managed VPC does not require these steps):
On public subnets: add a label kubernetes.io/role/elb with the value 1 to the subnet where the ALB will be created.
On private subnets: add a label kubernetes.io/role/internal-elb with the value 1 to the subnet where the ALB will be created.
On all subnets: add a label kubernetes.io/cluster/<cluster-name> with the value shared to the subnet where the ALB will be created.
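On AWS these "labels" are applied as subnet tags. A sketch with the AWS CLI covering the three requirements above, assuming placeholder subnet IDs and cluster name:

```bash
# Public subnets: allow internet-facing load balancers here.
aws ec2 create-tags --resources subnet-0123456789abcdef0 \
  --tags Key=kubernetes.io/role/elb,Value=1

# Private subnets: allow internal load balancers here.
aws ec2 create-tags --resources subnet-0fedcba9876543210 \
  --tags Key=kubernetes.io/role/internal-elb,Value=1

# All subnets: mark them as shared with the cluster.
aws ec2 create-tags \
  --resources subnet-0123456789abcdef0 subnet-0fedcba9876543210 \
  --tags Key=kubernetes.io/cluster/<cluster-name>,Value=shared
```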
Allows you to specify the load balancer size in front of your cluster. Possible values: lb-s (200 Mbps), lb-gp-m (500 Mbps), lb-gp-l (1 Gbps), lb-gp-xl (4 Gbps).
Deny any access to all PostgreSQL databases. When false, configure the CIDR range you want to allow within the associated allowed_cidrs parameter (default is "any IP"). ⚠️ Any access to managed databases will instantly be removed. ⚠️ Any access to container databases will be removed only after a database redeployment.
Deny any access to all MySQL databases. When false, configure the CIDR range you want to allow within the associated allowed_cidrs parameter (default is "any IP"). ⚠️ Any access to managed databases will instantly be removed. ⚠️ Any access to container databases will be removed only after a database redeployment.
Deny any access to all MongoDB databases. When false, configure the CIDR range you want to allow within the associated allowed_cidrs parameter (default is "any IP"). ⚠️ Any access to managed databases will instantly be removed. ⚠️ Any access to container databases will be removed only after a database redeployment.
Deny any access to all Redis databases. When false, configure the CIDR range you want to allow within the associated allowed_cidrs parameter (default is "any IP"). ⚠️ Any access to managed databases will instantly be removed. ⚠️ Any access to container databases will be removed only after a database redeployment.
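These deny/allow pairs are plain cluster advanced settings, so they can be updated over the API like any other. A hedged sketch: the endpoint path and setting names below follow the pattern described above but are assumptions; confirm them against the API reference:

```bash
# Allow PostgreSQL access only from a documentation CIDR (203.0.113.0/24).
curl -X PUT \
  "https://api.qovery.com/organization/${ORG_ID}/cluster/${CLUSTER_ID}/advancedSettings" \
  -H "Authorization: Token ${QOVERY_TOKEN}" \
  -H "Content-Type: application/json" \
  -d '{
        "database.postgresql.deny_public_access": false,
        "database.postgresql.allowed_cidrs": ["203.0.113.0/24"]
      }'
```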
Allows you to specify the IAM group name associated with the Qovery user in the AWS console during the IAM permissions setup, so that you can connect to the Kubernetes cluster. Its value can be changed after cluster installation via a redeploy, without any downtime.
Enable the static IP mode for the Qovery control plane, which automatically 1) activates the private endpoint on the Kubernetes API and 2) adds the Qovery IP to the CIDR whitelist.
Default value: false
If you need to connect to the Kubernetes cluster from your network, make sure to add your CIDR to the advanced setting k8s.api.allowed_public_access_cidrs.
Dockerhub credentials are necessary to activate this feature.
Before setting this advanced setting to true, go to Organization settings > Container registry and make sure that your Dockerhub registry has credentials set.
Why? Dockerhub rate-limits pulls from its registry by IP. Since the Qovery control plane is seen as a single IP, the limit is quickly reached. The limit is higher for logged-in users, so if you put your credentials in the Dockerhub registry configuration of your organization, you should not encounter any rate limit issues during deployment.
Karpenter is only available for non-production clusters. If you have created a production cluster, this option will not be visible.
Karpenter automatically launches just the right compute resources to handle your cluster's applications. It is designed to let you take full advantage of the cloud with fast and simple compute provisioning for Kubernetes clusters.
You can check our blog post for more information.
Click on AWS as hosting mode and then Qovery Managed option:
In the Create Cluster window enter:
Cluster name: enter the name of your choice for your cluster.
Description: enter a description to better identify your cluster.
Production cluster: select this option if your cluster will be used for production. Note: Karpenter is currently only available for non-production clusters.
Region: select the geographical area in which you want your cluster to be hosted.
Credentials: select one of the existing cloud provider credentials or add a new one by clicking on New Credentials. In the New credentials window, add the credentials that you have generated on your cloud provider console (Procedure for AWS account). Added credentials can be used later to create and manage additional clusters.
To confirm, click Next.
In the Set Resources window, select:
Karpenter: Toggle the switch to enable Karpenter on your AWS EKS cluster
Instance type scopes: by editing them, you can filter the node architectures, categories, families, and sizes. On the right, you can view all the instance types that match the applied filters; Karpenter will be able to spawn nodes on any of the listed instance types.
Architectures: by default both AMD64 and ARM64 architectures are selected.
Default build architecture: by default AMD64. If you build your application with the Qovery CI, your application will be built using this architecture by default.
Families: by default all families are selected.
Sizes: by default all sizes are selected.
Spot instances: to reduce your costs even further, you can also enable spot instances on your clusters. Spot instances cost up to 90% less than On-Demand prices, but keep in mind that they can be terminated by the cloud provider at any time. Check this documentation for more information. Even with this flag enabled, statefulsets won't run on spot instances.
Disk size: select the size of the disks to be attached to your cluster instances (to locally store container images, etc.).
Instance type selection from your Qovery Console has direct consequences on your cloud provider’s bill. While Qovery allows you to switch to a different instance type whenever you want, it is your sole responsibility to keep an eye on your infrastructure costs, especially when you want to upsize.
Please be aware that changing the instance type or disk size might cause a downtime for your service.
Also, before downsizing, you need to ensure that your applications will still have enough resources to run correctly.
To confirm, click Next.
In the Features step, select the features you want to enable on your cluster.
If you want to manage the network layer of your cluster by yourself, you can switch VPC mode to Deploy on my existing VPC to use your own VPC instead of the one provided by Qovery.
These options can only be configured during cluster creation and cannot be modified later.
By default, when your cluster is created, its worker nodes are allocated public IP addresses, which are used for external communication. For improved security and control, the Static IP feature allows you to ensure that outbound traffic from your cluster uses specific IP addresses.
Here is what will be deployed on your cluster:
Nat Gateways
Elastic IPs
Private subnets
Once set up, here is the procedure to find your static IP addresses on AWS:
On your AWS account, select the VPC service.
On the left menu, you’ll find Elastic IP addresses. Once on it, in the Allocated IPv4 address column, you’ll have your public IPs.
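If you prefer the CLI, the same addresses can be listed with the AWS CLI (a read-only command, safe to run):

```bash
# List the Elastic IPs allocated in the account; the PublicIp values of
# the addresses attached to the cluster's NAT gateways are your static
# egress IPs.
aws ec2 describe-addresses \
  --query 'Addresses[].{PublicIp:PublicIp,AllocationId:AllocationId}' \
  --output table
```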
If you work in a sensitive business area such as financial technology, enabling the Static IP feature can help fulfil the security requirements of some of the external services you use, therefore making it easier for you to get whitelisted by them.
This feature has been activated by default. Since February 1, 2024, AWS charge public IPv4 Addresses. Disabling it may cost you more, depending on the number of nodes in your cluster. Check this link for more information.
Virtual Private Cloud (VPC) peering allows you to set up a connection between your Qovery VPC and another VPC on your AWS account. This way, you can access resources stored on your AWS VPC directly from your Qovery applications.
A VPC can only be used if it has at least one range of IP addresses called a subnet. When you create a cluster, Qovery automatically picks a default subnet for it. However, to perform VPC peering, you may want to define which specific VPC subnet you want to use, so that you can avoid any conflicting settings. To do so, you can enable the Custom VPC Subnet feature on your cluster. For more information on how to set up VPC peering, see our dedicated tutorial.
In the Ready to install your cluster window, check that the services needed to install your cluster are correct.
You can now press the Create and Install button.
Your cluster is now displayed in your organization settings, featuring the Installing... status (orange status). Once your cluster is properly installed, its status turns to green and you will be able to deploy your applications on it.
You can follow the execution of the action via the cluster status and/or by accessing the Cluster Logs
#Migrating from AWS with auto-scaler to AWS with Karpenter
An SQS queue will be created. Before deploying your cluster, update the IAM permissions of the Qovery user (make sure to use the latest version here) to add the permission on SQS.
You can easily activate Karpenter on your existing non-production cluster by following this process:
To access your cluster settings, click on the wheel button:
Go to the Resources section and switch on the Activate Karpenter toggle
Update your cluster by selecting the action Update from the drop-down menu.
Once the update is complete, your cluster will be running on Karpenter. By default, only the instance types selected when you created your AWS cluster with the auto-scaler will be configured. You can add additional instance types by editing the instance types in the resources section.
Qovery allows you to modify the resources allocated for your cluster:
The list of instance types
The spot instances activation
The node disk size (GB): the disk capacity you want to allocate to your worker node(s) (meaning how much data, in gigabytes, you want each worker node to be able to hold)
Instance type selection from your Qovery Console has direct consequences on your cloud provider’s bill. While Qovery allows you to switch to a different instance type whenever you want, it is your sole responsibility to keep an eye on your infrastructure costs, especially when you want to upsize.
In this tab, you will see that a container registry already exists (called registry-{$UIID}).
This is your cloud provider container registry, used by Qovery to manage the deployment of your applications by mirroring the Docker images.
The credentials configured on this registry are the ones used to create the cluster, but you can still update them if you prefer to manage them separately (a dedicated pair of credentials just to access the registry).
The Features tab in your cluster settings allows you to check if the Static IP, Custom VPC subnet, Deploy on existing VPC features are enabled on your cluster. The enabled features cannot be changed after the creation of the cluster.
The Network tab in your cluster settings allows you to update your Qovery VPC route table so that you can perform VPC peering. For step-by-step guidelines on how to set up VPC peering, see our dedicated tutorial.
By default, clusters will be deployed with the AWS default autoscaler. If you are looking to install Qovery on your AWS cluster with the Karpenter autoscaler to improve cluster resource usage and activate spot instances, please refer to this guide.
Karpenter is only available for non-production clusters. If you have created a production cluster, this option will not be visible.
Click on AWS as hosting mode and then Qovery Managed option:
In the Create Cluster window enter:
Cluster name: enter the name of your choice for your cluster.
Description: enter a description to better identify your cluster.
Production cluster: select this option if your cluster will be used for production.
Region: select the geographical area in which you want your cluster to be hosted.
Credentials: select one of the existing cloud provider credentials or add a new one by clicking on New Credentials. In the New credentials window, add the credentials that you have generated on your cloud provider console (Procedure for AWS account). Added credentials can be used later to create and manage additional clusters.
To confirm, click Next.
In the Set Resources window, select:
Karpenter: to activate Karpenter. If you want to install Qovery on AWS with Karpenter, please refer to this guide
Instance type: select the type of worker nodes you want to deploy to your cluster.
Disk size: select the size of the disks to be attached to your cluster instances (to locally store container images, etc.).
Node auto-scaling: define the minimum and the maximum number of worker nodes that your cluster can run. The lowest number is the number of worker nodes running on your infrastructure at any time, while the highest number is the maximum number of worker nodes that can automatically be deployed as traffic grows. Please note that a minimum of 3 worker nodes is required to deploy your EKS cluster.
Instance type selection from your Qovery Console has direct consequences on your cloud provider’s bill. While Qovery allows you to switch to a different instance type whenever you want, it is your sole responsibility to keep an eye on your infrastructure costs, especially when you want to upsize.
Please be aware that changing the instance type or disk size might cause a downtime for your service.
Also, before downsizing, you need to ensure that your applications will still have enough resources to run correctly.
To confirm, click Next.
In the Features step, select the features you want to enable on your cluster.
If you want to manage the network layer of your cluster by yourself, you can switch VPC mode to Deploy on my existing VPC to use your own VPC instead of the one provided by Qovery.
These options can only be configured during cluster creation and cannot be modified later.
By default, when your cluster is created, its worker nodes are allocated public IP addresses, which are used for external communication. For improved security and control, the Static IP feature allows you to ensure that outbound traffic from your cluster uses specific IP addresses.
Here is what will be deployed on your cluster:
Nat Gateways
Elastic IPs
Private subnets
Once set up, here is the procedure to find your static IP addresses on AWS:
On your AWS account, select the VPC service.
On the left menu, you’ll find Elastic IP addresses. Once on it, in the Allocated IPv4 address column, you’ll have your public IPs.
If you work in a sensitive business area such as financial technology, enabling the Static IP feature can help fulfil the security requirements of some of the external services you use, therefore making it easier for you to get whitelisted by them.
This feature has been activated by default. Since February 1, 2024, AWS charge public IPv4 Addresses. Disabling it may cost you more, depending on the number of nodes in your cluster. Check this link for more information.
Virtual Private Cloud (VPC) peering allows you to set up a connection between your Qovery VPC and another VPC on your AWS account. This way, you can access resources stored on your AWS VPC directly from your Qovery applications.
A VPC can only be used if it has at least one range of IP addresses called a subnet. When you create a cluster, Qovery automatically picks a default subnet for it. However, to perform VPC peering, you may want to define which specific VPC subnet you want to use, so that you can avoid any conflicting settings. To do so, you can enable the Custom VPC Subnet feature on your cluster. For more information on how to set up VPC peering, see our dedicated tutorial.
In the Ready to install your cluster window, check that the services needed to install your cluster are correct.
You can now press the Create and Install button.
Your cluster is now displayed in your organization settings, featuring the Installing... status (orange status). Once your cluster is properly installed, its status turns to green and you will be able to deploy your applications on it.
You can follow the execution of the action via the cluster status and/or by accessing the Cluster Logs
Qovery allows you to modify the resources allocated for your cluster:
In the Instance type dropdown menu, select the type of worker node(s) you want to deploy to your cluster.
(AWS users only) In the Node disk size (GB) field, enter the disk capacity you want to allocate to your worker node(s) (meaning how much data, in gigabytes, you want each worker node to be able to hold).
(EKS users only) On the Nodes auto-scaling, define the range of worker nodes you want to deploy to your cluster.
Instance type selection from your Qovery Console has direct consequences on your cloud provider’s bill. While Qovery allows you to switch to a different instance type whenever you want, it is your sole responsibility to keep an eye on your infrastructure costs, especially when you want to upsize.
The lowest number is the number of worker nodes running on your infrastructure at any time, while the highest number is the maximum number of worker nodes that can automatically be deployed as traffic grows.
Please note that a minimum of 3 worker nodes is required to deploy your EKS cluster.
In this tab, you will see that a container registry already exists (called registry-{$UIID}).
This is your cloud provider container registry, used by Qovery to manage the deployment of your applications by mirroring the Docker images.
The credentials configured on this registry are the ones used to create the cluster, but you can still update them if you prefer to manage them separately (a dedicated pair of credentials just to access the registry).
The Features tab in your cluster settings allows you to check if the Static IP, Custom VPC subnet, Deploy on existing VPC features are enabled on your cluster. The enabled features cannot be changed after the creation of the cluster.
The Network tab in your cluster settings allows you to update your Qovery VPC route table so that you can perform VPC peering. For step-by-step guidelines on how to set up VPC peering, see our dedicated tutorial.
Click on GCP as hosting mode and then Qovery Managed option:
In the Create Cluster window enter:
Cluster name: enter the name of your choice for your cluster.
Description: enter a description to better identify your cluster.
Production cluster: select this option if your cluster will be used for production.
Region: select the geographical area in which you want your cluster to be hosted.
Credentials: select one of the existing cloud provider credentials or add a new one by clicking on New Credentials. In the New credentials window, add the credentials that you have generated on your cloud provider console (Procedure for GCP account). Added credentials can be used later to create and manage additional clusters.
To confirm, click Next.
In the Features step, select the features you want to enable on your cluster.
If you want to manage the network layer of your cluster by yourself, you can switch VPC mode to Deploy on my existing VPC to use your own VPC instead of the one provided by Qovery.
These options can only be configured during cluster creation and cannot be modified later.
The Static IP feature is currently only available to clusters deployed with a VPC managed by Qovery and can only be enabled at cluster creation.
By default, when your cluster is created, its worker nodes are allocated public IP addresses, which are used for external communication. For improved security and control, the Static IP feature allows you to ensure that outbound traffic from your cluster uses specific IP addresses.
Here is what will be deployed on your cluster:
Cloud Nats
Static IPs
Routers
Once set up, here is the procedure to find your static IP addresses on GCP:
On your GCP account, select the IP addresses service.
In the list you will find your static IP used by your cluster router.
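The same information is available from the gcloud CLI (read-only):

```bash
# List the static external IP addresses in the project; the ones
# attached to the cluster's Cloud NAT router are your egress IPs.
gcloud compute addresses list --filter="addressType=EXTERNAL"
```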
If you work in a sensitive business area such as financial technology, enabling the Static IP feature can help fulfil the security requirements of some of the external services you use, therefore making it easier for you to get whitelisted by them.
In the Ready to install your cluster window, check that the services needed to install your cluster are correct.
You can now press the Create and Install button.
Your cluster is now displayed in your organization settings, featuring the Installing... status (orange status). Once your cluster is properly installed, its status turns to green and you will be able to deploy your applications on it.
You can follow the execution of the action via the cluster status and/or by accessing the Cluster Logs
In this tab, you will see that a container registry already exists (called registry-{$UIID}).
This is your cloud provider container registry, used by Qovery to manage the deployment of your applications by mirroring the Docker images.
The credentials configured on this registry are the ones used to create the cluster, but you can still update them if you prefer to manage them separately (a dedicated pair of credentials just to access the registry).
The Features tab in your cluster settings allows you to check if the Static IP, Deploy on existing VPC features are enabled on your cluster. The enabled features cannot be changed after the creation of the cluster.
For more information, please contact your cloud provider.
To permanently delete a cluster and all its associated costs, see Deleting a Cluster.
To temporarily stop a cluster, select the Stop action from the drop-down menu.
A confirmation pop-up window opens before triggering the action.
Once confirmed, the status of your cluster turns to Pausing... (orange status).
Once the stop is complete, the status dot next to your cluster turns to grey, and the status of your cluster turns to Paused (gray status).
To delete a cluster, open the ... section and press Delete Cluster.
Three options are available when deleting a cluster:
1) Default
This is the default behaviour. Choose this option whenever you want to properly delete a cluster from both the Qovery console AND your cloud provider account.
This operation will delete:
Cloud provider: any resource created by Qovery on your cloud provider account to run this cluster will be deleted, including any application running on it.
Qovery organization: the configuration of this cluster and any linked environment.
Please note that you will have to manually delete on your cloud account:
the S3 bucket created at cluster installation
the image registry linked to this cluster
any resource created by a lifecycle job that will not be properly deleted during the environment deletion event.
Check this section to find these elements and delete them.
2) Delete Cluster on cloud provider and Qovery configuration
This option shall be chosen when the cluster delete operation with the Default option fails because you have manually modified or deleted the RDS instances created by Qovery on your cloud provider account.
This operation will delete:
Cloud provider: any resource created by Qovery on your cloud provider account to run this cluster will be deleted, including any application running on it.
Qovery organization: the configuration of this cluster and any linked environment.
Please note that you will have to manually delete on your cloud account:
the S3 bucket created at cluster installation
the image registry linked to this cluster
any managed database that was created via Qovery
any resource created by a lifecycle job that will not be properly deleted during the environment deletion event.
Check this section to find these elements and delete them.
3) Delete Qovery config only
This option shall be chosen when you have already deleted any Qovery resource on your cloud account and you want to delete the cluster object from your Qovery console.
This operation will delete:
Cloud provider: nothing will be removed from your cloud account. You will have to manually delete any resource created by Qovery directly from your cloud provider console.
Qovery organization: the configuration of this cluster and any linked environment.
Check this section to find these elements and delete them.
Once confirmed, the cluster status turns to Deleting... (red status) and once the deletion is complete, the cluster is removed from your organization settings.
To get the cluster filtered audit logs, open the ... section and press See audit logs.
You will be redirected to the audit logs section. A filter on the dedicated cluster will be applied. You only see the audit logs regarding cluster operations.
Qovery allows you to access the logs of your cluster in order to follow its installation or investigate any issue happening on it.
To access the logs you need to open the cluster, click the log button
A new window is opened, displaying the logs of the cluster.
The tab system on the right allows you to access the cluster information and, if an error occurs, the detail of the error.
The error message should provide you with enough information to solve the issue. If that's not the case, feel free to ask for support on our forum.
#Use custom domain and wildcard TLS for the whole cluster (beta)
By default, Qovery provides a domain (e.g. bool.sh) on every deployed cluster. It is used to provide a DNS record and TLS certificate to every application requiring external access on a cluster.
You can customize the domain for every application. However, once you have more than 100 custom domains under the same domain, you will hit Let's Encrypt quotas.
To overcome this issue, you can use a wildcard TLS certificate for the whole cluster. It will allow you to have as many DNS records for a single domain as you want on the same cluster with a single TLS certificate.
At the moment, Qovery only supports wildcard TLS certificates with Cloudflare. To use it, you need to have a Cloudflare account and a domain name managed by Cloudflare. If you don't have one, you can create a free account and transfer your domain to Cloudflare.
Once you have a Cloudflare account and a domain name managed by Cloudflare, you need to create a Cloudflare API token. Go into your Cloudflare account, click on your profile picture, then My Profile. In the API Tokens section, click on Create Token. In the Create Custom Token section, select the following permissions:
API token name: give it a descriptive name, e.g. "Qovery domain <your domain name>"
Permissions:
Zone - DNS - Edit
Zone - Zone - Read
Zone Resources:
Include - Specific zone - your domain name
To finish, click on Continue to Summary and Create Token. Save the token somewhere safe, you will need it later.
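Before handing the token over, you can verify it works against Cloudflare's official token verification endpoint:

```bash
# Returns "status": "active" in the JSON response if the token is valid.
curl -H "Authorization: Bearer <your-cloudflare-api-token>" \
  "https://api.cloudflare.com/client/v4/user/tokens/verify"
```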
Prepare the token, the Cloudflare account email, and the domain to be set on your cluster, then contact Qovery and request to use your domain.
The following troubleshooting procedure is intended for AWS users who did not properly delete their cluster before revoking Qovery's access to their platform.
To properly delete your clusters and avoid any unexpected issues or costs, see Deleting a Cluster.
To clean up a Qovery cluster from your cloud provider account, go to AWS Console>Services>Management & Governance>Resource Groups & Tag Editor> Create Resource Group:
1. In the Group type area, select Tag based.
2. In the Tags field of the Grouping criteria area, enter ClusterId.
3. Click Add.
4. Click Preview Resources. All your Qovery clusters are now displayed in the Group resources table, and you can delete them by hand.
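The same inventory can be pulled from the CLI before deleting anything by hand (read-only command):

```bash
# List the ARNs of every resource tagged with a ClusterId key.
aws resourcegroupstaggingapi get-resources \
  --tag-filters Key=ClusterId \
  --query 'ResourceTagMappingList[].ResourceARN'
```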
Qovery is built on top of Kubernetes, which means we need Kubernetes clusters to be able to deploy and run your applications.
Thanks to clusters, you can easily deploy several (and many) instances of the same application, so that if one fails, the others can instantly take over. Also, clusters can auto-scale, meaning that the number of worker nodes in a cluster can automatically go up or down as traffic fluctuates on your application(s), thus ensuring high availability and performance. Clusters are also extremely useful to isolate your production environment from your staging environment.
In short, through the use of clusters, Kubernetes provides you with a resilient, flexible and powerful infrastructure, fit for production environment needs and requirements. And with the help of Qovery, setting up and maintaining your Kubernetes clusters has never been easier.
#What are the different instance types available when creating a cluster?
The range of instance types available at cluster creation depends on your cloud provider:
GCP clusters are deployed in auto-pilot mode, so you will have access by default to every instance type available
Qovery supports only instance types with an x86_64 or ARM architecture.
#How does Qovery handle Kubernetes version upgrades?
As far as cluster upgrades to a newer version of Kubernetes are concerned, our Qovery engineering team handles everything in due time, so you don’t even need to think about it!
You may notice that Qovery does not provide you with the latest Kubernetes version offered by your cloud provider. This is due to the high amount of testing we need to perform to ensure smooth upgrades with no interruptions for your applications. Our priority is always to guarantee you maximum uptime.
Please DO NOT upgrade the cluster version by yourself from the cloud provider console.
Proceeding with a cluster version upgrade outside of Qovery will prevent any future update on this cluster and might be irreversible, preventing Qovery from properly deploying on it. Most importantly, it will expose you to unknown, untested areas which can put your application's stability at risk.
If you did upgrade by mistake, reach out to the Qovery team to get some help.
Usually, we work on a given upgrade for one month of intensive testing on our end in order to make sure everything will be smooth for you. Once we are pretty confident our stack is stable, we move on with the following steps which last approximately 3 weeks:
Notify users about the upcoming version approximately 1 month in advance
Upgrade clusters for a handful of beta-tester customers (1 week)
Make the new version available for all clusters (new or existing)
Upgrade all non-production flagged clusters (1 week)
Upgrade all production clusters (1 week)
If the planning or timeframe for the upgrade clashes with your business needs, you can trigger the upgrade of your cluster manually via the "Upgrade to XX.XX" action available from the Play menu of your cluster. This action becomes available once we make the new version available globally (step 3); you will notice that the Play button of your cluster is highlighted in yellow.
Click on Scaleway as hosting mode and then Qovery Managed option:
In the Create Cluster window enter:
Cluster name: enter the name of your choice for your cluster.
Description: enter a description to better identify your cluster.
Production cluster: select this option if your cluster will be used for production.
Region: select the geographical area in which you want your cluster to be hosted.
Credentials: select one of the existing cloud provider credentials or add a new one by clicking on New Credentials. In the New credentials window, add the credentials that you have generated on your cloud provider console (Procedure for Scaleway account). Added credentials can be used later to create and manage additional clusters.
To confirm, click Next.
In the Set Resources window, select:
Instance type: select the type of worker nodes you want to deploy to your cluster.
Disk size: select the size of the disks to be attached to your cluster instances (to locally store container images, etc.).
Node auto-scaling: define the minimum and the maximum number of worker nodes that your cluster can run. The lowest number is the number of worker nodes running on your infrastructure at any time, while the highest number is the maximum number of worker nodes that can automatically be deployed as traffic grows. Please note that a minimum of 3 worker nodes is required to deploy your cluster.
Instance type selection from your Qovery Console has direct consequences on your cloud provider’s bill. While Qovery allows you to switch to a different instance type whenever you want, it is your sole responsibility to keep an eye on your infrastructure costs, especially when you want to upsize.
Please be aware that changing the instance type or disk size might cause a downtime for your service.
Also, before downsizing, you need to ensure that your applications will still have enough resources to run correctly.
To confirm, click Next.
In the Ready to install your cluster window, check that the services needed to install your cluster are correct.
You can now press the Create and Install button.
Your cluster is now displayed in your organization settings, featuring the Installing... status (orange status). Once your cluster is properly installed, its status turns to green and you will be able to deploy your applications on it.
You can follow the execution of the action via the cluster status and/or by accessing the Cluster Logs
In this tab, you will see that a container registry already exists (called registry-{$UIID}).
This is your cloud provider container registry, used by Qovery to manage the deployment of your applications by mirroring the Docker images.
The credentials configured on this registry are the ones used to create the cluster, but you can still update them if you prefer to manage them separately (a dedicated pair of credentials just to access the registry).
Any additional environment variable can be added later from the environment variable section
You will find a recap of your job setup and you can now decide to:
1. Go back to one of the previous steps and change your settings
2. Create your job without deploying it
3. Create and deploy your job
You can force the execution of a job independently of its deployment status by:
Selecting the job that you want to force
Clicking on the Play button of the cronjob you want to force and selecting the Force Run option. Note: the same option is available on the service list as well
Once you click, the job will be deployed and executed once. You will be able to follow its execution within the application logs
If your repository contains private submodules using the SSH protocol, you will need to add a secret beginning with GITSSH_KEY, containing a private SSH key with access rights to your submodule repositories (a hedged key-generation sketch follows).
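A minimal sketch for producing such a key; the GITSSH_KEY_SUBMODULES secret name is a hypothetical choice of ours (only the GITSSH_KEY prefix matters):

```bash
# Generate a dedicated deploy key for the submodule repositories.
ssh-keygen -t ed25519 -C "qovery-submodules" -f ./qovery_submodules_key -N ""

# Next steps (manual):
# 1. Add qovery_submodules_key.pub as a read-only deploy key on each
#    submodule repository.
# 2. Paste the contents of ./qovery_submodules_key into a Qovery secret
#    named e.g. GITSSH_KEY_SUBMODULES.
```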
If your application is deployed from an image registry, within this section you can modify:
Registry: select the container registry storing the image of your application. Note: only pre-configured registries are available in this list; check the Container Registry Management page for more information.
Image name: the name of the image to be deployed with this application (example: postgres)
Image tag: the tag of the image to be deployed with this application (example: 12)
The tag 'latest' is not supported, please use a specific tag.
You can modify here the configuration of your job:
CRON Schedule: specify a valid CRON expression (see Crontab guru for help; a few sample expressions follow this list). After being deployed, the job will be executed following the defined schedule.
Timezone: select a valid timezone identifier. After being deployed, the job will be executed following the defined timezone. Etc/UTC is the default value.
Image Entrypoint: the entrypoint to be used to launch your job (not mandatory)
CMD Arguments: the arguments to be passed to launch your application (not mandatory) separated with a space. Example: rails -h 0.0.0.0 -p 8080 string "complex arg".
Number of restarts: maximum number of restarts allowed in case of job failure (0 means the job will not be restarted)
Max duration time in seconds: maximum duration allowed for the job to run before killing it and marking it as failed
Port: port used by Kubernetes to run readiness and liveness probe checks. The port will not be exposed externally
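For reference, a few standard 5-field crontab expressions (evaluated in the timezone configured above):

```bash
# minute hour day-of-month month day-of-week
# */15 * * * *     every 15 minutes
# 0 8 * * 1-5      at 08:00, Monday through Friday
# 0 0 1 * *        at midnight on the first day of every month
```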
To configure the number of CPUs that your job needs, adjust the setting in the Resources section.
Default is 500m (0.5 vCPU).
Please note that in this section you configure the CPU allocated by the cluster for your application; the application cannot consume more than this value. Even if the application is underused and consumes fewer resources, the cluster will still reserve the selected amount of CPU.
To configure the amount of RAM that your app needs, adjust the setting in Resources section.
Default is 512MB.
Please note that in this section you configure the RAM allocated by the cluster for your application; the application cannot consume more than this value. Even if the application is underused and consumes fewer resources, the cluster will still reserve the selected amount of RAM. If your application requires more RAM than requested, it will be killed by Kubernetes (OOMKilled).
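If you have direct access to the cluster, an out-of-memory kill is visible in the pod state. A hedged sketch assuming kubectl access and hypothetical pod/namespace names:

```bash
# Inspect the last container state; "Reason: OOMKilled" confirms the
# container exceeded its memory limit.
kubectl describe pod <pod-name> -n <namespace> | grep -A 2 "Last State"
```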
This section allows you to specify which changes to your repository should trigger an auto-deploy (if enabled). To know more about how to configure your Deployment Restrictions, have a look at the deployment restrictions section.
You can create a clone of the service via the clone feature; a new service with the same configuration (see below for exceptions) will be created in the target environment.
The target environment can be the same as the current environment, or another one in a completely different project.
Important information
For consistency reasons, not every configuration parameter is copied to the new service. The configuration is fully or partially copied depending on the target environment:
same environment:
custom domain: this setup is not copied into the new service (to avoid collision)
another environment:
custom domain: this setup is not copied into the new service (to avoid collision)
environment variable: aliases defined on environment variables are not copied (since the aliased env var might not exist)
deployment pipeline: stage setup is not copied (since the target stage might not exist)
number of instances: if the target environment runs on a Qovery EC2 cluster, the max number of instances is set to 1 (Qovery EC2 constraint)
Please check the configuration of the new service before deploying it.
You can force the execution of a job independently its deployment status by:
Select the job that you want to force
click on the Play button of the cronjob you want to force and select the Force Run option. Note: the same option is available on the service list as well
Once you click, the job will be deployed and executed once. You will be able to follow its execution within the application logs
If your repository contains private submodules using SSH protocol, you will need to add a secret beginning with GITSSH_KEY, containing a private SSH key with access rights to your sumbodules repositories.
If your application is deployed from an image registry, within this section you can modify:
Registry: select the container registry storing the image of your application. Note: only pre-configured registry are available in this list, check the Container Registry Management page for more information.
Image name: the name of the image to be deployed with this application (example: postgres)
Image tag: the tag of the image to be deployed with this application (example: 12)
The tag 'latest' is not supported, please use a specific tag.
You can modify here the configuration of your job:
CRON Schedule: specify a valid CRON expression (see Crontab guru for help). After being deployed, the job will be executed following the defined schedule.
Timezone: select a valid timezone identifier. After being deployed, the job will be executed following the defined timezone. Etc/UTC is the default value.
Image Entrypoint: the entrypoint to be used to launch your job (not mandatory)
CMD Arguments: the arguments to be passed to launch your application (not mandatory) separated with a space. Example: rails -h 0.0.0.0 -p 8080 string "complex arg".
Number of restarts: Maximum number of restarts allowed in case of job failure (0 means no failure)
Max duration time in seconds: Maximum duration allowed for the job to run before killing it and mark it as failed
Port: Port used by Kubernetes to run readiness and liveliness probes checks. The port will not be exposed externally
To configure the number of CPUs that your job needs, adjust the setting in the Resources section.
Default is 500m (0.5 vCPU).
Please note that in this section you configure the CPU allocated by the cluster to your application; the application cannot consume more than this value. Even if the application is underused and consumes fewer resources, the cluster will still reserve the selected amount of CPU.
To configure the amount of RAM that your app needs, adjust the setting in the Resources section.
Default is 512MB.
Please note that in this section you configure the RAM allocated by the cluster to your application; the application cannot consume more than this value. Even if the application is underused and consumes fewer resources, the cluster will still reserve the selected amount of RAM. If your application requires more RAM than requested, it will be killed by the Kubernetes scheduler.
This section allows you to specify which changes to your repository should trigger an auto-deploy (if enabled). To learn more about how to configure your Deployment Restrictions, have a look at the deployment restrictions section.
You can create a clone of the service via the clone feature. A new service with the same configuration (see below for exceptions) will be created in the target environment.
The target environment can be the same as the current environment or even another one in a completely different project.
Important information
Not every configuration parameter is copied to the new service, for consistency reasons. The configuration is fully or partially copied depending on the target environment:
same environment:
custom domain: this setup is not copied into the new service (to avoid collision)
another environment:
custom domain: this setup is not copied into the new service (to avoid collision)
environment variable: aliases defined on environment variables are not copied (since the aliased env var might not exist)
deployment pipeline: stage setup is not copied (since the target stage might not exist)
number of instances: if the target environment runs on a Qovery EC2 cluster, the max number of instances is set to 1 (Qovery EC2 constraint)
Please check the configuration of the new service before deploying it.
Qovery natively lets you deploy and access the most popular SQL and NoSQL databases available on the major cloud providers. Reliability and resiliency are at the heart of these providers' services, so you don't have to worry about your data on Qovery.
Qovery natively supports the following databases:
PostgreSQL
MySQL
MongoDB
Redis
Qovery can natively operate a database in two different ways (called "Mode"):
Container mode: preferred for testing and development
Managed mode: preferred for production, limited configuration parameters (see the Configuration section).
If the natively supported databases or operation modes are not enough for you, depending on your use case you have the following alternative solutions:
Use an existing DB on a dedicated VPC: your applications can access this database via VPC peering. Have a look at this guide for more information.
Create your custom database via Qovery: you will be able to deploy any kind of database through Qovery by using a lifecycle job. For example, you can use a Terraform script to deploy a custom RDS instance on AWS (have a look at this example).
The following sections will show you how you can create and manage the databases natively supported by Qovery. For any other use case, please refer to the guides provided above.
The database is created as a container with attached persistent storage directly on your Kubernetes cluster (1 instance). Container databases are perfect for development and testing, as they are significantly cheaper than the services provided by cloud providers.
Qovery creates and manages the lifecycle of a cloud provider managed database instance (for example an RDS instance on AWS). These are perfect for production since they guarantee the right level of resilience, performance and data security best practices.
Since Qovery manages the lifecycle of your database, DO NOT change the database settings directly from within the cloud provider console (to avoid configuration drifts).
Once you request a change to the version, instance type or disk size of your Managed database, the cloud provider applies the update based on its own internal rules, which might cause downtime of your database.
For example, by default AWS doesn't apply major updates immediately on the database; instead, it waits for a maintenance window. This means that your change will not be applied immediately, but you can always force the change directly from your AWS console AFTER having applied the change on Qovery (to avoid configuration drifts).
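If you prefer the command line to the AWS console, the pending change can typically be inspected and forced with the AWS CLI; a sketch, assuming a hypothetical instance ARN and a pending engine upgrade:

# List maintenance actions still pending on your RDS resources.
aws rds describe-pending-maintenance-actions
# Apply a pending engine upgrade immediately instead of waiting for the window.
aws rds apply-pending-maintenance-action \
  --resource-identifier arn:aws:rds:eu-west-3:123456789012:db:my-db \
  --apply-action db-upgrade \
  --opt-in-type immediate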
Have a look at your cloud provider documentation to know more about how version upgrades are managed:
Select database type, name, description (optional), version, mode and accessibility
Please refer to the Configuration section below to know more about each of these parameters.
Extra labels/annotations (optional)
Add your extra annotation/label groups. See the Add annotation/label group section for more information.
Annotation groups are not supported for managed databases.
Within the "Resources" step you will find different configurations based on the selected mode:
If you are using the database in Container mode, you can set the CPU, RAM and storage that will be assigned to the instance running the docker image of the database.
If you are using the database in Managed mode, you can select the instance type and the storage that will be assigned to the instance running the database. Note: the selected instance type has a direct impact on your cloud provider costs.
At the end, a recap will allow you to either just create the database or create and deploy it.
As described at the beginning of this document, databases can operate in two modes:
Managed
Container
Managed databases are perfect for production - they are provided and managed by major cloud providers like AWS to make sure your production data is well managed.
Container databases are managed by Qovery as Docker containers with attached persistent storage directly on your Kubernetes cluster (1 instance). They are perfect for development and testing, as they are significantly cheaper than services provided by cloud providers.
Please refer to the dedicated database sub-pages to get more information on the supported mode for each cloud provider.
We regularly update the versions available for each database. Please refer to the dedicated database sub-pages to get more information on the supported versions for each database type and cloud provider.
You can upgrade the version of your database directly from the Qovery interface.
Any change to this field will not be applied immediately to your database; check the managed mode section.
Add your extra annotation/label groups. See the Add annotation/label group section for more information.
Annotation groups are not supported for managed databases.
When a database is created in your environment, Qovery will automatically create and inject a set of BUILT_IN environment variables containing all the parameters necessary for your application to connect to the database.
This is the list of environment variables and secrets that will be automatically created:
Name | Description | Example
QOVERY_<DATABASE_TYPE>_<DBID>_DEFAULT_DATABASE_NAME | Env var containing the default database name | postgres
QOVERY_<DATABASE_TYPE>_<DBID>_HOST | Env var containing the external hostname of the database (if you need access from the outside and the DB is configured with visibility "PUBLIC") | zf5206c84-postgresql.oom.sh
QOVERY_<DATABASE_TYPE>_<DBID>_HOST_INTERNAL | Env var containing the internal hostname of the database (if you need to access it from within the cluster network) | zf5206c84-postgresql
QOVERY_<DATABASE_TYPE>_<DBID>_LOGIN | Env var containing the username of the DB | superuser
QOVERY_<DATABASE_TYPE>_<DBID>_PORT | Env var containing the port to be used for connecting to the DB | 5432
QOVERY_<DATABASE_TYPE>_<DBID>_DATABASE_URL | Secret containing the external URI to be used for connecting to the DB (if you need access from the outside and the DB is configured with visibility "PUBLIC")
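For example, an application could assemble a connection string from these variables; a minimal sketch, assuming a PostgreSQL database whose <DBID> is the hypothetical ZA1B2C3D4:

# Read the BUILT_IN variables injected by Qovery (the ZA1B2C3D4 id is made up).
DB_HOST="$QOVERY_POSTGRESQL_ZA1B2C3D4_HOST_INTERNAL"
DB_PORT="$QOVERY_POSTGRESQL_ZA1B2C3D4_PORT"
DB_USER="$QOVERY_POSTGRESQL_ZA1B2C3D4_LOGIN"
DB_NAME="$QOVERY_POSTGRESQL_ZA1B2C3D4_DEFAULT_DATABASE_NAME"
# Connect from within the cluster network using the internal hostname.
psql "postgresql://$DB_USER@$DB_HOST:$DB_PORT/$DB_NAME"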
You can create a clone of the service via the clone feature. A new service with the same configuration (see below for exceptions) will be created into the target environment.
The target environment can be the same as the current environment or even another one in a completely different project.
Important information
Not every configuration parameter will be copied within the new service for consistency reasons. The configuration is fully or partially copied depending on the target environment:
same environment:
custom domain: this setup is not copied into the new service (to avoid collision)
another environment:
custom domain: this setup is not copied into the new service (to avoid collision)
environment variable: aliases defined on environment variables are not copied (since the aliased env var might not exist)
deployment pipeline: stage setup is not copied (since the target stage might not exist)
number of instances: if the target environment runs on a Qovery EC2 cluster, the max number of instances is set to 1 (Qovery EC2 constraint)
Please check the configuration of the new service before deploying it.
Note that only the instance configuration will be copied, not the data contained within the database.
As Managed Services databases (like RDS) are mainly used for production, Qovery does not delete automated snapshots and backups on deletion.
It is up to the user or the cloud provider administrator to delete them manually.
Your Docker image must contain the TLS certificate of the MongoDB cluster - it can be downloaded here.
The application must be configured to use it. If you use the environment variables to build the URI to connect to your database, you usually just need to append &ssl_ca_certs=/path/to/the/rds-combined-ca-bundle.pem to its value.
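A minimal sketch, assuming the DATABASE_URL secret injected by Qovery (the ZA1B2C3D4 id is hypothetical) and the CA bundle copied into the image at the path below:

# Append the CA bundle parameter to the URI built by Qovery.
MONGODB_URI="${QOVERY_MONGODB_ZA1B2C3D4_DATABASE_URL}&ssl_ca_certs=/path/to/the/rds-combined-ca-bundle.pem"
export MONGODB_URI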
Have a look at the Database page to know more about the database creation and setup.
MySQL is the world's most popular open source database. Whether you are a fast growing web property, technology ISV or large enterprise, MySQL can cost-effectively help you deliver high performance, scalable database applications.
PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance.
Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache and message broker. It supports data structures such as strings, hashes, lists, sets, sorted sets with range queries, bitmaps, hyperloglogs, geospatial indexes with radius queries and streams.
your development environments to a cheaper cloud account while still keeping your production using the most reliable services provided by the more expensive cloud provider.
You can set up your Rules at Project and Environment levels. Rules set up at the Project level will be automatically applied to newly created Environments you target in the rule.
If, however, the default settings applied by the Project-level rule do not meet your needs, you can override the settings at the Environment level later on.
Declaring deployment rules at the project level allows you to apply reasonable defaults to all newly created environments. After a new environment within a project is created, rules from the Project are applied to the Environment. However, to keep things flexible, Qovery allows you to override the rules after environment creation at the Environment level, in Environment settings.
Selecting the cluster allows you to control on which cluster your environments in the project will be deployed.
Example use cases
deploy your development environments on a more cost effective cluster
deploy your environments in multiple regions
We are re-building this feature, and thus you will always have to select a cluster when manually creating a new environment. This feature still works for preview environments.
Since you can define several rules, it is possible that an environment is targeted by more than one of them.
In order to define which rule applies first to your new environments, you can reorder the list of rules in the deployment setting window.
Starting from the top, the rules are ranked from highest to lowest priority.
Setting up Deployment Rules at the Environment level allows you to adjust, where necessary, the defaults applied by your Project-level rules.
Have a look at this section to know more.
To understand how we handle conflicts, please take a look at the Importation conflicts section.
Configure variables import
On this modal, you can define for each variable the following parameters:
Name: update the variable name
Value: update variable value
Scope: Specify the scope in which you want to import the variable
Secret: Specify if this value is considered as a secret or not
Preset variables
To help you import a large number of variables quickly, you can predefine scope and secret settings.
This will change the scope and secret value of all listed variables.
If the secret and scope of one or more specific variables are subsequently updated, this will not change the predefined setting.
When you have finished the configuration, click on the Import button.
A pop-up message is displayed to inform you that your environment variables have been imported.
To avoid conflicts between already existing and imported environment variables, some of them will not be imported, even if the overwrite option is activated.
The different cases are described below.
Imported variable has the same name as a BUILT_IN variable
Type | Name | Value | Scope
Existing variable: VALUE | MY_VAR | 42 | Built_in
Variable to import: VALUE | MY_VAR | 10 | Application
Built_in environment variables are generated and managed by Qovery and will not be overwritten, even if the overwrite option is activated.
Imported variable has the same name as an existing ALIAS
Type | Name | Value | Scope
Existing variables: VALUE | MY_VAR | 42 | Environment
Existing variables: ALIAS | MY_VAR_ALIAS | MY_VAR | Application
Variable to import: VALUE | MY_VAR_ALIAS | 10 | Application
The value cannot be rewritten because the link between the original variable and the alias would be lost.
Imported variable has the same name as an existing secret (or vice versa)
Type | Name | Value | Scope | Secret
Existing variable: VALUE | MY_VAR | 1 | Application | Yes
Variable to import: VALUE | MY_VAR | 2 | Application | No
The value cannot be imported because this would overwrite the existing secret.
To access a database managed by Qovery from your application, you can use the BUILT_IN environment variables and secrets that have been automatically created by Qovery during the database creation process. You can find all the BUILT_IN variables on the Qovery console within the Environment Variable section of your application (see the credentials and connectivity section for the full list).
In order to match the naming convention of the database connection variables used within your code, you can create an alias for each variable in the Qovery console so that you don't need to change your code.
Once you have defined an alias for each variable, you can redeploy the application and check that it finally has access to the database.
You have created a postgres database on the Qovery console. Within the code of your application you need some environment variables containing the connection parameters of the database: DATABASE_URL, DATABASE_USER, DATABASE_PASSWORD, DATABASE_PORT, DATABASE_NAME
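A sketch of the alias mapping for this scenario (the ZA1B2C3D4 database id is hypothetical, and DATABASE_PASSWORD is assumed to map to the corresponding password secret, not listed in the table above):

# Aliases created in the Qovery console, pointing at the BUILT_IN variables:
#   DATABASE_URL  -> QOVERY_POSTGRESQL_ZA1B2C3D4_DATABASE_URL
#   DATABASE_USER -> QOVERY_POSTGRESQL_ZA1B2C3D4_LOGIN
#   DATABASE_PORT -> QOVERY_POSTGRESQL_ZA1B2C3D4_PORT
#   DATABASE_NAME -> QOVERY_POSTGRESQL_ZA1B2C3D4_DEFAULT_DATABASE_NAME
# After redeploying, the application can use the familiar names:
psql "$DATABASE_URL" -c 'SELECT 1'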
To access another application managed by Qovery, you can use the BUILT_IN environment variables that have been automatically created by Qovery during the creation of that particular application. You can find all the BUILT_IN variables on the Qovery console within the Environment Variable section of your application.
Please note that two BUILT_IN variables might exist:
QOVERY_APPLICATION_<APPID>_HOST_INTERNAL : it contains the INTERNAL host of the application that can be used inside your Kubernetes cluster (and thus by any application running on it)
QOVERY_APPLICATION_<APPID>_HOST_EXTERNAL : it contains the EXTERNAL host of the application that can be used to reach your application from outside your Kubernetes cluster (if the application is publicly exposing one of its ports)
In order to match the naming convention of the connection variables used within your code, you can create an alias for the HOST_INTERNAL variable so that you don't need to change your code.
Once you have defined an alias for each variable, you can redeploy the application and check that it can reach the other application.
You have created a backend application on the Qovery console, and a BUILT_IN variable has been created containing the application HOST, called QOVERY_APPLICATION_Z9D8DAA08_HOST_INTERNAL. Within the code of your front-end application you need an environment variable containing the host of the backend application (BACKEND_HOST).
To match your internal naming convention, you can create an alias for the corresponding variable in this way:
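A sketch of that alias (the /health endpoint is a hypothetical route on the backend):

# Alias created in the Qovery console:
#   BACKEND_HOST -> QOVERY_APPLICATION_Z9D8DAA08_HOST_INTERNAL
# After redeploying, the front-end can reach the backend with its usual name:
curl "http://$BACKEND_HOST/health"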
There are different types of environments that can be defined within Qovery. The type of an environment is also called its mode; it labels the environment and tells others in the organization how it should be used.
Here is the mode you should set depending on the use of your environment.
Environment | Recommended mode | Why
Production | Production | The production environment should not be stopped or deleted by anyone.
Staging | Staging | The staging environment reflects how things work and is sometimes as critical as production for companies.
Development | Development | The development environment is a working environment that can be used to develop and test new features and fixes.
A special mode Preview exists and it is automatically set when a Preview Environment is created on a new pull request. Have a look at this section to know more about preview environments.
You can create a new environment by clicking on the Create environment button of the Environment list page.
A modal will appear that will allow you to specify the following parameters:
Name: give a name to your environment that is easily recognizable by anyone from your team. It is good practice to name your environment production, main or master, staging, dev, fix/xxx or feat/xxx, depending on the purpose of your environment.
On the General tab, you will be able to update your environment name. It will also display the environment mode and the cluster assigned to your environment.
Please note that the associated cluster is not editable after the environment has been provisioned. If you need to edit it, you have to clone the environment onto the desired cluster.
Using Deployment Rules is a good practice to drastically reduce your costs. To know more about the benefits of using them, have a look at the Deployment Rules section.
A default deployment configuration is applied to your environment when it's created, but you can modify this default behaviour by creating a dedicated rule at the project level that will affect any newly created environment matching the condition.
Once created, you can edit the deployment rule of the environment from the deployment rules settings.
Below you can find the description of the deployment rule settings that can be modified for a specific environment.
The Start & Stop section allows you to override the default settings applied by the project rule and precisely set up when the environment should be deployed and cleaned up.
This section allows you to configure the deployment pipeline to be executed when a deployment on the environment is triggered. More in particular, you can define the deployment order of each service within your environment.
You can get more information about the Qovery deployment pipeline and how it works within this section.
Use Preview Environments to get early feedback on your application changes by creating a dedicated environment for each of your pull requests. Your production environment runs 24/7, whereas your other environments may not need to run all day long. For example, you may need to run environments to get early feedback on your application changes before they are merged into production. This is what we call a Preview Environment.
A Preview Environment is also sometimes known as an Ephemeral Environment, Temporary Environment, Development Environment or Review App.
The feature works only for applications deployed from a git repository, but you can still re-create the same behaviour with container images by integrating your CI. Have a look at this section to know how.
It allows you to enable the preview environment feature for the current environment. Any PR opened on a service belonging to this environment will trigger the preview environment flow.
You can define the behaviour to follow for the creation of the preview environments:
On Demand (Flag enabled)
On every PR (Flag disabled)
On Demand Flow
A message is dropped on the PR asking you whether you want to create a preview environment. You will get the list of environments where the preview environment feature is activated (in case you have multiple environments) and the command to add as a comment on your PR to trigger the preview.
You decide whether to create a preview environment by typing the right command as a comment within the PR.
Once the command is added in the comments, the preview creation is triggered: your preview environment is created and its deployment starts.
Once the deployment is completed, an additional comment will be posted on the PR, providing you with the URLs to access your services.
On every PR Flow
Same as above, but the preview environment creation flow is triggered automatically without any user intervention (only steps 3 and 4).
By default the preview environment feature is activated on every service of the environment connected to a git repository. In this section you can decide to activate or deactivate the feature for a specific service.
Application BUILT_IN variables: since completely new services will be created, the original BUILT_IN variables will be replaced. Aliases and overrides are preserved during the clone operation.
You can export the configuration of your environment as a Terraform manifest via the Export as Terraform option. This is helpful when you want to manage your configuration via Terraform: instead of creating the Terraform manifest by hand, you can build the setup via the Qovery interface and export it as a Terraform file.
The export will contain the Terraform definition of the environment and the services within it, as well as all the other resources linked to the environment (organization, cluster, project).
You can decide whether or not the export should contain the secrets defined within the Qovery console.
If you want to override it from another already existing values file from a Git Repository you will have to select:
Git Repository: Select the git provider and git repository hosting your code (it can be hosted on GitHub, GitLab or Bitbucket).
Branch: select the branch that Qovery should use to deploy your helm chart
Overrides path: the path of the values files (example: ci/values_ci.yaml). You can specify multiple paths by separating them with a semi-colon.
Raw YAML
If you want to override it with raw YAML, click on Create override. A new editor modal will open to let you write your YAML override. The default values.yaml content will be displayed on the right to help you respect the structure.
On both file types you can use your environment variables in your chart. Check the section below.
If you want to specify your overrides one by one, or define additional overrides on top of those available in your override file, you can pass them as arguments. These will be passed to the helm command via the --set, --set-string or --set-json arguments.
Add a new variable by declaring:
Override type: select the type of your variable. For more information, have a look at the Helm documentation
Variable: the variable name
Value
You can combine override as file and override as argument but, in case of collision, the priority will be given to the override as argument.
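As an illustration, here is roughly how the three override types map onto helm flags (release and chart names are hypothetical; --set-json requires a recent helm version):

# One flag per override type, matching the Override type setting above.
helm upgrade my-release my-chart \
  --set replicaCount=2 \
  --set-string image.tag="1.16" \
  --set-json 'resources={"limits":{"cpu":"500m"}}'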
You can use your environment variables in your chart. Check the section below.
Qovery allows you to use the following macros within your override file. These macros will be automatically replaced by Qovery during the deployment phase, allowing you to access additional functionalities.
Access to the Qovery environment variables
Macro: qovery.env.<ENV_VAR_NAME>
It allows you to access the value of an environment variable or secret stored within Qovery. This is helpful when your deployed helm chart needs to access a secret or an environment variable available in Qovery.
Example:
On Qovery we have created a database and two aliases for the database URL (DB_URL) and the database password (DB_PASSWORD). Here is an example of how the helm chart can access these environment variables and let your service point to the right database:
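A minimal sketch of such an override file; the database.url and database.password keys are hypothetical chart values, while DB_URL and DB_PASSWORD are the aliases mentioned above:

# The qovery.env.* macros are replaced by Qovery at deployment time.
cat > values_override.yaml <<'EOF'
database:
  url: qovery.env.DB_URL
  password: qovery.env.DB_PASSWORD
EOF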
Within this section you can define the Kubernetes service and port to be exposed publicly over the network. Concretely, Qovery will take care of deploying the right ingress configuration and assign a domain and certificate to reach your service.
You can edit the existing service/port or declare new ones by specifying:
Service name: this is the Kubernetes service name in your helm chart. Qovery automatically fetches the services actually deployed on your Kubernetes cluster
Namespace (only if Allow cluster-wide resources was enabled): this is the Kubernetes namespace used by your helm chart to deploy the pods behind the chosen service
Service port: this is the port exposed internally by your service to the other services. Qovery automatically fetches the ports from your Kubernetes cluster based on the selected service
Protocol: you can select the protocol used by your service. Today Qovery supports the following protocols:
HTTPS (Select this protocol if you need to run Websockets)
gRPC
External port: the port that can be used to access this service over the internet (when exposed publicly). Note that for HTTP and gRPC the port is set by default to 443.
Port Name: the name assigned to the port. When multiple ports are exposed publicly, its value is used to route the traffic to the right port based on the called subdomain (which will contain the port name value). Since each port is exposed on port 443, having a different subdomain is the only way to expose multiple ports over the internet. If not set, the default value is p<portNumber> (see the Qovery Provided Domain section for more information)
This setup is not copied when the helm chart is cloned (via a manual clone or the preview environment feature).
Connections on public ports are automatically closed after 60 seconds. If you want to implement long-lived connections (like for websockets), please make sure to use the right ingress timeouts in the advanced settings section.
Your helm services can be reached from the internet by publicly exposing at least one of their ports (see the Ports section to know more). Once this is done, Qovery will generate and assign a domain to your application (see this section to know more). You can customize the domain assigned to your application via the Domain section in the settings (see this section to know more).
For each port publicly exposed, a domain is automatically assigned by Qovery to your helm services. Qovery will manage for you the networking and the TLS configuration for these domains.
Example: p80-zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh or <service_name>-p80-zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh for helm services.
Note:
each service deployed on the same cluster will have the same root domain assigned (example: za8ad0657.bool.sh)
the first characters of the domain (before the -) are based on the portName given to the port associated with this domain (see the port section)
a default domain (without the portName) is assigned to the default port (see the port section). Example: zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh
Special Case - Preview Environment
For each port exposed publicly, an additional domain will be created with the following pattern portName-prId-srvName-envSourceName.cluster_domain:
portName: is the port name, as explained above
prID: is the id of the PR that has generated the preview environment
srvName: is the name of the service
envSourceName: is the name of the blueprint environment that has created the current preview environment
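For example, with portName p80, a PR id of 1234, a service named backend and a source environment named staging (all values hypothetical), the generated domain would look like p80-1234-backend-staging.za8ad0657.bool.sh.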
If you prefer to assign your own domain to the helm services, you can customize it from the "Domain" section within the helm services settings.
You can customize the domain of your helm services in different ways, depending on what you want to achieve:
You want to use your own domain for your helm services
You want to modify the subdomain assigned to your helm services by Qovery (i.e. change p80-zdf72de72-z709e1a88-gtw.za8ad0657.bool.sh into my-app-domain.za8ad0657.bool.sh). See this section to know more about these domains.
In both cases, you can assign the new custom domain by pressing the Add Domain button.
This configuration will be automatically removed on every cloned environment or preview environment in order to avoid domain collision.
For each custom domain, a green checkmark will appear if the domain is correctly configured. You can perform another check by clicking on the checkmark. If you're behind a CDN, we will only check if your custom domain resolves to an IP address.
If there's an issue with a domain, a global error message will be displayed, and you can view the error details by hovering over the red cross. After correcting your configuration, you can perform another check by clicking on the red cross.
Once the domain is added within the Qovery console (Example: mydomain.com), you need to configure within your DNS two CNAME records pointing to the domain provided by Qovery, as shown in the UI (example: mydomain.com CNAME za7cc1b71-z4b8474b3-gtw.zc531a994.rustrocks.cloud and *.mydomain.com CNAME za7cc1b71-z4b8474b3-gtw.zc531a994.rustrocks.cloud).
Having a wildcard domain entry (example: *.mydomain.com) configured on your DNS saves you from modifying the Qovery setup every time you want to add a new subdomain. If wildcards are not supported by your DNS provider, you will have to configure each subdomain manually.
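A quick way to verify the records once configured, using the example domains above (dig is assumed to be available):

# The apex record and a wildcard-covered subdomain should both resolve.
dig +short CNAME mydomain.com
dig +short CNAME app.mydomain.com   # matched by the *.mydomain.com wildcard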
If a service needs to expose more than one port publicly, you can define a dedicated subdomain to redirect the traffic on the right port by setting the “Port Name” value within the port settings.
After re-deploying the service, Qovery will automatically handle the TLS/SSL certificate creation and renewal for the configured domain.
If your service is behind a CDN using a proxy mode (i.e. the traffic is routed through the CDN to Qovery), make sure to enable the option Domain behind a CDN and disable the option "Generate certificate" on the domain setup. Since the certificate of your domain is directly managed by the CDN, Qovery won't be able to do that for you and it will raise warnings on your application status.
If you are using Cloudflare to manage your CDN, we can also automatically manage your custom domain configuration via a wildcard domain setup for the whole cluster. Check our documentation here.
You can specify a different sub-domain for your helm services as long as it belongs to the assigned cluster domain (see Qovery provided domains).
Example:
your current domain is zdf72de71-z709e1a85-gtw.za8ad0659.bool.sh (so your assigned cluster domain is za8ad0659.bool.sh)
you can enter a new custom domain myfrontend.za8ad0659.bool.sh (since it is a subdomain of the cluster domain)
The helm services will now be accessible from both the default and the new custom domain.
Qovery does not check collision in the domain declaration. Make sure you assign a unique subdomain within your cluster.
You can create a clone of the service via the clone feature. A new service with the same configuration (see below for exceptions) will be created in the target environment.
The target environment can be the same as the current environment or even another one in a completely different project.
Important information
Not every configuration parameter is copied to the new service, for consistency reasons. The configuration is fully or partially copied depending on the target environment:
same environment:
custom domain: this setup is not copied into the new service (to avoid collision)
another environment:
custom domain: this setup is not copied into the new service (to avoid collision)
environment variable: aliases defined on environment variables are not copied (since the aliased env var might not exist)
deployment pipeline: stage setup is not copied (since the target stage might not exist)
number of instances: if the target environment runs on a Qovery EC2 cluster, the max number of instances is set to 1 (Qovery EC2 constraint)
Please check the configuration of the new service before deploying it.
Qovery expects the output file to be written at the following path: /qovery-output/qovery-output.json (the output folder is automatically mounted by Qovery).
The file should follow this format:
{
"varname1":{
"sensitive":true,
"value":"myvalue"
},
"varname2":{
"sensitive":false,
"value":"myvalue"
}
}
...
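For instance, a job step could produce this file as follows (a minimal sketch; names and values are illustrative):

# /qovery-output is mounted automatically by Qovery.
cat > /qovery-output/qovery-output.json <<'EOF'
{
  "varname1": { "sensitive": true,  "value": "myvalue" },
  "varname2": { "sensitive": false, "value": "myvalue" }
}
EOF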
At the end of the job execution, this file will be processed by Qovery and a set of environment variables will be created, one for each element in the JSON. The information in the JSON file will be mapped to environment variables in this way:
Variable Name: QOVERY_OUTPUT_JOB_<JOBID>_<VARNAME>, where <JOBID> is the id of the job on the Qovery side and <VARNAME> is the name of the element in the output file.
Variable Value: field "value"
Secret: field "sensitive"
An alias <VARNAME> will be automatically created to simplify your setup.
The output (and thus the created environment variables) are displayed in the Lifecycle job overview.
Example
Let's say that the code of our job creates a PostgreSQL RDS instance on AWS. Once the instance is created, the job knows the connection string of the PostgreSQL database. The job can now create a file /qovery-output/qovery-output.json with the following structure:
{
"POSTGRES_DB_HOST":{
"sensitive": false,
"value":"zf138d9c8-postgresql"
},
"POSTGRES_DB_USER":{
"sensitive": false,
"value":"root"
},
"POSTGRES_DB_PASS":{
"sensitive": true,
"value":"mypassword"
},
"POSTGRES_DB_TABLE":{
"sensitive": false,
"value":"MYDB"
},
"POSTGRES_DB_PORT":{
"sensitive": false,
"value":"3600"
}
}
This file will be processed by Qovery and the following environment variables will be created:
Var QOVERY_OUTPUT_JOB_<JOBID>_POSTGRES_DB_HOST
Value: "zf138d9c8-postgresql"
Secret: false
Alias: POSTGRES_DB_HOST
Var QOVERY_OUTPUT_JOB_<JOBID>_POSTGRES_DB_USER
Value: "root"
Secret: false
Alias: POSTGRES_DB_USER
Var QOVERY_OUTPUT_JOB_<JOBID>_POSTGRES_DB_PASS
Value: "mypassword"
Secret: true
Alias: POSTGRES_DB_PASS
Var QOVERY_OUTPUT_JOB_<JOBID>_POSTGRES_DB_TABLE
Value: "MYDB"
Secret: false
Alias: POSTGRES_DB_TABLE
Var QOVERY_OUTPUT_JOB_<JOBID>_POSTGRES_DB_PORT
Value: "3600"
Secret: false
Alias: POSTGRES_DB_PORT
Once the execution of the job has terminated and the environment variables have been created, any application within the same environment will be able to access those environment variables and thus connect to the PostgreSQL instance.
You can force the execution of a job, independently of its deployment status, by:
Selecting the job that you want to force
Clicking on the Play button of the job you want to force and selecting the Force Run option. Note: the same option is available in the service list as well
Selecting the environment event you want to force.
Once you click, the job will be deployed and executed with the entrypoint and arguments associated with the selected event. You will be able to follow its execution within the application logs.
If your repository contains private submodules using the SSH protocol, you will need to add a secret beginning with GITSSH_KEY, containing a private SSH key with access rights to your submodules' repositories.
If your application is deployed from an image registry, within this section you can modify:
Registry: select the container registry storing the image of your application. Note: only pre-configured registries are available in this list; check the Container Registry Management page for more information.
Image name: the name of the image to be deployed with this application (example: postgres)
Image tag: the tag of the image to be deployed with this application (example: 12)
The tag 'latest' is not supported; please use a specific tag.
This section allows you to define when the lifecycle job should be executed and which command should run.
In this section you can configure:
Event: select the environment/job event which should trigger the execution of the job (deploy, stop, delete)
Image Entrypoint: the entrypoint to be used to launch your job (not mandatory).
CMD Arguments: the arguments to be passed to launch your application (not mandatory), separated by spaces. Example: rails -h 0.0.0.0 -p 8080 string "complex arg".
Number of restarts: Maximum number of restarts allowed in case of job failure (0 means the job is not restarted on failure)
Max duration time in seconds: Maximum duration allowed for the job to run before it is killed and marked as failed
Port: Port used by Kubernetes to run readiness and liveness probe checks. The port will not be exposed externally
To configure the number of CPUs that your job needs, adjust the setting in the Resources section.
Default is 500m (0.5 vCPU).
Please note that in this section you configure the CPU allocated by the cluster to your application; the application cannot consume more than this value. Even if the application is underused and consumes fewer resources, the cluster will still reserve the selected amount of CPU.
To configure the amount of RAM that your app needs, adjust the setting in the Resources section.
Default is 512MB.
Please note that in this section you configure the RAM allocated by the cluster to your application; the application cannot consume more than this value. Even if the application is underused and consumes fewer resources, the cluster will still reserve the selected amount of RAM. If your application requires more RAM than requested, it will be killed by the Kubernetes scheduler.
This section allows you to specify which changes to your repository should trigger an auto-deploy (if enabled). To learn more about how to configure your Deployment Restrictions, have a look at the deployment restrictions section.
You can create a clone of the service via the clone feature. A new service with the same configuration (see below for exceptions) will be created in the target environment.
The target environment can be the same as the current environment or even another one in a completely different project.
Important information
Not every configuration parameter is copied to the new service, for consistency reasons. The configuration is fully or partially copied depending on the target environment:
same environment:
custom domain: this setup is not copied into the new service (to avoid collision)
another environment:
custom domain: this setup is not copied into the new service (to avoid collision)
environment variable: aliases defined on environment variables are not copied (since the aliased env var might not exist)
deployment pipeline: stage setup is not copied (since the target stage might not exist)
number of instances: if the target environment runs on a Qovery EC2 cluster, the max number of instances is set to 1 (Qovery EC2 constraint)
Please check the configuration of the new service before deploying it.
This section allows you to define the list of container registries that can be used within your organization. Only images stored on those container registries are allowed to be deployed on your cluster.
You can access this section by opening the Organization Settings -> Container Registries
You can modify an existing container registry by clicking on the "Wheel" button next to it
You can delete an existing container registry by clicking on the "Trash" button next to it.
Before deleting it, make sure that no application within your organization is using an image stored in this registry.
2. Modify the existing token on the Qovery console by updating its value with the token created in step 1.
Once the token is created, you can configure your Qovery services.
In the creation flow of your service, you will be able to either select your own git account or one of the git tokens configured within your organization.
If a git token is selected, Qovery will use that token to access the git repository as long as the token does not expire (see the Token expiration section).
Press the wheel button on the token you want to modify.
Modify the token.
Press the Save button.
Note: if you want to modify the git token configured in Qovery, you can directly edit the token value. This prevents you from having to manually update every application using the old token.
The Qovery GitHub App is being deprecated and will be replaced by git tokens. If you are using the Qovery GitHub App today, please start migrating to the new git token system.
For better control, as a GitHub user, you can install the Qovery Github App, and define which Github repositories Qovery can access.
If you have already one or more applications running on your Qovery Organization, please make sure to give the Qovery Github App access to their repositories. If a repository is missing, you might experience a loss of functionalities for those applications (update, auto-deploy, preview environments, etc.).
You can only link one Github Organization to your Qovery Organization through the Qovery Github App.
Also, once the Qovery Github App is installed, all the members of your Qovery Organization will only have access to the repositories linked to your Qovery Github App.
To install the Qovery Github App:
Open your Qovery Console and access your organization settings:
In the Organization settings menu, click Git Repository Access:
To start the installation process click Install:
A new window opens in your browser so you can install the Qovery Github App on your Github account.
Click the Github account on which you want to install the Qovery Github App:
Click Only select repositories and, in the dropdown menu, define which Github repositories you want to give Qovery access to:
You must give Qovery access to any Github repository linked to an existing Qovery application.
Failure to do so will result in the loss of some functionalities (update, auto-deploy, preview environments, etc.).
To confirm, click Install & Authorize:
You are redirected to your Qovery Console, where the list of authorized Github repositories is updated.
You can update or revoke access to one or multiple Github repositories at any time. To do so, in the Git Repository Access section, click Manage Permission below your Git provider account, and repeat the selection process on the Github website.
Please note that the repositories must belong to the same GitHub organization; we do not yet support a multi-GitHub-organization setup.
To add or remove access to one of your repositories:
Open your Qovery Console and access your organization settings:
In the Organization settings menu, click Git Permission:
Next to your Git provider account, click Manage permission:
Click the Github account on which you want to manage the Qovery Github App access:
Add or remove the repositories you want to give Qovery access to:
Removing access to a GitHub repository linked to an existing Qovery application will result in the loss of some functionality for that application (update, auto-deploy, preview environments, etc.).
Uninstalling the Qovery Github App will result in a loss of some functionalities for all your applications (update, auto-deploy, preview environments, etc.).
To uninstall the Qovery Github App:
Open your Qovery Console and access your organization settings:
In the Organization settings menu, click Git Permission:
Next to your Git provider account, click Disconnect:
The list of authorized Github repositories is updated, meaning Qovery now has access to all of your Github repositories again.
From your browser, access your Github account and open your Settings:
This section allows you to define the list of helm repositories that can be used within your organization. Only helm charts stored in those helm repositories can be deployed on your cluster.
You can access this section by opening the Organization Settings -> Helm Repositories
You can modify an existing helm repository by clicking on the "Wheel" button next to it
-You can delete an existing helm repository by clicking on the "Trash" button next to it
Before deleting it, make sure there is no helm service within your organization using a helm chart stored in this repository.
An organization is a shared account where developers can collaborate across many projects at once. Owners and organization administrators
can manage every aspect of the organization, from the clusters up to the member access.
When signing up for Qovery, you need to sign in through your Git provider (GitHub, GitLab or Bitbucket).
Once this is done, you can create your first organization and the first project within it. Before completing the creation process, you need to choose one of our 3 plans:
Description: enter a description of your organization.
Website: enter the website of your company.
Admin contact emails: enter one or several email addresses (separated by commas) on which you want to receive important communications from Qovery.
We will only use your admin contact email details to send you communications about infrastructure outages, maintenance updates, and weekly and monthly usage reports.
Don't forget to click Update to save your organization information!
diff --git a/docs/using-qovery/configuration/organization/labels-annotations/index.html b/docs/using-qovery/configuration/organization/labels-annotations/index.html
index fefb9e6d64..126d67ecb2 100644
--- a/docs/using-qovery/configuration/organization/labels-annotations/index.html
+++ b/docs/using-qovery/configuration/organization/labels-annotations/index.html
@@ -56,14 +56,14 @@
If this label group was already used in your services, you will have to redeploy them for these changes to be taken into account.
You can delete a label group.
If this label group was already used in your services, you will have to redeploy them to remove the labels linked to your services.
As a Qovery service is mapped to multiple Kubernetes objects (pods, deployments, ingress, etc.), you will be able to define the Kubernetes scope for each annotation group.
You can create a new annotation group by pressing the Add annotation button. You need to provide:
A group name
The different annotations keys/values constituting the group. The key/value have to respect a certain syntax, check the official Kubernetes documentation to learn more.
To ensure that Qovery will be able to continue managing your services, some annotation prefixes are forbidden (this list is not exhaustive):
kubernetes.io/
k8s.io/
appCommitId
checksum/config
checksum/config-mount-files
meta.helm.sh/release-name
meta.helm.sh/release-namespace
all prefixes containing qovery.com
A scope: this allows you to define the kubernetes objects where the extra annotations should be applied. Example: If you make your application accessible publicly and add an annotation group with the scope set to ingress, all annotations within that group will only be added to the ingress of your service.
Example:
Once validated the annotation group will be displayed on the interface.
You can now apply it to your applications, cronjobs, lifecycle jobs and database containers.
Helm is not supported as you can directly add extra annotations within your helm chart.
You can edit your annotation group to add/remove/edit annotations or update the scope.
If this annotation group was already used in your services, you will have to redeploy them for these changes to be taken into account.
You can delete an annotation group.
-If this annotation group was already used in your services, you will have to redeploy them to remove the annotations linked to your services.
Qovery allows you to control the access to your cluster and environment resources by defining and assigning roles to your users.
By default, five roles are created within your organization (Basic Roles):
Owner: the user has full access on the organization. Only one user can be the owner of the organization.
Admin: same as the Owner, the user has full access to the organization but cannot delete it.
DevOps: the user can manage the organization infrastructure (clusters/registry/webhook setup) and manage the deployments of any environment within the organization.
Billing Manager: the user can only manage the billing of the organization
Viewer: the user has read-only access to any section of the organization
More in detail, you can find the associated permissions below:
| Action | Owner | Admin | DevOps | Billing Manager | Viewer |
| --- | --- | --- | --- | --- | --- |
| Read organization | yes | yes | yes | yes | yes |
| Edit organization | yes | yes | no | no | no |
| Delete organization | yes | no | no | no | no |
| Manage billing | yes | yes | no | yes | no |
| Manage members & roles | yes | yes | no | no | no |
| Manage cluster & container registry | yes | yes | yes | no | no |
| Manage organization setup (webhooks, Git and API tokens, etc.) | | | | | |
If the basic roles are not enough given your internal organization, Qovery allows you to customize the accesses to your clusters, projects and environments by defining Custom Roles.
A Custom role allows you to customize:
Cluster Level Permissions: you can specify the access to the existing computing resources (manage cluster X, create environments on cluster Y, read-only access on cluster K)
Project Level Permissions: you can specify the access to the projects and their environments by environment type (deploy type X, create type K, etc.)
Users with a custom role cannot create clusters or manage any of the organization settings (members, webhooks, API tokens, etc.)
To create a custom role, go to the Roles & Permissions section and press "Add new Role".
This section allows you to fine-tune the access to the computing resources. For each cluster of your organization, you will be able to specify an access permission (ordered by permission level):
| Name | Permission Type |
| --- | --- |
| Read-Only | The user can access the cluster information (name, region, etc.). Minimum permission level. |
| Create Environment | The user can create environments on this cluster. Only users with this role can allocate resources for their environments on this cluster. Further environment-level permissions (like deployment rights) are managed via the "Project Permissions", see below. |
| Full Access | The user can create environments on this cluster and manage the cluster's settings (start/stop, change number and type of nodes, etc.). This permission allows a group of users to manage a specific cluster by themselves. |
Project Level Permissions
This section allows you to fine-tune the access to the projects and their environments. The environment access is managed by "Environment Type" to simplify the configuration (Production, Staging, Development, Preview). For each project of your organization and by environment type, you will be able to specify an access permission (ordered by permission level):
| Name | Permission Type |
| --- | --- |
| No Access | The user has no access to this environment type. If the user has "No Access" on all the environment types, they will not have access to the project. |
| Read-Only | Read-only access to this environment type. Useful to restrict access to sensitive environments. |
| Deploy | Manage the deployments of this environment type, access the logs, connect via SSH to the application and manage its environment variables. |
| Manage | Manage the deployments and the settings of this environment type (including adding or removing services). |
| Full Access | The user is an admin of the project and can do everything on it (no matter the environment type). |
Once the role is created, you can assign it to a member of your organization within the "Members" section. You can also update the permissions by editing the role from the Roles & Permissions screen.
An organization has 3 clusters ("prod cluster", "staging cluster", "dev cluster") and 1 project P1. The organization has a CTO, a DevOps and some developers.
The roles & permissions could be configured in this way:
CTO = Owner
Devops = Devops or Admin
Developers: we want these users capable of accessing the project, having read access to the prod clusters/env, managing deployments on the staging cluster (but not creating new environments on it) and doing whatever they want for the development environments on the dev cluster. So the configuration will look like:
Create a new Role “developer” with the following permissions:
Cluster Level Permissions:
Prod cluster → Read-Only
Staging cluster → Read-Only
Dev cluster → Create Environment (they can create environments on this cluster)
Project Level Permissions for the project "P1":
Environment access (by env type)
prod = Read-Only
staging = Deploy (i.e. they can deploy env of type “staging”)
development = Full Access (i.e. they can manage and create env of type “dev”)
An organization has 4 clusters ("prod cluster", "staging cluster", and 2 dev clusters called "dev cluster team 1" and "dev cluster team 2") and 2 projects, P1 and P2. The organization has a CTO, a DevOps and 2 dev teams, with an "acting DevOps" who manages the dev clusters on behalf of the DevOps.
The roles & permissions could be configured in this way:
CTO = Owner
Devops = Devops or Admin
Dev team 1: we want these users capable of accessing the project P1, having no access to the prod env and managing their deployments only on the "dev cluster team 1" for their development environments. So the config will look like:
Create a new Role “Dev Team 1”
Cluster Level Permissions:
Prod cluster → Read-Only
Staging cluster → Read-Only
Dev cluster team 1 → Create Environment (they can create envs only on their dev cluster)
Dev cluster team 2 → Read-Only
Project Level Permissions:
Config on the project “P1”
Environment access (by env type)
prod = no-access
staging = deploy
dev = Full Access (i.e. they can do whatever they want on env of type “dev”)
Config on the project “P2” (i.e. they can't access P2)
Environment access (by env type)
prod = no-access
staging = no-access
dev = no-access
Dev team 2: we want these users capable of accessing the project P2, having no access to the prod env and managing their deployments only on the "dev cluster team 2" for their development environments. So the config will look like:
Create a new Role “Dev Team 2”
Cluster Level Permissions:
Prod cluster → Read-Only
Staging cluster → Read-Only
Dev cluster team 1 → Read-Only
Dev cluster team 2 → Create Environment (they can create envs only on their dev cluster)
Project Level Permissions:
Config on the project “P1” (i.e. they can't access P1)
Environment access (by env type)
prod = no-access
staging = no-access
dev = no-access
Config on the project “P2”
Environment access (by env type)
prod = no-access
staging = deploy
dev = Full Access (i.e. they can do whatever they want on env of type “dev”)
Acting DevOps user: we want this user capable of accessing the project, having read access to the prod env, managing the dev clusters and all the environments on it. So the config will look like this:
A project allows you to group together a set of environments with the objective of running the same application (see the Environment page for more information).
When creating a new organization, a project is created by default. You can customize the access to your project thanks to our RBAC system.
Before you begin, this documentation assumes the following:
diff --git a/docs/using-qovery/configuration/service-health-checks/index.html b/docs/using-qovery/configuration/service-health-checks/index.html
index 3cd1d3acdb..d9ed91c992 100644
--- a/docs/using-qovery/configuration/service-health-checks/index.html
+++ b/docs/using-qovery/configuration/service-health-checks/index.html
@@ -54,14 +54,14 @@
You have a liveness probe configured on port 80 of your application. If during the deployment of your application the probes can't connect to port 80 and we reach a timeout, the deployment fails.
Qovery allows you to manage these probes directly from within the Qovery console during the setup of your application, letting you decide their activation, configuration and check frequency.
Allows you to specify the type of probe you want to run against your application:
NONE: if NONE is selected, the probe is disabled and thus Kubernetes won't be able to verify the state of your application and take the right corrective actions.
We strongly advise against disabling the liveness probe.
HTTP probes are the most common probe type. You can use them if your application is an HTTP server, or if you create a lightweight HTTP server inside your application specifically to respond to such probes. When using an HTTP probe, you need to configure:
a port
a path
Once configured, Kubernetes pings a path (for example: /healthz) at a given port. If it gets a response in the 200 or 300 range, the check passes. Otherwise, it is considered failed and Kubernetes takes the necessary corrective actions.
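To check locally what such a probe will observe, you can query the endpoint yourself; a minimal sketch, assuming a hypothetical app listening on port 8080 with a /healthz path:

```shell
# Simulate the HTTP liveness check against a locally running app.
# Port 8080 and path /healthz are placeholders for your own probe configuration.
# `curl -f` exits non-zero on 4xx/5xx responses, roughly mirroring how the
# probe treats answers outside the 200/300 range.
curl -fsS http://localhost:8080/healthz \
  && echo "probe would pass" \
  || echo "probe would fail"
```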
TCP probes are most often used when HTTP or command probes aren't an option. When using a TCP Liveness probe, Kubernetes tries to establish a connection on the specified port. If the connection is successful, the application is considered healthy. Otherwise, it is considered dead and the container is restarted.
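A TCP probe can be approximated the same way with a plain connection test (the port below is a placeholder):

```shell
# A TCP probe only checks that something accepts a connection on the port.
# nc exits 0 if the connection succeeds within the timeout, non-zero otherwise.
nc -z -w 2 localhost 5432 \
  && echo "probe would pass" \
  || echo "probe would fail"
```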
gRPC probes
When using a gRPC liveness probe, Kubernetes tries to establish a connection on the specified port and service. If the connection is successful, the application is considered healthy. Otherwise, it is considered dead and the container is restarted.
EXEC probes
-Exec probes allow you to define a command to be executed within your container. If the command execution fails, the probe is considered failed.
Allows you to specify an interval, in seconds, between the application container start and the first liveness check.
Allowing additional time for the application to start can be useful when boot time usually takes too long (due to long boot operations), or when the application opens the port before being ready to receive traffic on it (due to a still ongoing boot operation).
Allows you to specify how many consecutive successes are needed, as a minimum, for the probe to be considered successful after having failed previously.
Due to a Kubernetes limitation, this value can only be 1
If your application has a long boot operation to run, your deployment might be marked as failed since the probe can't verify the state of your application within the specified time frame. In this case, you will find in your deployment logs a warning message Liveness probe failed: dial tcp xx.xx.xx.xx:xx: connect: connection refused, telling you that the probe is failing.
If your application needs more time to boot, increase the Initial Delay in seconds of the probes to match the application boot time.
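To pick a sensible Initial Delay, you can roughly measure your application's boot time locally; a minimal sketch, where the start command and port are hypothetical:

```shell
# Measure how long the app takes before it accepts connections, then set the
# probe's Initial Delay slightly above that value.
start=$(date +%s)
./start-my-app &                              # hypothetical start command
until nc -z localhost 8080; do sleep 1; done  # wait for the port to open
echo "app became ready after $(( $(date +%s) - start ))s"
```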
This section shows you some basic information about your account like:
First name: retrieved from your git account, it can't be changed.
Last name: retrieved from your git account, it can't be changed.
Account email: retrieved from your git account, it can't be changed.
Communication email: this email will be used by Qovery to communicate to you any update or ongoing issue on the product. Make sure to set the communication email with a valid email address.
Timezone: used in the Qovery console for the dates display. To change it, modify the timezone used in your browser settings.
By default Qovery uses your account email to share with you important updates on the product. Make sure to set the communication email with your professional email address!
Once you have configured your services and deployed them for the first time, you can decide to automatically update the applications to the latest version of your git branch thanks to the auto-deploy feature.
Each time a commit is pushed on your git repository, Qovery receives a webhook call containing the commit information (who did it, what changed, which branch, etc.). Thanks to this information, Qovery is able to determine which application should be automatically re-deployed with the new version.
By default, every new commit pushed on the branch will trigger a deployment of the application. You can use the Deployment restrictions feature to include or exclude certain files or folders from the feature and avoid unnecessary deployments (like a change on the README file).
To add a new restriction, go into the Deployment restrictions section of your application settings.
When adding a new restriction, two modes can be selected:
EXCLUDE: commits on the file or folder defined in the "Value" field will be ignored
MATCH: only commits on the file or folder defined in the "Value" field will trigger a deployment
Note:
"Value" should contain the full file path and can't start with /
Wildcards are supported in the "Value" field (you can specify "my-prefix*" to exclude commits on files starting with "my-prefix"); see the sketch below.
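As an illustration, assume a hypothetical EXCLUDE restriction with value README.md; commits would then behave as follows:

```shell
# Only README.md changed: the commit matches the EXCLUDE rule, so Qovery
# ignores it and no deployment is triggered.
git commit -m "docs: fix typo" README.md && git push

# A source file changed: the commit does not match the rule, so the push
# triggers a deployment as usual.
git commit -m "fix: handle empty input" src/main.go && git push
```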
The auto-deploy feature can be activated as well if you directly deploy your container images but it requires some additional integration via your CI/CD. Your CI/CD has to inform Qovery that a new version of the image (a new tag) is available for a specific container image. Thanks to this information, Qovery can find any application that uses this container image and automatically trigger a deployment of the new image tag.
To inform Qovery of the new version, your CI/CD needs to call the following endpoints, depending on the service type:
If you have the auto-deploy feature activated on your service and you stop it, the next commit on the service's branch won't start the service. You have to manually start it.
The auto-deploy feature works on mono-repositories as well, triggering a deployment of any service linked to the mono-repository. If you want to trigger a deployment only if a commit is done on the sub-folder of the app, add a Deployment Restriction to match only that folder (have a look at this section).
#Does Qovery check if a new image is available for my service?
No, there's no automatic hook on your container registry verifying that a new image is available to trigger a deployment. You have to inform Qovery about the new version and trigger a deployment:
manually: updating the version on the console and triggering a deployment
CLI: deploying the new version with the qovery <service> deploy --tag command. See this section
via the API: you have two options
-- call the deployment endpoint with the new tag for that service
-- use an auto-deploy containers endpoint that will trigger the deployment of any service using the same image within the organization and having the auto-deploy feature activated (see the sketch below)
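For instance, the CLI route could look like the sketch below; the API call is shown with a hypothetical endpoint path and payload for illustration only (check the Qovery API reference for the exact auto-deploy endpoint):

```shell
# CLI: deploy a specific image tag for a container service.
# Service name and tag are placeholders; flag names may vary with your CLI
# version, see `qovery container deploy --help`.
qovery container deploy --container my-api --tag v1.4.2

# API: hypothetical path and payload, shown only to illustrate the shape
# of the call your CI/CD would make.
curl -s -X POST "https://api.qovery.com/organization/$ORG_ID/container/deploy" \
  -H "Authorization: Token $QOVERY_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"image_name": "my-api", "tag": "v1.4.2"}'
```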
Once you have configured your environments and services, you can decide to manage the deployments via the UI or directly from your CI/CD.
You can find more information on how to integrate your CI/CD within this section.
diff --git a/docs/using-qovery/deployment/deployment-actions/index.html b/docs/using-qovery/deployment/deployment-actions/index.html
index f3fb30669d..cf168a867e 100644
--- a/docs/using-qovery/deployment/deployment-actions/index.html
+++ b/docs/using-qovery/deployment/deployment-actions/index.html
@@ -56,14 +56,14 @@
1. an environment: via the Play button at environment level, the action will be executed on each service within the environment. To know more about the deployment order of your services, have a look at the Deployment Pipeline
2. a single service: via the Play button at service level, the action will be executed only on the selected service.
3. a subset of services: selecting one or more services from the service list and using the floating action button.
Note that all the deployment actions are available via any interface described within this section.
You cannot queue deployment actions on one environment. Example: you can't trigger the deployment of service A and stop service B at the same time. You need to wait for the deployment of service A to finish before triggering the pause of service B.
You can find below a description of each deployment action, including its purpose and the deployment status your environment and/or service will go through.
The Deploy action allows you to create the resources necessary to run your code on your Kubernetes cluster. This action is available only if the service or environment has never been deployed.
Based on the configuration of the services within it, a certain number of Pods will be created in a dedicated Namespace of the target Kubernetes cluster.
The commit id or tag that will be deployed is the one visible on the interface and not necessarily the latest version (unless the auto-deploy feature is activated)
Once triggered, the deployment of a service goes through the following deployment statuses:
QUEUED : the deployment has been queued and it is waiting for the necessary resources to be allocated to manage your request
BUILDING : the Qovery engine is downloading the git repository and building your code. At the end of this step an image is built and pushed to a registry available on your cloud account. The status will become BUILD ERROR in case of issues while building your code
DEPLOYING : the pods are being created on your cluster based on the image built in the previous step. The status will become DEPLOYMENT ERROR in case of issues while deploying your service. A service is considered unhealthy if the Kubernetes readiness probe check is never OK (more info on readiness probe).
DEPLOYMENT OK : all the requested pods have been created and the service is correctly running (liveness and readiness probes are ok).
If the deployment was triggered on the entire environment, the environment will go through the following deployment statuses:
QUEUED : at least one service is in status QUEUED
BUILDING : at least one service is in status BUILDING
DEPLOYING : at least one service is in status DEPLOYING
DEPLOYMENT OK : at least one service is in status DEPLOYMENT OK but none of them is in error (BUILD ERROR or DEPLOYMENT ERROR)
DEPLOYMENT ERROR : at least one service is in status DEPLOYMENT ERROR
The Redeploy action allows you to update the remote configuration of your services based on their configuration on the Qovery side. If any difference exists (vCPU, number of instances, code version, etc.), a new set of pods will be created with the new configuration and will replace the existing ones. If there are no configuration differences, nothing will happen to the pods running on your cluster (not even a restart; please use the Restart Service feature).
This action is available only if the Deploy action has been triggered at least once on the service or environment.
When replacing the pods of your application, Qovery uses the rolling-restart deployment logic:
1) Deploy new version of instance #1.
2) New version of instance #1 is running => kill previous version of instance #1.
3) Deploy new version of instance #2.
4) New version of instance #2 is running => kill previous version of instance #2.
And so on...
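If you have access to the cluster, you can watch this rolling replacement happen with plain kubectl; the namespace and deployment names below are placeholders (Qovery creates one namespace per environment):

```shell
# Follow the rollout as new pods replace old ones, one instance at a time.
kubectl -n <environment-namespace> rollout status deployment/<service-deployment>

# Watch pods being created and terminated during the rollout.
kubectl -n <environment-namespace> get pods --watch
```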
You can trigger the re-deployment of a service or of the entire environment. The service or environment goes through the same deployment statuses described in the deployment section.
A redeploy on an environment triggers the deployment of any service in the environment, no matter their previous status (even stopped ones)
The Stop action allows you to stop the execution on the cluster of the selected service or environment (deployment status = Stopped). This action is available only if the current deployment status is Deployment OK or Deployment Error.
The effect on your cluster of the stop operation is different depending on the type of service:
Application, Container, Container DB : Pods of those services are stopped. Any attached storage is preserved
Cloud provider Managed DB: the database is paused (only for AWS, not working on Redis)
The Restart Service action allows you to restart the pods of your service without applying any configuration change. This action is available only if the current deployment status is Deployment OK and only for a single service.
Once triggered, the service goes through the following deployment statuses:
RESTARTING : the request to restart has been received
RESTARTED : all the pods of the service have been restarted
RESTART ERROR : Qovery couldn't process the restart request
The Cancel Deployment action allows you to abort any Deploy or Redeploy action and stop the execution of the deployment pipeline. This action is available only if the current deployment status is Queued or Building or Deploying.
If a deployment of a service A is already ongoing, the cancel operation will stop the deployment execution and roll back the service A to the previous version. Any service already deployed during the pipeline execution will not roll back to the previous version.
For Lifecycle Jobs, the cancel operation is not taken into account unless it is forced via the checkbox available in the "Deployment cancel" modal.
The Deploy other version action allows you to deploy a different version for your service. This action is available no matter the deployment status of the service.
Once you click on the action, this panel will appear, and you will be able to choose the version you wish to update/rollback (either git commit or image Tag).
By pressing on the Deploy button, a deployment of the service will be triggered using the selected version.
The Deploy latest version action allows you to deploy the latest version for any of your services within the environment. This action is available no matter the deployment status of the service and only at environment level.
Once you click on the action, this panel will appear, and you will be able to choose the services you wish to update to the latest version (only for services with source = git repository).
By pressing on the Deploy button, a deployment of the service will be triggered using the selected version.
You can access the deployment history of your environment or service by opening the Deployments tab on either the environment or service page.
For each deployment triggered in the past, you will find:
The execution id: an internal id assigned to each deployment. You can share this id with the Qovery team in case of errors in one of your deployments.
Each service deployed during this execution, together with its deployment status and the deployed version.
diff --git a/docs/using-qovery/deployment/deployment-pipeline/index.html b/docs/using-qovery/deployment/deployment-pipeline/index.html
index 5a5ce15c79..1a3ca41ebd 100644
--- a/docs/using-qovery/deployment/deployment-pipeline/index.html
+++ b/docs/using-qovery/deployment/deployment-pipeline/index.html
@@ -54,14 +54,14 @@
If you have 10 applications to be deployed within a stage, Qovery will:
build 7 applications in parallel. Once the build of one application finishes, Qovery immediately starts another one until all the applications are built.
deploy 7 applications in parallel on your Kubernetes cluster. Once the deployment of one application finishes, Qovery immediately starts another one until all the applications are deployed.
The parallel build and deployment is a feature in beta and free for everyone during the beta phase
By default, the deployment pipeline consists of 4 deployment stages with a default service assignment rule:
"0.DEFAULT DATABASE": any new service of type DATABASE will be added to this stage.
"1.DEFAULT JOB": any new service of type JOB will be added to this stage.
"2.DEFAULT CONTAINER": any new service of type CONTAINER will be added to this stage (application deployed from a container image).
"3.DEFAULT APPLICATION": any new service of type APPLICATION will be added to this stage (application deployed from a git repository).
Once the service is created, the assigned stage can be modified afterwards. See this section for more information.
This default assignment is maintained as long as you do not delete or rename the default stage. If the default stage is modified or deleted, the service will be automatically added to the last stage (based on the stage deployment order).
Review the content of your Dockerfile to find performance improvements. There's a list of suggestions on the Docker website; feel free to ask for help on our forum.
#Benefit from the build and deployment parallelism
Try to put on the same deployment stage as many apps as you can, making sure there is no dependency between them. It will allow you to benefit from the parallel build and deployment.
#Update your Qovery configuration based on your application repository structure
If within the same environment you have multiple applications using the same git repository and build context, you can benefit from the image caching mechanism provided by the mirroring registry by:
having one application X on a stage A. This is the one that will be built each time
having all the other applications on other stages, as long as they are after the stage A. For all these applications the build phase will be skipped since the image has already been built for application X
No real configuration change is required here to speed up your deployment. Avoid as much as you can using environment-specific variables as ARGs within your Dockerfile (such as QOVERY_ENVIRONMENT_ID), since this will force a new build on each environment.
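A minimal sketch of the difference, with a placeholder image name:

```shell
# Build-time ARG: when the Dockerfile declares and uses this ARG, the build
# context differs per environment, so each environment triggers its own
# build (cache miss).
docker build --build-arg QOVERY_ENVIRONMENT_ID=env-123 -t my-app .

# Runtime env var: one image is built once and reused across environments.
docker run -e QOVERY_ENVIRONMENT_ID=env-123 my-app
```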
#Cross-environment deploy with auto-deploy enabled
When the auto-deploy feature is enabled and you deploy the same application across multiple environments, the applications will be built separately on each environment and you can't benefit from the caching mechanism available for the already built images.
A small diagram to explain it:
Example: two apps A and B are configured on Qovery pointing to a repo X.
Flow:
1. A commit is pushed on the repo X. Git(hub/lab) informs Qovery about the new commit
2. Qovery starts the deployment of the two apps separately and checks the existence of the image. At this moment, the image does not exist and thus both deployments move forward with the build phase.
3. Qovery starts building the image for each application and pushes it onto the repository.
As you can see, every deployment is independent and the build choice is only based on the existence or not of the image on the container registry at the very beginning.
RollingUpdate (default): Qovery will gracefully roll out new versions and automatically roll back if the new version fails to start. Useful to avoid downtime and load spikes during updates.
Recreate: Qovery will stop all current versions and create new ones once all old ones have been shut down.
To make it clearer, here is a representation of the 2 strategies. First, the default RollingUpdate strategy:
When Qovery is running on your infrastructure, it requires an image registry to store the images built via the Qovery CI and to mirror the images deployed from a 3rd party container registry.
This mirroring registry is available and configurable within the Qovery interface within the Image registry section of your cluster.
Every time an application needs to be deployed on your cluster, the deployed image is picked from the mirroring registry. How the image is pushed to the mirroring registry depends on whether you build the application with the Qovery Deployment Pipeline or not.
#Application built via the Qovery Deployment Pipeline
Images built by Qovery are organized in the mirroring registry by "Git repository", meaning that the images built for services sharing the same git repository will be pushed to the same repository, named z<short_cluster_id>-git_repo_name (or namespace, depending on the cloud provider).
The tag assigned to the built image is based on the following elements (build context): commit ID, repository root path, Dockerfile path, Dockerfile content, and ARGS environment variables (present in the dockerfile). This ensures that each service's build and mirroring process is completely isolated from others.
Before building the application A1, Qovery checks within the mirroring registry, in the repository of the application A1, whether an image has already been built with the same build context parameters (commit id, repository root path, dockerfile path, dockerfile content and environment variables) within the same cluster.
If the image already exists, the build is skipped and Qovery starts the deployment of that image on the Kubernetes cluster.
Otherwise, the image is built by the Qovery pipeline and the resulting image is pushed to the mirroring registry in the repository of the application A1.
Once an application is deleted, if no other application is using the same image name and tag, the image is deleted from the mirroring registry.
In order to speed up the image build, we are using the mirroring registry as a remote cache (available on AWS, GCP and Scaleway). It avoids building the image from scratch: only the layers that changed will be rebuilt.
Check out the Best practices section below for some best practices you can follow to ensure you benefit from the caching system and reduce the build time.
The Qovery behaviour in this case will depend on the chosen mirroring mode within the cluster advanced settings.
Service (Default)
Images within the mirroring registry are organized by "Qovery service", each service has its own repository (or namespace, naming depends on the cloud provider). This means that each service mirroring process is completely isolated from the others.
At the beginning of the deployment of the application A1, Qovery checks within the mirroring registry, in the repository of the application A1, whether an image with the same image name and tag exists.
If the image already exists, the mirroring process is skipped and Qovery starts the deployment of that image on the Kubernetes cluster.
Otherwise, the image is pulled from the source registry and pushed to the mirroring registry in the repository of the application A1, deleting any previous image.
Pro:
Images are automatically deleted when not needed anymore
Cons:
If the same image is used across environments or services, Qovery will mirror the same image multiple times, reducing the deployment speed
Cluster
This is not available on Scaleway.
Images within the mirroring registry are organized by "Qovery cluster", meaning that the applications deployed on the same cluster are all mirrored to the same repository.
At the beginning of the deployment of the application A1, Qovery checks within the mirroring registry, in the repository of the cluster C1, whether an image with the same image name and tag exists.
If the image already exists, the mirroring process is skipped and Qovery starts the deployment of that image on the Kubernetes cluster.
Otherwise, the image is pulled from the source registry and pushed to the mirroring registry in the repository of the cluster C1.
Pro:
If the same image is used across environments or services, this setup avoids mirroring the same image multiple times, increasing the deployment speed.
Cons:
Qovery can't automatically delete the images mirrored on the mirroring registry. This will increase the cloud provider cost of your image registry since it will store more data. To reduce the amount of data stored, you can reduce the image TTL via the cluster advanced setting registry.image_retention_time
Image mirroring is a general best practice: you don't want your system to be strictly coupled on a third party.
Let's say that you run an application on your production environment and Kubernetes needs to pull the image again to spawn a new instance of the application. In this case, you don't want this to fail due to the unavailability of your source container registry. This is why we make sure that a copy is always available in the container registry next to the Kubernetes cluster.
When working with containerized applications, it is crucial to employ unique image tags for precise version management. This practice ensures complete confidence in the version running within a container. Failing to use unique image tags can lead to adverse consequences due to the image caching mechanisms employed by both the Qovery mirroring system and Kubernetes:
Mirroring Registry: Qovery’s mirroring system stores images in a registry. If an image tag remains the same between two versions, the new version will not be mirrored. Consequently, the new version will not be deployed, affecting the overall application.
Kubernetes: Applications deployed by Qovery on Kubernetes adhere to the “ifNotPresent” image pull policy. This policy means that if the image already exists on the Kubernetes node’s local disk, Kubernetes will not attempt to pull it again. However, if the image tag remains unchanged, the new image version will not be fetched, resulting in your pods running the outdated application code.
In summary, maintaining unique image tags is a critical aspect of effective version control and ensuring that your applications run the intended versions without disruptions caused by caching mechanisms.
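A common way to get unique tags is to derive them from the git commit; a minimal sketch, where the registry and image names are placeholders:

```shell
# Tag each build with the short commit SHA so every version gets a distinct,
# immutable tag that the mirroring registry and Kubernetes will always pull.
TAG=$(git rev-parse --short HEAD)
docker build -t my-registry.example.com/my-app:"$TAG" .
docker push my-registry.example.com/my-app:"$TAG"
```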
In the following subsections, you'll find all the information about the deployment management with Qovery.
The end goal of the deployment is to create the resources necessary to run your applications on your cloud account, based on the configuration you have done on the Qovery console.
In the image below you can find the complete flow that your application will go through, from your Git repository up to your Kubernetes cluster.
The developer pushes the code within the git repository
The deployment trigger can come from different sources depending on your integration type:
2.a The auto-deploy feature is activated on Qovery. When the new commit is pushed, Qovery receives a webhook call and can proceed with the application deployment. See this section for more information.
2.b The auto-deploy feature is not activated on Qovery and the deployment is managed via the CI/CD.
2.c The auto-deploy feature is not activated on Qovery and the user decides to trigger the deployment directly from within the Qovery console.
The Qovery engine starts processing based on the configured Deployment Pipeline. The pipeline defines the steps that need to be followed in order to deploy your applications. See this section for more information.
The Qovery engine pulls the code from your repository.
The Qovery engine builds the code and pushes the generated images on a registry present within your cloud account (See the Image Mirroring page for more information).
The Qovery engine creates the load balancers and configures the network.
The Qovery engine creates a namespace within the Kubernetes cluster and deploys the application.
The Qovery engine takes care of creating a custom domain for your application and configuring TLS so that you can access the application from the internet.
The developer can monitor at any time the status of the deployment or of the running applications by:
checking the Deployment Status and Running Status. See this section for more information.
accessing the Logs interface to retrieve the deployment logs as well as the application logs in real time. See this section for more information.
accessing the Deployment History section to get all the information about the past deployments. See this section for more information.
Note:
Qovery also supports deployments from a container registry, but step 2.a is not supported and steps 4 and 5 are skipped.
In the example above we have shown how the deployment of an application is done, but Qovery provides you with a complete set of Deployment Actions allowing you to manage the deployment lifecycle of your applications and environments (stop, restart, etc.). See this section for more information.
Resources
The deployment logs: every time a deployment is triggered, Qovery provides you with the logs of its execution, along with any errors that might occur.
The live logs of your applications: Qovery lets you retrieve your application logs in real time, streamed directly from your remote application (no data is stored on the Qovery side). The logs are accessible as long as the application is running and writing logs to stdout.
This section provides you with some information on the last Deployment that happened on the environment and a navigation system to access the logs of each service of your environment.
More in detail you will find here:
Deployment information (top section): this section shows you the status of the deployment execution and when it happened. If a deployment is ongoing, its status will be updated accordingly in this section.
Pipeline view: this section provides an overall view of the current configuration of the Deployment Pipeline and each service present within the environment. By default, only the services that have been deployed within the last deployment execution are displayed but you can still display all of them by un-ticking the option Last deployed only.
This tab shows you the deployment logs for each service of the environment. By default, you get access to the logs of the last deployment execution but you can switch to the previous execution (See Accessing old deployment logs).
If the service is built via the Qovery CI pipeline, you will get access to the build logs.
When the deployment on Kubernetes is executed, the system will provide you with the deployment status updates. In case of deployment issues, these updates will provide you with some information on the root cause.
At the end of the deployment, a final message is emitted confirming if the deployment was successful or not and, in case of an issue, it provides you with some information on how to solve the issue.
You can use the Troubleshoot section to investigate any issue you might encounter during the deployment of your services.
You can access the logs of a past deployment execution in two ways:
using the Deployment log switch on the logs view
from the Deployment tab from the service or environment page and clicking on the parchment icon of a previous deployment
Qovery provides access to the logs of the last 20 deployments executed on your environment. If your service has been deployed more than 20 deployments ago, you won't be able to access its deployment logs.
The live logs tab gives you a real-time view on the log generated by your application while running remotely on your cloud provider infrastructure.
Within this section you will find:
Timestamp: the timestamp of the message
Pod Name: the name of the Kubernetes pod where your application is running (useful to distinguish instances of a multi-instance app). If you want to follow a specific pod, you can filter the logs by clicking on the pod name
Version: the commit id or the image tag of the application running on this pod
Message: the log message
If your application runs several pods, you have the possibility to filter the logs by pod.
Past application logs are also preserved on your cluster via Loki and can be accessed from the same log view within the Qovery console. Please keep in mind that:
Loki is configured to keep only the latest 1000 log lines per application and retains them for 12 weeks (configurable via the cluster advanced settings)
This feature is not available on EC2 Clusters since we don't install Loki.
If you need to troubleshoot issues with the requests handled by your application, you can also access the Nginx logs in the same view (the log format is described in the helper). Note that this option is available only if the application is exposed publicly (see the Port section).
From any environment window on your Qovery Console, you can monitor the running and deployment status of your environments and services.
Item 1: The dot in the service tab shows the environment running status. For more information, see the Environment Statuses section below.
Item 2: The dot in the deployment tab shows the environment deployment status. For more information, see the Deployment Statuses section below.
Item 3: The label in the "Service status" column represents the running status of the service. For more information, see the Service Statuses section below.
Item 4: The label in the "Last deployment" column represents the status of the latest deployment of the service. For more information, see the Deployment Statuses section below.
Thanks to running statuses, you can find out which services are currently running on your platform and which are interrupted. There are currently two types of running statuses: environment statuses and service statuses.
When you access an environment on your Qovery Console, you can check its status in real-time.
The environment status is computed based on the statuses of all the services in that specific environment. Here are all the possible environment statuses:
STOPPED (Gray dot): All the services are stopped.
STARTING (Loading Icon): At least 1 service is starting.
STOPPING (Loading Icon): At least 1 service is stopping.
RUNNING (Green dot): All services are running correctly.
ERROR (Red dot): All services are in error status.
WARNING (Orange dot): At least 1 service is in error status (but not all of them).
COMPLETED (Green dot): The job execution has completed (only for cronjob and lifecycle jobs).
When you access an environment on your Qovery Console, you can check the status of each service in that environment in real-time within the column "Service status".
Here are all the possible service statuses:
STOPPED (Gray dot): All the application instances are stopped.
STARTING (Loading Icon): At least 1 application instance is starting.
STOPPING (Loading Icon): At least 1 application instance is stopping.
RUNNING (Green dot): All application instances are running correctly.
ERROR (Red dot): All application instances are in error status.
WARNING (Orange dot): (Valid for multi-instance applications only) At least 1 application instance is in error status (but not all of them).
COMPLETED (Green dot): (Valid for Lifecycle and Cronjob only) The job was correctly executed.
The service status is computed based on the status of each Kubernetes pod deployed for this application.
You can check on the Service overview page the status of each pod running your application in Kubernetes. This page is accessible by clicking on one of the services of your environment.
Within this page you will have a view of:
the number of running instances of your application
the status of each instance
in case of an error, you will get the reason behind the issue by clicking on the Pod in error.
By clicking on Logs, you will be redirected to the service logs specifically filtered for this pod.
If you have old cronjob or lifecycle job executions in error, your global job status will be in Warning.
You have the possibility to clear these old executions by clicking on the Clear status button in the status banner of your job.
When you access an environment on your Qovery Console, you can check:
the overall status of your deployments in that specific environment, thanks to the dot present within the "Deployment" tab. This corresponds to the overall deployment status of your environment.
the deployment status of each service in that specific environment, thanks to the label displayed in the Service status column. This corresponds to the status of the last deployment performed on the service.
Here are all the possible deployment statuses for both environments and services:
QUEUED (temporary state).
BUILDING (temporary state).
BUILDING ERROR (final state).
DEPLOYING (temporary state).
DEPLOYMENT ERROR (final state).
CANCELLING BUILDING (temporary state).
CANCELLED (temporary state).
DEPLOYMENT OK (final state).
An error arising during deployment does not necessarily mean your application is not running. Monitoring both your deployment and service statuses lets you know exactly which applications are currently running on your platform.
Qovery allows you to integrate with major container registries, enabling you to deploy your own container images or those available on public registries.
You can control the container registry used by your teams directly within the Qovery Console.
To know more about how to configure your container registry connection and the supported container registries, have a look at this section
Using Circle CI with Qovery is super powerful and gives you the ability to manage the way you want to deploy your applications. As the possibilities are endless, here are a couple of examples that you can use. Feel free to adapt them to your needs.
Set the environment variable Q_CLI_ACCESS_TOKEN or QOVERY_CLI_ACCESS_TOKEN (both are valid) with your API token. E.g. export QOVERY_CLI_ACCESS_TOKEN=your-api-token
You have turned off the Qovery Auto Deployment for every service that you want to deploy manually.
Circle CI also uses a .yaml file to configure your pipeline. Refer to the GitLab CI and GitHub Actions examples to learn how to configure your pipeline with Qovery.
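For example, a deploy step run from a Circle CI job could look like the following sketch. The application name is hypothetical, and the install URL and command flags are assumptions to double-check against the CLI documentation; CIRCLE_SHA1 is provided by Circle CI:
curl -s https://get.qovery.com | bash   # install the Qovery CLI
qovery application deploy --application "my-app" --commit-id "$CIRCLE_SHA1" --watch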
This is also applicable for the qovery container deploy, qovery lifecycle deploy, and qovery cronjob deploy commands.
#Create a Preview Environment for your Pull-Request
Qovery integrates automatically with GitHub, GitLab and Bitbucket to create a Preview Environment for each Pull-Request. But in case you want to control the creation of the Preview Environment manually, you can use the following commands:
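For instance, a minimal sketch assuming a blueprint environment named "blueprint" and the environment clone/deploy subcommands (flag names are assumptions to verify with qovery environment --help):
qovery environment clone --environment "blueprint" --new-environment-name "pr-123"   # one preview per pull-request
qovery environment deploy --environment "pr-123" --watch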
#I don't find my Continuous Integration platform, what should I do?
Your CI platform is probably going to be officially supported in the near future. In the meantime, you can use our Qovery CLI and make the integration yourself (it is super easy).
Using Jenkins with Qovery is super powerful and gives you the ability to manage the way you want to deploy your applications. As the possibilities are endless, here are a couple of examples that you can use. Feel free to adapt them to your needs.
Set the environment variable Q_CLI_ACCESS_TOKEN or QOVERY_CLI_ACCESS_TOKEN (both are valid) with your API token. E.g. export QOVERY_CLI_ACCESS_TOKEN=your-api-token
You have turned off the Qovery Auto Deployment for every service that you want to deploy manually.
Jenkins also lets you configure your pipeline as code. Refer to the GitLab CI and GitHub Actions examples to learn how to configure your pipeline with Qovery.
This is also applicable for the qovery container deploy, qovery lifecycle deploy, and qovery cronjob deploy commands.
#Create a Preview Environment for your Pull-Request
Qovery integrates automatically with GitHub, GitLab and Bitbucket to create a Preview Environment for each Pull-Request. But in case you want to control the creation of the Preview Environment manually, you can use the following commands:
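For instance, a minimal sketch run from a Jenkins job (flag names are assumptions to verify with the CLI help; CHANGE_ID is provided by Jenkins multibranch pipelines):
qovery environment clone --environment "blueprint" --new-environment-name "pr-$CHANGE_ID"
qovery environment deploy --environment "pr-$CHANGE_ID" --watch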
Qovery allows you to integrate with the major git-based software version control systems in order to build and deploy the applications available in your own repositories.
Today Qovery supports the following software version control systems:
GitHub and GitHub Enterprise
GitLab
Bitbucket
Once connected to the Qovery Console via one of these three systems, Qovery will be able to access all the repositories connected to your account.
If you have special access needs, you can use the git provider tokens instead of your own git provider account. Have a look at the Managing git permission section to know more.
You can deploy any CloudFormation manifests/templates with Qovery and manage the lifecycle of your own cloud resources. For example, you can deploy your own databases, lambdas, brokers, etc.
Running and deploying your CloudFormation manifest/template is achieved via the Qovery Lifecycle Jobs; have a look at this section to learn how it works.
To simplify the configuration, Qovery provides a CloudFormation configuration template for your Lifecycle job, allowing you to package your manifest and run it with the CloudFormation CLI directly on your cluster.
Follow these steps to create and deploy your Cloudformation manifest/template:
Add a new service
Enter the environment where you want to deploy your Cloudformation manifest and select the "Add Service" button
Use the Cloudformation template
Select the "Cloudformation" option in the service creation list and follow the steps.
Manifest location
Provide the location of your manifest within your git repository
Customize your configuration
Qovery provides you with a pre-configuration for your lifecycle job that can run and deploy your CloudFormation template:
Dockerfile: you will find a Dockerfile that packages your manifest/template and runs the right CloudFormation command depending on the triggered event (for example, the "start" event runs the CloudFormation deploy command). Customize this file to match your needs (backend config, additional configuration, etc.)
Triggers: you will find the default triggers and commands based on the default Dockerfile.
Resources: you will find default CPU/memory values sized to run the CloudFormation CLI in a Kubernetes job.
Environment variables: you can provide the input of your CloudFormation manifest/template as a file, which will be stored as an environment variable of type file. You can also add any additional environment variables necessary to run the CloudFormation commands (like AWS_SECRET_ACCESS_KEY, etc.)
Create & Deploy
Once everything is set, you can Create and Deploy your CloudFormation job. This will trigger the execution and deployment of the CloudFormation manifest/template.
Access the Cloudformation output
If your CloudFormation manifest/template generates an output (see Lifecycle job output for more information), the output will be fetched and injected as environment variables into every service of the same environment, allowing those services to access the newly created resources.
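As an illustration, the last step of the job could expose the stack outputs using the lifecycle job output contract. The file path and JSON schema below are assumptions to verify against the Lifecycle job output documentation, and the output name is hypothetical:
cat > /qovery-output/qovery-output.json <<EOF
{
  "db_endpoint": { "sensitive": false, "value": "$DB_ENDPOINT" }
}
EOF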
Qovery lets you handle your infrastructure via the most popular IaC frameworks.
Thanks to the Qovery Lifecycle jobs, you can manage the lifecycle of any external resource and easily make them available to any application running on your Kubernetes cluster.
Here's how it works:
You define the git repository and folder where the manifest/template is located, together with the inputs necessary for its execution (manifest/template inputs, cloud provider credentials, etc.)
Your manifest and inputs are packaged into a containerized application, thanks to a Dockerfile provided by Qovery. This Dockerfile defines the IaC framework CLI version (e.g. Terraform 1.9.0), the commands to run (e.g. on "delete", execute "terraform destroy"), etc. It can be fully customized based on your needs (see the sketch after this list).
When an event happens on your environment or job (deploy, stop, destroy), the job is deployed and scheduled for execution.
The job is executed on your cluster and creates/destroys the resource depending on the triggered event (deploy -> create, delete -> destroy).
When a resource is created, your manifest/template output is retrieved and injected as environment variables for any other service within the same environment. For example, if you create an RDS instance and expose an output for it, any other application will be able to retrieve this value and access the resource.
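As an illustration, here is a minimal Dockerfile sketch for a Terraform-based lifecycle job. This is not the exact template Qovery generates; the base image, paths and entrypoint are assumptions:
FROM hashicorp/terraform:1.9.0       # pin the IaC framework CLI version
COPY . /manifest                     # package the manifest into the image
COPY entrypoint.sh /entrypoint.sh    # maps the event to a command (deploy -> apply, delete -> destroy)
WORKDIR /manifest
ENTRYPOINT ["/entrypoint.sh"]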
Have a look at how to deploy the different IAC frameworks with Qovery:
You can deploy any Terraform manifests/templates with Qovery and manage the lifecycle of your own cloud resources. For example, you can deploy your own databases, lambdas, brokers, etc.
Running and deploying your Terraform manifest/template is achieved via the Qovery Lifecycle Jobs, have a look at this section to know how it works.
To simplify the configuration, Qovery provides a Terraform configuration template for your Lifecycle job, allowing you to package your manifest and run it with the Terraform CLI directly on your cluster.
Follow these steps to create and deploy your Terraform manifest/template:
Add a new service
Enter the environment where you want to deploy your Terraform manifest and select the "Add Service" button
Use the Terraform template
Select the "Terraform" option in the service creation list and follow the steps.
Manifest location
Provide the location of your manifest within your git repository
Customize your configuration
Qovery provides you with a pre-configuration for your lifecycle job that can run and deploy your Terraform template:
Dockerfile: you will find a Dockerfile that packages your manifest/template and runs the right Terraform command depending on the triggered event (for example, the "start" event runs "terraform apply"). Customize this file to match your needs (backend config, additional configuration, etc.)
Triggers: you will find the default triggers and commands based on the default Dockerfile.
Resources: you will find default CPU/memory values sized to run the Terraform CLI in a Kubernetes job.
Environment variables: you can provide the input of your Terraform manifest/template as a file, which will be stored as an environment variable of type file. You can also add any additional environment variables necessary to run the Terraform commands (like AWS_SECRET_ACCESS_KEY, etc.)
Create & Deploy
Once everything is set, you can Create and Deploy your Terraform job. This will trigger the execution and deployment of the Terraform manifest/template.
Access the Terraform output
If your Terraform manifest/template generates an output (see Lifecycle job output for more information), the output will be fetched and injected as environment variables into every service of the same environment, allowing those services to access the newly created resources.
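As a sketch, if the lifecycle job output contract accepts the shape produced by terraform output -json (an assumption to verify against the Lifecycle job output documentation), the job could expose its outputs with:
terraform output -json > /qovery-output/qovery-output.json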
Datadog is a recommended product to monitor and track down application performance issues (APM). Qovery supports and recommends using Datadog (or another monitoring/observability platform).
Check out our tutorial to learn how to integrate Datadog with Qovery.
#I don't find my Monitoring provider, what should I do?
Qovery relies on Kubernetes to run your apps, meaning Qovery will support your monitoring solution if its maintainers provide a Helm chart.
If your monitoring platform provides a Helm Chart, then you can install it:
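For example, installing a chart generally boils down to the following (all names are placeholders; check your monitoring provider's documentation for the actual repository and chart):
helm repo add <repo_name> <repo_url>
helm install <release_name> <repo_name>/<chart_name>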
NewRelic is a recommended product to monitor and track down application performance issues (APM). Qovery supports and recommends using NewRelic (or another monitoring/observability platform).
AWS Secrets Manager is a service that helps you protect secrets needed to access your applications, services, and IT resources. The service enables you to easily rotate, manage, and retrieve database credentials, API keys, and other secrets throughout their lifecycle.
To provide better integration with Qovery, we recommend using AWS Secrets Manager with Doppler. Doppler eases the synchronization of AWS Secrets Manager with Qovery. You can find more information about the Doppler and Qovery integration here.
Follow this guide to set up assume roles on your Kubernetes cluster. Once it is set up, your application will be able to connect to AWS Secrets Manager using the AWS SDK.
Doppler is a universal secrets manager that integrates with Qovery. Doppler allows you to store and manage your application secrets in a single place and access them from anywhere.
If you'd like to automatically notify your team on a Slack workspace whenever a change has occurred on your apps, this integration will help you out. You can choose which actions should trigger messages on your Slack workspace.
You can have multiple webhooks targeting different Slack channels.
You can specify the events that you want to receive. E.g. if you just want to be notified when a deployment fails, use "events": ["DEPLOYMENT_FAILURE"]. All the events and their descriptions are available in our Webhook section.
You can turn off or delete your webhooks at any time from the webhook section.
Check out this page for further details on how to use and configure the webhook.
Qovery integrates with Terraform to create a complete workflow with a strong developer and operations experience for the different teams from development to critical production applications. By integrating Terraform with Qovery, your team can quickly implement governance at scale while drastically improving the developer experience when deploying and managing applications.
Thanks to our Terraform provider, you can automate the creation of your organization, project, clusters, applications and environments (and more).
Qovery allows you to easily export your environment as a Terraform Manifest and from there manage the configuration of the environment via our Terraform Provider. Check the Terraform Exporter documentation to know more.
Qovery allows you to create webhooks at organization-level so that, when an event happens on an environment within your organization, you can get notified on external applications.
This is useful for the following use cases:
integrate Qovery with an external tool that needs to be informed when the deployment status changes.
share any deployment status change for your environments within a Slack channel.
You can trigger webhooks when:
A deployment has started in the environment.
A deployment has been successful in the environment.
A deployment has been cancelled in the environment.
A deployment has failed in the environment.
Two types of webhooks can be created within Qovery:
Standard: this type of webhook will send a payload to the defined URL using a Qovery proprietary format (check out our Webhook payload documentation for more information on the payload format)
Slack: this type of webhook will send pre-formatted messages using the Slack messaging syntax. Have a look at our Slack integration for more information on the integration.
Open the Organization settings and the Webhook section
Press the Add New button.
Enter the following parameters:
URL: The webhook URL provided by the external application you want to receive notifications on.
"kind": Specify which kind of webhook you want to create. At the moment, you can specify "kind": "STANDARD" to create a generic webhook, or "kind": "SLACK" to create a Slack webhook.
"description": (Optional) Enter a self-explanatory description of what your webhook does. In the example, "description": "slack notifications" clearly states that the webhook triggers notifications on Slack.
"secret": (Optional) Specify the secret to be used when calling the specified webhook URL.
"events": List all the events you want to be notified about.
"environment_types_filter": (Optional) If you only want to get notified about events happening on one or several specific type(s) of environment(s), you can provide a list using the following possible values: "PRODUCTION", "DEVELOPMENT", "STAGING" and "PREVIEW". Note that "environment_types_filter" can be used together with "project_names_filter".
"project_names_filter": (Optional) If you only want to get notified about events happening in one or several specific projects, you can provide a list of project names that will act as a filter. Notifications will then only be triggered for projects whose names match or, if you're using a wildcard, start with one of the values from your list. Note that "project_names_filter" is not case-sensitive, accepts wildcards, and can be used together with "environment_types_filter".
Set the KUBECONFIG environment variable to the path of the kubeconfig file obtained from the previous command:
export KUBECONFIG=<path_to_kubeconfig_file>
You can now use tools like k9s or kubectl to access and manage your cluster:
Some cloud providers like GCP or AWS require additional configuration to access the cluster. Make sure you have CLI binaries installed (gcloud CLI/AWS CLI/...), and the right permissions and credentials set up (environment variables or profile file).
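For example, with AWS you would typically export credentials (placeholder values shown) before running kubectl or k9s:
export AWS_ACCESS_KEY_ID=<access_key_id>
export AWS_SECRET_ACCESS_KEY=<secret_access_key>
# or point to a named profile instead
export AWS_PROFILE=<profile_name>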
Using k9s
Launch the k9s terminal UI to interact with your Kubernetes cluster:
k9s
Using kubectl
Here are a few examples of common kubectl commands:
kubectl get pods                 # List all pods in the default namespace
kubectl describe pod <pod_name>  # Get detailed information about a specific pod
The lock cluster command prevents any update or deployment from being initiated on a cluster while it is locked. Once a cluster is locked, no new updates can be processed until it is unlocked.
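A sketch of the corresponding CLI calls; the subcommand names and flags below are assumptions, so check qovery cluster --help for the exact syntax:
qovery cluster lock --cluster-id <cluster_id> --reason "maintenance window"
qovery cluster unlock --cluster-id <cluster_id>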
In the following subsections, you'll learn how to use the web interface, the CLI (Command Line Interface) and other interfaces to deploy your application with Qovery.
Use the Qovery REST API to programmatically create infrastructure and deploy your applications. The only limit is your imagination. Find the Qovery API documentation and the OpenAPI spec to generate your own Qovery client with your favorite programming language.
If you log in with the Google or Microsoft providers, you will have to set up a git token to access and deploy your applications from your private repositories.
echo"ERROR: the cluster does not have a correct status, please check cluster logs and fix the issue. Then delete the key $old_aws_access_key and retry"
exit1
fi
if[$(date +"%s") -gt $max_time];then
echo"ERROR: timeout reached, the cluster is not deployed yet, please check cluster logs and fix the cluster issue. Then delete the key $new_aws_access_key and retry"
exit1
fi
done
echo"[+] Waiting up to 2h to ensure all ongoing deployments are done ($(date -d @$max_time))"
while[$(date +"%s") -lt $max_time];do
sleep10
done
echo"[+] Delete the old Access Key"
aws iam delete-access-key --access-key-id $old_aws_access_key --user-name $aws_iam_username
-
echo"[+] Done"
You will see the following output:
[+] Ensure there is only one Access Key
-> Current (future old) key detected: xxx
[+] Create a new Access Key
-> Successfully created a new access key: yyy
[+] Update Qovery credentials
[+] Deploy the cluster with the new credentials
[+] Wait for the cluster deployment to be done
-> 15:04 Waiting for the cluster deployment to be done. Current status: DEPLOYING...
-> 15:05 Waiting for the cluster deployment to be done. Current status: DEPLOYING...
-> 15:06 Waiting for the cluster deployment to be done. Current status: DEPLOYING...
-> 15:07 Waiting for the cluster deployment to be done. Current status: RUNNING...
[+] Waiting up to 2h to ensure all ongoing deployments are done (Fri Nov 11 03:22:57 PM CET 2022)
Within this section you will find the common errors you might encounter when deploying or running your clusters with Qovery
#I don't have Qovery access anymore, how could I delete Qovery deployed resources on my AWS account?
Unfortunately, there is no automatic way to do it with Qovery once we don't have access. However, AWS provides an easy way to retrieve those resources, so you can perform the deletion manually. To do so, go to the AWS web console and search for the "Resource Groups & Tag Editor" service, then:
Click on "Create Resource Group".
In Tags, enter: "ClusterLongId".
In the "Optional Tag value", enter the Qovery cluster ID. If you don't have it, let AWS suggest it for you. If you have Qovery deployed elements remainings, it will propose the Cluster long ID automatically.
Click on "Add".
You should see the filter with the information you just entered.
Click on "Preview groups resources".
You'll have all elements deployed by Qovery and you can delete what you want.
#My cloud account has been blocked, what should I do?
If you encounter this kind of error during an infrastructure deployment (including managed DBs):
This account is currently blocked by your cloud provider, please contact them directly.
Or
This AWS account is currently blocked and not recognized as a valid account.
Please contact aws-verification@amazon.com directly to get more details.
Maybe you are not allowed to use your free tier in this region?
Maybe you need to provide billing info?
This error is likely due to a billing issue or blocked free-tier usage in the given region.
Unfortunately, there is nothing Qovery can do. You need to reach out directly to your cloud provider to get more details and get your account unblocked.
If you are using AWS, you can contact them directly via dedicated email: aws-verification@amazon.com.
When a custom domain is added to an application, it must be configured on your side according to the instructions displayed:
Qovery will verify whether your custom domain is properly configured. If you're behind a CDN, we will only check if your custom domain resolves to an IP address.
If you want to verify for yourself that your custom domain is correctly configured, you can use the following command: dig CNAME ${YOUR_CUSTOM_DOMAIN} +short. On the domain above, we can check that the configuration is correct on Google DNS servers:
It should return the same value as the one configured on Qovery. Otherwise, be patient (some minutes depending on DNS registrars) and ensure the DNS modification has been applied. Finally, you can check the content of the CNAME with:
We can see the destination contains other elements, indicating that the CNAME is pointing to an endpoint and correctly configured.
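For example, with a hypothetical domain, querying Google's public resolver:
dig CNAME app.example.com +short @8.8.8.8   # should print the CNAME target configured on Qovery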
The SSL / TLS Certificate is generated for the whole group of custom domains you define:
if one custom domain is misconfigured: the certificate can't be generated. A general error is displayed in your service overview.
if the certificate has been generated once, but later one custom domain configuration is changed and misconfigured: the certificate can't be generated again
If you experience some invalid certificate, here is how you can fix the issue:
Identify the misconfigured custom domain(s) in your application domain settings.
We check each of your domains. If one or more have errors, a red cross will appear with an error message on hover.
Fix or delete them. After correcting your configuration, you can perform another check by clicking on the red cross.
Error message: Error while checkout submodules from repository https://github.com/user/repo.git.
Error: Error { code: -1, klass: 23, message: "authentication required but no callback set" }
There are limitations with the support for Git Submodules: only public submodules over HTTPS, or private ones with embedded basic authentication, are supported.
Solution: Follow our Git Submodules guide to make your application working with Git Submodules on Qovery.
#Container image xxxxxx.xxx.xx failed to build: Cannot build Application "zXXXXXXXXX" due to an error with docker: Timeout
This error shows up in your deployment logs when the application takes more time to build than the maximum allowed build time (currently 1800 seconds).
If your application needs more time to build, increase the build.timeout_max_sec parameter within your application advanced settings and trigger the deployment again.
#Job failed: either it couldn't be executed correctly after X retries or its execution didn't finish after Y minutes
This error occurs in the following two cases:
Job code execution failures
The pod running your lifecycle job is crashing due to an exception in your code or an OOM issue. Have a look at the Live Logs of your lifecycle job to understand where in your code the issue comes from.
Job execution timeout
The code run by your job is taking more time than expected, so its execution is stopped. If your code needs more time to execute, increase the Max Duration value within the Lifecycle Job configuration page.
#SnapshotQuotaExceeded - while deleting a managed DB
This error occurs because Qovery creates a snapshot before deleting the database, to protect users who delete a database accidentally.
To fix this issue, you have 2 solutions:
You probably have useless snapshots, from old databases or ones you don't want to keep anymore. Delete them directly from your cloud provider web interface. Here is an example on AWS:
Search for the database service (here RDS)
Select the Snapshots menu
Select the snapshots to delete
Open a ticket with the cloud provider support and ask to raise this limit.
Within this section you will find the common errors you might encounter when running your services with Qovery
#My app is crashing, how do I connect to investigate?
Goal: you want to connect to your application's container to debug it.
First, try the qovery shell command from the Qovery CLI. It's a safe method to connect to your container and debug your application.
If your app crashes within the first seconds, you'll lose the connection to your container, making debugging almost impossible; in that case, continue reading.
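For reference, a minimal sketch of that CLI session (the command typically uses your current CLI context or prompts you to select the service):
```bash
# Open a shell inside the running container of your service.
qovery shell

# See `qovery shell --help` for options such as passing a command
# or targeting a dedicated pod/container.
```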
You can apply this procedure directly on your application OR on a copy having the same setup.
If you don't make a copy, doing this procedure directly on the PRODUCTION application will lead to downtime in your service. Be sure of what you're doing before going ahead!
Your app is crashing very quickly; here is how to keep full control of your container:
If you are deploying a helm service, to get all the Qovery features (access your container logs, apply the stop/restart actions, display the pod status in the overview page), make sure to create an override and assign the macros qovery.labels.service and qovery.annotations.service to the labels and annotations of any deployed Pods/Deployments/Services/Jobs.
Override example:
```yaml
commonLabels:
  mylabel: "test"
  qovery.labels.service
annotations:
  qovery.annotations.service
```
These macros will be automatically replaced by Qovery during the deployment phase.
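Once deployed, you can check that the labels and annotations were applied. A quick sketch with kubectl (the namespace placeholder is yours to fill in):
```bash
# List the pods of your environment and display their labels.
kubectl get pods -n <your-environment-namespace> --show-labels
```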
Managed RDS instances deployed with Qovery have a backup option enabled by default in the same region where the RDS instance is located. However, for enhanced disaster recovery and compliance purposes, you may need to set up multi-region backups.
This guide will help you configure an AWS Backup job to create additional backups of your RDS instances in a different region. Multi-region backups provide several benefits:
Improved disaster recovery: If one AWS region becomes unavailable, you can restore your database from a backup in another region.
Compliance: Some regulations require data to be backed up in geographically distinct locations.
Data migration: Multi-region backups can facilitate moving your database to a different region if needed.
For more information about AWS Backup, have a look at this documentation.
Cost Considerations: setting up multi-region backups will incur additional costs: 1) storage costs for the backup data in the secondary region, 2) data transfer costs for copying the backup data between regions, and 3) potential costs for restoring from a backup in a different region. Be sure to review the AWS Backup pricing and AWS data transfer pricing to estimate the costs for your specific use case. You can use AWS Cost Explorer to monitor these costs over time.
Go to the AWS Console and follow this guide to create your AWS Backup plan. For the setup, you can use the default settings but take into account the following points:
Vault: you can use the default vault as long as you don't have any additional security requirements (sharing snapshots externally, etc.)
Start time: update the start time based on your needs
Total retention period: update the retention period for the snapshots based on your needs
Copy to destination: at this step, select the region where you want to create the database snapshot
On the next step, we will define the resource that this backup job should target:
Select the option "Include specific resource types"
In the dropdown list for "Select specific resource types", select "RDS"
(Optional) if you don't want this plan to run on every RDS instance, uncheck the option "All databases" and select the databases from the dropdown list
Now, based on your schedule, you should see backup jobs starting and creating backups in the selected vault:
Select the region where the backup should be created
Open the "Vault" section on the right
Select the vault that the AWS backup job should target
Verify that new "Recovery points" have been created
From this page you can restore a backup (refer to the AWS documentation for more information).
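You can also verify the recovery points from a terminal. A minimal sketch with the AWS CLI (the vault name and region are placeholders):
```bash
# List the recovery points created in the destination region's vault.
aws backup list-recovery-points-by-backup-vault \
  --backup-vault-name Default \
  --region eu-west-3
```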
Qovery integrates with all existing Continuous Integration platforms. We have a guide for the most popular CI platforms. However, even if you don't find your CI platform, you can see here that integrating Qovery into a CI is just a matter of:
Adding a new step into your CI pipeline
Installing the Qovery CLI
Running the qovery <application|container|lifecycle|cronjob> deploy ... commands
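As a sketch, the CI step could look like the following (the token environment variable name and any deploy flags depend on your setup; check the CLI documentation's API token section and `qovery application deploy --help`):
```bash
# Install the Qovery CLI on the CI runner.
curl -s https://get.qovery.com | bash

# Authenticate non-interactively with an API token (assumed variable name).
export QOVERY_CLI_ACCESS_TOKEN="${QOVERY_API_TOKEN}"

# Trigger the deployment of your application.
qovery application deploy
```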
Deployment rules allow you to start and stop environments at a given time. Stopping dev environments outside of your working hours is a great way to reduce your cloud bill.
You can find more information on Deployment rules in our documentation.
To optimize efficiency and reduce costs for your nodes on AWS EKS clusters, you can enable the Karpenter autoscaler. For a deeper dive into Karpenter and its integration with Qovery, check out our blog. Detailed instructions on enabling Karpenter are available in our documentation.
An API Gateway is a web service that acts as an interface between consumers and your services. It acts as a single point of entry into a system and is responsible for request routing, composition, and protocol translation. It's essentially a middleman that processes requests from clients to services.
Karpenter is a great way to cut your AWS bill. It provides an easy and flexible way to scale and optimize your resource consumption. But there is a known issue with capacity planning when you deploy DaemonSets. In this guide, I will present the issue and explain how to avoid it by using Priority Class.
A DaemonSet in Kubernetes is a specialized controller used to ensure that a copy of a particular pod runs on all nodes in a cluster. It is particularly useful for deploying background tasks or system-level services that need to run on every node, such as log collectors, monitoring agents, or network components.
When nodes are added to the cluster, the DaemonSet automatically schedules the specified pod on the new nodes, ensuring consistent deployment across the entire infrastructure. Similarly, when nodes are removed, the DaemonSet takes care of cleaning up the pods that were running on those nodes.
This makes DaemonSets a powerful tool for maintaining uniformity and reliability in the operation of essential services across a Kubernetes cluster.
There is a known issue with Karpenter and DaemonSets when scaling nodes. DaemonSets ensure a copy of a pod runs on every node, consuming additional resources that Karpenter does not consider, leading to potential resource contention and under-provisioned nodes.
This forces operators to over-provision their nodes, resulting in inefficient resource utilization and higher costs. While the Kubernetes community and Karpenter developers are working on solutions, users currently need to manually adjust resource allocations and monitor node utilization to mitigate these issues.
A way to resolve this problem is to use a Priority Class and attach it to the DaemonSet we are creating.
A PriorityClass in Kubernetes is a resource used to assign a priority level to pods. This resource helps the scheduler make decisions during resource contention.
Higher-priority pods are scheduled before lower-priority ones
In case of resource shortages, lower-priority pods may be preempted (evicted) to make room for higher-priority pods.
This ensures that critical workloads receive the necessary resources to run effectively.
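For illustration, here is what creating such a PriorityClass directly with kubectl would look like; the guide below deploys it through a Helm chart instead, and the name and value used here are illustrative:
```bash
# Create a PriorityClass so DaemonSet pods are scheduled (and can preempt
# lower-priority pods) before ordinary application workloads.
kubectl create priorityclass daemonset-critical \
  --value=1000000 \
  --description="Reserve room for DaemonSet pods on every node"
```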
Create the karpenter-priority-class service in the Qovery environment where you want to deploy your DaemonSet by following this documentation and these values:
General:
Service name: karpenter-priority-class
Source:
Helm source: Git Provider
Git account: Public repository (Change if you are not using GitHub)
Repository: https://github.com/Qovery/qovery-charts-examples.git (Replace by the name of your repository)
Even if Karpenter is a great way of reducing your AWS bill, sometimes you will have to do some manual lifting. This issue is a good example. A single Priority Class is enough to avoid a complex resource allocation problem.
Qovery is versatile and has the ability to cater to a wide range of frontend applications. Whether you're working with a Single-Page Application (SPA), a Server-Side Rendered (SSR) application, or a general web app, Qovery has you covered.
Here are some resources you can use to deploy your different types of frontend apps with Qovery.
Most frontend apps do not require much CPU and RAM allocated to them at runtime.
You can use 100 mCPU and 128 MiB of RAM for most of them.
However, build time can be very CPU and RAM intensive. Qovery provides default build resources for each type of frontend app.
You can change them in your app's advanced settings.
In this step, we'll use the private API of our APP_B in our APP_A over a private network.
We have already configured everything to make it work. The only missing step is the configuration in APP_A - it needs to know how to access our APP_B.
In the example below, we'll use Node.js and axios to create an HTTP client able to consume the API of APP_B:
Now, you can configure your HTTP client in the frontend application to target your backend API:
This is it! Every request using the API client we have just configured will consume the API of APP_B over the secure, internal network.
Once again, we used the BUILT_IN secrets. Read more about them here
#Consume the public API in the frontend application
In this step, we'll deploy a frontend application and consume our public API exposed by APP_A.
In the first step, create your frontend application.
After the application is created, we can easily configure it to consume our public API. All we need to do is to make use of the BUILT_IN secrets. See how to achieve it in a Nuxt.js example below:
```js
export default {
  env: {
    apiUrl: process.env.QOVERY_APPLICATION_APP_A_URL
  }
}
```
```js
import axios from 'axios'

export default axios.create({
  baseURL: process.env.apiUrl
})
```
After providing the configuration from above, deploy your frontend application.
Now our frontend application will be able to consume the API exposed by the publicly exposed APP_A.
In this guide, we deployed two microservices that communicate over the internal network. We also deployed a frontend application that makes use of a public API exposed by one of our applications. At the same time, we deployed a database and connected it to the second of our backend microservices.
Here are some resources you can use to migrate your applications to your favorite cloud provider with Qovery.
Are you migrating from Digital Ocean, OVH, Netlify or any other cloud provider? You can use the same resources to migrate your applications. Qovery provides the same features for all cloud providers.
Qovery provides a migration assistance to help you migrate your applications with Qovery. Contact us via the Qovery Console and ask for migration assistance via the chat.
This guide is a bit outdated. We are working on a new version of it. Stay tuned!
Qovery provides a very simple way of working with monorepositories.
You can deploy multiple applications using the same git repository or deploy the same application in many different modes/configurations.
#Deploying multiple applications using one repository
To deploy multiple apps using one repository, set up the app to target your monorepo. Additionally, you need to set up the folder in which your application resides.
#Deploying application with multiple configurations using one repository
A special case of monorepository is a situation where one repository is used to deploy multiple applications with the same source code but different configurations or modes. Application behaviour depends on the provided config, like environment variables and secrets.
Qovery supports this case well. The steps do not differ much from the steps from the previous example:
As you see in the examples above, we used one repository (poc-factory/tweetifier) in two applications:
app-1
app-2
Those applications use the same application root path - /, so they can be built using the same source code. To adjust the behavior of each application to meet your needs, use environment variables or secrets, as in the sketch below.
This allows you to run multiple applications using the same source code in different modes.
You can set up secrets or environment variables in your application's Environment Variables section:
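As a minimal sketch, a shared entrypoint script could switch the application's behavior based on an environment variable (the APP_MODE variable and file names are hypothetical):
```bash
#!/bin/sh
# Same source code for app-1 and app-2; the mode is decided by an
# environment variable set per application in Qovery.
case "$APP_MODE" in
  worker) exec node worker.js ;;
  *)      exec node server.js ;;
esac
```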
Seeding a database is a common task when developing an application. It allows you to populate your database with data to test your application.
Qovery provides multiple ways to seed your database.
Do you need more examples? Do you have any questions? Feel free to ask on our Community forum.
Then, click on AWS Organizations, and click on Add an AWS account:
Fill in the "IAM Role Name" with a meaningful name. This will typically be displayed when users are switching to this account, so it should clearly identify this sub-account.
If all goes well, this takes us back to the list of accounts. If the new one looks grayed out, it’s not quite done being created; wait a little and refresh the list until it looks normal.
Log out of the master account, and connect to this account.
In this guide, you will find how to configure SSO on your sub-account with your identity provider (Google Workspace, Okta, Microsoft Active Directory, etc.).
In the end, you should see your users in the AWS IAM Identity Center console:
#Add SSO (with Qovery) on your EKS cluster (optional)
Qovery allows you to connect to your EKS cluster using SSO. It's optional, but if you want to do this anyway, please refer to this guide.
SSO Sync helps you populate AWS SSO directly from your Google Workspace users. It's optional; you can use it inside Qovery with a Dockerfile (clone the repo and add this Dockerfile, feel free to adapt it to your needs):
Managed RDS instances deployed with Qovery have a maintenance window configured by default, applying database updates only during the specified timeframe (see more information here).
During the maintenance window, downtime has to be expected on your service, since the upgrade might take a few minutes to take effect.
If this is not acceptable for your business, AWS allows you to deploy changes via the Blue/Green Deployments.
This basically allows you to create a copy of your database with the new configuration (called "Green group") and then switch over to the new database by removing the old instance ("Blue group").
Blue/Green deployments on RDS are not natively supported by Qovery but you can follow this guide to upgrade your RDS instances.
This part has to be done only once for each database. If you have already activated the logical replication, go to the next section.
Logical replication is necessary to use the RDS Blue/Green deployment feature.
To enable the logical replication you have to:
Create a Custom DB Parameter group. Make sure it targets your current engine type (PostgreSQL) and version (Parameter group family = postgres14), let's call it parameter-group-14. (Documentation here)
Edit the DB parameter group parameter-group-14 and activate the logical replication by setting rds.logical_replication to 1 (Documentation here)
Associate the DB Parameter group parameter-group-14 to your database. If you have read replicas, associate the parameter group to the replicas as well. (Documentation here)
Reboot your database to apply the changes, or wait for the next maintenance window.
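After the reboot, you can verify that the parameter is active by connecting to the database. A minimal sketch (the connection URL variable is a placeholder):
```bash
# Should return "on" once logical replication is enabled on the instance.
psql "$DATABASE_URL" -c "SHOW rds.logical_replication;"
```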
#Enable logical replication for the "Green" database
We will create a separate DB Parameter group for our "Green group", so that you won't have to repeat the previous step next time.
Create a Custom DB Parameter group. Make sure it targets your next engine type and version (Parameter group family), let's call it parameter-group-15. (Documentation here)
Edit the DB parameter group parameter-group-15 and activate the logical replication by setting rds.logical_replication to 1 (Documentation here)
Go to the Database list on the AWS console, select the database, and press the "Create new Blue/Green Deployment" button.
Define a name for the deployment (blue-green-upgrade), ensure you select the right engine version (PostgreSQL 15) and the right DB parameter group (parameter-group-15) and create the Blue/Green deployment.
Use Preview Environment to get early feedback on your application changes by creating a dedicated environment for each of your pull requests.
Your production environment runs 24/7, whereas your other environments may not need to run all day long.
E.g., you may need to run environments to get early feedback on your application changes before the changes are merged into production. This is what we call a Preview Environment.
Preview Environments are sometimes also known as Ephemeral Environments, Temporary Environments, Development Environments, or Review Apps.
If you are using Qovery to run your Production, we recommend using Preview Environments on a separate cluster. This will ensure that your Production is not impacted by the Preview Environments and vice versa.
Do you need more examples? Do you have any questions? Feel free to ask on our Community forum.
Now, we need to connect our application to our database. The credentials (URI, Username, Password ...) are available
through environment variables. They are injected by Qovery when your application runs.
To connect our NodeJS application to our PostgreSQL database, we only have to:
Your application is running, but something goes wrong? In this guide, you'll learn how to debug your application and solve your problem to make it run smoothly.
Before you begin, this page assumes the following:
You have already deployed an application with Qovery
Your application is running, but for some reason, it is not working as expected. Here are a few tips to find out what's going on.
Qovery exposes the running status of your application in the interface, which gives you some high-level information about its healthiness. You can look in this section to know more about the Running Status.
If the service crashes, its Running Status will be displayed as a red dot. If that's the case, you can have a look at the logs to investigate the reason behind it.
If you need to see the log output of your application while it's running, Qovery exposes it to you in real time thanks to the Logs interface. You can have a look at this section to know more.
You can use this information to find out what causes your application to behave incorrectly.
If your application fails to start, you can check what's the cause in its deployment logs. You can have a look at this section to have more information on the deployment logs and how to access them.
This view provides insight into the build and deployment process. If anything goes wrong, you can see all the required information to fix the problem here.
You can check the Troubleshoot section to investigate any issue you might encounter during the deployment of your services.
We highly recommend using tools like Datadog, Sentry or NewRelic to manage your alerting.
Qovery will provide easy integrations in a coming release. Check out our roadmap.
Qovery is an easy way to deploy a full-stack application. Meaning, you can deploy a backend, frontend and a database seamlessly. In this guide, I'll show you how to deploy a template app.
Before you begin, this page assumes the following:
console.log(`Server running at http://${hostname}:${port}/`);
});
As you can see, to get access to your environment variable you just need to use process.env.ENABLE_DEBUG. Environment variables are injected at build and run time.
On Qovery, every application exposed publicly automatically gets a temporary qovery.io domain. You can also bring your domains to Qovery
quickly. We handle TLS/SSL certificate creation and renewal, as well as automatic HTTP to HTTPS redirects for all your custom domains. Let’s
learn how to set up your domains on Qovery!
Before you begin, this page assumes the following:
You have a domain
You have the permission to add a CNAME record to your domain
Message queuing service enables you to decouple and scale microservices, distributed systems, and serverless applications. In this guide, we'll show you how to leverage a queue system (Amazon SQS) to build a highly scalable backend.
Using Amazon SQS eliminates the complexity and overhead associated with managing and operating message-oriented middleware and empowers developers to focus on differentiating work. With SQS, you can send, store, and receive messages between software components at any volume without losing messages or requiring other services to be available.
In this guide, we'll create a backend microservice that sends messages on an event queue. Additionally, we'll go through two ways of consuming and processing those messages:
We will use AWS Lambda to process events from the queue in a serverless way
We will use Qovery-managed backend application workers to process events from the queue
As of now, Qovery does not natively integrate with AWS Lambda and SQS, but the integration part is quite easy, and we will go through it in the following steps.
The backend application and workers servers that consume messages from the queue will be fully managed and deployed by Qovery.
To push messages to our SQS queue from the backend app deployed on Qovery, click on the Open button in the application we deployed in the previous step. It will redirect you to the API endpoint exposed by the backend app - the logic inside the application is made so that it sends messages to the SQS queue.
Now, in the Monitoring section of SQS in AWS Console, we will see messages received on metrics charts:
To validate that our consumer Lambdas processed the messages, navigate to your lambda Monitor panel:
In the Invocations chart, you'll notice that our Lambda was triggered several times by the messages sent over the SQS.
In this part of the tutorial, we learned how to send messages over from an application deployed on Qovery to SQS and consume them from serverless Lambda functions. In the next part, we will create a scalable group of worker applications deployed by Qovery that consume messages from the same Queue.
```js
client.send(command).then(
  (data) => {
    console.log(data);
    res.end('Success');
    // process data.
  },
  (error) => {
    console.error(error);
    res.end('Error');
    // error handling.
  }
);
```
To deploy the app on Qovery, all you need to do is fork the repository from above and create a new app, adding port 3000:
Afterwards, we need to add two environment variables:
accessKeyId - your AWS access key ID
secretAccessKey - your AWS secret access key
You can add them in the Environment Variables > Secret section in your application settings:
After the setup is done, click the Deploy button - the application will shortly be deployed.
On the peering connection view, click on Actions then Accept request
You should see your peering connection marked as Active
Take note of the peering connection ID. You will need it later.
In the AWS console of your Qovery VPC, go to VPC > Route Tables.
You can filter the list using the IDs you noted at step 1 to find the routing table for your existing VPC.
For your existing VPC edit the route table:
Click on the Edit routes button then Add route.
As a destination, enter the CIDR of your Qovery VPC
As a target, select the Peering connection you created earlier
Click Save changes.
Do not alter existing routes. Make sure you are adding a new one.
Our two VPCs are now connected, but we still need to update the security groups to allow communication between the Qovery applications and your existing resources.
What rules to put on your security groups depends on what you are trying to achieve.
In our case, we would like to access an RDS instance from our Qovery applications.
We will edit the RDS security group in our existing VPC to add an inbound rule allowing PostgreSQL traffic from our Qovery instances:
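If you prefer the command line over the console, the same rule can be added with the AWS CLI. A minimal sketch (both security group IDs are placeholders):
```bash
# Allow PostgreSQL (TCP 5432) inbound traffic on the RDS security group
# from the security group attached to the Qovery cluster nodes.
aws ec2 authorize-security-group-ingress \
  --group-id sg-0123456789abcdef0 \
  --protocol tcp \
  --port 5432 \
  --source-group sg-0fedcba9876543210
```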
You should now be able to deploy an application using the RDS PostgreSQL database on your Qovery cluster.
Refer to this guide if you need help deploying an application on Qovery.
Let's now create a new branch in our repository and create a pull request to our production (master branch) environment. Preview Environments feature will spin up a new environment for us so that we can safely test changes we just introduced!
Once the PR is created, an automatic comment is dropped on it to let us know that the new preview environment has been created.
Now, when we display environments in our project, we will see that a new environment for the pull request is being deployed:
with all the resources we need! A database, backend, frontend - we can now test our changes in complete separation from the production without any manual setting up work:
The Preview Environment feature can be enabled or disabled per app. It creates a complete copy of your environment so that you can test new changes from pull requests in isolation. It deploys your databases, backend, and frontend applications to a completely new environment once a pull request is opened. If you update your pull request, all new changes are also reflected in the new environment so that you can test them or fix problems during the review. What is great is that Qovery takes care of managing all environment variables for you as well, and creates the same aliases you had in your prod environment, so everything is really tested separately and it all happens automagically. After the pull request is merged, Qovery automatically cleans up the preview environment to save you money.
After a few minutes, your preview environment should be up and running. You can now navigate to the frontend app and click Open - in the image gallery, you will see an empty list because we don't yet have any images in the database.
You can add a few images manually by connecting to your mongo instance via CLI. The credentials can be found in the database overview:
After connecting, let's add images by executing the following:
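A minimal sketch of such an insert with the mongo shell (the collection name and document fields are assumptions based on the demo image gallery, and the connection URI placeholder comes from the database overview):
```bash
# Seed a couple of image documents so the gallery has something to show.
mongosh "$MONGO_CONNECTION_URI" --eval \
  'db.images.insertMany([{ url: "https://example.com/cat.jpg" }, { url: "https://example.com/dog.jpg" }])'
```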
Now, after opening the frontend app in our preview environment, we will see all the images we put in the database! It looks like the feature is working well, so let's merge the PR:
After the PR merge, the preview environment is automatically cleaned up:
Great job! Thanks to Qovery Preview Environments, we managed to develop a new feature in a complete separation from our production, we tested it in a real environment deployed in the cloud, and we didn't have to spend any time preparing our environment for tests at all.
In the article, we quickly went through the process of creating a full-stack application with frontend, backend, and database. We enabled the Preview Environment feature to develop new features more quickly. We learned what the benefits of Preview Environments are, how to use them, and how to integrate them into a day-to-day development workflow.
We just use the qovery environment delete command to delete the ephemeral environment. The option -w is used to wait for the deletion to be completed. Qovery will automatically release the resources used by the environment.
Congratulations! You've successfully built an automated E2E testing pipeline with GitHub Actions and Qovery. You can now run your tests in a fully isolated environment, provisioned and de-provisioned automatically, and integrated with your GitHub repository.
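For reference, the teardown step of the pipeline boils down to a single CLI call, as described above:
```bash
# Delete the ephemeral environment and wait (-w) for the deletion
# to complete before the CI job finishes.
qovery environment delete -w
```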
In the previous step, we assigned the macros qovery.env.AWS_ACCESS_KEY and qovery.env.AWS_SECRET_ACCESS_KEY to the AWS secrets. In this step, we will create these secrets within the Qovery console.
Open the service overview of the created Datadog service
Enter the Variables section
Add a new Variable with:
Variable = AWS_SECRET_ACCESS_KEY
Value = <your_SECRET_ACCESS_KEY>
Scope = Service (so that it is accessible only to this service)
Secret variable ✔️
Add a new Variable with:
Variable = AWS_ACCESS_KEY
Value = <your_ACCESS_KEY>
Scope = Service (so that it is accessible only to this service)
Secret variable ✔️
If you need more information on how to manage your environment variables, have a look at this documentation
In this first part we saw how to create a Rust API with Actix and Diesel. In the second part we will compare its performance with a Go application to see which one is the most performant.
Special thanks to Jason and Kokou for your reviews
To prevent your playground environments from impacting your production environment, you can create a dedicated cluster. That way, every playground environment will run on the same cluster and will not disturb your production.
Here is how to create a playground cluster.
And how to create a playground environment on our playground cluster.
In this guide, we have covered everything you need to know to create a secure staging environment from your production. Now, you can take a look at how to seed your Staging database (Guide for Postgres but applicable for most databases).
Let's say you have your production environment deployed, and you want to create a staging environment. You have two options:
Create a staging environment from scratch.
Clone your production environment and create a staging environment from it.
This is where the Environment Clone feature of Qovery is useful. No need to create a new environment, just clone your production environment and create a staging environment from it.
In this guide, we will go through the steps to create a staging environment from your production environment, while applying best practices by isolating the staging and production environments on two separate clusters and VPCs.
Before you begin, this page assumes the following:
You already have a production environment deployed with Qovery.
Isolating the staging and production environments on two separate clusters and VPCs is a good practice to avoid any potential issues on your production caused by your staging. This is not a mandatory step, but it is highly recommended.
To create your staging cluster, it's also recommended to create a new AWS IAM access key and secret access key in a dedicated sub-account. That way, you are sure that both environments are also isolated at the AWS level:
Go to your Organization cluster settings
Add a cluster with a name "staging"
Deploy your staging cluster
#Create your Staging environment from your Production environment
Now, to create your staging environment from your production environment, you need to:
Go inside your production environment and click on the "Clone" button.
Give a name to your staging environment (E.g "staging")
Set the mode to "Staging"
Set the cluster to "staging"
Click on "Create"
That's it!
Cloning your database does not copy the data (yet). To copy your data in Staging consider using Replibyte in standalone. It will be integrated in Qovery soon.
Your environment has been created, but it's not deployed yet. Before deploying it, we will make some adjustments to change the branch of our applications.
Qovery makes the distinction between Environment Variables and Secrets even if for your app both will be used as Environment Variables. Check out this documentation to learn more about Environment Variables and Secrets.
Let's say you have a production environment with the following environment variables:
NODE_ENV=production
STRIPE_API_KEY=a-secret-production-key
You might need to keep the same keys but change the values. That's exactly what Qovery's Environment Variable Override feature lets you do: keep the same keys but change the values.
Finally, your Staging environment has been created and set up correctly. To deploy your Staging environment, you just need to go to your Staging environment and click on the "Deploy" button.
In this guide, we have covered everything you need to know to create a secure staging environment from your production. Now, you can take a look at how to seed your Staging database (Guide for Postgres but applicable for most databases).
In this quick guide, we will show you how to automatically customize your preview URL when a new environment has been created using the Qovery CLI. By following these steps, you can create a custom domain for your service and link it to your DNS provider.
Before you begin, this guide assumes the following:
Use the validation domain from the previous step to create a CNAME record in your DNS provider. The CNAME record should point to the validation domain.
The idea here is to create a CNAME record that points to the validation domain. The validation domain is a temporary domain that is used to validate the ownership of the custom domain.
Congratulations! You have successfully customized your preview URL using the Qovery CLI. Now, whenever a new environment is created, the custom domain will be automatically configured. If you encounter any issues, please reach out to our support team on the Qovery forum.
The following examples will show the application of seeding the data in dev environments after cloning an environment and using the Preview Environment feature.
The Clone environment feature allows you to make a complete clone of a chosen environment, including all its applications, services, and their configs. In this example, we will clone a new environment and have our seed data injected automatically.
First, we make a clone of our production environment:
Then, we deploy the new environment:
After navigating to deployment logs, we will notice our seed data inserts logged:
Preview Environment feature allows you to automatically create new development environments to validate new changes before merging them to your production branch.
First, we open a pull request:
Then, in the list of environments, we get a new environment automatically created for the pull request:
When you open the logs of the deployment, you’ll see the seed data injection logs:
JupyterHub is an easy way to interact with a computing environment through a webpage. It provides a standardized way to serve Jupyter Notebooks for multiple users. Pairing it with Kubernetes and Qovery makes it easier to manage and scale.
Before you begin, this guide assumes the following:
You have a Qovery cluster ready
You have a dedicated Qovery project and environment to deploy JupyterHub (example: Project=JupyterHub, Environment=Production)
Create the JupyterHub service in the Qovery environment of your choice (preferably within a dedicated JupyterHub project) by following this documentation and these values:
General:
Application name: JupyterHub
Source:
Helm source: Helm repository
Repository: JupyterHub (the name given during the JupyterHub helm repository added in the previous step)
Chart name: jupyterhub
Version: 3.3.7 (this is the version we used for this setup, update it based on the chosen version)
Allow cluster-wide resources ✔️
Values
Values override as file:
File source: Raw YAML
Raw YAML:
Default
EKS with Karpenter
```yaml
fullnameOverride: "jupyterhub"
proxy:
  service:
    type: ClusterIP
```
There are many other values you can set to modify the JupyterHub behavior. For advanced usage, check: JupyterHub Customization
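For reference, outside Qovery the equivalent manual setup with the Helm CLI would look roughly like this (the repository URL is the official JupyterHub chart repository; verify it against the JupyterHub documentation):
```bash
# Add the JupyterHub chart repository and refresh the local index.
helm repo add jupyterhub https://hub.jupyter.org/helm-chart/
helm repo update
```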
Now get to the last step and Create the service on Qovery.
In the previous step, we created the JupyterHub service. In this step, we will update its configuration to make it available on the public network (through Qovery Nginx Ingress).
Open the JupyterHub service details
Enter the Settings section
Click on Networking
Add a new Port with:
Service name: jupyterhub-proxy-public
Service port: 80
Select protocol: HTTP
External port: 443
Port name: jupyterhub-proxy-public-p80
If you need more information on how to manage your ports, have a look at this
JupyterHub is running on your Qovery cluster. This is a simple installation, and you should customize it according to your needs. You can also check the Administrator Guide to better understand how it works.
Our application will use a PostgreSQL database. Let's add one to our environment:
Click on ADD, then Database
Give a name to your database.
For the Type, select POSTGRESQL.
For the Mode, we'll pick CONTAINER.
Choose the Version you need.
Since we are creating a Staging environment, we used the CONTAINER mode. This is not recommended for Production. In a Production environment, you should go for the MANAGED option.
Since we're using Sidekiq, we'll also need a Redis database as a backend.
If you didn't close the Database modal, you can click the ADD button, then in the dropdown for Database 2, click Create database.
Fill the form the same way you did for PostgreSQL:
Since we are creating a Staging environment, we used the CONTAINER mode. This is not recommended for Production. In a Production environment, you should go for the MANAGED option.
On your application dashboard, go to Environment variables:
Here you can add any environment variable your application needs.
We do not advise you to add secret values here. For sensitive information, like credentials, use the Secret variables, which are encrypted.
We'll now configure a few secrets for our application. Click on the Secret variables tab:
First, since our demo application uses Rails Encrypted Secrets, we'll add the RAILS_MASTER_KEY secret.
Click on CREATE SECRET, then fill the form:
Variable: enter the variable name, RAILS_MASTER_KEY.
Value: enter the actual value for your RAILS_MASTER_KEY.
Scope: choose ENVIRONMENT since the secret will be used by our Sidekiq worker too.
Now we'll need to add the DATABASE_URL and REDIS_URL, that Rails will use to connect to PostgreSQL and Redis. Those are secrets as well, since the URLs contain passwords.
But instead of creating new secrets like we did for the RAILS_MASTER_KEY, we'll use aliases. Aliases are just a way of giving a different name to an existing ENV variable or secret.
Since Qovery provides us with the secrets corresponding to the two databases we created earlier, we can alias them.
First, create an alias to the QOVERY_POSTGRESQL_ZXXXXXXXX_DATABASE_URL_INTERNAL:
In the form, choose DATABASE_URL for the alias name and set it at the ENVIRONMENT level:
Click Create then do the same thing with a REDIS_URL alias to the QOVERY_REDIS_ZXXXXXXXX_DATABASE_URL_INTERNAL.
You should see your two aliases created:
These are the secrets required for our demo application. Yours might need more. Add all the variables you need before going to the next step.
The last step is to add your Sidekiq Worker. We'll follow the same steps as in the Add your Rails app section with a few differences:
Add a new application:
The settings are the same as for the Rails application, except:
We use the Dockerfile.sidekiq Dockerfile this time
We don't declare a port since our worker is not a web service but communicates with our application through Redis.
Click Create.
If we check the ENV variables and secrets, we notice that it directly inherited the ones we set at the Environment level. So we don't need to do the configuration again.
You now have a Rails application with PostgreSQL and Sidekiq running on Qovery.
Depending on the gems you are using, their versions or your application configuration, you might need to tweak the Dockerfiles provided. This example is meant to be a starting point for your own configuration, not a one-size-fits-all configuration.
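If you want to sanity-check the Sidekiq image locally before pushing, here is a minimal sketch (the image tag is arbitrary and the Redis URL is a placeholder for a locally reachable instance):

# Build the worker image from its dedicated Dockerfile
docker build -f Dockerfile.sidekiq -t myapp-sidekiq .
# Run it against a local Redis; REDIS_URL mirrors the alias configured above
docker run --rm -e REDIS_URL=redis://host.docker.internal:6379 myapp-sidekiq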
diff --git a/guides/tutorial/deploy-temporal-on-kubernetes/index.html b/guides/tutorial/deploy-temporal-on-kubernetes/index.html
index 19529689a3..ce4c373e72 100644
--- a/guides/tutorial/deploy-temporal-on-kubernetes/index.html
+++ b/guides/tutorial/deploy-temporal-on-kubernetes/index.html
@@ -26,9 +26,9 @@
@@ -50,14 +50,14 @@
Save the settings and close the modal.
Now we need to set a bunch of environment variables.
Go back to your Temporal server app and click on Environment variables:
Create all those env variables with the `ENVIRONMENT` scope. It will be useful when we split the server services, to avoid repeating the process for each app.
Add the following environment variables:
DB=postgresql
LOG_LEVEL=debug,info
Now create the following aliases on environment variables:
You can now deploy your environment. Go back to your environment view and click DEPLOY.
Once it's deployed and the status is RUNNING, you can go to the Web UI application and open it.
If you see the Temporal Web UI with no error, well done. Your server is deployed!
#Split the Temporal services for independent scaling
Temporal server is composed of four different services. By default, they will all be running in the same process. But if you would like to scale them independently, you still have the option to deploy them separately.
We could start again from scratch or edit the running environment (which would require resetting the DB), but instead we will leverage the clone feature of Qovery, to start with an identical, clean environment.
On your environment page, click Actions then Clone.
Pick a name and click Create
You will land in an identical environment, not deployed yet. Don't deploy it right away; we will first split our services.
First we will rename the server application to call it temporal-frontend. Go to the server application and click Settings. Then change the name and save.
We have successfully deployed Temporal on Qovery. It can be useful for Staging or Preview environments, but this is a very minimal deployment and we would not advise using it for production.
There is no one-size-fits-all configuration for this type of product.
You would probably like to set up authentication on your Web UI as well. We include the config file in the GitHub repository. You can edit it to your needs, following this documentation.
diff --git a/guides/tutorial/generate-qovery-api-client/index.html b/guides/tutorial/generate-qovery-api-client/index.html
index d27b77caf3..a6726349de 100644
--- a/guides/tutorial/generate-qovery-api-client/index.html
+++ b/guides/tutorial/generate-qovery-api-client/index.html
@@ -26,9 +26,9 @@
@@ -50,14 +50,14 @@
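For context, here is a sketch of how such a client is typically generated with the OpenAPI Generator CLI (the spec URL is an assumption; check Qovery's API documentation for the published OpenAPI file):

# -g go selects the Go generator; -o matches the out/client folder mentioned below
openapi-generator-cli generate -i https://api.qovery.com/openapi.yaml -g go -o out/client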
After generating the client, we published the out/client folder as a Git repository and imported the code in the CLI application as a dependency:
package utils

import (
	"github.com/qovery/qovery-client-go"
)
This allowed us to use the generated client code to interact with Qovery API very easily:
Qovery Open API specification allows creating Qovery API stubs extremely quickly. At Qovery, we officially support only Golang Client, but if you use a different language, you can generate your own client in a matter of seconds following the steps of this article.
It is critical to have testing and staging environments accurately reflect production, but achieving this can be a major operational hassle. Most engineering teams use a single staging environment which makes it hard for developers to test their changes in isolation; the alternative is for DevOps teams to spin up new testing or staging environments manually and tear them down after testing is done.
Qovery’s Preview Environments solve this problem by automatically creating a clone of your production environment (including applications, databases and configuration) on every pull request, so you can test your changes with confidence without affecting your production.
Qovery keeps your preview environments up to date on every commit and automatically destroys them when the original pull request is merged or closed. You can also set up an expiry time to automatically clean up preview environments after a period of inactivity.
Preview Environments can be helpful in a lot of cases:
Share your changes live in code reviews: no more Git diffs for visual changes!
Get shareable links for upcoming features and collaborate more effectively with internal and external stakeholders.
Run CI tests against a high fidelity copy of your production environment before merging.
In this step-by-step guide you will learn how to get started using the Preview Environments on AWS with Qovery.
This guide also works with other cloud service providers supported by Qovery.
Please contact us via our forum if you have any questions concerning the Preview Environments.
Before you begin, this guide assumes the following:
Even if not required, we recommend creating an environment that will serve as a root to create your Preview Environments. The idea is to keep this environment as a template of a fully working environment. This environment should not be used directly. This is what we call a "blueprint environment".
I assume you already have a working environment, so to create a blueprint environment you need to:
Go to your working environment
Click on "Actions" > "Clone"
Name your environment "blueprint"
Click on "Create"
We recommend using a different cluster than your production for your Preview Environments.
Now that you have turned on the Preview Environment feature, you need to change the base branch of the applications inside your Blueprint Environment. Let's say every new feature branch you create comes from staging. Then you will need to change all your applications to target the staging branch.
Here is a flow example showing what happens when you create a new Pull Request from a feat/xxx branch that has been created from the base branch staging.
A developer creates a git branch feat/xxx from staging.
A developer creates a Pull Request for feat/xxx.
Qovery creates a Preview Environment feat/xxx from the blueprint environment. The frontend, backend, PostgreSQL and Redis instances are cloned!
The frontend app from the environment feat/xxx is accessible via a dedicated URL.
To create a Preview Environment, here are the steps:
Checkout your staging branch.
Create a branch test_qovery_preview_environment and push it.
Create a Pull Request/Merge Request.
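In Git terms, the steps above boil down to (branch names taken from this guide):

git checkout staging
git checkout -b test_qovery_preview_environment
git push -u origin test_qovery_preview_environment
# then open a Pull Request targeting staging in your Git provider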
Qovery takes care of cloning all your services and the configuration as well (Environment Variables and Secrets included).
You should see a new environment appear in your environment list on Qovery. Wait until it is fully deployed, then you will be able to connect to it. This environment is fully isolated from your base environment.
To delete it, merge test_qovery_preview_environment into staging. You can also delete it manually on Qovery.
By merging into staging, Qovery will auto-redeploy the new version in your staging environment. Turn off auto-deploy from the staging environment settings if you want to manually deploy new versions in staging.
Congrats! You have set up your Preview Environments feature. Feel free to check out our forum and open a thread if you have any questions. In the next guide, we will go deeper into the configuration to integrate Preview Environments with your existing products and workflow.
diff --git a/guides/tutorial/gitops-with-qovery/index.html b/guides/tutorial/gitops-with-qovery/index.html
index 7474a245fc..aaf8cc62f7 100644
--- a/guides/tutorial/gitops-with-qovery/index.html
+++ b/guides/tutorial/gitops-with-qovery/index.html
@@ -26,9 +26,9 @@
@@ -89,14 +89,14 @@
Let's say you have a problem with the Terraform configuration. You can debug it by checking the Terraform logs in the GitHub Actions workflow. You can also use the Terraform CLI to debug the configuration locally.
Application logs:
If the problem is not in the Terraform configuration, you can check the Qovery web console to see the resources created and the associated logs.
CI/CD logs:
You can check the GitHub Actions logs to see the Terraform plan and apply outputs.
Qovery logs:
You can check the Qovery Audit Logs to see the changes made by the Terraform configuration.
Like in the example above, we recommend using a remote Terraform backend to store the state. This way, you can share the state between your team members and have a history of the changes. You can use the HashiCorp Cloud Platform or any other Terraform backend you want.
You can use the Qovery API to get the resource URLs and integrate them in your CI/CD. For example, you can get the URL of the application and use it in your tests. Look at this guide on how to run E2E tests with Qovery and GitHub Actions.
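A sketch of that lookup with curl (the endpoint path and response field are assumptions; check the REST API reference for the exact shape):

# Fetch the application and extract its URL; ".url" is an assumed field name
curl -s -H "Authorization: Token $QOVERY_API_TOKEN" \
  "https://api.qovery.com/application/$APPLICATION_ID" | jq -r '.url'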
In this tutorial, you learned how to do GitOps with Qovery and the Qovery Terraform provider. You defined all the Qovery resources in a Terraform configuration, tested it locally, pushed it to a GitHub repository, used GitHub Actions to review and apply the Terraform configuration, and checked the Qovery console to see the resources created.
First of all, create a project and an environment. Then let's create the Grafana application.
At the moment, Qovery does not support configuration file injection into Docker. So it can't be connected to an external database.
The currently used database is stored on the volume, so data will be lost if the application is deleted. Qovery is going to implement configuration files for Docker in the coming weeks.
Now you can deploy Grafana :). On the top right, you have the Open links button which will help you to get quick access. Then connect with those credentials:
Login: admin
Password: admin
Update the default password with a strong one as it is publicly exposed.
You can add several data sources to Grafana. One we recommend at Qovery for full-text search is CloudWatch. First of all, follow this guide to ensure all your logs are sent to CloudWatch. Then, you can add a new data source in Grafana:
We advise you to use an assumed role or a dedicated read-only service account to access your logs. In this case, the following permissions will be required:
You will be prompted for an SSO session name; put whatever you want. I used sso-benjamin.
SSO session name (Recommended): sso-benjamin
Attempting to automatically open the SSO authorization page in your default browser.
If the browser does not open or you wish to use a different device to authorize this request, open the following URL:
https://device.sso.us-east-2.amazonaws.com/
Then enter the code:
FHTG-****
You will be redirected to your browser, validate the form.
Then you will be prompted to select your AWS account.
There are 1 AWS account available to you.
> qovery, q@qovery.com (283389****)
Then you will be prompted for the default region (eu-west-3 in my case), the output format (json in my case), and a profile name (bchastanier_sso in my case, but feel free to pick whatever you want).
Using the account ID 283389****
The only role available to you is: AdministratorAccess
Look for a role named AWSReservedSSO_xx and select it (the name can vary based on what you have configured / how you named your Admins user group, but it should start with AWSReservedSSO_).
Copy its ARN and keep it somewhere; you will need it in the next step.
To connect to your EKS cluster you will need to set a context to kubectl. This is done with a Kubeconfig file.
When installing a new cluster, Qovery stores it in an S3 bucket on your account. You can retrieve the Kubeconfig of your cluster directly from the Qovery interface by following the procedure "Get your cluster kubeconfig file" within this section.
This will open your browser and prompt you to connect, validate the form.
Now you should be able to access your cluster without anything else, let's try to get aws-auth configmap showing users and roles allowed to connect to the cluster:
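# The aws-auth configmap lives in the kube-system namespace
kubectl -n kube-system get configmap aws-auth -o yaml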
You can access your Qovery clusters via your SSO directly.
How to Build a Cloud Version of Your Open Source Software - A Case Study with AppWrite - Part 1
Open source is eating the world. More and more great open-source projects are being used. One standard method to make those products financially sustainable is to provide a managed version: you can enjoy using the product without the hassle of managing the updates, the backups, the security, and the scaling. This guide will attempt to explain how to build a cloud-managed version of an open-source project.
As a developer, I am super impressed by the number of great open-source projects popping up. I think of Supabase (an open-source alternative to Firebase), Strapi (open-source headless CMS), Meilisearch (open-source search engine), Posthog (open-source product analytics tool), and so many others. For me, these are the tools that most developers will use in the future. One common method to make those products financially sustainable is to provide a managed version, meaning you can enjoy using the product without the hassle of managing the product updates, the backups, the security, and the scaling. It is exactly what Hasura did with its cloud version - and it is pretty convenient to use their product in production. However, building a cloud version takes months (sometimes years). What takes time? Hiring platform engineers, building the infrastructure, testing it, monitoring it... All of that takes a considerable amount of time and effort. Luckily, at Qovery, we provide the infrastructure stack that every open-source project needs to build 90% of their cloud-managed version. The remaining 10% are the UI and the business model logic. In this 6-part article series, I will attempt to explain how to build a cloud-managed version of AppWrite. Let’s go!
Articles:
Part 1: Introduction and architecture
Part 2: Build our AppWrite cloud backend and integrate it with the Qovery API
Part 3: Build our AppWrite cloud frontend and combine it with our cloud backend
Part 4: Monitor our AppWrite cloud version
Part 5: Integrate the payment system with Stripe (optional)
Part 6: Integrate email notification with Courier (optional)
Part 7: Give your customer a production, staging, and dev environment (optional)
Since I launched Qovery in 2019, I have talked to dozens of founders from great open-source software companies. Most of them were looking to build their cloud-managed service at some point. Some of them even asked me for feedback on building one and asked me to use Qovery as a white-label technology when they discovered it was a full-time job. Qovery is a product simplifying app deployment and infrastructure management on AWS. Time flies, and as Qovery evolves, it is now possible for any open-source project to use Qovery as a white-label technology to provide a cloud version of an open-source project. No hidden cost. Just pick the plan that fits you best and build your cloud version in days instead of months. My team will be proud to help you in your success.
AppWrite is quite representative of a “modern web open-source project”. In this guide, AppWrite is used as a demo project to demonstrate the concept of building a cloud-managed version for an open-source web project. AppWrite is written in PHP for the backend and JS for the frontend. It provides a user-friendly web interface connected to a web API, and it stores the data in MariaDB and Redis databases. The idea is: if it works for AppWrite, then it is good to work for any other web open-source project with a similar technical stack. Feel free to contact me if you have any concerns.
AppWrite is a Backend as a Service open-source software. Like Supabase and Firebase, it lets you create a backend in a few minutes.
Our goal is to provide a fully managed cloud version of AppWrite. Meaning we need to deliver to our customers a way to order their AppWrite instance and use it, while the maintenance is handled by us. It is the most common managed version out there - think MongoDB Atlas. To achieve this, we will use the following technologies:
This schema represents the different layers composing the cloud version of AppWrite. From top to bottom, we will give the details of each layer.
#User flow 1: Customer requests an AppWrite instance
Here is what happens when the customer requests a cloud AppWrite instance:
The customer connects on cloud.appwrite.com (fake domain to represent “AppWrite cloud frontend”).
The customer requests a new AppWrite instance.
The AppWrite cloud backend calls the Qovery API to create an Environment.
The AppWrite cloud backend calls the Qovery API to create a MariaDB database.
The AppWrite cloud backend calls the Qovery API to create a Redis database.
The AppWrite cloud backend calls the Qovery API to create an AppWrite application.
The AppWrite cloud backend calls the Qovery API to bind the AppWrite application to the MariaDB and Redis databases.
The AppWrite cloud backend calls the Qovery API to start the Environment.
The Qovery API returns the temporary URL to the AppWrite cloud backend.
The customer receives the URL of his instance via the AppWrite cloud frontend.
The customer can use his AppWrite instance.
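As a rough sketch, step 3 of this flow could be a single API call from the backend (the payload is illustrative; see the Qovery REST API reference for the full schema):

# Create an isolated environment for this customer (payload fields are assumptions)
curl -s -X POST "https://api.qovery.com/project/$PROJECT_ID/environment" \
  -H "Authorization: Token $QOVERY_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "customer-42"}'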
#User flow 2: Customer deletes an AppWrite instance
Let’s say our customer now wants to delete his cloud AppWrite instance; this is what happens:
The customer connects on cloud.appwrite.com (fake domain to represent “AppWrite cloud frontend”).
The customer removes his AppWrite instance.
The AppWrite cloud backend calls the Qovery API to delete the customer Environment.
Qovery deletes the AppWrite application, MariaDB, and Redis databases.
We can add other steps like payment (part 5), notifications (part 6), and everything you want - they are not required to make our cloud version functional. Let’s now take a deeper look at the infrastructure.
#AppWrite cloud frontend and backend (control plane)
The AppWrite cloud frontend and backend are the two components that we have to build from scratch. They include our business logic and customer management system. We will use Hasura for the backend and GatsbyJS for the frontend. We will connect the frontend to the backend via a GraphQL API. The advantage of using Hasura instead of coding our web backend is that we have access to many features (Auth0, Stripe support...) right away, saving days of work.
The goal here is to provide our customers with a web interface to:
Qovery is the simplest way to deploy apps and manage your infrastructure on AWS. We will use Qovery as an Infrastructure as Code (IaC) API.
Qovery provides a production-ready infrastructure on our AWS account in 30 minutes that we will use to host our customers’ instances. The Qovery API provides a high-level abstraction to create for each customer an isolated Environment including:
An AppWrite app instance with the possibility to scale it horizontally.
A MariaDB database.
A Redis database.
An HTTPS endpoint.
The option to bind a custom domain with TLS.
A secure API to manage Environment variables and Secrets.
Each Environment is isolated and will be accessible for only one customer. And as admin, Qovery provides a web interface to manage all our customers’ instances and troubleshoot any of their issues.
Curious to know more about how Qovery works? Take a look at this page.
Qovery supports AWS, Digital Ocean, and Scaleway. In this guide, we will focus on AWS to make it simpler. But keep in mind that you can use another supported cloud provider. You can even imagine a feature where your customers can choose the cloud provider of their choice. This is exactly what “MongoDB Atlas” and “Hasura Cloud” do.
By using a cloud version, our customers expect us to provide a reliable service and manage the database backups. For AppWrite, MariaDB is the persistent database and needs to be backed up. Four options exist, each with pros and cons:
Backup managed by AWS (point-in-time recovery included)
Higher performance than container version
Scalable (managed by AWS)
Expensive for a few customers, but the more customers you have, the cheaper it is.
Cons:
The most expensive option (~$11 per instance for the cheapest one on AWS us-east-2)
Potential security breaches as many customers are using the same database instance.
We will pick the third option (single-tenant with managed MariaDB database) to create a state-of-the-art cloud version, but you are free to choose the one you want for your customer. Do not forget your customer expects you to take care of their business.
Side note: AppWrite uses Redis as a caching system, so we will use a Redis container instance, which is the cheapest option.
Thank you all for taking the time to read until the end. We will build our AppWrite cloud backend and integrate it into the Qovery API in the next part.
return response, nil
}
You can see the whole code in your forked repository on GitHub.
After a few minutes of deployment, the first version of our managed cloud solution should be ready. Let's use the Hasura GraphQL API to create a new user.
To do so, open your Hasura by clicking the Open button in your Hasura application. Then, run the following mutation in the GraphQL explorer:
It's time to start our project. To do so, run the following mutation:
mutation {
  StartProject(input: { id: 10 }) {
    ok
  }
}
We should get this response:
{
  "data": {
    "StartProject": {
      "ok": true
    }
  }
}
And looking into Qovery, we'll see our environment is starting:
After a few minutes, our AppWrite instance should be up and running at the URL from the previous response. We can also list our projects to get all projects' URLs:
In this tutorial, we have managed to bootstrap the backend for our AppWrite Cloud solution. Users can register, log in, and create and deploy managed AppWrite projects. In the following steps, we will add more functionalities to our AppWrite Cloud offering, set up a nice-to-use web User Interface, and continue adding new features to AppWrite Cloud on top of Qovery.
Clicking on Signup will send a test signup request to our backend; click Signup and see the response with an access token in the network tab of your browser:
To send the request, we use the following piece of code:
We use the axios HTTP library to send a POST request to our graphqlApiEndpoint (which uses the value of the environment variable we set previously) to run a GraphQL mutation that creates a new user with a given email and password in our AppWrite Cloud backend. In the response, we receive an access token that we can use on behalf of the user to interact with the API.
The anonymous token sent in the request is a way to interact with unauthenticated endpoints in the Hasura backend.
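Sketched with curl instead of axios, the same request could look like this (the mutation name and fields are assumptions based on the description above):

curl -s "$GRAPHQL_API_ENDPOINT" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $ANONYMOUS_TOKEN" \
  -d '{"query": "mutation { signup(email: \"user@example.com\", password: \"secret\") { accessToken } }"}'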
In the next step let’s take care of the list of user projects:
const { isLoading, error, data } = useQuery('projects', () => {
  return axios({
    url: graphqlApiEndpoint,
    method: 'POST',
    headers: { Authorization: 'Bearer ' + token },
    data: {
      query: `query Projects {
        project {
          id
          name
          url
        }
      }`,
    },
  });
});
In the snippet above, we use ReactQuery to manage the server state (store the info about the project client-side) and axios for performing the HTTP request. In the headers, we send the user's accessToken, and the payload allows us to specify the data we are interested in about the projects we have access to.
The response from the query contains info we can use to render the list of AppWrite projects managed by AppWriteCloud:
Now, to display it, add the following piece of code into our dashboard component:
In this article, we bootstrapped a frontend application and added it to our AppWrite Cloud. We created the first version of our frontend using React, Next.js, ReactQuery and Tailwind. The UI is integrated with our backend GraphQL API, which is deployed on Qovery and allows us to manage AppWrite projects deployed on AWS for AppWrite Cloud clients.
When creating a managed MongoDB instance on AWS via Qovery, you don't get a publicly accessible endpoint. While it is good from a security point of view, you still might need to connect to it from a local client.
Public endpoint for managed MongoDB instance will be available in Q1 2022. This is a temporary workaround.
Before you begin, this guide assumes the following:
You have a managed MongoDB instance up and running
You have access to your Kubernetes cluster through kubectl: see how here
socat is a relay for bidirectional data transfers between two independent data channels.
It will forward all traffic between your computer and your database.
kubectl run ${SERVICE_NAME} --image=alpine/socat \
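The command above is truncated; to complete the picture, here is a sketch of the full relay command plus the local forward (the listen port and the database host are placeholders to adapt to your instance):

# Relay pod: listen on 27017 and forward traffic to the MongoDB endpoint
kubectl run ${SERVICE_NAME} --image=alpine/socat --port=27017 \
  -- tcp-listen:27017,fork,reuseaddr tcp:${MONGODB_HOST}:27017
# Forward a local port to the relay pod, then connect with any MongoDB client
kubectl port-forward pod/${SERVICE_NAME} 27017:27017
mongosh "mongodb://localhost:27017"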
Since kubectl will use IAM to authenticate, you need to have one of those things:
1. Add your IAM user (the one the AWS CLI is authenticated with) to the Admins group you created when setting up Qovery
2. Have the permissions to access the EKS cluster via SSO (see cluster advanced settings for it)
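Before running kubectl, you can confirm which IAM identity your AWS CLI is using:

aws sts get-caller-identity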
INFO[2024-11-01T11:42:49+01:00] Kubeconfig file created in the current directory.
INFO[2024-11-01T11:42:49+01:00] Execute `export KUBECONFIG=/Users/user/kubeconfig-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.yaml` to use it.
The path of your kubeconfig file will be displayed in the output. You can now use it to set the context for kubectl.
On AWS you'll need to have the AWS_PROFILE environment variable set to the right profile to be able to download the kubeconfig file or AWS credentials set as environment variables.
The environment namespace is defined the following way: z<project short ID>-z<environment short ID>.
The short ID is the first section of the ID. For example, given the following ID: e0aabc0d-99cb-4867-ad39-332d6162c32c, the short ID will be e0aabc0d.
The following environment URL: https://console.qovery.com/platform/organization/<organisation ID>/projects/e0aabc0d-99cb-4867-ad39-332d6162c32c/environments/b91d2eb8-a850-49b5-8626-ade7afc4a28b/applications
would translate to the following namespace: ze0aabc0d-zb91d2eb8.
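For example, to list the pods of that environment with kubectl:

kubectl get pods -n ze0aabc0d-zb91d2eb8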
Qovery makes it easy to create an RDS database on AWS in a few clicks. You might, however, want to create your own RDS instance in a separate VPC, for example if you want to use the same instance with several Qovery clusters.
Before you begin, this guide assumes the following:
General Purpose SSD should be the right option for most cases.
Choose the allocated storage that fits the needs of your application. We also advise you to Enable storage autoscaling in case you need more storage over time.
Since we want the database to live in its own VPC, make sure to select the Create new VPC option.
Also select Create new DB Subnet Group.
We advise you to disable Public access for security reasons. We'll set up VPC peering in the next guide to allow access from your Qovery clusters through private networking.
Finally, choose Create new security group and give it a name.
Your RDS database is ready. Now, in order to access it from your Qovery cluster, we will need to set up VPC peering. You can find the procedure in this tutorial.
To deploy our Rust app on AWS we are going to use Qovery. Qovery is the simplest way to deploy any app on AWS. It is the perfect candidate to deploy our Rust REST API in a few steps.
Rust combined with the Rocket web framework makes building a REST API super easy. Deploying your Rust app on AWS with Qovery is as simple as selecting your GitHub repository. Nothing more. Hope you liked it.
Access our new installation guide of Qovery on AWS here
diff --git a/guides/tutorial/how-to-integrate-qovery-with-github-actions/index.html b/guides/tutorial/how-to-integrate-qovery-with-github-actions/index.html
index 0b86c58a2e..d12cd0cc94 100644
--- a/guides/tutorial/how-to-integrate-qovery-with-github-actions/index.html
+++ b/guides/tutorial/how-to-integrate-qovery-with-github-actions/index.html
@@ -26,9 +26,9 @@
@@ -51,14 +51,14 @@
As an example we will define a workflow for a NodeJS application. We will first run our unit tests, then launch the Qovery deployment if the tests pass.
The on section contains a workflow_call directive. It means that this workflow will be triggered when called from another workflow.
We're doing this because we won't use this workflow directly. Since we might have several environments to deploy to Qovery depending on the branch, we could have one workflow per environment, and we want to avoid repeating all the steps.
The inputs and secrets sections define the values that we will need to pass to our workflow.
The jobs section lists the jobs and the steps it needs to accomplish. Here we have two jobs and five steps:
test, where we check out the code, install Yarn modules, and run tests through Jest
deploy, where we check out the code and deploy to Qovery.
Several things worth noting:
The organization / project / environment / application are case-sensitive.
Our deploy job has a needs instruction, telling GitHub Actions that this job can only run when the test job succeeds.
The with section of our last deploy step contains interpolated strings: ${{ inputs.xxxx }}. Those are values passed to our workflow, that our Qovery action needs. They will be passed from the calling workflow.
We're done with the setup. You can now push your code to the main branch. If you did it properly, under the Actions tab on your GitHub repository, you should see your job being run.
You can click on it to see the details of the jobs. Once the testing phase is green, it will start the deployment job.
As soon as the job is set up and starts actually deploying, go to the Qovery console and check that your application is actually being deployed.
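If you prefer the terminal, the GitHub CLI can follow the same runs (assuming gh is installed and authenticated; the workflow file name is an assumption):

gh run list --workflow deploy.yml
gh run watch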
It's possible to support any use case by using the Qovery CLI, like cloning an environment, changing the branch of some applications, and deploying only a subset of applications. Refer to the Qovery CLI documentation to explore all the commands that you can use.
Integrating Qovery with GitHub Actions enables more complex workflows than just deploying on code push. You can make sure your test suite succeeds before deploying, or do anything else you need, without sacrificing the simplicity of deployment Qovery brings you.
diff --git a/guides/tutorial/how-to-run-commands-at-application-startup/index.html b/guides/tutorial/how-to-run-commands-at-application-startup/index.html
index 360c6e8b3e..a88857f673 100644
--- a/guides/tutorial/how-to-run-commands-at-application-startup/index.html
+++ b/guides/tutorial/how-to-run-commands-at-application-startup/index.html
@@ -26,9 +26,9 @@
@@ -54,14 +54,14 @@
You can now commit and push your changes to your Git repository. The instructions you specified in the entrypoint.sh file will be executed before the application starts.
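For reference, here is a minimal entrypoint.sh sketch (the pre-start command is a placeholder; replace it with whatever your app needs):

#!/bin/sh
set -e
# Run one-off startup tasks before the app boots, e.g. database migrations
echo "Running pre-start tasks..."
# bundle exec rails db:migrate   (placeholder example)
exec "$@"   # then hand off to the container's main command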
If you'd like to use Cloudflare instead of CloudFront as your CDN, check out this article.
Frontend apps primarily consist of static content that goes unchanged. Web pages that contain static assets are essentially prebuilt, which makes it quicker to grab and render content. Their static nature makes them a perfect use case for CDNs, as caching on edge servers boosts web page performance and the user experience.
Now, to deploy the app, create a new application on Qovery with the following configuration:
Port - 80
Build Mode - Docker
Keep other options in default settings
After the app is created and configured as above, you can safely run the app deployment. After a few minutes when the app is running, click on the Open button:
To set up CloudFront as a CDN, first navigate to the CloudFront service in the AWS console and click on the new distribution button:
In settings, choose an origin (URL to your frontend app hosted on Qovery):
You can also tweak other settings or leave them in their defaults:
Additionally, you can assign an alternate domain to your application in Alternate domain name:
Adding an alternate domain requires a certificate - click on the Request certificate button, type your alternate domain name, and use DNS as the validation method:
Request the certificate. In the end, you will see a screen with settings you need to set up in your domain name provider:
Copy them and save them in your DNS provider settings:
After it's done, you should be granted a certificate - go back to CloudFront Distribution settings, and pick the certificate for your alternate domain name from the list:
In the end, you should have a CloudFront distribution set up in front of your app on Qovery, using an alternate domain name. Now it's time to tweak the CloudFront settings to meet your needs.
The Lifecycle Job is a powerful feature that allows you to run any kind of command before or after your environment is deployed. It can be used to run database migrations, create a new database, or even run a script that creates a new user.
Some use cases:
Run Terraform, Pulumi, or any other infrastructure as code tool to create resources.
You want to deploy SQS, SNS, Lambdas, or any other AWS resources.
You want to deploy MongoDB Atlas, Google BigQuery, or any other cloud services.
Seed your database when your environment is created.
You can find some Lifecycle Jobs examples on our GitHub.
In a more general way, you can see the Lifecycle Job as a way to create and destroy resources when your environment is deployed or deleted. Possibilities are endless.
#How to use Lifecycle Job (example with Terraform)
In this example, we will use Terraform to create a new AWS RDS MySQL instance, and we will use it to illustrate the Lifecycle Job workflow. ⚠️ Note that you can use any other tool to create your resources; the Lifecycle Job is not limited to Terraform. However, Terraform is a great way to show the power of the Lifecycle Job, since it requires a lot of configuration and can create many different kinds of resources.
In our example, we use S3 as a Terraform backend. You can use any other backend you want. However, if you want to use S3, you need to create a new bucket and a new IAM user with the right permissions. You can find more information about this in the Terraform documentation.
Go inside your Environment, and add a Lifecycle Job.
Give a name, description, pick your GitHub account, and select the repository of the Lifecycle Job. In our example, the root application path is /examples/aws-rds-with-terraform.
Since we are using Terraform, we want to make sure that our MySQL RDS instance is created when our Environment is deployed. So we select the Start Event.
We also want to make sure that our MySQL RDS instance is destroyed when our Environment is deleted. So we select the Deleted Event.
If you look at our Dockerfile in the repository, you will see that we are using the official Terraform image. I have also inserted by default the ENTRYPOINT ["/bin/sh"] to simplify the Qovery Lifecycle Job configuration.
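For reference, here is a minimal sketch of what this Dockerfile could look like. The base image tag, the file layout, and initializing the S3 backend at build time via -backend-config flags are assumptions for illustration; refer to the example repository for the real file.
Dockerfile
FROM hashicorp/terraform:1.5
WORKDIR /terraform
COPY . .
ARG TF_VAR_terraform_backend_bucket
ARG TF_VAR_aws_region
ARG TF_VAR_aws_access_key_id
ARG TF_VAR_aws_secret_access_key
ARG TF_VAR_qovery_environment_id
# Initialize the S3 backend at build time, so the job command only needs
# to run `terraform apply` or `terraform destroy`
RUN terraform init \
    -backend-config="bucket=${TF_VAR_terraform_backend_bucket}" \
    -backend-config="region=${TF_VAR_aws_region}" \
    -backend-config="access_key=${TF_VAR_aws_access_key_id}" \
    -backend-config="secret_key=${TF_VAR_aws_secret_access_key}" \
    -backend-config="key=${TF_VAR_qovery_environment_id}/terraform.tfstate"
# /bin/sh as the entrypoint keeps the Qovery job command configuration simple
ENTRYPOINT ["/bin/sh"]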
For the Start Event, we want to run the terraform apply -no-color -auto-approve command. We don't need to run the terraform init command since it is already done in the Dockerfile.
You will also notice that we are using && terraform output -json > /qovery-output/qovery-output.json to create a /qovery-output/qovery-output.json file. This file is used by Qovery to inject the database credentials into our Environment Variables. We will cover this part later.
For the Deleted Event, we want to run the terraform destroy -no-color -auto-approve command.
So for the Start Event, we have: ["-c","terraform apply -no-color -auto-approve && terraform output -json > /qovery-output/qovery-output.json"] and for the Deleted Event, we have: ["-c","terraform destroy -no-color -auto-approve"]. Feel free to copy/paste these commands.
Yes, the commands contain commas. That is not a typo: each command is a JSON array, and commas separate the elements of the array.
I recommend setting the Timeout to 1800 seconds (30 minutes). It is the maximum time your Lifecycle Job can run. If your Lifecycle Job takes more than 30 minutes, it will be stopped by Qovery. In our case, it should take less than 10 minutes to create the AWS RDS MySQL instance. But let's be safe.
Click Continue.
Now we need to set the vCPU and RAM required to run our Job. We can allocate 0.5 CPU and 256 MB of RAM. It's more than enough.
We need to set the Environment Variables required by our Lifecycle Job. In our case, we need to set the AWS credentials and some other environment variables. If you look at our Dockerfile, you will find the declaration of all those environment variables. You can copy/paste them.
Dockerfile
...
ARG TF_VAR_terraform_backend_bucket
ARG TF_VAR_aws_region
ARG TF_VAR_aws_access_key_id
ARG TF_VAR_aws_secret_access_key
ARG TF_VAR_qovery_environment_id
...
Those are the ones that we need to set.
We do not set TF_VAR_qovery_environment_id here, since we will create it in the next step.
TF_VAR_terraform_backend_bucket is the name of the S3 bucket where Terraform will store the state of your infrastructure. You need to create this bucket on S3 before running the Lifecycle Job. You can use the same bucket for all your Lifecycle Jobs. It is not a problem. You will just need to make sure that the S3 object key is unique.
Click on Continue.
Then click on Create (and not Create and Deploy).
Congrats, your Lifecycle Job is created. Now we just need to add the TF_VAR_qovery_environment_id environment variable before launching it.
#Make your Terraform deployment multi-environment ready
To support multiple environments, we need to make sure that the name of the S3 object key where Terraform will store the state of your infrastructure is unique. To do that, we will use the TF_VAR_qovery_environment_id environment variable. This environment variable is automatically created by Qovery and contains the ID of your Environment. We just need to create an environment variable alias.
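As a sketch of how this fits together (assuming the S3 backend described above): a Terraform backend block cannot interpolate variables, so the unique key is passed when the backend is initialized, for example with a -backend-config flag.
# backend.tf: declare a partial backend configuration...
terraform {
  backend "s3" {}
}
# ...and inject the unique, environment-specific key at init time:
# terraform init -backend-config="key=${TF_VAR_qovery_environment_id}/terraform.tfstate"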
Go inside your MySQL RDS service, click on the Variables tab.
Search for the QOVERY_ENVIRONMENT_ID built-in environment variable, then click on Create alias.
Set the name of the environment variable to TF_VAR_qovery_environment_id with a service scope and click on Confirm.
Now you are ready to deploy your Lifecycle Job and see what happens.
The job execution will take approximately 3 to 10 minutes.
Follow the logs of the job execution by clicking on the Logs button.
From the Deployment logs tab you can see that your Lifecycle Job is built and that the terraform init command is executed.
From the MySQL RDS tab you can see that the terraform apply -no-color -auto-approve command is executed. The creation of the AWS RDS MySQL instance is in progress.
Once the deployment is done, you should see that the AWS RDS MySQL instance is green and completed.
#Get the MySQL RDS credentials from the Lifecycle Job
Now that the AWS RDS MySQL instance is created, we need to get the credentials to connect to it. We used the terraform output -json > /qovery-output/qovery-output.json command to expose them. If you go back to the Variables tab of your MySQL RDS service, you will see that QOVERY_OUTPUT_** environment variables have been created.
By using terraform output -json > /qovery-output/qovery-output.json, Qovery automatically creates those environment variables for you. You can use them in your application to connect to the AWS RDS MySQL instance. Learn more about the Lifecycle Job output...
Job output is a powerful feature that allows you to get the output of your Lifecycle Job and use it in your application. You can use it to get the credentials of your database, the URL of your S3 bucket, the URL of your CDN, etc...
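For illustration, the Terraform side could declare outputs like the following (the resource reference aws_db_instance.mysql and the output names are assumptions); each output written to qovery-output.json becomes a QOVERY_OUTPUT_-prefixed environment variable:
# outputs.tf
output "database_host" {
  value = aws_db_instance.mysql.address
}
output "database_user" {
  value = aws_db_instance.mysql.username
}
output "database_password" {
  value     = aws_db_instance.mysql.password
  sensitive = true
}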
#What happens if I delete my environment in this example?
If you delete your environment, the AWS RDS MySQL instance will be deleted too. You can see that in the MySQL RDS service logs. You will see that the terraform destroy -no-color -auto-approve command is executed.
#Can I use the Lifecycle Job to deploy my application?
Some users ask us if they can use the Lifecycle Job to deploy their application. The answer is yes! The Lifecycle Job is designed to deploy all types of resources. However, we recommend using the official Qovery way to deploy applications. Learn more about how to deploy your application...
#What happens if I clone my Environment with the Lifecycle Job?
If you clone an Environment containing a Lifecycle Job, the Lifecycle Job is cloned too. In our example, we aliased the TF_VAR_qovery_environment_id environment variable to the QOVERY_ENVIRONMENT_ID built-in environment variable. When you clone your Environment, QOVERY_ENVIRONMENT_ID takes a new value; that's why TF_VAR_qovery_environment_id must be an alias of the QOVERY_ENVIRONMENT_ID built-in variable, so that it automatically picks up the new Environment ID. Learn more about how to clone an Environment...
#What happens if I modify my Lifecycle Job after my Environment is deployed?
If you modify your Lifecycle Job after your Environment is deployed, the Lifecycle Job will be redeployed. In our example, since the state of our AWS RDS MySQL instance is stored in the S3 bucket, the AWS RDS MySQL instance will not be recreated. However, if you modify the main.tf file, the AWS RDS MySQL instance will be updated.
In this guide, we have seen how to use the Lifecycle Job to create an AWS RDS MySQL instance with Terraform. We have also seen how to get the credentials of the AWS RDS MySQL instance to connect to it from our application. To learn more about the Lifecycle Job, you can read the Lifecycle Job documentation. To get more examples, check out the Qovery Lifecycle Examples repository.
If you read this, you probably don't know why Docker is used and what is the purpose of a Dockerfile.
Docker is a container engine, building and using images to deploy applications in containers. It looks like virtualization, and each container could be compared to a virtual machine with the minimal setup to run an application.
The Dockerfile is your image builder recipe. When Docker uses it, it will follow all instructions to build your application and run it.
The first step is to create a file named Dockerfile at your project root level, so that Qovery can find and use it.
Also, to keep unwanted files from your repository (images, .idea, .DS_Store, etc.) out of the image, you need to add a .dockerignore file. It prevents heavy copies of useless files, mostly the project dependencies and libraries you will reinstall anyway with your package manager.
The .dockerignore file works like .gitignore: add the paths of all the useless files and folders to it.
The first line you'll add in your Dockerfile is FROM.
It pulls an already existing image from Docker Hub. Most of the time, you should use an image that fits your application language (Node, Python, Java, etc.), but you can also take a step back and begin with a simple Linux image.
Your Dockerfile's first line should look like this:
FROM <image_name>:<image_version>
Since most of the images are Linux-based, a good practice is to set up a directory you'll work in. That's the purpose of the WORKDIR line: it defines a directory and moves you into it:
FROM <image_name>:<image_version>
WORKDIR /app
If you now work with a relative path (./), it will be resolved inside the /app directory.
Now that you have defined your base image and your working directory, it's time to add your code. COPY works like the Linux cp command: the first argument is the source and the second one is the destination. Let's copy the source code into the image.
FROM <image_name>:<image_version>
WORKDIR /app
COPY . .
Here, everything in your current directory (the build context) will be added inside the /app folder.
You can replace the source . with a relative path (./<my_source>) if you want to add specific elements (except anything listed in .dockerignore), and the destination . with a path relative to /app (e.g. ./<my_destination>).
If your app needs to be reached from outside the container, you have to open its listening port. EXPOSE is made for this.
FROM <image_name>:<image_version>
WORKDIR /app
COPY . .
RUN echo "Installing or doing stuff"
RUN <my_command>
EXPOSE <app_port>
Typical mistakes happen on the application configuration side. Make sure your application listens on all interfaces (0.0.0.0) and not only on localhost (127.0.0.1).
The last thing to do is to specify how to execute your application. Add a CMD line with the same command and all the arguments you use locally to launch it.
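Putting it all together, the complete skeleton looks like this (the CMD values are placeholders for the command and arguments you use locally):
FROM <image_name>:<image_version>
WORKDIR /app
COPY . .
RUN <my_command>
EXPOSE <app_port>
# Same command and arguments you use locally, written as a JSON array
CMD ["<my_binary>", "<my_arguments>"]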
When Qovery uses your Dockerfile, it first builds it before running it.
If the build fails, Qovery won't be able to launch your application. To simplify debugging, you can build the image locally if you have Docker installed on your computer.
Open a terminal, go to the Dockerfile location, and run:
cd ~/my/folder/where/my/code/is
docker build .
It will build your image based on your Dockerfile. You'll see all the logs related to all lines you've added in the Dockerfile.
If something goes wrong, it will be printed onto the terminal, and you'll be able to debug it.
If you followed this tutorial and everything works perfectly, it's time to deploy your app on Qovery. You will find everything you need to know here.
To import the Environment Variables from this file, we run the command qovery env import <dotenv file> and select the environment variables to import:
$ qovery env import .env.development
Qovery: dot env file to import: '.env.development'
? Do you want to import Environment Variables or Secrets? Environment Variables
? What environment variables do you want to import? [Use arrows to move, space to select, <right> to all, <left> to none, type to filter]
  [x] COLOR_BACKGROUND=fff
  [ ] AUTH0_API_KEY_SECRET=0xb33f
> [x] API_URL=https://api.mytld.com
  [ ] STRAPI_API_KEY=x.xxyyyzzz
Once validated you will see the following import validation:
? What environment variables do you want to import? COLOR_BACKGROUND=fff, API_URL=https://api.mytld.com
While Qovery will soon provide basic metrics on app resource usage, you might need a more advanced view of what happens on your infrastructure. There are many solutions on the market, one of them being Datadog.
Datadog is one of the leading platforms for monitoring and observability, and it is pretty easy to integrate it with Qovery.
Before you begin, this guide assumes the following:
You have a Qovery cluster running
You have a dedicated Qovery project and environment to deploy Datadog (example: Project=Tooling, Environment=Production)
Create the Datadog Helm service in the Qovery environment of your choice (preferably within a dedicated Tooling project) by following this documentation and these values:
General:
Application name: Datadog
Source:
Helm source: Helm repository
Repository: Datadog (the name given when adding the Datadog Helm repository in the previous step)
Chart name: datadog
Version: 3.49.5 (this is the version we used for this setup, update it based on the chosen version)
Allow cluster-wide resources ✔️
Values
Values override as file:
File source: Raw YAML
Raw YAML:
# The following YAML contains the minimum configuration required to deploy the Datadog Agent
# on your cluster. Update it according to your needs
datadog:
  # here we use a Qovery secret to retrieve the Datadog API Key (see next step)
  apiKey: qovery.env.DD_API_KEY
  # Update the site depending on where you want to store your data in Datadog
  site: datadoghq.eu
  # Update the cluster name with the name of your choice
  clusterName: <your_cluster_name>
In the previous step, we assigned the macro qovery.env.DD_API_KEY to the apiKey value. In this step, we will create this secret within the Qovery console.
Open the service overview of the created Datadog service
Enter the Variables section
Add a new Variable with:
Variable = DD_API_KEY
Value = <your_API_KEY>
Scope = Service (so that it is accessible only to this service)
Secret variable ✔️
If you need more information on how to manage your environment variables, have a look at this documentation
Go back to your Datadog interface and open the Infrastructure > Containers > Kubernetes sections. You should now see the data coming from your Qovery cluster.
You now have the Datadog agent running on your Qovery cluster. You can check their Getting Started guide to familiarize yourself with the product: https://docs.datadoghq.com/fr/getting_started.
location / {
    root /usr/share/nginx/html/;
    include /etc/nginx/mime.types;
    try_files $uri $uri/ /index.html;
}
}
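As a sketch, assuming a standard create-react-app layout and that the full Nginx server block containing the location above is saved as nginx.conf at the repository root (image tags and paths are assumptions), the multi-stage Dockerfile serving the build through Nginx could look like:
FROM node:18 AS build
WORKDIR /app
COPY . .
# Install dependencies and produce the optimized production build
RUN yarn install --frozen-lockfile && yarn build

FROM nginx:1.25-alpine
# Ship the Nginx configuration shown above
COPY nginx.conf /etc/nginx/conf.d/default.conf
COPY --from=build /app/build /usr/share/nginx/html
EXPOSE 80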
Now, commit and push your changes - your create-react-app is handling env vars properly and is optimized for production usage.
In this guide, we went through managing environment variables in React / create-react-app without resorting to any bash scripts, and hosting the app on Qovery using an Nginx server.
Same as when connecting your frontend app to your backend app, you can create an environment variable alias DATABASE_URL for the built-in secret ending with _DATABASE_URL_INTERNAL.
Create an alias on _DATABASE_URL_INTERNAL and not _DATABASE_URL
#4. Copy data from your Heroku databases to your AWS databases
If you use MongoDB Atlas, check out our tutorial about VPC peering and how to securely connect to your existing MongoDB Atlas database.
#How can I connect my app to an AWS service not managed by Qovery?
If you want to connect your app to an AWS service not managed by Qovery, check out our tutorial about VPC peering and how to securely connect to this AWS service.
If you have a common question about Qovery, we have a more general FAQ section available.
You now have Kubecost running on your Qovery cluster. You can check their Getting Started guide to familiarize yourself with the product: https://docs.kubecost.com/#getting-started.
The guide assumes that you have an application up and running on Qovery. We'll go through the process of adding a new Custom Domain to the application and use Cloudflare as the domain provider. We also assume that you own a custom domain on Cloudflare (or any other domain registrar):
Cloudflared establishes outbound connections (tunnels) between your resources and Cloudflare’s global network.
There are different ways to install Cloudflared on your cluster; you can find the installation instructions in this documentation.
Since Cloudflared establishes the tunnel for you, and domain and TLS management is handled by Cloudflare, you don't need to publicly expose the application during the setup (see port setup).
You can decide to install Cloudflared by yourself or via Qovery. Within the section below, you will find documentation on how to install Cloudflared as a container in one of the Qovery environments.
Create and deploy the following service, using the Cloudflared image:
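A sketch of the container service settings (the image tag and command arguments are assumptions; check the Cloudflare documentation for the recommended invocation):
Image name: cloudflare/cloudflared
Image tag: latest
Command arguments: ["tunnel", "--no-autoupdate", "run"]
With no token passed on the command line, cloudflared falls back to the TUNNEL_TOKEN environment variable, which is why the next step creates it as a secret.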
Create a TUNNEL_TOKEN secret environment variable (Scope: Environment) to pass the Cloudflare token.
Once your tunnel is created and connected, you have to set the public hostname and the related service settings.
To get the service name of your application deployed by Qovery, you can find it in your application variables:
This setup works for static environments but not for dynamic ones, since the service name is generated dynamically. For dynamic environments, consider using the cloudflared Helm chart instead.
After following the steps from above, our application should be accessible using the custom domain we selected:
In this guide, we went through all the necessary steps to configure Cloudflare and Qovery to make use of your custom domain.
Qovery is going to help us deploy the final application in the Cloud without having to configure CI/CD, networking, security, load balancing, the database, and all the other DevOps tasks.
Qovery is a deployment platform that helps all developers to deploy their applications in the Cloud in just a few seconds
Before you begin, this tutorial assumes the following:
Your code needs to be hosted on GitHub/GitLab/Bitbucket
We have seen that creating a URL shortener API with Ktor and Kotlin is extremely simple. Connecting the application to PostgreSQL is very easy with the Exposed library. In just a few lines of code, the service is fully functional and can be deployed to production very quickly with the help of Qovery. In the next part, we will see how to create a web interface that connects to this API to convert our URLs without using the curl command.
Part 2: bind a web interface to the API - [link coming soon]
When you have multiple applications within the same environment, it is difficult to find the appropriate environment variable. A workaround is to:
Go to one of your applications
Find the ID of your application in your URL https://console.qovery.com/platform/organization/xxx/projects/yyy/environments/zzz/applications/082e36c4-7fbb-42b2-9046-37ccce21616a/variables
Truncate your application ID and take the first segment. For 082e36c4-7fbb-42b2-9046-37ccce21616a it is 082e36c4
Add the letter Z in front of the ID: Z082e36c4.
All the environment variables containing Z082e36c4 are attached to the corresponding app.
AWS IAM (Identity & Access Management) service allows AWS services to interact with each other by using roles. Those roles can easily be used to give permissions to your Qovery application, container or job.
It is a secure way to give your application permissions without having to manage credentials. On top of that, the token is rotated automatically.
This tutorial will show you how to add AWS IAM roles to your Qovery application, container or job.
Then, in the Variables of this container (or of any application in this environment), search for the variable called QOVERY_KUBERNETES_NAMESPACE_NAME and copy its value somewhere.
It is the Kubernetes namespace name where the container is located.
If you want to be able to keep the Role and permissions with the "On-demand environment" and "Clone" features, then you have to scope the role "cluster side" instead of the "Kubernetes namespace" side.
To do so, update the Condition with StringLike instead of StringEquals, and use a wildcard instead of the namespace name:
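For example, the resulting trust-policy fragment could look like this (the OIDC provider URL is a placeholder for your cluster's; the * wildcard replaces the namespace name):
"Condition": {
  "StringLike": {
    "oidc.eks.<region>.amazonaws.com/id/<cluster_oidc_id>:sub": "system:serviceaccount:*:<service-account-name>"
  }
}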
If you already have an existing service account on your Kubernetes cluster and want to use it, you can skip this step.
Kubernetes reminder: a service account deployed in a Kubernetes namespace becomes available to all applications in the same namespace.
This step will help you deploy a service account on your Kubernetes cluster. In case you want to do it manually on the cluster with kubectl, you just have to apply a service account like the one below:
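Here is a minimal sketch of such a service account, assuming the standard EKS role annotation (replace the placeholders with your own values):
apiVersion: v1
kind: ServiceAccount
metadata:
  # Must match the name declared in the role's Trusted entities policy
  name: <service-account-name>
  # The namespace copied from QOVERY_KUBERNETES_NAMESPACE_NAME
  namespace: <qovery-namespace>
  annotations:
    # Points the service account at the AWS role created earlier
    eks.amazonaws.com/role-arn: <role-arn>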
On the values override file, we do not need to override anything, so select None, and then click on the Continue button.
We then have to add 2 override arguments:
serviceAccount.name: the name of the service account in Kubernetes (the same name you have declared for the role in the Trusted entities policy section)
awsRoleArn: the ARN of the role you have created
Then click on the Continue button.
You can finally Create and Deploy it. If you look at the logs, you should see something like:
The final step is to set this service account (pointing to the AWS role) to your application. Go into your application Advanced settings and set the Service account to the one you have just created:
Deploy your application with the Deploy now button.
At this stage, the job should have been executed, the service account should be deployed on your Kubernetes cluster, and the Debian container should be running.
To validate that the AWS role has been correctly deployed, we can connect to the pod and check that the AWS token is present. We will use the Qovery CLI to connect to our pod:
$ qovery shell
Qovery: Select organization
Organization:
✔ Qovery
Qovery: Select project
Project:
✔ AWS roles tutorial
Qovery: Select environment
Environment:
✔ aws-role
Qovery: Select service
Services:
✔ debian
Now we are connected to the pod, we can check the AWS token:
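For instance, listing the AWS-related environment variables should show the role and the web identity token file injected by EKS (the variable names are the standard ones; exact values will differ):
$ env | grep AWS
AWS_ROLE_ARN=arn:aws:iam::<account-id>:role/<role-name>
AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token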
The first setup phase can be time-consuming. However, once done, applying roles to your applications is very easy and fast. You can now use roles to access any AWS service!
This short guide will explain how to use Git Submodules on Qovery.
This adds a private Git Submodule to the application while still allowing it to resolve in non-local environments.
This solution is not recommended.
Since the credentials are stored in plaintext in the .git/submodules directory, you should prefer the SSH / Git option.
For Qovery to be able to access those private submodules when cloning your application repository, you need to add a secret named GIT_SSH_KEY_xxx (where xxx can be replaced by anything), containing a private SSH key with access to your Git repository.
SSH:
[submodule "path/to/module"]
    url = ssh://user/repo
Git:
[submodule "path/to/module"]
    url = git://github.com/torvalds/linux.git
Qovery is a DevOps Automation Platform that eliminates your DevOps hiring needs. Provision and maintain a secure and compliant infrastructure in hours - not months!.
Qovery is a DevOps Automation Platform that eliminates your DevOps hiring needs. Provision and maintain a secure and compliant infrastructure in hours - not months!.
-
+
-
+
diff --git a/mailing_list/index.html b/mailing_list/index.html
index 0e49bbc645..f6cdb945e9 100644
--- a/mailing_list/index.html
+++ b/mailing_list/index.html
@@ -22,9 +22,9 @@
-
+
-
+
@@ -37,9 +37,9 @@
-
+
-
+
diff --git a/main.6772d5b2.js b/main.394dd298.js
similarity index 99%
rename from main.6772d5b2.js
rename to main.394dd298.js
index bdb782acca..b26d0d5cac 100644
--- a/main.6772d5b2.js
+++ b/main.394dd298.js
@@ -1,2 +1,2 @@
-/*! For license information please see main.6772d5b2.js.LICENSE.txt */
-(window.webpackJsonp=window.webpackJsonp||[]).push([[307],[function(e,t,n){"use strict";e.exports=n(98)},function(e,t,n){"use strict";function o(){return(o=Object.assign||function(e){for(var t=1;t=0;p--){var f=a[p];"."===f?i(a,p):".."===f?(i(a,p),d++):d&&(i(a,p),d--)}if(!u)for(;d--;d)a.unshift("..");!u||""===a[0]||a[0]&&r(a[0])||a.unshift("");var g=a.join("/");return n&&"/"!==g.substr(-1)&&(g+="/"),g};function s(e){return e.valueOf?e.valueOf():Object.prototype.valueOf.call(e)}var l=function e(t,n){if(t===n)return!0;if(null==t||null==n)return!1;if(Array.isArray(t))return Array.isArray(n)&&t.length===n.length&&t.every((function(t,o){return e(t,n[o])}));if("object"==typeof t||"object"==typeof n){var o=s(t),r=s(n);return o!==t||r!==n?e(o,r):Object.keys(Object.assign({},t,n)).every((function(o){return e(t[o],n[o])}))}return!1},u=n(4);function c(e){return"/"===e.charAt(0)?e:"/"+e}function d(e){return"/"===e.charAt(0)?e.substr(1):e}function p(e,t){return function(e,t){return 0===e.toLowerCase().indexOf(t.toLowerCase())&&-1!=="/?#".indexOf(e.charAt(t.length))}(e,t)?e.substr(t.length):e}function f(e){return"/"===e.charAt(e.length-1)?e.slice(0,-1):e}function g(e){var t=e.pathname,n=e.search,o=e.hash,r=t||"/";return n&&"?"!==n&&(r+="?"===n.charAt(0)?n:"?"+n),o&&"#"!==o&&(r+="#"===o.charAt(0)?o:"#"+o),r}function m(e,t,n,r){var i;"string"==typeof e?(i=function(e){var t=e||"/",n="",o="",r=t.indexOf("#");-1!==r&&(o=t.substr(r),t=t.substr(0,r));var i=t.indexOf("?");return-1!==i&&(n=t.substr(i),t=t.substr(0,i)),{pathname:t,search:"?"===n?"":n,hash:"#"===o?"":o}}(e)).state=t:(void 0===(i=Object(o.a)({},e)).pathname&&(i.pathname=""),i.search?"?"!==i.search.charAt(0)&&(i.search="?"+i.search):i.search="",i.hash?"#"!==i.hash.charAt(0)&&(i.hash="#"+i.hash):i.hash="",void 0!==t&&void 0===i.state&&(i.state=t));try{i.pathname=decodeURI(i.pathname)}catch(s){throw s instanceof URIError?new URIError('Pathname "'+i.pathname+'" could not be decoded. 
This is likely caused by an invalid percent-encoding.'):s}return n&&(i.key=n),r?i.pathname?"/"!==i.pathname.charAt(0)&&(i.pathname=a(i.pathname,r.pathname)):i.pathname=r.pathname:i.pathname||(i.pathname="/"),i}function h(e,t){return e.pathname===t.pathname&&e.search===t.search&&e.hash===t.hash&&e.key===t.key&&l(e.state,t.state)}function b(){var e=null;var t=[];return{setPrompt:function(t){return e=t,function(){e===t&&(e=null)}},confirmTransitionTo:function(t,n,o,r){if(null!=e){var i="function"==typeof e?e(t,n):e;"string"==typeof i?"function"==typeof o?o(i,r):r(!0):r(!1!==i)}else r(!0)},appendListener:function(e){var n=!0;function o(){n&&e.apply(void 0,arguments)}return t.push(o),function(){n=!1,t=t.filter((function(e){return e!==o}))}},notifyListeners:function(){for(var e=arguments.length,n=new Array(e),o=0;ot?n.splice(t,n.length-t,o):n.push(o),d({action:"PUSH",location:o,index:t,entries:n})}}))},replace:function(e,t){var o=m(e,t,p(),w.location);c.confirmTransitionTo(o,"REPLACE",n,(function(e){e&&(w.entries[w.index]=o,d({action:"REPLACE",location:o}))}))},go:y,goBack:function(){y(-1)},goForward:function(){y(1)},canGo:function(e){var t=w.index+e;return t>=0&&t=0||(r[n]=e[n]);return r}n.d(t,"a",(function(){return o}))},function(e,t,n){e.exports=!n(14)((function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a}))},function(e,t,n){var o=n(28),r=n(57);e.exports=n(10)?function(e,t,n){return o.f(e,t,r(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t,n){var o=n(5),r=n(17),i=n(11),a=n(16),s=n(30),l=function(e,t,n){var u,c,d,p,f=e&l.F,g=e&l.G,m=e&l.S,h=e&l.P,b=e&l.B,v=g?o:m?o[t]||(o[t]={}):(o[t]||{}).prototype,y=g?r:r[t]||(r[t]={}),w=y.prototype||(y.prototype={});for(u in g&&(n=t),n)d=((c=!f&&v&&void 0!==v[u])?v:n)[u],p=b&&c?s(d,o):h&&"function"==typeof d?s(Function.call,d):d,v&&a(v,u,d,e&l.U),y[u]!=d&&i(y,u,p),h&&w[u]!=d&&(w[u]=d)};o.core=r,l.F=1,l.G=2,l.S=4,l.P=8,l.B=16,l.W=32,l.U=64,l.R=128,e.exports=l},function(e,t){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,t){e.exports=function(e){try{return!!e()}catch(t){return!0}}},function(e,t,n){e.exports=n(110)()},function(e,t,n){var o=n(5),r=n(11),i=n(31),a=n(40)("src"),s=n(104),l=(""+s).split("toString");n(17).inspectSource=function(e){return s.call(e)},(e.exports=function(e,t,n,s){var u="function"==typeof n;u&&(i(n,"name")||r(n,"name",t)),e[t]!==n&&(u&&(i(n,a)||r(n,a,e[t]?""+e[t]:l.join(String(t)))),e===o?e[t]=n:s?e[t]?e[t]=n:r(e,t,n):(delete e[t],r(e,t,n)))})(Function.prototype,"toString",(function(){return"function"==typeof this&&this[a]||s.call(this)}))},function(e,t){var n=e.exports={version:"2.6.11"};"number"==typeof __e&&(__e=n)},function(e,t,n){"use strict";t.a={plugins:["plugin-image-zoom","posthog-docusaurus",["@docusaurus/plugin-content-docs",{sidebarPath:"/home/runner/work/documentation/documentation/website/sidebars.js"}],["@docusaurus/plugin-content-blog",{feedOptions:{type:"all",copyright:"Copyright \xa9 2024 Qovery, 
Inc.",baseUrl:""}}],"/home/runner/work/documentation/documentation/website/plugins/guides",["@docusaurus/plugin-content-pages",{}],["/home/runner/work/documentation/documentation/website/plugins/sitemap",{}]],themes:[["@docusaurus/theme-classic",{customCss:"/home/runner/work/documentation/documentation/website/src/css/custom.css"}],"@docusaurus/theme-search-algolia"],customFields:{metadata:{databases:[{dark_logo_path:"/img/logos/docker.svg",logo_path:"/img/logos/docker.svg",name:"mysql"},{dark_logo_path:"/img/logos/docker.svg",logo_path:"/img/logos/docker.svg",name:"postgresql"},{dark_logo_path:"/img/logos/docker.svg",logo_path:"/img/logos/docker.svg",name:"mongodb"},{dark_logo_path:"/img/logos/aws_white.svg",logo_path:"/img/logos/aws.svg",name:"rds"}],event_types:[],frameworks:[{dark_logo_path:"/img/logos/hasura_white.svg",logo_path:"/img/logos/hasura.svg",name:"hasura"},{dark_logo_path:"/img/logos/laravel.svg",logo_path:"/img/logos/laravel.svg",name:"laravel"},{dark_logo_path:"/img/logos/springboot.svg",logo_path:"/img/logos/springboot.svg",name:"springboot"},{dark_logo_path:"/img/logos/nodejs.svg",logo_path:"/img/logos/nodejs.svg",name:"nodejs"},{dark_logo_path:"/img/logos/flask_white.svg",logo_path:"/img/logos/flask.svg",name:"flask"},{dark_logo_path:"/img/logos/jhipster.svg",logo_path:"/img/logos/jhipster.svg",name:"jhipster"},{dark_logo_path:"/img/logos/gin.svg",logo_path:"/img/logos/gin.svg",name:"gin"},{dark_logo_path:"/img/logos/rails.svg",logo_path:"/img/logos/rails.svg",name:"rails"},{dark_logo_path:"/img/logos/django.svg",logo_path:"/img/logos/django.svg",name:"django"},{dark_logo_path:"/img/logos/deno.svg",logo_path:"/img/logos/deno.svg",name:"deno"},{dark_logo_path:"/img/logos/strapi.svg",logo_path:"/img/logos/strapi.svg",name:"strapi"},{dark_logo_path:"/img/logos/nuxtjs.svg",logo_path:"/img/logos/nuxtjs.svg",name:"nuxtjs"},{dark_logo_path:"/img/logos/sinatra.svg",logo_path:"/img/logos/sinatra.svg",name:"sinatra"},{dark_logo_path:"/img/logos/meilisearch.svg",logo_path:"/img/logos/meilisearch.svg",name:"meilisearch"}],guides:{"getting-started":{children:{},description:"Take Qovery from zero to production in less than 10 minutes.",guides:[{author_github:"https://github.com/evoxmusic",description:null,id:"/getting-started/create-a-database",last_modified_on:null,path:"website/guides/getting-started/create-a-database.md",series_position:null,title:"Create a database"},{author_github:"https://github.com/evoxmusic",description:null,id:"/getting-started/setting-custom-domain",last_modified_on:null,path:"website/guides/getting-started/setting-custom-domain.md",series_position:null,title:"Custom domain"},{author_github:"https://github.com/evoxmusic",description:null,id:"/getting-started/debugging",last_modified_on:null,path:"website/guides/getting-started/debugging.md",series_position:null,title:"Debugging"},{author_github:"https://github.com/evoxmusic",description:null,id:"/getting-started/managing-environment-variables",last_modified_on:null,path:"website/guides/getting-started/managing-environment-variables.md",series_position:null,title:"Environment variables"},{author_github:"https://github.com/evoxmusic",description:null,id:"/getting-started/deploy-your-first-application",last_modified_on:null,path:"website/guides/getting-started/deploy-your-first-application.md",series_position:null,title:"Hello World. 
Deploy your first application."}],name:"getting-started",series:!0,title:"Getting Started"},"installation-guide":{children:{},description:"Install Qovery on your technical stack.",guides:[{author_github:"https://github.com/evoxmusic",description:null,id:"/installation-guide/guide-amazon-web-services",last_modified_on:null,path:"website/guides/installation-guide/guide-amazon-web-services.md",series_position:null,title:"Install Qovery on your Amazon Web Services account"},{author_github:"https://github.com/evoxmusic",description:null,id:"/installation-guide/guide-kubernetes",last_modified_on:null,path:"website/guides/installation-guide/guide-kubernetes.md",series_position:null,title:"Install Qovery on your Kubernetes cluster"},{author_github:"https://github.com/evoxmusic",description:null,id:"/installation-guide/guide-microsoft-azure",last_modified_on:null,path:"website/guides/installation-guide/guide-microsoft-azure.md",series_position:null,title:"Install Qovery on your Microsoft Azure account"},{author_github:"https://github.com/evoxmusic",description:null,id:"/installation-guide/guide-scaleway",last_modified_on:null,path:"website/guides/installation-guide/guide-scaleway.md",series_position:null,title:"Install Qovery on your Scaleway account"},{author_github:"https://github.com/evoxmusic",description:null,id:"/installation-guide/guide-google-cloud-platform",last_modified_on:null,path:"website/guides/installation-guide/guide-google-cloud-platform.md",series_position:null,title:"Install Qovery your Google Cloud Platform account"}],name:"installation-guide",series:!1,title:"Installation Guide"},advanced:{children:{},description:"Go beyond the basics, become a Qovery pro, and extract the full potential of Qovery.",guides:[{author_github:"https://github.com/deimosfr",description:null,id:"/advanced/sub-account-sso",last_modified_on:null,path:"website/guides/advanced/sub-account-sso.md",series_position:null,title:"Configure Single Sign-On (SSO) for an AWS sub-account"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/continuous-integration",last_modified_on:null,path:"website/guides/advanced/continuous-integration.md",series_position:null,title:"Continuous Integration"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/costs-control",last_modified_on:null,path:"website/guides/advanced/costs-control.md",series_position:null,title:"Costs Control"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/deploy-api-gateway",last_modified_on:null,path:"website/guides/advanced/deploy-api-gateway.md",series_position:null,title:"Deploy API Gateway"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/deploy-aws-services",last_modified_on:null,path:"website/guides/advanced/deploy-aws-services.md",series_position:null,title:"Deploy AWS Services"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/deploy-external-services",last_modified_on:null,path:"website/guides/advanced/deploy-external-services.md",series_position:null,title:"Deploy External Services"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/deploy-frontend",last_modified_on:null,path:"website/guides/advanced/deploy-frontend.md",series_position:null,title:"Deploy Frontend 
App"},{author_github:"https://github.com/baalooos",description:null,id:"/advanced/deploy-daemonset-with-karpenter",last_modified_on:null,path:"website/guides/advanced/deploy-daemonset-with-karpenter.md",series_position:null,title:"Deploy a DaemonSet in a Karpenter context"},{author_github:"https://github.com/acarranoqovery",description:null,id:"/advanced/adding-multi-region-backup-rds",last_modified_on:null,path:"website/guides/advanced/adding-multi-region-backup-rds.md",series_position:null,title:"Enable multi-region backup on your RDS instances with AWS Backup"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/helm-chart",last_modified_on:null,path:"website/guides/advanced/helm-chart.md",series_position:null,title:"Helm Charts"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/advanced/microservices",last_modified_on:null,path:"website/guides/advanced/microservices.md",series_position:null,title:"Microservices"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/migration",last_modified_on:null,path:"website/guides/advanced/migration.md",series_position:null,title:"Migration"},{author_github:"https://github.com/acarranoqovery",description:null,id:"/advanced/upgrading-rds-instance",last_modified_on:null,path:"website/guides/advanced/upgrading-rds-instance.md",series_position:null,title:"Minimize downtime while upgrading RDS instances"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/monitoring",last_modified_on:null,path:"website/guides/advanced/monitoring.md",series_position:null,title:"Monitoring"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/advanced/monorepository",last_modified_on:null,path:"website/guides/advanced/monorepository.md",series_position:null,title:"Mono repository"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/use-preview-environments",last_modified_on:null,path:"website/guides/advanced/use-preview-environments.md",series_position:null,title:"Preview Environments"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/production",last_modified_on:null,path:"website/guides/advanced/production.md",series_position:null,title:"Production"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/seed-database",last_modified_on:null,path:"website/guides/advanced/seed-database.md",series_position:null,title:"Seed Database"},{author_github:"https://github.com/evoxmusic",description:null,id:"/advanced/terraform",last_modified_on:null,path:"website/guides/advanced/terraform.md",series_position:null,title:"Terraform"}],name:"advanced",series:!1,title:"Advanced"},tutorial:{children:{},description:"Additional step-by-step resources to leverage even more Qovery. 
",guides:[{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/blazingly-fast-preview-environments-for-nextjs-nodejs-and-mongodb-on-aws",last_modified_on:null,path:"website/guides/tutorial/blazingly-fast-preview-environments-for-nextjs-nodejs-and-mongodb-on-aws.md",series_position:null,title:"Blazingly fast Preview Environments for NextJS, NodeJS, and MongoDB on AWS"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/build-e2e-testing-ephemeral-environments",last_modified_on:null,path:"website/guides/tutorial/build-e2e-testing-ephemeral-environments.md",series_position:null,title:"Build E2E Testing Ephemeral Environments with GitHub Actions and Qovery"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/create-a-playground-environment-on-aws",last_modified_on:null,path:"website/guides/tutorial/create-a-playground-environment-on-aws.md",series_position:null,title:"Create a Playground Environment on AWS"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/create-a-blazingly-fast-api-in-rust-part-1",last_modified_on:null,path:"website/guides/tutorial/create-a-blazingly-fast-api-in-rust-part-1.md",series_position:null,title:"Create a blazingly fast REST API in Rust (Part 1/2)"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/create-your-staging-environment-from-your-production-environment-on-aws",last_modified_on:null,path:"website/guides/tutorial/create-your-staging-environment-from-your-production-environment-on-aws.md",series_position:null,title:"Create your Staging environment from your Production environment on AWS"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/generate-qovery-api-client",last_modified_on:null,path:"website/guides/tutorial/generate-qovery-api-client.md",series_position:null,title:"Creating API clients using OpenAPI Tools"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/customizing-preview-url-with-qovery-cli",last_modified_on:null,path:"website/guides/tutorial/customizing-preview-url-with-qovery-cli.md",series_position:null,title:"Customizing Preview URL with Qovery CLI"},{author_github:"https://github.com/baalooos",description:null,id:"/tutorial/deploy-jupyterhub-qovery",last_modified_on:null,path:"website/guides/tutorial/deploy-jupyterhub-qovery.md",series_position:null,title:"Deploy JupyterHub using Helm"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/deploy-rails-with-postgresql-and-sidekiq",last_modified_on:null,path:"website/guides/tutorial/deploy-rails-with-postgresql-and-sidekiq.md",series_position:null,title:"Deploy Rails with PostgreSQL and Sidekiq"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/deploy-temporal-on-kubernetes",last_modified_on:null,path:"website/guides/tutorial/deploy-temporal-on-kubernetes.md",series_position:null,title:"Deploy Temporal on Kubernetes"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/getting-started-with-preview-environments-on-aws-for-beginners",last_modified_on:null,path:"website/guides/tutorial/getting-started-with-preview-environments-on-aws-for-beginners.md",series_position:null,title:"Getting Started with Preview Environments on AWS"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/gitops-with-qovery",last_modified_on:null,path:"website/guides/tutorial/gitops-with-qovery.md",series_position:null,title:"GitOps with 
Qovery"},{author_github:"https://github.com/deimosfr",description:null,id:"/tutorial/grafana-install",last_modified_on:null,path:"website/guides/tutorial/grafana-install.md",series_position:null,title:"Grafana setup with Qovery"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/how-to-use-lifecycle-job-to-deploy-any-kind-of-resources",last_modified_on:null,path:"website/guides/tutorial/how-to-use-lifecycle-job-to-deploy-any-kind-of-resources.md",series_position:null,title:"How To Use Lifecycle Job To Deploy Any Kind Of Resources"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-1",last_modified_on:null,path:"website/guides/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-1.md",series_position:null,title:"How to Build a Cloud Version of Your Open Source Software - A Case Study with AppWrite - Part 1"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-2",last_modified_on:null,path:"website/guides/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-2.md",series_position:null,title:"How to Build a Cloud Version of Your Open Source Software - A Case Study with AppWrite - Part 2"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-3",last_modified_on:null,path:"website/guides/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-3.md",series_position:null,title:"How to Build a Cloud Version of Your Open Source Software - A Case Study with AppWrite - Part 3"},{author_github:"https://github.com/benjaminch",description:null,id:"/tutorial/how-to-activate-sso-to-connect-to-your-eks-cluster",last_modified_on:null,path:"website/guides/tutorial/how-to-activate-sso-to-connect-to-your-eks-cluster.md",series_position:null,title:"How to activate SSO to connect to your EKS cluster"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/how-to-connect-to-a-managed-mongodb-instance-on-aws",last_modified_on:null,path:"website/guides/tutorial/how-to-connect-to-a-managed-mongodb-instance-on-aws.md",series_position:null,title:"How to connect to a managed MongoDB instance on AWS"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/how-to-connect-to-your-eks-cluster-with-kubectl",last_modified_on:null,path:"website/guides/tutorial/how-to-connect-to-your-eks-cluster-with-kubectl.md",series_position:null,title:"How to connect to your EKS cluster with kubectl"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/how-to-create-an-rds-instance-through-aws-console",last_modified_on:null,path:"website/guides/tutorial/how-to-create-an-rds-instance-through-aws-console.md",series_position:null,title:"How to create an RDS instance through the AWS console"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/how-to-deploy-a-rust-rest-api-application-on-aws-with-ease",last_modified_on:null,path:"website/guides/tutorial/how-to-deploy-a-rust-rest-api-application-on-aws-with-ease.md",series_position:null,title:"How to deploy a Rust REST API application on AWS with 
ease"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/how-to-integrate-qovery-with-github-actions",last_modified_on:null,path:"website/guides/tutorial/how-to-integrate-qovery-with-github-actions.md",series_position:null,title:"How to integrate Qovery with GitHub Actions"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/how-to-run-commands-at-application-startup",last_modified_on:null,path:"website/guides/tutorial/how-to-run-commands-at-application-startup.md",series_position:null,title:"How to run commands before the application starts"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/data-seeding-in-postgres",last_modified_on:null,path:"website/guides/tutorial/data-seeding-in-postgres.md",series_position:null,title:"How to seed a Postgres database on a dev environment"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/how-to-use-cloudfront-with-react-frontend-application-on-qovery",last_modified_on:null,path:"website/guides/tutorial/how-to-use-cloudfront-with-react-frontend-application-on-qovery.md",series_position:null,title:"How to use CloudFront with a React frontend application on Qovery"},{author_github:"https://github.com/MacLikorne",description:null,id:"/tutorial/how-to-write-a-dockerfile",last_modified_on:null,path:"website/guides/tutorial/how-to-write-a-dockerfile.md",series_position:null,title:"How to write a Dockerfile"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/import-your-environment-variables-with-the-qovery-cli",last_modified_on:null,path:"website/guides/tutorial/import-your-environment-variables-with-the-qovery-cli.md",series_position:null,title:"Import your environment variables with the Qovery CLI"},{author_github:"https://github.com/deimosfr",description:null,id:"/tutorial/cloudwatch-integration",last_modified_on:null,path:"website/guides/tutorial/cloudwatch-integration.md",series_position:null,title:"Integrate your application logs to Cloudwatch"},{author_github:"https://github.com/acarranoqovery",description:null,id:"/tutorial/kubernetes-observability-and-monitoring-with-datadog",last_modified_on:null,path:"website/guides/tutorial/kubernetes-observability-and-monitoring-with-datadog.md",series_position:null,title:"Kubernetes observability and monitoring with Datadog"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/managing-env-variables-in-create-react-app",last_modified_on:null,path:"website/guides/tutorial/managing-env-variables-in-create-react-app.md",series_position:null,title:"Managing Environment Variables in React (create-react-app)"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/migrate-your-application-from-heroku-to-aws",last_modified_on:null,path:"website/guides/tutorial/migrate-your-application-from-heroku-to-aws.md",series_position:null,title:"Migrate your application from Heroku to AWS"},{author_github:"https://github.com/jul-dan",description:null,id:"/tutorial/monitor-and-reduce-kubernetes-spend-with-kubecost",last_modified_on:null,path:"website/guides/tutorial/monitor-and-reduce-kubernetes-spend-with-kubecost.md",series_position:null,title:"Monitor and reduce Kubernetes spend with 
Kubecost"},{author_github:"https://github.com/jul-dan",description:null,id:"/tutorial/setting-up-cloudflare-and-custom-domain-on-qovery",last_modified_on:null,path:"website/guides/tutorial/setting-up-cloudflare-and-custom-domain-on-qovery.md",series_position:null,title:"Setting up Cloudflare and Custom Domain on Qovery"},{author_github:"https://github.com/l0ck3",description:null,id:"/tutorial/aws-vpc-peering-with-qovery",last_modified_on:null,path:"website/guides/tutorial/aws-vpc-peering-with-qovery.md",series_position:null,title:"Setup VPC peering on AWS with Qovery"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/url-shortener-api-with-kotlin",last_modified_on:null,path:"website/guides/tutorial/url-shortener-api-with-kotlin.md",series_position:null,title:"URL Shortener API with Kotlin (Part 1/2)"},{author_github:"https://github.com/deimosfr",description:null,id:"/tutorial/use-aws-iam-roles-with-qovery",last_modified_on:null,path:"website/guides/tutorial/use-aws-iam-roles-with-qovery.md",series_position:null,title:"Use AWS IAM roles with Qovery"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/use-an-api-gateway-in-front-of-multiple-services",last_modified_on:null,path:"website/guides/tutorial/use-an-api-gateway-in-front-of-multiple-services.md",series_position:null,title:"Use an API gateway in front of multiple services"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/aws-sqs-lambda-with-qovery",last_modified_on:null,path:"website/guides/tutorial/aws-sqs-lambda-with-qovery.md",series_position:null,title:"Using Amazon SQS and Lambda on Qovery"},{author_github:"https://github.com/pjeziorowski",description:null,id:"/tutorial/working-with-git-submodules",last_modified_on:null,path:"website/guides/tutorial/working-with-git-submodules.md",series_position:null,title:"Working with Git Submodules"},{author_github:"https://github.com/evoxmusic",description:null,id:"/tutorial/how-to-deploy-your-application-on-aws-in-30-minutes",last_modified_on:null,path:"website/guides/tutorial/how-to-deploy-your-application-on-aws-in-30-minutes.md",series_position:null,title:"Zero to Hero - How to deploy your apps on AWS in 30 minutes"}],name:"tutorial",series:!1,title:"Tutorial"},engineering:{children:{},description:"We share our engineering learning with all of you. 
",guides:[],name:"engineering",series:!1,title:"Engineering"}},highlights:[],installation:{},installation_guides:[{dark_logo_path:"/img/logos/aws_white.svg",logo_path:"/img/logos/aws.svg",name:"aws"},{dark_logo_path:"/img/logos/digitalocean_white.svg",logo_path:"/img/logos/digitalocean.svg",name:"digital_ocean"},{dark_logo_path:"/img/logos/scaleway_white.svg",logo_path:"/img/logos/scaleway.svg",name:"scaleway"},{dark_logo_path:"/img/logos/gcp_white.svg",logo_path:"/img/logos/gcp.svg",name:"gcp"},{dark_logo_path:"/img/logos/azure_white.svg",logo_path:"/img/logos/azure.svg",name:"azure"},{dark_logo_path:"/img/logos/kubernetes_white.svg",logo_path:"/img/logos/kubernetes.svg",name:"kubernetes"}],languages:[{dark_logo_path:"/img/logos/php.svg",logo_path:"/img/logos/php.svg",name:"php"},{dark_logo_path:"/img/logos/kotlin.svg",logo_path:"/img/logos/kotlin.svg",name:"kotlin"},{dark_logo_path:"/img/logos/java.svg",logo_path:"/img/logos/java.svg",name:"java"},{dark_logo_path:"/img/logos/javascript.svg",logo_path:"/img/logos/javascript.svg",name:"javascript"},{dark_logo_path:"/img/logos/python.svg",logo_path:"/img/logos/python.svg",name:"python"},{dark_logo_path:"/img/logos/rust_white.svg",logo_path:"/img/logos/rust.svg",name:"rust"},{dark_logo_path:"/img/logos/go.svg",logo_path:"/img/logos/go.svg",name:"go"},{dark_logo_path:"/img/logos/ruby.svg",logo_path:"/img/logos/ruby.svg",name:"ruby"},{dark_logo_path:"/img/logos/scala.svg",logo_path:"/img/logos/scala.svg",name:"scala"}],latest_highlight:{},latest_post:{},latest_release:{},post_tags:[],posts:[],releases:{},sinks:{},sources:{},team:[{avatar:"https://github.com/evoxmusic.png",bio:'Romaric is a Software Engineer, and CEO at Qovery. He has 10+ years of experience in R&D. From the Ad-Tech to the financial industry, he has deep expertise in highly-reliable and performant systems.\n',github:"https://github.com/evoxmusic",id:"romaric",keybase:"https://keybase.io/evoxmusic",name:"Romaric P."},{avatar:"https://github.com/deimosfr.png",bio:'Pierre is an SRE, and CTO of Qovery. He has 15+ years of experience in R&D. From the financial to the Ad-Tech industry, he has a strong knowledge in distributed and highly-reliable systems. He\'s also the MariaDB High Performance book author.\n',github:"https://github.com/deimosfr",id:"pierre",keybase:"https://keybase.io/pierre",name:"Pierre M."},{avatar:"https://github.com/pjeziorowski.png",bio:'Patryk is an experienced Software Engineer, and a Backend Developer at Qovery. ',github:"https://github.com/pjeziorowski",id:"patryk",keybase:"https://keybase.io/patryk",name:"Patryk J."},{avatar:"https://github.com/maclikorne.png",bio:'Enzo is a Backend Developer at Qovery. ',github:"https://github.com/MacLikorne",id:"enzo",keybase:"https://keybase.io/enzo",name:"Enzo R."},{avatar:"https://github.com/l0ck3.png",bio:'Yann is a Developer Experience Engineer at Qovery. 
He has 15+ years of experience in development and SRE.\n',github:"https://github.com/l0ck3",id:"yann",keybase:"https://keybase.io/l0ck3",name:"Yann I."},{avatar:"https://github.com/sileht.png",bio:'Mehdi is a Senior DevOps Engineer at Qovery with 15+ years of experience developing software and managing infrastructure. He is a co-founder of Mergify, an active member of Tetaneutral.net, a non-profit ISP and hosting provider, and he also likes to dance to crazy swing rhythms.\n',github:"https://github.com/sileht",id:"mehdi",keybase:"https://keybase.io/mehdi",name:"Mehdi A."},{avatar:"https://github.com/Stun3R.png",bio:'Thibaut is an experienced developer, CTO of Shelt.in, and an active Qovery contributor. ',github:"https://github.com/Stun3R",id:"thibaut_david",keybase:"https://keybase.io/Stun3R",name:"Thibaut David"},{avatar:"https://github.com/Aggis15.png",bio:"Angelos is a self-taught programmer using Python, and a Qovery ambassador and contributor. ",github:"https://github.com/Aggis15",id:"Aggis15",keybase:"https://keybase.io/Aggis15",name:"Angelos Rinas"},{avatar:"https://github.com/ilmiont.png",bio:"James Walker is the founder of Heron Web, a UK-based digital agency providing bespoke software development services to SMEs. He has experience managing complete end-to-end web development workflows with DevOps, CI/CD, Docker, and Kubernetes.\n",github:"https://github.com/ilmiont",id:"james_walker",keybase:"https://keybase.io/ilmiont",name:"James Walker"},{avatar:"https://github.com/Qovery.png",bio:"Dhiraj Kumar has 10+ years of experience in Python and machine learning. He specializes in data analytics and machine learning using Python. His primary expertise includes Python, Flask, Django, Pandas, NumPy, scikit-learn, NLP, Docker, deep learning, chatbots, Spark, AWS, C#, and Azure.\n",github:"https://github.com/dhiraj_kumar",id:"dhiraj_kumar",keybase:"https://keybase.io/dhiraj_kumar",name:"Dhiraj Kumar"},{avatar:"https://github.com/Qovery.png",bio:"Shingai Zivuku is a software engineer passionate about the cloud.\n",github:"https://github.com/shingai_zivuku",id:"shingai_zivuku",keybase:"https://keybase.io/shingai_zivuku",name:"Shingai Zivuku"},{avatar:"https://github.com/benjaminch.png",bio:'Benjamin is a senior Backend Developer at Qovery.',github:"https://github.com/benjaminch",id:"benjaminch",keybase:"https://keybase.io/benjaminch",name:"Benjamin Chastanier"},{avatar:"https://github.com/jul-dan.png",bio:'Julien is a Technical Product Manager at Qovery.',github:"https://github.com/jul-dan",id:"jul-dan",keybase:"https://keybase.io/jul-dan",name:"Julien Dan"},{avatar:"https://github.com/acarranoqovery.png",bio:'Alessandro is a Lead Product Manager at Qovery.',github:"https://github.com/acarranoqovery",id:"acarranoqovery",keybase:"https://keybase.io/acarranoqovery",name:"Alessandro Carrano"},{avatar:"https://github.com/baalooos.png",bio:'Charles-Edouard is a Technical Account Manager at Qovery.',github:"https://github.com/baalooos",id:"cegagnaire",keybase:"https://keybase.io/baalooos",name:"Charles-Edouard 
Gagnaire"}],technologies:[{dark_logo_path:"/img/logos/kubernetes_white.svg",logo_path:"/img/logos/kubernetes.svg",name:"kubernetes"},{dark_logo_path:"/img/logos/helm_white.svg",logo_path:"/img/logos/helm.svg",name:"helm"},{dark_logo_path:"/img/logos/docker.svg",logo_path:"/img/logos/docker.svg",name:"docker"},{dark_logo_path:"/img/logos/kotlin.svg",logo_path:"/img/logos/kotlin.svg",name:"kotlin"},{dark_logo_path:"/img/logos/qovery.svg",logo_path:"/img/logos/qovery.svg",name:"qovery"},{dark_logo_path:"/img/logos/posthog.svg",logo_path:"/img/logos/posthog.svg",name:"posthog"},{dark_logo_path:"/img/logos/terraform.svg",logo_path:"/img/logos/terraform.svg",name:"terraform"},{dark_logo_path:"/img/logos/github.svg",logo_path:"/img/logos/github.png",name:"github"}],transforms:{}}},themeConfig:{disableDarkMode:!1,navbar:{hideOnScroll:!0,logo:{alt:"Qovery",src:"img/logo-light.svg",srcDark:"img/logo-dark.svg",url:"https://www.qovery.com"},links:[{to:"guides/",label:"Guides",position:"left"},{to:"docs/",label:"Docs",position:"left"},{to:"guides/tutorial",label:"Tutorials",position:"left"},{href:"https://discuss.qovery.com",label:"Forum",position:"left"},{href:"https://start.qovery.com",label:"Web Console",position:"right"},{href:"https://www.qovery.com",label:"Home",position:"right"},{href:"https://github.com/Qovery",label:"GitHub",position:"right"}]},image:"img/open-graph.png",prism:{theme:{plain:{color:"#393A34",backgroundColor:"#f6f8fa"},styles:[{types:["comment","prolog","doctype","cdata"],style:{color:"#999988",fontStyle:"italic"}},{types:["namespace"],style:{opacity:.7}},{types:["string","attr-value"],style:{color:"#e3116c"}},{types:["punctuation","operator"],style:{color:"#393A34"}},{types:["entity","url","symbol","number","boolean","variable","constant","property","regex","inserted"],style:{color:"#36acaa"}},{types:["atrule","keyword","attr-name","selector"],style:{color:"#00a4db"}},{types:["function","deleted","tag"],style:{color:"#d73a49"}},{types:["function-variable"],style:{color:"#6f42c1"}},{types:["tag","selector","keyword"],style:{color:"#00009f"}}]},darkTheme:{plain:{color:"#F8F8F2",backgroundColor:"#282A36"},styles:[{types:["prolog","constant","builtin"],style:{color:"rgb(189, 147, 249)"}},{types:["inserted","function"],style:{color:"rgb(80, 250, 123)"}},{types:["deleted"],style:{color:"rgb(255, 85, 85)"}},{types:["changed"],style:{color:"rgb(255, 184, 108)"}},{types:["punctuation","symbol"],style:{color:"rgb(248, 248, 242)"}},{types:["string","char","tag","selector"],style:{color:"rgb(255, 121, 198)"}},{types:["keyword","variable"],style:{color:"rgb(189, 147, 249)",fontStyle:"italic"}},{types:["comment"],style:{color:"rgb(98, 114, 164)"}},{types:["attr-name"],style:{color:"rgb(241, 250, 140)"}}]},additionalLanguages:["hcl","rust"]},footer:{links:[{title:"Resources",items:[{label:"Documentation",to:"docs"},{label:"Guides",to:"guides"},{label:"Tutorials",to:"guides/tutorial"},{label:"Engineering",to:"guides/engineering"},{label:"Pricing",to:"https://www.qovery.com/pricing"},{label:"Enterprise",to:"https://www.qovery.com/enterprise"},{label:"API",to:"https://api-doc.qovery.com"},{label:"Github",to:"https://github.com/Qovery"}]},{title:"Community",items:[{label:"Forum",to:"https://community.qovery.com"},{label:"Community 
call",to:"https://www.qovery.com/community-call"},{label:"Goodies",to:"https://shop.qovery.com"},{label:"Roadmap",to:"https://roadmap.qovery.com"},{label:"Replibyte",to:"https://github.com/Qovery/replibyte"}]},{title:"Company",items:[{label:"Blog",to:"https://www.qovery.com/blog"},{label:"Jobs",to:"https://jobs.qovery.com"},{label:"Team",to:"https://www.qovery.com/team"},{label:"Investors",to:"https://www.qovery.com/investors"},{label:"Contact",to:"https://www.qovery.com/contact"}]}],copyright:"\xa9 2024 DESIGNED BY QOVERY | PROUD SILVER MEMBER OF CNCF AND LINUX FOUNDATION | QOVERY BY BIRDSIGHT - ALL RIGHTS RESERVED"},algolia:{appId:"FT65SBJ2DA",apiKey:"02604e8b2e0918e90edd1d9eb8e30f5e",indexName:"qovery",algoliaOptions:{}},googleAnalytics:{trackingId:"UA-129773960-5"},posthog:{apiKey:"phc_IgdG1K2GveDUte1gJ6hlwNbFHCv9nViWETUyLMU7ciq",appUrl:"https://phprox.qovery.com",enableInDevelopment:!0},imageZoom:{selector:"img"}},title:"Qovery",tagline:"Deploy On-demand Environments on AWS, Remarkably Fast",url:"https://hub.qovery.com",baseUrl:"/",favicon:"img/logo-square.svg",organizationName:"Qovery",projectName:"documentation",presets:[],scripts:["/js/intercom.js",{src:"https://www.googletagmanager.com/gtag/js?id=UA-129773960-5",async:!0},"/js/ga.js"],stylesheets:["https://fonts.googleapis.com/css?family=Ubuntu|Roboto|Source+Code+Pro","https://at-ui.github.io/feather-font/css/iconfont.css"]}},function(e,t,n){"use strict";n.d(t,"a",(function(){return s})),n.d(t,"b",(function(){return l}));var o=n(3),r=n(1),i=n(0),a=n.n(i);function s(e,t,n){return void 0===n&&(n=[]),e.some((function(e){var r=e.path?Object(o.f)(t,e):n.length?n[n.length-1].match:o.c.computeRootMatch(t);return r&&(n.push({route:e,match:r}),e.routes&&s(e.routes,t,n)),r})),n}function l(e,t,n){return void 0===t&&(t={}),void 0===n&&(n={}),e?a.a.createElement(o.d,n,e.map((function(e,n){return a.a.createElement(o.b,{key:e.key||n,path:e.path,exact:e.exact,strict:e.strict,render:function(n){return e.render?e.render(Object(r.a)({},n,{},t,{route:e})):a.a.createElement(e.component,Object(r.a)({},n,t,{route:e}))}})}))):null}},function(e,t){var n=!("undefined"==typeof window||!window.document||!window.document.createElement),o={canUseDOM:n,canUseEventListeners:n&&!(!window.addEventListener&&!window.attachEvent),canUseIntersectionObserver:n&&"IntersectionObserver"in window,canUseViewport:n&&!!window.screen};e.exports=o},function(e,t,n){"use strict";var o=n(36),r={};r[n(2)("toStringTag")]="z",r+""!="[object z]"&&n(16)(Object.prototype,"toString",(function(){return"[object "+o(this)+"]"}),!0)},function(e,t,n){"use strict";var o=n(74),r=n(88),i=n(24),a=n(33);e.exports=n(61)(Array,"Array",(function(e,t){this._t=a(e),this._i=0,this._k=t}),(function(){var e=this._t,t=this._k,n=this._i++;return!e||n>=e.length?(this._t=void 0,r(1)):r(0,"keys"==t?n:"values"==t?e[n]:[n,e[n]])}),"values"),i.Arguments=i.Array,o("keys"),o("values"),o("entries")},function(e,t){var n={}.toString;e.exports=function(e){return n.call(e).slice(8,-1)}},function(e,t){e.exports={}},function(e,t,n){var o=n(107),r=n(65);e.exports=Object.keys||function(e){return o(e,r)}},function(e,t,n){var o=n(35),r=Math.min;e.exports=function(e){return e>0?r(o(e),9007199254740991):0}},function(e,t,n){var o=n(34);e.exports=function(e){return Object(o(e))}},function(e,t,n){var o=n(8),r=n(86),i=n(87),a=Object.defineProperty;t.f=n(10)?Object.defineProperty:function(e,t,n){if(o(e),t=i(t,!0),o(n),r)try{return a(e,t,n)}catch(s){}if("get"in n||"set"in n)throw TypeError("Accessors not supported!");return"value"in 
n&&(e[t]=n.value),e}},function(e,t,n){for(var o=n(22),r=n(25),i=n(16),a=n(5),s=n(11),l=n(24),u=n(2),c=u("iterator"),d=u("toStringTag"),p=l.Array,f={CSSRuleList:!0,CSSStyleDeclaration:!1,CSSValueList:!1,ClientRectList:!1,DOMRectList:!1,DOMStringList:!1,DOMTokenList:!0,DataTransferItemList:!1,FileList:!1,HTMLAllCollection:!1,HTMLCollection:!1,HTMLFormElement:!1,HTMLSelectElement:!1,MediaList:!0,MimeTypeArray:!1,NamedNodeMap:!1,NodeList:!0,PaintRequestList:!1,Plugin:!1,PluginArray:!1,SVGLengthList:!1,SVGNumberList:!1,SVGPathSegList:!1,SVGPointList:!1,SVGStringList:!1,SVGTransformList:!1,SourceBufferList:!1,StyleSheetList:!0,TextTrackCueList:!1,TextTrackList:!1,TouchList:!1},g=r(f),m=0;m0?o:n)(e)}},function(e,t,n){var o=n(23),r=n(2)("toStringTag"),i="Arguments"==o(function(){return arguments}());e.exports=function(e){var t,n,a;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(n=function(e,t){try{return e[t]}catch(n){}}(t=Object(e),r))?n:i?o(t):"Object"==(a=o(t))&&"function"==typeof t.callee?"Arguments":a}},function(e){e.exports=JSON.parse('{"/":{"component":"c4f5d8e4"},"/community":{"component":"672ba3d6"},"/components":{"component":"54e7632e"},"/contact":{"component":"83e9e333"},"/docs":{"component":"25b7c3f2"},"/guides":{"component":"c6d06197","items":[{"content":"d2397242"},{"content":"44b423be"},{"content":"e4310ee0"},{"content":"0578cd49"},{"content":"48764d63"},{"content":"9fe26b56"},{"content":"946bf02d"},{"content":"73d96058"},{"content":"ff2506fd"},{"content":"86a0e6ef"},{"content":"a156f6a6"},{"content":"56c0a343"},{"content":"f41dc7b3"},{"content":"1a39f24c"},{"content":"da253275"},{"content":"89caf623"},{"content":"967beaa8"},{"content":"1a6d3985"},{"content":"ff0cde69"},{"content":"cbcbf0e3"},{"content":"3248e450"},{"content":"5e5fefd2"},{"content":"3e6b1f84"},{"content":"36676680"},{"content":"498daee8"},{"content":"3ecdd190"},{"content":"50bab564"},{"content":"2cb76395"},{"content":"ab2f7458"},{"content":"3088ad98"},{"content":"dfcfd2f3"},{"content":"df1c18d8"},{"content":"8f02216a"},{"content":"1b633bfd"},{"content":"bc592dc7"},{"content":"acaf40e9"},{"content":"5b95bed2"},{"content":"3986a7a9"},{"content":"de0a75d9"},{"content":"bdd6d8c6"},{"content":"b565c464"},{"content":"40ec3bc1"},{"content":"fb1d0a83"},{"content":"9107e302"},{"content":"a1fea8fb"},{"content":"8d146bfd"},{"content":"bbfbe73c"},{"content":"60296d59"},{"content":"e1becc8e"},{"content":"b5eab6bb"},{"content":"072d4c63"},{"content":"dea3d534"},{"content":"6ce627d6"},{"content":"e5b9b0aa"},{"content":"522ef453"},{"content":"e1e1580b"},{"content":"9ecfa6fe"},{"content":"16c36934"},{"content":"16976906"},{"content":"68c0e7f9"},{"content":"e8b0321f"},{"content":"ba43933d"},{"content":"c8223350"},{"content":"05049f86"},{"content":"f7098925"},{"content":"7952d159"},{"content":"c0ab55e0"},{"content":"cbb976f4"},{"content":"f3d8c143"},{"content":"0c18cf89"}],"metadata":"49d2885e"},"/guides/advanced":{"component":"d9deea5f","items":[{"content":"f41dc7b3"},{"content":"1a39f24c"},{"content":"da253275"},{"content":"3248e450"},{"content":"5e5fefd2"},{"content":"3e6b1f84"},{"content":"36676680"},{"content":"498daee8"},{"content":"ab2f7458"},{"content":"8f02216a"},{"content":"dea3d534"},{"content":"e5b9b0aa"},{"content":"522ef453"},{"content":"9ecfa6fe"},{"content":"16c36934"},{"content":"16976906"},{"content":"68c0e7f9"},{"content":"e8b0321f"},{"content":"05049f86"}],"metadata":"3e1d77c1"},"/guides/advanced/adding-multi-region-backup-rds":{"component":"1c13b173","content":"77a87849"},"/guides/advanced/continuous
-integration":{"component":"1c13b173","content":"03d003d1"},"/guides/advanced/costs-control":{"component":"1c13b173","content":"a8a9c166"},"/guides/advanced/deploy-api-gateway":{"component":"1c13b173","content":"b7d53051"},"/guides/advanced/deploy-aws-services":{"component":"1c13b173","content":"5385e737"},"/guides/advanced/deploy-daemonset-with-karpenter":{"component":"1c13b173","content":"766a314f"},"/guides/advanced/deploy-external-services":{"component":"1c13b173","content":"e7d0ec68"},"/guides/advanced/deploy-frontend":{"component":"1c13b173","content":"1dd2c233"},"/guides/advanced/helm-chart":{"component":"1c13b173","content":"c24a85bb"},"/guides/advanced/microservices":{"component":"1c13b173","content":"66bbed7b"},"/guides/advanced/migration":{"component":"1c13b173","content":"10c2e3e6"},"/guides/advanced/monitoring":{"component":"1c13b173","content":"18415bef"},"/guides/advanced/monorepository":{"component":"1c13b173","content":"f756422c"},"/guides/advanced/production":{"component":"1c13b173","content":"93701b40"},"/guides/advanced/seed-database":{"component":"1c13b173","content":"2309a9c8"},"/guides/advanced/sub-account-sso":{"component":"1c13b173","content":"59d03e1e"},"/guides/advanced/terraform":{"component":"1c13b173","content":"9c8ed74f"},"/guides/advanced/upgrading-rds-instance":{"component":"1c13b173","content":"1f631fe9"},"/guides/advanced/use-preview-environments":{"component":"1c13b173","content":"8bfd1931"},"/guides/getting-started":{"component":"d9deea5f","items":[{"content":"d2397242"},{"content":"44b423be"},{"content":"e4310ee0"},{"content":"0578cd49"},{"content":"48764d63"}],"metadata":"0e2fb061"},"/guides/getting-started/create-a-database":{"component":"1c13b173","content":"24e60f8a"},"/guides/getting-started/debugging":{"component":"1c13b173","content":"6504a542"},"/guides/getting-started/deploy-your-first-application":{"component":"1c13b173","content":"cc9be38a"},"/guides/getting-started/managing-environment-variables":{"component":"1c13b173","content":"b7280cb5"},"/guides/getting-started/setting-custom-domain":{"component":"1c13b173","content":"c0594016"},"/guides/installation-guide":{"component":"d9deea5f","items":[{"content":"9fe26b56"},{"content":"946bf02d"},{"content":"73d96058"},{"content":"ff2506fd"},{"content":"86a0e6ef"}],"metadata":"6852f5b3"},"/guides/installation-guide/guide-amazon-web-services":{"component":"1c13b173","content":"225ad2ad"},"/guides/installation-guide/guide-google-cloud-platform":{"component":"1c13b173","content":"9b266254"},"/guides/installation-guide/guide-kubernetes":{"component":"1c13b173","content":"dffbf523"},"/guides/installation-guide/guide-microsoft-azure":{"component":"1c13b173","content":"f6a16982"},"/guides/installation-guide/guide-scaleway":{"component":"1c13b173","content":"7cc8f9b8"},"/guides/tags":{"component":"3116c1fa","tags":"a81fb19d"},"/guides/tags/database-postgresql":{"component":"004ec9e5","items":[{"content":"50bab564"},{"content":"2cb76395"},{"content":"f7098925"}],"metadata":"4a111132"},"/guides/tags/database-rds":{"component":"004ec9e5","items":[{"content":"ab2f7458"},{"content":"522ef453"}],"metadata":"42a63c79"},"/guides/tags/domain-operations":{"component":"004ec9e5","items":[{"content":"522ef453"}],"metadata":"68da1f92"},"/guides/tags/framework-rails":{"component":"004ec9e5","items":[{"content":"50bab564"}],"metadata":"a264e41a"},"/guides/tags/installation-guide-aws":{"component":"004ec9e5","items":[{"content":"9fe26b56"},{"content":"a156f6a6"},{"content":"3248e450"},{"content":"3e6b1f84"},{"content":"1b
633bfd"},{"content":"3986a7a9"},{"content":"de0a75d9"},{"content":"bdd6d8c6"},{"content":"a1fea8fb"},{"content":"6ce627d6"},{"content":"c8223350"},{"content":"cbb976f4"},{"content":"0c18cf89"}],"metadata":"c539337b"},"/guides/tags/installation-guide-azure":{"component":"004ec9e5","items":[{"content":"86a0e6ef"}],"metadata":"73709b64"},"/guides/tags/installation-guide-gcp":{"component":"004ec9e5","items":[{"content":"946bf02d"}],"metadata":"1e2e1850"},"/guides/tags/installation-guide-kubernetes":{"component":"004ec9e5","items":[{"content":"ff2506fd"}],"metadata":"7e863710"},"/guides/tags/installation-guide-scaleway":{"component":"004ec9e5","items":[{"content":"73d96058"}],"metadata":"a601bb0b"},"/guides/tags/language-javascript":{"component":"004ec9e5","items":[{"content":"498daee8"},{"content":"072d4c63"}],"metadata":"cb05c8fa"},"/guides/tags/language-kotlin":{"component":"004ec9e5","items":[{"content":"f7098925"}],"metadata":"dbe0f891"},"/guides/tags/language-ruby":{"component":"004ec9e5","items":[{"content":"50bab564"}],"metadata":"f7aa8e39"},"/guides/tags/language-rust":{"component":"004ec9e5","items":[{"content":"89caf623"},{"content":"b565c464"}],"metadata":"2e212509"},"/guides/tags/technology-docker":{"component":"004ec9e5","items":[{"content":"bbfbe73c"}],"metadata":"d4b6ce89"},"/guides/tags/technology-github":{"component":"004ec9e5","items":[{"content":"40ec3bc1"}],"metadata":"60ad046d"},"/guides/tags/technology-helm":{"component":"004ec9e5","items":[{"content":"8f02216a"}],"metadata":"49dea187"},"/guides/tags/technology-qovery":{"component":"004ec9e5","items":[{"content":"d2397242"},{"content":"44b423be"},{"content":"e4310ee0"},{"content":"0578cd49"},{"content":"48764d63"},{"content":"56c0a343"},{"content":"f41dc7b3"},{"content":"1a39f24c"},{"content":"da253275"},{"content":"967beaa8"},{"content":"1a6d3985"},{"content":"ff0cde69"},{"content":"cbcbf0e3"},{"content":"3248e450"},{"content":"5e5fefd2"},{"content":"36676680"},{"content":"3ecdd190"},{"content":"2cb76395"},{"content":"3088ad98"},{"content":"dfcfd2f3"},{"content":"df1c18d8"},{"content":"bc592dc7"},{"content":"acaf40e9"},{"content":"5b95bed2"},{"content":"fb1d0a83"},{"content":"9107e302"},{"content":"8d146bfd"},{"content":"60296d59"},{"content":"e1becc8e"},{"content":"b5eab6bb"},{"content":"dea3d534"},{"content":"e5b9b0aa"},{"content":"e1e1580b"},{"content":"9ecfa6fe"},{"content":"16c36934"},{"content":"16976906"},{"content":"68c0e7f9"},{"content":"e8b0321f"},{"content":"ba43933d"},{"content":"7952d159"},{"content":"c0ab55e0"},{"content":"f3d8c143"}],"metadata":"4c0b3d74"},"/guides/tags/technology-terraform":{"component":"004ec9e5","items":[{"content":"05049f86"}],"metadata":"63ea0c72"},"/guides/tags/type-guide":{"component":"004ec9e5","items":[{"content":"d2397242"},{"content":"44b423be"},{"content":"e4310ee0"},{"content":"0578cd49"},{"content":"48764d63"},{"content":"9fe26b56"},{"content":"946bf02d"},{"content":"73d96058"},{"content":"ff2506fd"},{"content":"86a0e6ef"},{"content":"f41dc7b3"},{"content":"1a39f24c"},{"content":"da253275"},{"content":"5e5fefd2"},{"content":"3e6b1f84"},{"content":"36676680"},{"content":"498daee8"},{"content":"ab2f7458"},{"content":"8f02216a"},{"content":"dea3d534"},{"content":"e5b9b0aa"},{"content":"522ef453"},{"content":"9ecfa6fe"},{"content":"16c36934"},{"content":"16976906"},{"content":"68c0e7f9"},{"content":"e8b0321f"},{"content":"05049f86"}],"metadata":"f11e9a8e"},"/guides/tags/type-tutorial":{"component":"004ec9e5","items":[{"content":"a156f6a6"},{"content":"56c0a343"},{"content":"89caf6
23"},{"content":"967beaa8"},{"content":"1a6d3985"},{"content":"ff0cde69"},{"content":"cbcbf0e3"},{"content":"3248e450"},{"content":"3ecdd190"},{"content":"50bab564"},{"content":"2cb76395"},{"content":"3088ad98"},{"content":"dfcfd2f3"},{"content":"df1c18d8"},{"content":"1b633bfd"},{"content":"bc592dc7"},{"content":"acaf40e9"},{"content":"5b95bed2"},{"content":"3986a7a9"},{"content":"de0a75d9"},{"content":"bdd6d8c6"},{"content":"b565c464"},{"content":"40ec3bc1"},{"content":"fb1d0a83"},{"content":"9107e302"},{"content":"a1fea8fb"},{"content":"8d146bfd"},{"content":"bbfbe73c"},{"content":"60296d59"},{"content":"e1becc8e"},{"content":"b5eab6bb"},{"content":"072d4c63"},{"content":"6ce627d6"},{"content":"e1e1580b"},{"content":"ba43933d"},{"content":"c8223350"},{"content":"f7098925"},{"content":"7952d159"},{"content":"c0ab55e0"},{"content":"cbb976f4"},{"content":"f3d8c143"},{"content":"0c18cf89"}],"metadata":"bf22200e"},"/guides/tutorial":{"component":"d9deea5f","items":[{"content":"a156f6a6"},{"content":"56c0a343"},{"content":"89caf623"},{"content":"967beaa8"},{"content":"1a6d3985"},{"content":"ff0cde69"},{"content":"cbcbf0e3"},{"content":"3ecdd190"},{"content":"50bab564"},{"content":"2cb76395"},{"content":"3088ad98"},{"content":"dfcfd2f3"},{"content":"df1c18d8"},{"content":"1b633bfd"},{"content":"bc592dc7"},{"content":"acaf40e9"},{"content":"5b95bed2"},{"content":"3986a7a9"},{"content":"de0a75d9"},{"content":"bdd6d8c6"},{"content":"b565c464"},{"content":"40ec3bc1"},{"content":"fb1d0a83"},{"content":"9107e302"},{"content":"a1fea8fb"},{"content":"8d146bfd"},{"content":"bbfbe73c"},{"content":"60296d59"},{"content":"e1becc8e"},{"content":"b5eab6bb"},{"content":"072d4c63"},{"content":"6ce627d6"},{"content":"e1e1580b"},{"content":"ba43933d"},{"content":"c8223350"},{"content":"f7098925"},{"content":"7952d159"},{"content":"c0ab55e0"},{"content":"cbb976f4"},{"content":"f3d8c143"},{"content":"0c18cf89"}],"metadata":"af9ec14b"},"/guides/tutorial/aws-sqs-lambda-with-qovery":{"component":"1c13b173","content":"bbedfc29"},"/guides/tutorial/aws-vpc-peering-with-qovery":{"component":"1c13b173","content":"e9c994cf"},"/guides/tutorial/blazingly-fast-preview-environments-for-nextjs-nodejs-and-mongodb-on-aws":{"component":"1c13b173","content":"94a00d4e"},"/guides/tutorial/build-e2e-testing-ephemeral-environments":{"component":"1c13b173","content":"2121549d"},"/guides/tutorial/cloudwatch-integration":{"component":"1c13b173","content":"83a41d86"},"/guides/tutorial/create-a-blazingly-fast-api-in-rust-part-1":{"component":"1c13b173","content":"db372ba8"},"/guides/tutorial/create-a-playground-environment-on-aws":{"component":"1c13b173","content":"2ea1d02e"},"/guides/tutorial/create-your-staging-environment-from-your-production-environment-on-aws":{"component":"1c13b173","content":"410a9ba0"},"/guides/tutorial/customizing-preview-url-with-qovery-cli":{"component":"1c13b173","content":"b76eb9a9"},"/guides/tutorial/data-seeding-in-postgres":{"component":"1c13b173","content":"4592dbe6"},"/guides/tutorial/deploy-jupyterhub-qovery":{"component":"1c13b173","content":"abbfd6bd"},"/guides/tutorial/deploy-rails-with-postgresql-and-sidekiq":{"component":"1c13b173","content":"a3cf753a"},"/guides/tutorial/deploy-temporal-on-kubernetes":{"component":"1c13b173","content":"49a59b02"},"/guides/tutorial/generate-qovery-api-client":{"component":"1c13b173","content":"a4401f0f"},"/guides/tutorial/getting-started-with-preview-environments-on-aws-for-beginners":{"component":"1c13b173","content":"1a3e0044"},"/guides/tutorial/gitops-with-qovery":
{"component":"1c13b173","content":"dfb1c803"},"/guides/tutorial/grafana-install":{"component":"1c13b173","content":"5b8d4026"},"/guides/tutorial/how-to-activate-sso-to-connect-to-your-eks-cluster":{"component":"1c13b173","content":"06e8d299"},"/guides/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-1":{"component":"1c13b173","content":"10dee872"},"/guides/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-2":{"component":"1c13b173","content":"a4c8ecc0"},"/guides/tutorial/how-to-build-a-cloud-version-of-your-open-source-software-part-3":{"component":"1c13b173","content":"b74d0aaa"},"/guides/tutorial/how-to-connect-to-a-managed-mongodb-instance-on-aws":{"component":"1c13b173","content":"eb0c7ce5"},"/guides/tutorial/how-to-connect-to-your-eks-cluster-with-kubectl":{"component":"1c13b173","content":"7aa59ca3"},"/guides/tutorial/how-to-create-an-rds-instance-through-aws-console":{"component":"1c13b173","content":"e4768112"},"/guides/tutorial/how-to-deploy-a-rust-rest-api-application-on-aws-with-ease":{"component":"1c13b173","content":"3da71a70"},"/guides/tutorial/how-to-deploy-your-application-on-aws-in-30-minutes":{"component":"1c13b173","content":"97f5d064"},"/guides/tutorial/how-to-integrate-qovery-with-github-actions":{"component":"1c13b173","content":"c7bfb1d3"},"/guides/tutorial/how-to-run-commands-at-application-startup":{"component":"1c13b173","content":"1d3be599"},"/guides/tutorial/how-to-use-cloudfront-with-react-frontend-application-on-qovery":{"component":"1c13b173","content":"311fe203"},"/guides/tutorial/how-to-use-lifecycle-job-to-deploy-any-kind-of-resources":{"component":"1c13b173","content":"6b7a52aa"},"/guides/tutorial/how-to-write-a-dockerfile":{"component":"1c13b173","content":"a9994e72"},"/guides/tutorial/import-your-environment-variables-with-the-qovery-cli":{"component":"1c13b173","content":"bb89e1a0"},"/guides/tutorial/kubernetes-observability-and-monitoring-with-datadog":{"component":"1c13b173","content":"b479fc9a"},"/guides/tutorial/managing-env-variables-in-create-react-app":{"component":"1c13b173","content":"a4459aa8"},"/guides/tutorial/migrate-your-application-from-heroku-to-aws":{"component":"1c13b173","content":"03dbc155"},"/guides/tutorial/monitor-and-reduce-kubernetes-spend-with-kubecost":{"component":"1c13b173","content":"4b542f80"},"/guides/tutorial/setting-up-cloudflare-and-custom-domain-on-qovery":{"component":"1c13b173","content":"e5653b8d"},"/guides/tutorial/url-shortener-api-with-kotlin":{"component":"1c13b173","content":"ab8f5b83"},"/guides/tutorial/use-an-api-gateway-in-front-of-multiple-services":{"component":"1c13b173","content":"35d9179e"},"/guides/tutorial/use-aws-iam-roles-with-qovery":{"component":"1c13b173","content":"5b5f8b70"},"/guides/tutorial/working-with-git-submodules":{"component":"1c13b173","content":"f26e55ec"},"/mailing_list":{"component":"48912b2c"},"/docs/:route":{"component":"1be78505","docsMetadata":"20ac7829"},"/docs/getting-started":{"component":"17896441","content":"d589d3a7"},"/docs/getting-started/basic-concepts":{"component":"17896441","content":"d85dc1ef"},"/docs/getting-started/deploy-my-app":{"component":"17896441","content":"4354960d"},"/docs/getting-started/how-qovery-works":{"component":"17896441","content":"cb2208c1"},"/docs/getting-started/install-qovery":{"component":"17896441","content":"1a1dfe25"},"/docs/getting-started/install-qovery/aws":{"component":"17896441","content":"4132998e"},"/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery":{"component":"178964
41","content":"e862b20f"},"/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/create-credentials":{"component":"17896441","content":"04b748dc"},"/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/faq":{"component":"17896441","content":"48dbd876"},"/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/infrastructure":{"component":"17896441","content":"c8dfbbe7"},"/docs/getting-started/install-qovery/aws/cluster-managed-by-qovery/quickstart":{"component":"17896441","content":"099598c5"},"/docs/getting-started/install-qovery/aws/self-managed-cluster":{"component":"17896441","content":"ab1ec509"},"/docs/getting-started/install-qovery/azure":{"component":"17896441","content":"115eba8e"},"/docs/getting-started/install-qovery/azure/cluster-managed-by-qovery":{"component":"17896441","content":"0f632e24"},"/docs/getting-started/install-qovery/azure/cluster-managed-by-qovery/quickstart":{"component":"17896441","content":"256f5506"},"/docs/getting-started/install-qovery/azure/self-managed-cluster":{"component":"17896441","content":"ac0a13b6"},"/docs/getting-started/install-qovery/gcp":{"component":"17896441","content":"d99b987c"},"/docs/getting-started/install-qovery/gcp/cluster-managed-by-qovery":{"component":"17896441","content":"cc3d7007"},"/docs/getting-started/install-qovery/gcp/cluster-managed-by-qovery/create-credentials":{"component":"17896441","content":"be464708"},"/docs/getting-started/install-qovery/gcp/cluster-managed-by-qovery/quickstart":{"component":"17896441","content":"150479d1"},"/docs/getting-started/install-qovery/gcp/self-managed-cluster":{"component":"17896441","content":"b49a87dd"},"/docs/getting-started/install-qovery/kubernetes":{"component":"17896441","content":"87080b01"},"/docs/getting-started/install-qovery/kubernetes/byok-config":{"component":"17896441","content":"3ccabad0"},"/docs/getting-started/install-qovery/kubernetes/faq":{"component":"17896441","content":"6f4ba85a"},"/docs/getting-started/install-qovery/kubernetes/quickstart":{"component":"17896441","content":"9d099993"},"/docs/getting-started/install-qovery/kubernetes/validate-installation":{"component":"17896441","content":"b91b4421"},"/docs/getting-started/install-qovery/local":{"component":"17896441","content":"60154927"},"/docs/getting-started/install-qovery/scaleway":{"component":"17896441","content":"9c253a96"},"/docs/getting-started/install-qovery/scaleway/cluster-managed-by-qovery":{"component":"17896441","content":"b0059451"},"/docs/getting-started/install-qovery/scaleway/cluster-managed-by-qovery/create-credentials":{"component":"17896441","content":"40c64f54"},"/docs/getting-started/install-qovery/scaleway/cluster-managed-by-qovery/faq":{"component":"17896441","content":"b557ef1e"},"/docs/getting-started/install-qovery/scaleway/cluster-managed-by-qovery/quickstart":{"component":"17896441","content":"27d7a36c"},"/docs/getting-started/install-qovery/scaleway/self-managed-cluster":{"component":"17896441","content":"952063ba"},"/docs/getting-started/what-is-qovery":{"component":"17896441","content":"68b95634"},"/docs/getting-started/whats-next":{"component":"17896441","content":"543e268a"},"/docs/security-and-compliance":{"component":"17896441","content":"fcb698a1"},"/docs/security-and-compliance/backup-and-restore":{"component":"17896441","content":"b98931a2"},"/docs/security-and-compliance/encryption":{"component":"17896441","content":"2486bcfc"},"/docs/security-and-compliance/gdpr":{"component":"17896441","content":"7278678a"},"/docs/security-and-compliance/s
oc2":{"component":"17896441","content":"cf490432"},"/docs/useful-resources/faq":{"component":"17896441","content":"59157ba2"},"/docs/useful-resources/help-and-support":{"component":"17896441","content":"d2075f7f"},"/docs/using-qovery":{"component":"17896441","content":"56cfbe62"},"/docs/using-qovery/audit-logs":{"component":"17896441","content":"b8490823"},"/docs/using-qovery/configuration":{"component":"17896441","content":"fc376fea"},"/docs/using-qovery/configuration/advanced-settings":{"component":"17896441","content":"4f6caeac"},"/docs/using-qovery/configuration/application":{"component":"17896441","content":"8d5726d6"},"/docs/using-qovery/configuration/application-health-checks":{"component":"17896441","content":"91473650"},"/docs/using-qovery/configuration/cloud-service-provider":{"component":"17896441","content":"33b1fe0f"},"/docs/using-qovery/configuration/cluster-advanced-settings":{"component":"17896441","content":"2f1afd92"},"/docs/using-qovery/configuration/clusters":{"component":"17896441","content":"dc00a797"},"/docs/using-qovery/configuration/clusters/aws":{"component":"17896441","content":"5adf400e"},"/docs/using-qovery/configuration/clusters/aws-with-karpenter":{"component":"17896441","content":"acbf1af7"},"/docs/using-qovery/configuration/clusters/gcp":{"component":"17896441","content":"54ca7d36"},"/docs/using-qovery/configuration/clusters/scaleway":{"component":"17896441","content":"4e05c534"},"/docs/using-qovery/configuration/cronjob":{"component":"17896441","content":"54ad54c7"},"/docs/using-qovery/configuration/database":{"component":"17896441","content":"9feef5a0"},"/docs/using-qovery/configuration/database/mongodb":{"component":"17896441","content":"9ddfc3dc"},"/docs/using-qovery/configuration/database/mysql":{"component":"17896441","content":"accdb2b4"},"/docs/using-qovery/configuration/database/postgresql":{"component":"17896441","content":"baf9cc25"},"/docs/using-qovery/configuration/database/redis":{"component":"17896441","content":"c536ba8c"},"/docs/using-qovery/configuration/deployment-rule":{"component":"17896441","content":"db96bb7d"},"/docs/using-qovery/configuration/environment":{"component":"17896441","content":"a4a09dfe"},"/docs/using-qovery/configuration/environment-variable":{"component":"17896441","content":"07c2f310"},"/docs/using-qovery/configuration/helm":{"component":"17896441","content":"02ec211a"},"/docs/using-qovery/configuration/lifecycle-job":{"component":"17896441","content":"16557ade"},"/docs/using-qovery/configuration/object-storage":{"component":"17896441","content":"9d3c5a68"},"/docs/using-qovery/configuration/organization":{"component":"17896441","content":"ff91a867"},"/docs/using-qovery/configuration/organization/api-token":{"component":"17896441","content":"1d187ae3"},"/docs/using-qovery/configuration/organization/container-registry":{"component":"17896441","content":"6b0e113a"},"/docs/using-qovery/configuration/organization/git-repository-access":{"component":"17896441","content":"9406f053"},"/docs/using-qovery/configuration/organization/helm-repository":{"component":"17896441","content":"2737c3be"},"/docs/using-qovery/configuration/organization/labels-annotations":{"component":"17896441","content":"91bdc394"},"/docs/using-qovery/configuration/organization/members-rbac":{"component":"17896441","content":"b2880863"},"/docs/using-qovery/configuration/project":{"component":"17896441","content":"bd10520b"},"/docs/using-qovery/configuration/provider":{"component":"17896441","content":"89de14d0"},"/docs/using-qovery/configuration/service-he
alth-checks":{"component":"17896441","content":"073aa0b0"},"/docs/using-qovery/configuration/user-account":{"component":"17896441","content":"376f4c3b"},"/docs/using-qovery/deployment":{"component":"17896441","content":"8ca6d3cf"},"/docs/using-qovery/deployment/deploying-with-auto-deploy":{"component":"17896441","content":"39686ad9"},"/docs/using-qovery/deployment/deploying-with-ci-cd":{"component":"17896441","content":"36b4c04d"},"/docs/using-qovery/deployment/deployment-actions":{"component":"17896441","content":"8ae34d0a"},"/docs/using-qovery/deployment/deployment-history":{"component":"17896441","content":"47a329cb"},"/docs/using-qovery/deployment/deployment-pipeline":{"component":"17896441","content":"55ef6d6a"},"/docs/using-qovery/deployment/deployment-strategies":{"component":"17896441","content":"b79e7411"},"/docs/using-qovery/deployment/image-mirroring":{"component":"17896441","content":"6308ca27"},"/docs/using-qovery/deployment/logs":{"component":"17896441","content":"6ebd4d49"},"/docs/using-qovery/deployment/running-and-deployment-statuses":{"component":"17896441","content":"e3c664e0"},"/docs/using-qovery/integration":{"component":"17896441","content":"8d1c77c1"},"/docs/using-qovery/integration/api-integration":{"component":"17896441","content":"d28d5470"},"/docs/using-qovery/integration/container-registry":{"component":"17896441","content":"7f79072b"},"/docs/using-qovery/integration/continuous-integration":{"component":"17896441","content":"1772e35f"},"/docs/using-qovery/integration/continuous-integration/circle-ci":{"component":"17896441","content":"1aa86e56"},"/docs/using-qovery/integration/continuous-integration/github-actions":{"component":"17896441","content":"3a11bd48"},"/docs/using-qovery/integration/continuous-integration/gitlab-ci":{"component":"17896441","content":"120e882c"},"/docs/using-qovery/integration/continuous-integration/jenkins":{"component":"17896441","content":"4dcdbf34"},"/docs/using-qovery/integration/git-repository":{"component":"17896441","content":"2a88660b"},"/docs/using-qovery/integration/helm-repository":{"component":"17896441","content":"8bd1b610"},"/docs/using-qovery/integration/iac":{"component":"17896441","content":"bfcdd23f"},"/docs/using-qovery/integration/iac/cloudformation":{"component":"17896441","content":"29def772"},"/docs/using-qovery/integration/iac/other":{"component":"17896441","content":"95683447"},"/docs/using-qovery/integration/iac/terraform":{"component":"17896441","content":"8e32e4fc"},"/docs/using-qovery/integration/monitoring":{"component":"17896441","content":"592d28ca"},"/docs/using-qovery/integration/monitoring/datadog":{"component":"17896441","content":"d471c358"},"/docs/using-qovery/integration/monitoring/new-relic":{"component":"17896441","content":"e1e0a511"},"/docs/using-qovery/integration/secret-manager":{"component":"17896441","content":"888595cd"},"/docs/using-qovery/integration/secret-manager/aws-secrets-manager":{"component":"17896441","content":"dab3a2be"},"/docs/using-qovery/integration/secret-manager/doppler":{"component":"17896441","content":"5e60e078"},"/docs/using-qovery/integration/slack":{"component":"17896441","content":"40a919e7"},"/docs/using-qovery/integration/terraform-provider":{"component":"17896441","content":"f9df4186"},"/docs/using-qovery/integration/webhook":{"component":"17896441","content":"7df50433"},"/docs/using-qovery/interface":{"component":"17896441","content":"3a03b8f9"},"/docs/using-qovery/interface/cli":{"component":"17896441","content":"d9a4c8ef"},"/docs/using-qovery/interface/rest-api
":{"component":"17896441","content":"c3f02c14"},"/docs/using-qovery/interface/terraform-interface":{"component":"17896441","content":"f0f90e68"},"/docs/using-qovery/interface/web-interface":{"component":"17896441","content":"58379094"},"/docs/using-qovery/maintenance":{"component":"17896441","content":"ac2c90fd"},"/docs/using-qovery/troubleshoot":{"component":"17896441","content":"b4dda200"},"/docs/using-qovery/troubleshoot/cluster-troubleshoot":{"component":"17896441","content":"3cfde410"},"/docs/using-qovery/troubleshoot/service-deployment-troubleshoot":{"component":"17896441","content":"1350cb71"},"/docs/using-qovery/troubleshoot/service-run-troubleshoot":{"component":"17896441","content":"b538f6fb"}}')},function(e,t,n){var o,r;void 0===(r="function"==typeof(o=function(){var e,t,n={version:"0.2.0"},o=n.settings={minimum:.08,easing:"ease",positionUsing:"",speed:200,trickle:!0,trickleRate:.02,trickleSpeed:800,showSpinner:!0,barSelector:'[role="bar"]',spinnerSelector:'[role="spinner"]',parent:"body",template:'