diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 00000000..31424d65
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,74 @@
+name: Release Charts
+
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "charts/**"
+
+jobs:
+  pre-release:
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Block concurrent releases
+        uses: softprops/turnstyle@v1
+        with:
+          continue-after-seconds: 180
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  release:
+    needs: pre-release
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+      - name: Install Helm
+        uses: azure/setup-helm@v1
+        with:
+          version: v3.4.0
+
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@v1.1.0
+        with:
+          charts_repo_url: https://gandazgul.github.io/k8s-infrastructure/helmrepo/
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+
+  # Update the generated timestamp in the index.yaml
+  # needed until https://github.com/helm/chart-releaser/issues/90
+  # or helm/chart-releaser-action supports this
+  post-release:
+    needs: release
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          ref: "gh-pages"
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+      - name: Commit and push timestamp updates
+        run: |
+          if [[ -f index.yaml ]]; then
+            export generated_date=$(date --utc +%FT%T.%9NZ)
+            sed -i -e "s/^generated:.*/generated: \"$generated_date\"/" index.yaml
+            git add index.yaml
+            git commit -sm "Update generated timestamp [ci-skip]" || exit 0
+            git push
+          fi
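
For reference, once this workflow publishes charts to the gh-pages branch, the resulting repo can be consumed by any local Helm 3 client. A minimal sketch, not part of the change itself; the alias "k8s-infrastructure" is an arbitrary choice:

    # Add the chart repo that chart-releaser publishes and browse its charts
    helm repo add k8s-infrastructure https://gandazgul.github.io/k8s-infrastructure/helmrepo/
    helm repo update
    helm search repo k8s-infrastructure
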
diff --git a/.idea/infrastructure.iml b/.idea/k8s-infrastructure.iml
similarity index 78%
rename from .idea/infrastructure.iml
rename to .idea/k8s-infrastructure.iml
index 75391074..b76bd97d 100644
--- a/.idea/infrastructure.iml
+++ b/.idea/k8s-infrastructure.iml
@@ -6,9 +6,6 @@
-
-
-
diff --git a/.idea/modules.xml b/.idea/modules.xml
index 60287d3e..4fe5de73 100644
--- a/.idea/modules.xml
+++ b/.idea/modules.xml
@@ -2,7 +2,7 @@
-
+
\ No newline at end of file
diff --git a/README.md b/README.md
index 832dd53a..313e761f 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ created that pods can use as volumes. There's a k8s cron job included to make di
## My Home Setup
A small business server running as a master node and worker. I plan to add at least one other
-node to learn to manage a "cluster" and to try and automate node onboarding. I've tested the manual node onboarding with VMs and it works well. Look at this script [https://github.com/gandazgul/k8s-infrastructure/blob/master/k8s-config/2-configK8SNode.sh]()
+node to learn to manage a "cluster" and to try to automate node onboarding. I've tested the manual node onboarding with VMs, and it works well. Look at this script: [https://github.com/gandazgul/k8s-infrastructure/blob/master/k8s-config/2-configK8SNode.sh](https://github.com/gandazgul/k8s-infrastructure/blob/master/k8s-config/2-configK8SNode.sh)
## Helm repo
diff --git a/index.md b/index.md
index eda9ff3b..8a084cb6 100644
--- a/index.md
+++ b/index.md
@@ -3,7 +3,7 @@
This is a collection of scripts to deploy kubernetes on Fedora. Tested on Fedora 31.
It's also a collection of helm charts that I developed or customized (See [Repo](#helm-repo)), as well as [helmfiles](https://github.com/roboll/helmfile/)
-to deploy all of the supported applications.
+to deploy all the supported applications.
The storage is handled with PersistenceVolumes mapped to mount points on the host and pre-existing claims
created that pods can use as volumes. There's a k8s cron job included to make differential backups between the main mount point and the backup one.
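
For context on the storage setup described above, the pre-created volumes, claims, and the backup cron job can be inspected with standard kubectl commands once the cluster is running. A minimal sketch; the exact resource names are defined by the charts and are not shown here:

    # List host-path PersistentVolumes and the claims bound to them
    kubectl get pv
    kubectl get pvc --all-namespaces
    # The differential backup runs as a CronJob; check its schedule and last run
    kubectl get cronjobs --all-namespaces
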
@@ -16,7 +16,9 @@ created that pods can use as volumes. There's a k8s cron job included to make di
## My Home Setup
A small business server running as a master node and worker. I plan to add at least one other
-node to learn to manage a "cluster" and to try and automate node onboarding. I've tested the manual node onboarding with VMs and it works well. Look at this script [https://github.com/gandazgul/k8s-infrastructure/blob/master/k8s-config/2-configK8SNode.sh]()
+node to learn to manage a "cluster" and to try to automate node onboarding. I've tested the
+manual node onboarding with VMs, and it works well.
+Look at this script: [https://github.com/gandazgul/k8s-infrastructure/blob/master/k8s-config/2-configK8SNode.sh](https://github.com/gandazgul/k8s-infrastructure/blob/master/k8s-config/2-configK8SNode.sh)
## Helm repo
@@ -37,16 +39,18 @@ By following these steps you will install a fully functioning kubernetes master
points as I like them
4. Copy the scripts over `scp -r ./k8s-config fedora-ip:~/`
5. `ssh fedora-ip`
-6. Run `~/k8s-config/2-configK8SMaster` - This will install K8s and configure the master to run pods, it will also install
+6. Run your modified `~/k8s-config/1-fedoraPostInstall.sh`
+7. Then run `~/k8s-config/2-configK8SMaster` - This will install K8s and configure the master to run pods; it will also install the
Flannel network plugin
* Wait for the flannel for your architecture to show `1` in all columns then press ctrl+c
-7. If something fails, you can reset with `sudo kubeadm reset`, delete kubeadminit.lock and try again, all of the
+8. If something fails, you can reset with `sudo kubeadm reset`, delete kubeadminit.lock, and try again; all the
scripts are safe to re-run.
-Verify Kubelet that is running with `sudo systemctl status kubelet`
-Once Flannel is working:
-8. Install Storage, Helm, etc. run `3-installStorageAndHelm.sh`
+9. Verify that Kubelet is running with `sudo systemctl status kubelet`
+Once Flannel is working and you've verified kubelet:
+10. Install Storage, Helm, etc.: run `3-installStorageAndHelm.sh`
This will install a hostpath auto provisioner for quick testing of new pod configs, it will also install the helm
-client with the tiller and diff plugins.
+client with its plugins.
-9. Verify kubectl works: (NOTE: Kubectl does not need sudo, it will fail with sudo)
+11. Verify kubectl works: (NOTE: Kubectl does not need sudo, it will fail with sudo)
* `kubectl get nodes` ← gets all nodes, you should see your node listed and `Ready`
* `kubectl get all --all-namespaces` ← shows everything that’s running in kubernetes
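
As a companion to step 7 above, the "show `1` in all columns" check refers to the Flannel DaemonSet status. A minimal sketch for watching it, assuming Flannel was deployed into the kube-system namespace as in the older manifests (newer manifests use a separate kube-flannel namespace):

    # Watch the Flannel DaemonSet until DESIRED/CURRENT/READY/AVAILABLE all show 1, then press ctrl+c
    kubectl get daemonset --namespace kube-system --watch
    # Kubelet health can be confirmed at the same time (step 9)
    sudo systemctl status kubelet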