Documentation Restructure

platforms/containerization/kubernetes/deployment/k8s.md (new file)
@@ -0,0 +1,187 @@
# Deploy Generic Kubernetes
The instructions outlined below assume you are deploying the environment using Ansible playbooks, either via Ansible's CLI or AWX.

### Deploy K8S User
```yaml title="01-deploy-k8s-user.yml"
- hosts: 'controller-nodes, worker-nodes'
  become: yes

  tasks:
    - name: create the k8sadmin user account
      user: name=k8sadmin append=yes state=present createhome=yes shell=/bin/bash

    - name: allow 'k8sadmin' to use sudo without needing a password
      lineinfile:
        dest: /etc/sudoers
        line: 'k8sadmin ALL=(ALL) NOPASSWD: ALL'
        validate: 'visudo -cf %s'

    - name: set up authorized keys for the k8sadmin user
      authorized_key: user=k8sadmin key="{{ item }}"
      with_file:
        - ~/.ssh/id_rsa.pub
```

### Install K8S
```yaml title="02-install-k8s.yml"
---
- hosts: "controller-nodes, worker-nodes"
  remote_user: nicole
  become: yes
  become_method: sudo
  become_user: root
  gather_facts: yes
  connection: ssh

  tasks:
    - name: Create containerd config file
      file:
        path: "/etc/modules-load.d/containerd.conf"
        state: "touch"

    - name: Add conf for containerd
      blockinfile:
        path: "/etc/modules-load.d/containerd.conf"
        block: |
          overlay
          br_netfilter

    - name: modprobe
      shell: |
        sudo modprobe overlay
        sudo modprobe br_netfilter

    - name: Set system configurations for Kubernetes networking
      file:
        path: "/etc/sysctl.d/99-kubernetes-cri.conf"
        state: "touch"

    - name: Add sysctl settings for Kubernetes networking
      blockinfile:
        path: "/etc/sysctl.d/99-kubernetes-cri.conf"
        block: |
          net.bridge.bridge-nf-call-iptables = 1
          net.ipv4.ip_forward = 1
          net.bridge.bridge-nf-call-ip6tables = 1

    - name: Apply new settings
      command: sudo sysctl --system

    - name: install containerd
      shell: |
        sudo apt-get update && sudo apt-get install -y containerd
        sudo mkdir -p /etc/containerd
        sudo containerd config default | sudo tee /etc/containerd/config.toml
        sudo systemctl restart containerd

    - name: disable swap
      shell: |
        sudo swapoff -a
        sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

    - name: install and configure dependencies
      shell: |
        sudo apt-get update && sudo apt-get install -y apt-transport-https curl
        curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -

    - name: Create kubernetes repo file
      file:
        path: "/etc/apt/sources.list.d/kubernetes.list"
        state: "touch"

    - name: Add K8s Source
      blockinfile:
        path: "/etc/apt/sources.list.d/kubernetes.list"
        block: |
          deb https://apt.kubernetes.io/ kubernetes-xenial main

    - name: Install Kubernetes
      shell: |
        sudo apt-get update
        sudo apt-get install -y kubelet=1.20.1-00 kubeadm=1.20.1-00 kubectl=1.20.1-00
        sudo apt-mark hold kubelet kubeadm kubectl
```

### Configure ControlPlanes
```yaml title="03-configure-controllers.yml"
- hosts: controller-nodes
  become: yes

  tasks:
    - name: Initialize the K8S Cluster
      shell: kubeadm init --pod-network-cidr=10.244.0.0/16
      args:
        chdir: $HOME
        creates: cluster_initialized.txt

    - name: Create .kube directory
      become: yes
      become_user: k8sadmin
      file:
        path: /home/k8sadmin/.kube
        state: directory
        mode: 0755

    - name: Copy admin.conf to user's kube config
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /home/k8sadmin/.kube/config
        remote_src: yes
        owner: k8sadmin

    - name: Install the Pod Network
      become: yes
      become_user: k8sadmin
      shell: kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
      args:
        chdir: $HOME

    - name: Get the token for joining the worker nodes
      become: yes
      become_user: k8sadmin
      shell: kubeadm token create --print-join-command
      register: kubernetes_join_command

    - name: Output Join Command to the Screen
      debug:
        msg: "{{ kubernetes_join_command.stdout }}"

    - name: Copy join command to local file.
      become: yes
      local_action: copy content="{{ kubernetes_join_command.stdout_lines[0] }}" dest="/tmp/kubernetes_join_command" mode=0777
```
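
Before joining the workers, a quick sanity check of the control plane is worthwhile. This is not part of the original playbooks, just a suggested verification run on the controller node as the `k8sadmin` user (whose kubeconfig was copied above):

``` sh
# Run on a controller node as k8sadmin
kubectl get nodes      # the controller should eventually report "Ready" once Calico is up
kubectl get pods -A    # core components and calico pods should reach "Running"
```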

### Join Worker Node(s)
```yaml title="04-join-worker-nodes.yml"
- hosts: worker-nodes
  become: yes
  gather_facts: yes

  tasks:
    - name: Copy join command from Ansible host to the worker nodes.
      become: yes
      copy:
        src: /tmp/kubernetes_join_command
        dest: /tmp/kubernetes_join_command
        mode: 0777

    - name: Join the Worker nodes to the cluster.
      become: yes
      command: sh /tmp/kubernetes_join_command
      register: joined_or_not
```

### Host Inventory File Template
```ini title="hosts"
[controller-nodes]
k8s-ctrlr-01 ansible_host=192.168.3.6 ansible_user=nicole

[worker-nodes]
k8s-node-01 ansible_host=192.168.3.4 ansible_user=nicole
k8s-node-02 ansible_host=192.168.3.5 ansible_user=nicole

[all:vars]
ansible_become_user=root
ansible_become_method=sudo
```
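
With the playbooks and inventory above saved in one directory, a minimal CLI run could look like the following sketch (in AWX you would instead add the same files as a Project and Job Templates):

``` sh
# Run the playbooks in order against the inventory above (saved as "hosts")
ansible-playbook -i hosts 01-deploy-k8s-user.yml
ansible-playbook -i hosts 02-install-k8s.yml
ansible-playbook -i hosts 03-configure-controllers.yml
ansible-playbook -i hosts 04-join-worker-nodes.yml
```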

platforms/containerization/kubernetes/deployment/rancher-rke2.md (new file)
@@ -0,0 +1,218 @@
# Deploy RKE2 Cluster
Deploying a Rancher RKE2 cluster is fairly straightforward. Just run the commands in order and pay attention to which steps apply to all machines in the cluster, which apply only to the controlplanes, and which apply only to the workers.

!!! note "Prerequisites"
    This document assumes you are running **Ubuntu Server 24.04.3 LTS**. It also assumes that every node in the cluster has a unique hostname.

## All Cluster Nodes
Assume all commands below are run as root (e.g. after `sudo su`).

### Run Updates
You will need to run these commands on every server that participates in the cluster, then reboot the server **PRIOR** to moving on to the next section.
``` sh
apt update && apt upgrade -y
apt install nfs-common iptables nano htop -y
echo "Adding 15 Second Delay to Ensure Previous Commands finish running"
sleep 15
apt autoremove -y
reboot
```
!!! tip
    If this is a virtual machine, now would be the best time to take a checkpoint / snapshot of the VM before moving forward, in case you need to roll back the server(s) if you accidentally misconfigure something.

## Initial ControlPlane Node
When you are starting a brand-new cluster, you need to create what is referred to as the "Initial ControlPlane". This node is responsible for bootstrapping the entire cluster together in the beginning, and will eventually assist in handling container workloads and orchestrating operations in the cluster.

!!! warning
    You only want to follow the instructions for the **initial** controlplane once. Running them on another machine to create additional controlplanes will cause that machine to try to set up a second, separate cluster, wreaking havoc. Instead, follow the instructions in the next section to add redundant controlplanes.

### Download and Run the Server Deployment Script
``` sh
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE=server sh -
```

### Enable & Configure Services
``` sh
# Start and Enable the Kubernetes Service
systemctl enable --now rke2-server.service

# Symlink the Kubectl Management Command
ln -s $(find /var/lib/rancher/rke2/data/ -name kubectl) /usr/local/bin/kubectl

# Temporarily Export the Kubeconfig to manage the cluster from the CLI during initial deployment
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml

# Add a Delay to Allow the Cluster to Finish Initializing / Get Ready
echo "Adding 60 Second Delay to Ensure Cluster is Ready - Run (kubectl get node) if the server is still not ready to know when to proceed."
sleep 60

# Check that the Cluster Node is Running and Ready
kubectl get node
```

!!! example
    When the cluster is ready, you should see something like this when you run `kubectl get node`.

    This may be a good point to step away for 5 minutes, get a cup of coffee, and come back so it has a little extra time to be fully ready before moving on.
    ```
    root@awx:/home/nicole# kubectl get node
    NAME   STATUS   ROLES                       AGE     VERSION
    awx    Ready    control-plane,etcd,master   3m21s   v1.26.12+rke2r1
    ```

### Install Helm, Cert-Manager, Rancher, and Longhorn
``` sh
# Install Helm
curl -L https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-4 | bash

# Add the Necessary Helm Repositories
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo add jetstack https://charts.jetstack.io
helm repo add longhorn https://charts.longhorn.io
helm repo update

# Install the Cert-Manager CRDs
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.19.2/cert-manager.crds.yaml

# Install Cert-Manager (Jetstack) via Helm
helm upgrade -i cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace

# Install Rancher via Helm
helm upgrade -i rancher rancher-latest/rancher --create-namespace --namespace cattle-system --set hostname=rke2-cluster.bunny-lab.io --set bootstrapPassword=bootStrapAllTheThings --set replicas=1

# Install Longhorn via Helm
helm upgrade -i longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
```

!!! example "Be Patient - Come back in 20 Minutes"
    Rancher is going to take a while to fully set itself up, and things will appear broken in the meantime. Depending on how many resources you gave the cluster, it may take more or less time. A good ballpark is giving it at least 20 minutes to deploy itself before attempting to log into the webUI at https://rke2-cluster.bunny-lab.io.

    If you want to keep an eye on the deployment progress, you can run the following command: `KUBECONFIG=/etc/rancher/rke2/rke2.yaml kubectl get pods --all-namespaces`
    The output should look like it does below:
    ```
    NAMESPACE                         NAME                                                    READY   STATUS      RESTARTS        AGE
    cattle-fleet-system               fleet-controller-59cdb866d7-94r2q                       1/1     Running     0               4m31s
    cattle-fleet-system               gitjob-f497866f8-t726l                                  1/1     Running     0               4m31s
    cattle-provisioning-capi-system   capi-controller-manager-6f87d6bd74-xx22v                1/1     Running     0               55s
    cattle-system                     helm-operation-28dcp                                    0/2     Completed   0               109s
    cattle-system                     helm-operation-f9qww                                    0/2     Completed   0               4m39s
    cattle-system                     helm-operation-ft8gq                                    0/2     Completed   0               26s
    cattle-system                     helm-operation-m27tq                                    0/2     Completed   0               61s
    cattle-system                     helm-operation-qrgj8                                    0/2     Completed   0               5m11s
    cattle-system                     rancher-64db9f48c-qm6v4                                 1/1     Running     3 (8m8s ago)    13m
    cattle-system                     rancher-webhook-65f5455d9c-tzbv4                        1/1     Running     0               98s
    cert-manager                      cert-manager-55cf8685cb-86l4n                           1/1     Running     0               14m
    cert-manager                      cert-manager-cainjector-fbd548cb8-9fgv4                 1/1     Running     0               14m
    cert-manager                      cert-manager-webhook-655b4d58fb-s2cjh                   1/1     Running     0               14m
    kube-system                       cloud-controller-manager-awx                            1/1     Running     5 (3m37s ago)   19m
    kube-system                       etcd-awx                                                1/1     Running     0               19m
    kube-system                       helm-install-rke2-canal-q9vm6                           0/1     Completed   0               19m
    kube-system                       helm-install-rke2-coredns-q8w57                         0/1     Completed   0               19m
    kube-system                       helm-install-rke2-ingress-nginx-54vgk                   0/1     Completed   0               19m
    kube-system                       helm-install-rke2-metrics-server-87zhw                  0/1     Completed   0               19m
    kube-system                       helm-install-rke2-snapshot-controller-crd-q6bh6         0/1     Completed   0               19m
    kube-system                       helm-install-rke2-snapshot-controller-tjk5f             0/1     Completed   0               19m
    kube-system                       helm-install-rke2-snapshot-validation-webhook-r9pcn     0/1     Completed   0               19m
    kube-system                       kube-apiserver-awx                                      1/1     Running     0               19m
    kube-system                       kube-controller-manager-awx                             1/1     Running     5 (3m37s ago)   19m
    kube-system                       kube-proxy-awx                                          1/1     Running     0               19m
    kube-system                       kube-scheduler-awx                                      1/1     Running     5 (3m35s ago)   19m
    kube-system                       rke2-canal-gm45f                                        2/2     Running     0               19m
    kube-system                       rke2-coredns-rke2-coredns-565dfc7d75-qp64p              1/1     Running     0               19m
    kube-system                       rke2-coredns-rke2-coredns-autoscaler-6c48c95bf9-fclz5   1/1     Running     0               19m
    kube-system                       rke2-ingress-nginx-controller-lhjwq                     1/1     Running     0               17m
    kube-system                       rke2-metrics-server-c9c78bd66-fnvx8                     1/1     Running     0               18m
    kube-system                       rke2-snapshot-controller-6f7bbb497d-dw6v4               1/1     Running     4 (6m17s ago)   18m
    kube-system                       rke2-snapshot-validation-webhook-65b5675d5c-tdfcf       1/1     Running     0               18m
    longhorn-system                   csi-attacher-785fd6545b-6jfss                           1/1     Running     1 (6m17s ago)   9m39s
    longhorn-system                   csi-attacher-785fd6545b-k7jdh                           1/1     Running     0               9m39s
    longhorn-system                   csi-attacher-785fd6545b-rr6k4                           1/1     Running     0               9m39s
    longhorn-system                   csi-provisioner-8658f9bd9c-58dc8                        1/1     Running     0               9m38s
    longhorn-system                   csi-provisioner-8658f9bd9c-g8cv2                        1/1     Running     0               9m38s
    longhorn-system                   csi-provisioner-8658f9bd9c-mbwh2                        1/1     Running     0               9m38s
    longhorn-system                   csi-resizer-68c4c75bf5-d5vdd                            1/1     Running     0               9m36s
    longhorn-system                   csi-resizer-68c4c75bf5-r96lf                            1/1     Running     0               9m36s
    longhorn-system                   csi-resizer-68c4c75bf5-tnggs                            1/1     Running     0               9m36s
    longhorn-system                   csi-snapshotter-7c466dd68f-5szxn                        1/1     Running     0               9m30s
    longhorn-system                   csi-snapshotter-7c466dd68f-w96lw                        1/1     Running     0               9m30s
    longhorn-system                   csi-snapshotter-7c466dd68f-xt42z                        1/1     Running     0               9m30s
    longhorn-system                   engine-image-ei-68f17757-jn986                          1/1     Running     0               10m
    longhorn-system                   instance-manager-fab02be089480f35c7b2288110eb9441      1/1     Running     0               10m
    longhorn-system                   longhorn-csi-plugin-5j77p                               3/3     Running     0               9m30s
    longhorn-system                   longhorn-driver-deployer-75fff9c757-dps2j               1/1     Running     0               13m
    longhorn-system                   longhorn-manager-2vfr4                                  1/1     Running     4 (10m ago)     13m
    longhorn-system                   longhorn-ui-7dc586665c-hzt6k                            1/1     Running     0               13m
    longhorn-system                   longhorn-ui-7dc586665c-lssfj                            1/1     Running     0               13m
    ```

!!! note
    Be sure to write down the "*bootstrapPassword*" variable for when you log into Rancher later. In this example, the password is `bootStrapAllTheThings`.

    Also be sure to adjust the "*hostname*" variable to reflect the FQDN of the cluster. You can leave it at the default and change it upon first login if you want, but it matters for the last step, where you adjust DNS. The example given is `rke2-cluster.bunny-lab.io`.

### Log into the webUI
At this point, you can log into the webUI at https://rke2-cluster.bunny-lab.io using the default `bootStrapAllTheThings` password (or whatever password you configured). You can change the password after logging in by navigating to **Home > Users & Authentication > "..." > Edit Config > "New Password" > Save**. From here, you can deploy more nodes, or deploy single-node workloads such as an Ansible AWX Operator.

### Rebooting the ControlNode
If you ever find yourself needing to reboot the ControlNode and want to run kubectl CLI commands, you will need to run the command below to import the cluster credentials after every reboot. Reboots should take much less time to get the cluster ready again compared to the original deployment.
```
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
```
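
If you would rather not re-export this on every login, one option (an assumption on my part, not from the original document) is to persist it in root's shell profile:

``` sh
# Persist the kubeconfig path for future root shells on the ControlNode
echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> /root/.bashrc
```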

## Create Additional ControlPlane Node(s)
This is the part where you can add more controlplane nodes to give the RKE2 cluster additional redundancy. This is important for high-availability environments.

### Download and Run the Server Deployment Script
``` sh
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE=server sh -
```

### Configure and Connect to the Existing/Initial ControlPlane Node
``` sh
# Symlink the Kubectl Management Command
ln -s $(find /var/lib/rancher/rke2/data/ -name kubectl) /usr/local/bin/kubectl

# Manually Create a Rancher-Kubernetes-Specific Config File
mkdir -p /etc/rancher/rke2/

# Inject the IP of the Initial ControlPlane Node into the Config File
echo "server: https://192.168.3.69:9345" > /etc/rancher/rke2/config.yaml

# Inject the Initial ControlPlane Node trust token into the config file
# You can get the token by running the following command on the first node in the cluster: `cat /var/lib/rancher/rke2/server/node-token`
echo "token: K10aa0632863da4ae4e2ccede0ca6a179f510a0eee0d6d6eb53dca96050048f055e::server:3b130ceebfbb7ed851cd990fe55e6f3a" >> /etc/rancher/rke2/config.yaml

# Start and Enable the Kubernetes Service
systemctl enable --now rke2-server.service
```
!!! note
    Be sure to change the IP address of the initial controlplane node in the example above to match your environment.

## Add Worker Node(s)
Worker nodes are the bread-and-butter of a Kubernetes cluster. They handle running container workloads and act as storage for the cluster (this can be configured to varying degrees based on your needs).

### Download and Run the Agent (Worker) Deployment Script
``` sh
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE=agent sh -
```

### Configure and Connect to the RKE2 Cluster
``` sh
# Manually Create a Rancher-Kubernetes-Specific Config File
mkdir -p /etc/rancher/rke2/

# Inject the IP of the Initial ControlPlane Node into the Config File
echo "server: https://192.168.3.21:9345" > /etc/rancher/rke2/config.yaml

# Inject the Initial ControlPlane Node trust token into the config file
# You can get the token by running the following command on the first node in the cluster: `cat /var/lib/rancher/rke2/server/node-token`
echo "token: K10aa0632863da4ae4e2ccede0ca6a179f510a0eee0d6d6eb53dca96050048f055e::server:3b130ceebfbb7ed851cd990fe55e6f3a" >> /etc/rancher/rke2/config.yaml

# Start and Enable the Kubernetes Service
systemctl enable --now rke2-agent.service
```
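
As a quick sanity check (an assumed step, not in the original document), you can confirm that the new node registered by running the following on the initial controlplane node:

``` sh
# Run on the initial ControlPlane node
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
kubectl get nodes    # the newly-joined node should appear and eventually report "Ready"
```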

## DNS Server Record
You will need to set up some kind of DNS record to point the FQDN of the cluster (e.g. `rke2-cluster.bunny-lab.io`) to the IP address of the Initial ControlPlane. This can be achieved in a number of ways, such as editing the Windows `HOSTS` file, Linux's `/etc/hosts` file, a Windows DNS Server "A" record, or an NGINX/Traefik reverse proxy.

Once you have added the DNS record, you should be able to access the login page for the Rancher RKE2 Kubernetes cluster. Use the `bootstrapPassword` mentioned previously to log in, then change it immediately from the user management area of Rancher.

| TYPE OF ACCESS | FQDN                                | IP ADDRESS   |
| -------------- | ----------------------------------- | ------------ |
| HOST FILE      | rke2-cluster.bunny-lab.io           | 192.168.3.69 |
| REVERSE PROXY  | http://rke2-cluster.bunny-lab.io:80 | 192.168.5.29 |
| DNS RECORD     | A Record: rke2-cluster.bunny-lab.io | 192.168.3.69 |
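
For example, a quick way to test before touching real DNS (a sketch using the values from the table above) is a local hosts-file entry:

``` sh
# Linux / macOS: point the cluster FQDN at the initial ControlPlane
echo "192.168.3.69 rke2-cluster.bunny-lab.io" | sudo tee -a /etc/hosts

# Windows (elevated PowerShell): add the same entry to the HOSTS file
# Add-Content -Path "$env:SystemRoot\System32\drivers\etc\hosts" -Value "192.168.3.69 rke2-cluster.bunny-lab.io"
```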

@@ -0,0 +1,28 @@
AWX:
  enabled: true
  name: awx
  postgres:
    dbName: Unset
    enabled: false
    host: Unset
    password: Unset
    port: 5678
    sslmode: prefer
    type: unmanaged
    username: admin
  spec:
    admin_user: admin
    admin_email: cyberstrawberry101@gmail.com
    auto_upgrade: true
    hostname: awx.cyberstrawberry.net
    ingress_path: /
    ingress_path_type: Prefix
    ingress_type: ingress
    ipv6_disabled: true
    projects_persistence: true
    projects_storage_class: longhorn
    projects_storage_size: 32Gi
    task_privileged: true
global:
  cattle:
    systemProjectId: p-78f96
@@ -0,0 +1,2 @@
awx-operator
https://ansible.github.io/awx-operator/
@@ -0,0 +1,25 @@
krb5.conf

--------------------------------------------

[libdefaults]
    default_realm = MOONGATE.LOCAL
    dns_lookup_realm = true
    dns_lookup_kdc = true
    ticket_lifetime = 24h
    renew_lifetime = 7d
    forwardable = true
    default_ccache_name = KEYRING:persistent:%{uid}

[realms]
    MOONGATE.LOCAL = {
        kdc = NEXUS-DC-01.MOONGATE.LOCAL
        admin_server = NEXUS-DC-01.MOONGATE.LOCAL
    }

[domain_realm]
    .moongate.local = MOONGATE.LOCAL
    moongate.local = MOONGATE.LOCAL

--------------------------------------------
@@ -0,0 +1 @@
v1.3.0
@@ -0,0 +1,158 @@
affinity: {}
checkDeprecation: true
clusterDomain: cluster.local
containerSecurityContext: {}
dnsConfig: {}
extraContainerVolumeMounts: []
extraInitVolumeMounts: []
extraVolumeMounts: []
extraVolumes: []
gitea:
  additionalConfigFromEnvs:
    - name: ENV_TO_INI__SERVER__ROOT_URL
      value: https://git.cyberstrawberry.net
  additionalConfigSources: []
  admin:
    email: cyberstrawberry101@gmail.com
    existingSecret: null
    password: SUPER-SECRET-ADMIN-PASSWORD-THAT-NOONE-WILL-GUESS
    username: nicole.rappe
  config:
    APP_NAME: "CyberStrawberry"
  ldap: []
  livenessProbe:
    enabled: true
    failureThreshold: 10
    initialDelaySeconds: 200
    periodSeconds: 10
    successThreshold: 1
    tcpSocket:
      port: http
    timeoutSeconds: 1
  metrics:
    enabled: false
    serviceMonitor:
      enabled: false
  oauth: []
  podAnnotations: {}
  readinessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 5
    periodSeconds: 10
    successThreshold: 1
    tcpSocket:
      port: http
    timeoutSeconds: 1
  ssh:
    logLevel: INFO
  startupProbe:
    enabled: false
    failureThreshold: 10
    initialDelaySeconds: 60
    periodSeconds: 10
    successThreshold: 1
    tcpSocket:
      port: http
    timeoutSeconds: 1
global:
  hostAliases: []
  imagePullSecrets: []
  imageRegistry: ''
  storageClass: longhorn
image:
  pullPolicy: Always
  registry: ''
  repository: gitea/gitea
  rootless: false
  tag: ''
imagePullSecrets: []
ingress:
  annotations: {}
  className: null
  enabled: false
  hosts:
    - host: git.cyberstrawberry.net
      paths:
        - path: /
          pathType: Prefix
  tls: []
initPreScript: ''
memcached:
  enabled: true
  service:
    ports:
      memcached: 11211
nodeSelector: {}
persistence:
  accessModes:
    - ReadWriteOnce
  annotations: {}
  enabled: true
  existingClaim: null
  labels: {}
  size: 32Gi
  storageClass: null
  subPath: null
podSecurityContext:
  fsGroup: 1000
postgresql:
  enabled: true
  global:
    postgresql:
      auth:
        database: gitea
        password: gitea
        username: gitea
      service:
        ports:
          postgresql: 5432
  primary:
    persistence:
      size: 32Gi
replicaCount: 1
resources: {}
schedulerName: ''
securityContext: {}
service:
  http:
    annotations: {}
    clusterIP: None
    externalIPs: null
    externalTrafficPolicy: null
    ipFamilies: null
    ipFamilyPolicy: null
    loadBalancerIP: null
    loadBalancerSourceRanges: []
    nodePort: null
    port: 3000
    type: ClusterIP
  ssh:
    annotations: {}
    clusterIP: None
    externalIPs: null
    externalTrafficPolicy: null
    hostPort: null
    ipFamilies: null
    ipFamilyPolicy: null
    loadBalancerIP: null
    loadBalancerSourceRanges: []
    nodePort: null
    port: 22
    type: ClusterIP
signing:
  enabled: false
  existingSecret: ''
  gpgHome: /data/git/.gnupg
  privateKey: ''
statefulset:
  annotations: {}
  env: []
  labels: {}
  terminationGracePeriodSeconds: 60
test:
  enabled: true
  image:
    name: busybox
    tag: latest
tolerations: []
@@ -0,0 +1,194 @@
affinity: {}
cronjob:
  enabled: false
  lifecycle: {}
  resources: {}
  securityContext: {}
deploymentAnnotations: {}
deploymentLabels: {}
externalDatabase:
  database: nextcloud
  enabled: true
  existingSecret:
    enabled: false
  host: cluster-nextcloud-postgresql
  password: SecurePasswordGoesHere
  type: postgresql
  user: nextcloud
fullnameOverride: ''
hpa:
  cputhreshold: 60
  enabled: false
  maxPods: 10
  minPods: 1
image:
  pullPolicy: IfNotPresent
  repository: nextcloud
ingress:
  annotations: {}
  enabled: false
  labels: {}
  path: /
  pathType: Prefix
internalDatabase:
  enabled: false
  name: nextcloud
lifecycle: {}
livenessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 5
mariadb:
  architecture: standalone
  auth:
    database: nextcloud
    password: changeme
    username: nextcloud
  enabled: false
  primary:
    persistence:
      accessMode: ReadWriteOnce
      enabled: false
      size: 8Gi
metrics:
  enabled: false
  https: false
  image:
    pullPolicy: IfNotPresent
    repository: xperimental/nextcloud-exporter
    tag: 0.6.0
  replicaCount: 1
  service:
    annotations:
      prometheus.io/port: '9205'
      prometheus.io/scrape: 'true'
    labels: {}
    type: ClusterIP
  serviceMonitor:
    enabled: false
    interval: 30s
    jobLabel: ''
    labels: {}
    namespace: ''
    scrapeTimeout: ''
  timeout: 5s
  tlsSkipVerify: false
  token: ''
nameOverride: ''
nextcloud:
  configs: {}
  datadir: /var/www/html/data
  defaultConfigs:
    .htaccess: true
    apache-pretty-urls.config.php: true
    apcu.config.php: true
    apps.config.php: true
    autoconfig.php: true
    redis.config.php: true
    smtp.config.php: true
  existingSecret:
    enabled: false
  extraEnv: null
  extraInitContainers: []
  extraSidecarContainers: []
  extraVolumeMounts: null
  extraVolumes: null
  host: storage.cyberstrawberry.net
  mail:
    domain: domain.com
    enabled: false
    fromAddress: user
    smtp:
      authtype: LOGIN
      host: domain.com
      name: user
      password: pass
      port: 465
      secure: ssl
  password: SUPER-SECRET-PASSWORD-FOR-ADMIN
  persistence:
    subPath: null
  phpConfigs: {}
  podSecurityContext: {}
  securityContext: {}
  strategy:
    type: Recreate
    update: 0
  username: Nicole
nginx:
  config:
    default: true
  enabled: false
  image:
    pullPolicy: IfNotPresent
    repository: nginx
    tag: alpine
  resources: {}
  securityContext: {}
nodeSelector: {}
persistence:
  accessMode: ReadWriteOnce
  annotations: {}
  enabled: true
  nextcloudData:
    accessMode: ReadWriteOnce
    annotations: {}
    enabled: true
    size: 800Gi
    subPath: null
  size: 16Gi
phpClientHttpsFix:
  enabled: true
  protocol: https
podAnnotations: {}
postgresql:
  enabled: true
  global:
    postgresql:
      auth:
        database: nextcloud
        password: SUPER-SECRET-PASSWORD-FOR-DB
        username: nextcloud
  primary:
    persistence:
      enabled: true
rbac:
  enabled: false
  serviceaccount:
    annotations: {}
    create: true
    name: nextcloud-serviceaccount
readinessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 5
redis:
  auth:
    enabled: true
    password: changeme
  enabled: false
replicaCount: 1
resources: {}
securityContext: {}
service:
  loadBalancerIP: nil
  nodePort: nil
  port: 8080
  type: ClusterIP
startupProbe:
  enabled: false
  failureThreshold: 30
  initialDelaySeconds: 30
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 5
tolerations: []
global:
  cattle:
    systemProjectId: p-78f96
@@ -0,0 +1,268 @@
# Migrating `docker-compose.yml` to Rancher RKE2 Cluster
You may be comfortable operating with Portainer or `docker-compose`, but there comes a point where you might want to migrate those existing workloads to a Kubernetes cluster as easily as possible. Luckily, there is a way to do this using a tool called "**Kompose**". Follow the instructions below to convert and deploy your existing `docker-compose.yml` into a Kubernetes cluster such as Rancher RKE2.

!!! info "RKE2 Cluster Deployment"
    This document assumes that you have an existing Rancher RKE2 cluster deployed. If not, you can deploy one by following the [Deploy RKE2 Cluster](./deployment/rancher-rke2.md) documentation.

    We also assume that the cluster name within Rancher RKE2 is `local`, which is the default cluster name when setting up a Kubernetes cluster in the way described in the above documentation.

## Installing Kompose
The first step involves downloading Kompose from https://kompose.io/installation. Once you have it downloaded and installed onto your environment of choice, save a copy of your `docker-compose.yml` file somewhere on-disk, then open up a terminal and run the following command:

```sh
kompose --file docker-compose.yml convert --stdout > ntfy-k8s.yaml
```

This will attempt to convert the `docker-compose.yml` file into a Kubernetes manifest YAML file. The before-and-after example can be seen below:
=== "(Original) docker-compose.yml"
|
||||
|
||||
``` yaml
|
||||
version: "2.1"
|
||||
services:
|
||||
ntfy:
|
||||
image: binwiederhier/ntfy
|
||||
container_name: ntfy
|
||||
command:
|
||||
- serve
|
||||
environment:
|
||||
- NTFY_ATTACHMENT_CACHE_DIR=/var/lib/ntfy/attachments
|
||||
- NTFY_BASE_URL=https://ntfy.bunny-lab.io
|
||||
- TZ=America/Denver # optional: Change to your desired timezone
|
||||
#user: UID:GID # optional: Set custom user/group or uid/gid
|
||||
volumes:
|
||||
- /srv/containers/ntfy/cache:/var/cache/ntfy
|
||||
- /srv/containers/ntfy/etc:/etc/ntfy
|
||||
ports:
|
||||
- 80:80
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.45
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
=== "(Converted) ntfy-k8s.yaml"
|
||||
|
||||
``` yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
annotations:
|
||||
kompose.cmd: C:\ProgramData\chocolatey\lib\kubernetes-kompose\tools\kompose.exe --file ntfy-k8s.yaml convert --stdout
|
||||
kompose.version: 1.37.0 (fb0539e64)
|
||||
labels:
|
||||
io.kompose.service: ntfy
|
||||
name: ntfy
|
||||
spec:
|
||||
ports:
|
||||
- name: "80"
|
||||
port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
io.kompose.service: ntfy
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
kompose.cmd: C:\ProgramData\chocolatey\lib\kubernetes-kompose\tools\kompose.exe --file ntfy-k8s.yaml convert --stdout
|
||||
kompose.version: 1.37.0 (fb0539e64)
|
||||
labels:
|
||||
io.kompose.service: ntfy
|
||||
name: ntfy
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
io.kompose.service: ntfy
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
kompose.cmd: C:\ProgramData\chocolatey\lib\kubernetes-kompose\tools\kompose.exe --file ntfy-k8s.yaml convert --stdout
|
||||
kompose.version: 1.37.0 (fb0539e64)
|
||||
labels:
|
||||
io.kompose.service: ntfy
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- serve
|
||||
env:
|
||||
- name: NTFY_ATTACHMENT_CACHE_DIR
|
||||
value: /var/lib/ntfy/attachments
|
||||
- name: NTFY_BASE_URL
|
||||
value: https://ntfy.bunny-lab.io
|
||||
- name: TZ
|
||||
value: America/Denver
|
||||
image: binwiederhier/ntfy
|
||||
name: ntfy
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: /var/cache/ntfy
|
||||
name: ntfy-claim0
|
||||
- mountPath: /etc/ntfy
|
||||
name: ntfy-claim1
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: ntfy-claim0
|
||||
persistentVolumeClaim:
|
||||
claimName: ntfy-claim0
|
||||
- name: ntfy-claim1
|
||||
persistentVolumeClaim:
|
||||
claimName: ntfy-claim1
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
io.kompose.service: ntfy-claim0
|
||||
name: ntfy-claim0
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Mi
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
io.kompose.service: ntfy-claim1
|
||||
name: ntfy-claim1
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Mi
|
||||
```
|
||||
|
||||
## Deploy Workload into Rancher RKE2 Cluster
At this point, you need to import the YAML file you created into the Kubernetes cluster. This occurs in four sequential stages:

- Setting up a "**Project**" to logically organize your containers
- Setting up a "**Namespace**" for your container to isolate it from other containers in your Kubernetes cluster
- Importing the YAML file into the aforementioned namespace
- Configuring Ingress to allow external access to the container / service stack

### Create a Project
The purpose of the project is to logically organize your services together. This can be something like `Home Automation`, `Log Analysis Systems`, `Network Tools`, etc. You can do this by logging into your Rancher RKE2 cluster (e.g. https://rke2-cluster.bunny-lab.io). The Project name is unique to Rancher, is purely used for organizational purposes, and does not affect the namespaces / containers in any way.

- Navigate to: **Clusters > `local` > Cluster > Projects/Namespaces > "Create Project"**
- **Name**: <Friendly Name> (e.g. `Home Automation`)
- **Description**: <Useful Description for the Group of Services> (e.g. `Various services that automate things within Bunny Lab`)
- Click the "**Create**" button

### Create a Namespace within the Project
At this point, we need to create a namespace. This isolates the networking, credentials, secrets, and storage between services/stacks. It ensures that if someone exploits one of your services, they will not be able to move laterally into another service within the same Kubernetes cluster. A CLI equivalent is sketched after this list.

- Navigate to: **Clusters > `local` > Cluster > Projects/Namespaces > <ProjectName> > "Create Namespace"**
- The namespace should be named based on its operational context, such as `prod-ntfy` or `dev-ntfy`.
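
If you prefer the CLI, the namespace itself can also be created with kubectl (a sketch on my part; assigning it to the Rancher Project would still happen in the UI):

``` sh
# Run anywhere with a valid kubeconfig for the cluster (e.g. on a controlplane node)
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
kubectl create namespace prod-ntfy
```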

### Import Converted YAML Manifest into Namespace
At this point, we can proceed to import the YAML file we generated at the beginning of this document. A CLI alternative is sketched after the warning below.

- Navigate to: **Clusters > `local` > Cluster > Projects/Namespaces**
- At the top-right of the screen is an upload / up-arrow button with tooltip text stating "Import YAML" > Click on this button
- Click the "**Read from File**" button
- Navigate to your `ntfy-k8s.yaml` file (the name will differ for your actual converted file) > then click the "**Open**" button
- On the top-right of the dialog box is a "**Default Namespace**" dropdown menu; select the `prod-ntfy` namespace we created earlier
- Click the blue "**Import**" button at the bottom of the dialog box

!!! warning "Be Patient"
    This part of the process can take a while depending on the container stack and the complexity of the service. It has to download container images and deploy them into newly spun-up pods within Kubernetes. Be patient, click on the `prod-ntfy` namespace, and look at the "**Workloads**" tab; once the "ntfy" service exists and is Active, you can move on to the next step.
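
For reference, the same import can be done from the CLI with kubectl (a sketch; the file name matches the converted manifest from earlier):

``` sh
# Apply the converted manifest into the namespace created above
kubectl apply -f ntfy-k8s.yaml --namespace prod-ntfy
```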

### Configuring Ingress
This final step within Kubernetes itself involves reconfiguring the service to listen via a "NodePort" instead of "ClusterIP". Don't worry, you do not have to mangle the ports that the container uses; this change is entirely within Kubernetes itself and does not touch the original `docker-compose.yml` ports of the container(s) you imported. A CLI equivalent is sketched after this list.

- Navigate to: **Clusters > `local` > Service Discovery > Services > ntfy**
- On the top-right, click the blue "**Show Configuration**" button
- On the bottom-right, click the blue "**Edit Config**" button
- On the bottom-right, click the "**Edit as YAML**" button
- Within the YAML editor, you will see a section named `spec:`; within that section is a subsection named `type:`. You will see a value of `type: ClusterIP` > change that to `type: NodePort`
- On the bottom-right, click the blue "**Save**" button and wait for the process to finish
- On the new page that appears, click on the `ntfy` service again
- Click on the "**Ports**" tab
- You will see a column of the table labeled "Node Port" with a number in the 30,000s, such as `30996`. This will be important later.
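
The same change can be made non-interactively with kubectl if you prefer (a sketch, assuming the service is named `ntfy` in the `prod-ntfy` namespace):

``` sh
# Switch the service from ClusterIP to NodePort, then read back the assigned NodePort
kubectl --namespace prod-ntfy patch service ntfy --patch '{"spec": {"type": "NodePort"}}'
kubectl --namespace prod-ntfy get service ntfy
```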

!!! success "Verifying Access Before Configuring Reverse Proxy"
    At this point, you will want to verify that you can access the service via the cluster node IP addresses, as in the examples below. All of the cluster nodes should route the traffic to the container's service, and they will be used for load-balancing later in the reverse proxy configuration file.

    - http://192.168.3.69:30996
    - http://192.168.3.70:30996
    - http://192.168.3.71:30996
    - http://192.168.3.72:30996

## Configuring Reverse Proxy
If you were able to verify access to the service by talking to it directly via one of the cluster node IP addresses and its assigned NodePort, you can proceed to creating a reverse proxy configuration file for the service. This will be very similar to the original `docker-compose.yml` version of the reverse proxy configuration file, but with additional IP addresses to load-balance across the Kubernetes cluster nodes.

!!! info "Section Considerations"
    This section of the document does not (*currently*) cover the process of setting up health checks to ensure that the load-balanced server destinations in the reverse proxy are online before redirecting traffic to them. This is on my to-do list of things to implement to further harden the deployment process.

    This section also does not cover the process of setting up a reverse proxy itself. If you want to follow along with this document, you can deploy a Traefik reverse proxy via the [Traefik](../../../services/edge/traefik.md) deployment documentation.

With the above considerations in mind, we just need to make some small changes to the existing Traefik configuration file to ensure that it load-balances across every node of the cluster so that high-availability functions as expected.
=== "(Original) ntfy.bunny-lab.io.yml"
|
||||
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
ntfy:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: ntfy
|
||||
rule: Host(`ntfy.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
ntfy:
|
||||
loadBalancer:
|
||||
passHostHeader: true
|
||||
servers:
|
||||
- url: http://192.168.5.45:80
|
||||
```
|
||||
|
||||
=== "(Updated) ntfy.bunny-lab.io.yml"
|
||||
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
ntfy:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: ntfy
|
||||
rule: Host(`ntfy.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
ntfy:
|
||||
loadBalancer:
|
||||
passHostHeader: true
|
||||
servers:
|
||||
- url: http://192.168.3.69:30996
|
||||
- url: http://192.168.3.70:30996
|
||||
- url: http://192.168.3.71:30996
|
||||
- url: http://192.168.3.72:30996
|
||||
```
|
||||
|
||||
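
The health checks mentioned in the considerations above are not implemented here, but for reference, Traefik's file provider does support a per-service health check. A minimal sketch (assuming ntfy answers plain HTTP on `/` behind each NodePort) would extend the `loadBalancer` block like this:

``` yaml
http:
  services:
    ntfy:
      loadBalancer:
        passHostHeader: true
        # Hypothetical health check: poll each backend on "/" every 10 seconds
        healthCheck:
          path: /
          interval: "10s"
          timeout: "3s"
        servers:
          - url: http://192.168.3.69:30996
```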
!!! success "Verify Access via Reverse Proxy"
|
||||
If everything worked, you should be able to access the service at https://ntfy.bunny-lab.io, and if one of the cluster nodes goes offline, Rancher will automatically migrate the load to another cluster node which will take over the web request.
|
||||
|
||||
Reference in New Issue
Block a user