Re-Structured Documentation
This commit is contained in:
70
Servers/Containerization/Docker/Compose/ActivePieces.md
Normal file
70
Servers/Containerization/Docker/Compose/ActivePieces.md
Normal file
@ -0,0 +1,70 @@
|
||||
**Purpose**: Self-hosted open-source no-code business automation tool.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3.0'
|
||||
services:
|
||||
activepieces:
|
||||
image: activepieces/activepieces:0.3.11
|
||||
container_name: activepieces
|
||||
restart: unless-stopped
|
||||
privileged: true
|
||||
ports:
|
||||
- '8080:80'
|
||||
environment:
|
||||
- 'POSTGRES_DB=${AP_POSTGRES_DATABASE}'
|
||||
- 'POSTGRES_PASSWORD=${AP_POSTGRES_PASSWORD}'
|
||||
- 'POSTGRES_USER=${AP_POSTGRES_USERNAME}'
|
||||
env_file: stack.env
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.62
|
||||
postgres:
|
||||
image: 'postgres:14.4'
|
||||
container_name: postgres
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- 'POSTGRES_DB=${AP_POSTGRES_DATABASE}'
|
||||
- 'POSTGRES_PASSWORD=${AP_POSTGRES_PASSWORD}'
|
||||
- 'POSTGRES_USER=${AP_POSTGRES_USERNAME}'
|
||||
volumes:
|
||||
- /srv/containers/activepieces/postgresql:/var/lib/postgresql/data
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.61
|
||||
redis:
|
||||
image: 'redis:7.0.7'
|
||||
container_name: redis
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- /srv/containers/activepieces/redis:/data
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.60
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
AP_ENGINE_EXECUTABLE_PATH=dist/packages/engine/main.js
|
||||
AP_ENCRYPTION_KEY=e81f8754faa04acaa7b13caa5d2c6a5a
|
||||
AP_JWT_SECRET=REDACTED #BE SURE TO SET THIS WITH A VALID JWT SECRET > REFER TO OFFICIAL DOCUMENTATION
|
||||
AP_ENVIRONMENT=prod
|
||||
AP_FRONTEND_URL=https://ap.cyberstrawberry.net
|
||||
AP_NODE_EXECUTABLE_PATH=/usr/local/bin/node
|
||||
AP_POSTGRES_DATABASE=activepieces
|
||||
AP_POSTGRES_HOST=192.168.5.61
|
||||
AP_POSTGRES_PORT=5432
|
||||
AP_POSTGRES_USERNAME=postgres
|
||||
AP_POSTGRES_PASSWORD=REDACTED #USE A SECURE SHORT PASSWORD > ENSURE ITS NOT TOO LONG FOR POSTGRESQL
|
||||
AP_REDIS_HOST=redis
|
||||
AP_REDIS_PORT=6379
|
||||
AP_SANDBOX_RUN_TIME_SECONDS=600
|
||||
AP_TELEMETRY_ENABLED=true
|
||||
```
|
30
Servers/Containerization/Docker/Compose/Adguard-Home.md
Normal file
30
Servers/Containerization/Docker/Compose/Adguard-Home.md
Normal file
@ -0,0 +1,30 @@
|
||||
**Purpose**: AdGuard Home is a network-wide software for blocking ads & tracking. After you set it up, it will cover ALL your home devices, and you don’t need any client-side software for that. With the rise of Internet-Of-Things and connected devices, it becomes more and more important to be able to control your whole network.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
app:
|
||||
image: adguard/adguardhome
|
||||
ports:
|
||||
- 3000:3000
|
||||
- 53:53
|
||||
- 80:80
|
||||
volumes:
|
||||
- /srv/containers/adguard_home/workingdir:/opt/adguardhome/work
|
||||
- /srv/containers/adguard_home/config:/opt/adguardhome/conf
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.189
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
150
Servers/Containerization/Docker/Compose/Apache Guacamole.md
Normal file
150
Servers/Containerization/Docker/Compose/Apache Guacamole.md
Normal file
@ -0,0 +1,150 @@
|
||||
**Purpose**: HTML5-based Remote Access Broker for SSH, RDP, and VNC. Useful for remote access into an environment.
|
||||
|
||||
### Docker Compose Stack
|
||||
=== "docker-compose.yml"
|
||||
|
||||
``` sh
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
app:
|
||||
image: jasonbean/guacamole
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- /srv/containers/guacamole:/config
|
||||
environment:
|
||||
- OPT_MYSQL=Y
|
||||
- OPT_MYSQL_EXTENSION=N
|
||||
- OPT_SQLSERVER=N
|
||||
- OPT_LDAP=N
|
||||
- OPT_DUO=N
|
||||
- OPT_CAS=N
|
||||
- OPT_TOTP=Y # (1)
|
||||
- OPT_QUICKCONNECT=N
|
||||
- OPT_HEADER=N
|
||||
- OPT_SAML=N
|
||||
- PUID=99
|
||||
- PGID=100
|
||||
- TZ=America/Denver # (2)
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.43
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
1. Enable this if you want multi-factor authentication enabled. Must be set BEFORE the container is initially deployed. Cannot be added retroactively.
|
||||
2. Set to your own timezone.
|
||||
|
||||
=== "docker-compose.yml (OpenID / Keycloak Integration)"
|
||||
|
||||
``` sh
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
app:
|
||||
image: jasonbean/guacamole
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- /srv/containers/apache-guacamole:/config
|
||||
environment:
|
||||
- OPT_MYSQL=Y
|
||||
- OPT_MYSQL_EXTENSION=N
|
||||
- OPT_SQLSERVER=N
|
||||
- OPT_LDAP=N
|
||||
- OPT_DUO=N
|
||||
- OPT_CAS=N
|
||||
- OPT_TOTP=N
|
||||
- OPT_QUICKCONNECT=N
|
||||
- OPT_HEADER=N
|
||||
- OPT_SAML=N
|
||||
- OPT_OIDC=Y # Enable OpenID Connect
|
||||
- OIDC_ISSUER=${OPENID_REALM_URL} # Your Keycloak realm URL
|
||||
- OIDC_CLIENT_ID=${OPENID_CLIENT_ID} # Client ID for Guacamole in Keycloak
|
||||
- OIDC_CLIENT_SECRET=${OPENID_CLIENT_SECRET} # Client Secret for Guacamole in Keycloak
|
||||
- OIDC_REDIRECT_URI=${OPENID_REDIRECT_URI} # Redirect URI for Guacamole
|
||||
- PUID=99
|
||||
- PGID=100
|
||||
- TZ=America/Denver
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.43
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
1. You cannot enable TOTP / Multi-factor authentication if you have OpenID configured. This is just a known issue.
|
||||
2. Set to your own timezone.
|
||||
|
||||
### Environment Variables
|
||||
=== ".env"
|
||||
|
||||
``` sh
|
||||
N/A
|
||||
```
|
||||
|
||||
=== ".env (OpenID / Keycloak Integration)"
|
||||
|
||||
``` sh
|
||||
OPENID_REALM_URL=https://auth.bunny-lab.io/realms/master
|
||||
OPENID_CLIENT_ID=apache-guacamole
|
||||
OPENID_CLIENT_SECRET=<YOUR-CLIENT-ID-SECRET>
|
||||
OPENID_REDIRECT_URI=http://remote.bunny-lab.io
|
||||
```
|
||||
|
||||
## Reverse Proxy Configuration
|
||||
|
||||
=== "Traefik"
|
||||
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
apache-guacamole:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: apache-guacamole
|
||||
rule: Host(`remote.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
apache-guacamole:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.43:8080
|
||||
passHostHeader: true
|
||||
```
|
||||
|
||||
=== "NGINX"
|
||||
|
||||
``` yaml
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name remote.bunny-lab.io;
|
||||
client_max_body_size 0;
|
||||
ssl on;
|
||||
location / {
|
||||
proxy_pass http://192.168.5.43:8080;
|
||||
proxy_buffering off;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $http_connection;
|
||||
access_log off;
|
||||
}
|
||||
}
|
||||
```
|
45
Servers/Containerization/Docker/Compose/Authelia.md
Normal file
45
Servers/Containerization/Docker/Compose/Authelia.md
Normal file
@ -0,0 +1,45 @@
|
||||
**Purpose**: Authelia is an open-source authentication and authorization server and portal fulfilling the identity and access management (IAM) role of information security in providing multi-factor authentication and single sign-on (SSO) for your applications via a web portal. It acts as a companion for common reverse proxies.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
services:
|
||||
authelia:
|
||||
image: authelia/authelia
|
||||
container_name: authelia
|
||||
volumes:
|
||||
- /mnt/authelia/config:/config
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.159
|
||||
expose:
|
||||
- 9091
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
disable: true
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
|
||||
redis:
|
||||
image: redis:alpine
|
||||
container_name: redis
|
||||
volumes:
|
||||
- /mnt/authelia/redis:/data
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.158
|
||||
expose:
|
||||
- 6379
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
168
Servers/Containerization/Docker/Compose/Authentik.md
Normal file
168
Servers/Containerization/Docker/Compose/Authentik.md
Normal file
@ -0,0 +1,168 @@
|
||||
!!! bug
|
||||
The docker-compose version of the deployment appears bugged and has known issues; deployment via Kubernetes is required for stability and support.
|
||||
|
||||
**Purpose**: Authentik is an open-source Identity Provider, focused on flexibility and versatility. With authentik, site administrators, application developers, and security engineers have a dependable and secure solution for authentication in almost any type of environment. There are robust recovery actions available for the users and applications, including user profile and password management. You can quickly edit, deactivate, or even impersonate a user profile, and set a new password for new users or reset an existing password.
|
||||
|
||||
This document is based on the [Official Docker-Compose Documentation](https://goauthentik.io/docs/installation/docker-compose). It is meant for testing / small-scale production deployments.
|
||||
|
||||
## Docker Configuration
|
||||
```yaml title="docker-compose.yml"
|
||||
---
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
postgresql:
|
||||
image: docker.io/library/postgres:12-alpine
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
|
||||
start_period: 20s
|
||||
interval: 30s
|
||||
retries: 5
|
||||
timeout: 5s
|
||||
volumes:
|
||||
- /srv/containers/authentik/db:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
|
||||
POSTGRES_USER: ${PG_USER:-authentik}
|
||||
POSTGRES_DB: ${PG_DB:-authentik}
|
||||
env_file:
|
||||
- stack.env
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.2
|
||||
|
||||
redis:
|
||||
image: docker.io/library/redis:alpine
|
||||
command: --save 60 1 --loglevel warning
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
|
||||
start_period: 20s
|
||||
interval: 30s
|
||||
retries: 5
|
||||
timeout: 3s
|
||||
volumes:
|
||||
- /srv/containers/authentik/redis:/data
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.3
|
||||
|
||||
server:
|
||||
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.7}
|
||||
restart: unless-stopped
|
||||
command: server
|
||||
environment:
|
||||
AUTHENTIK_REDIS__HOST: redis
|
||||
AUTHENTIK_POSTGRESQL__HOST: postgresql
|
||||
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
|
||||
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
|
||||
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
|
||||
volumes:
|
||||
- /srv/containers/authentik/media:/media
|
||||
- /srv/containers/authentik/custom-templates:/templates
|
||||
env_file:
|
||||
- stack.env
|
||||
ports:
|
||||
- "${COMPOSE_PORT_HTTP:-9000}:9000"
|
||||
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
|
||||
depends_on:
|
||||
- postgresql
|
||||
- redis
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.4
|
||||
|
||||
worker:
|
||||
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.7}
|
||||
restart: unless-stopped
|
||||
command: worker
|
||||
environment:
|
||||
AUTHENTIK_REDIS__HOST: redis
|
||||
AUTHENTIK_POSTGRESQL__HOST: postgresql
|
||||
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
|
||||
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
|
||||
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
|
||||
# `user: root` and the docker socket volume are optional.
|
||||
# See more for the docker socket integration here:
|
||||
# https://goauthentik.io/docs/outposts/integrations/docker
|
||||
# Removing `user: root` also prevents the worker from fixing the permissions
|
||||
# on the mounted folders, so when removing this make sure the folders have the correct UID/GID
|
||||
# (1000:1000 by default)
|
||||
user: root
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- /srv/containers/authentik/media:/media
|
||||
- /srv/containers/authentik/certs:/certs
|
||||
- /srv/containers/authentik/custom-templates:/templates
|
||||
env_file:
|
||||
- stack.env
|
||||
depends_on:
|
||||
- postgresql
|
||||
- redis
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.5
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```yaml title=".env"
|
||||
PG_PASS=<See Below>
|
||||
AUTHENTIK_SECRET_KEY=<See Below>
|
||||
AUTHENTIK_BOOTSTRAP_PASSWORD=<SecurePassword>
|
||||
AUTHENTIK_BOOTSTRAP_TOKEN=<SecureOneTimePassword>
|
||||
AUTHENTIK_BOOTSTRAP_EMAIL=nicole.rappe@bunny-lab.io
|
||||
|
||||
## SMTP Host Emails are sent to
|
||||
#AUTHENTIK_EMAIL__HOST=localhost
|
||||
#AUTHENTIK_EMAIL__PORT=25
|
||||
## Optionally authenticate (don't add quotation marks to your password)
|
||||
#AUTHENTIK_EMAIL__USERNAME=
|
||||
#AUTHENTIK_EMAIL__PASSWORD=
|
||||
## Use StartTLS
|
||||
#AUTHENTIK_EMAIL__USE_TLS=false
|
||||
## Use SSL
|
||||
#AUTHENTIK_EMAIL__USE_SSL=false
|
||||
#AUTHENTIK_EMAIL__TIMEOUT=10
|
||||
## Email address authentik will send from, should have a correct @domain
|
||||
#AUTHENTIK_EMAIL__FROM=authentik@localhost
|
||||
```
|
||||
|
||||
!!! note "Generating Passwords"
|
||||
Navigate to the online [PWGen Password Generator](https://pwgen.io/en/) to generate the passwords for `PG_PASS` (40 characters) and `AUTHENTIK_SECRET_KEY` (50 characters).
|
||||
|
||||
Because of a PostgreSQL limitation, only passwords up to 99 characters are supported
|
||||
See https://www.postgresql.org/message-id/09512C4F-8CB9-4021-B455-EF4C4F0D55A0@amazon.com
|
||||
|
||||
!!! warning "Password Symbols"
|
||||
You may encounter the Authentik WebUI throwing `Forbidden` errors, and this is likely caused by you using a password with "problematic" characters for the `PG_PASS` environment variable. Try to avoid using `,` or `;` or `:` in the password you generate.
|
||||
|
||||
## WebUI Initial Setup
|
||||
To start the initial setup, navigate to https://192.168.5.4:9443/if/flow/initial-setup/
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
PLACEHOLDER:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: myresolver
|
||||
service: PLACEHOLDER
|
||||
rule: Host(`PLACEHOLDER.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
PLACEHOLDER:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://PLACEHOLDER:80
|
||||
passHostHeader: true
|
||||
```
|
59
Servers/Containerization/Docker/Compose/ChangeDetection.md
Normal file
59
Servers/Containerization/Docker/Compose/ChangeDetection.md
Normal file
@ -0,0 +1,59 @@
|
||||
**Purpose**: Detect website content changes and perform meaningful actions - trigger notifications via Discord, Email, Slack, Telegram, API calls and many more.
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.8"
|
||||
services:
|
||||
app:
|
||||
image: dgtlmoon/changedetection.io
|
||||
container_name: changedetection.io
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
volumes:
|
||||
- /srv/containers/changedetection/datastore:/datastore
|
||||
ports:
|
||||
- 5000:5000
|
||||
restart: always
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.changedetection.rule=Host(`changedetection.bunny-lab.io`)"
|
||||
- "traefik.http.routers.changedetection.entrypoints=websecure"
|
||||
- "traefik.http.routers.changedetection.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.changedetection.loadbalancer.server.port=5000"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.49
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
changedetection:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: changedetection
|
||||
rule: Host(`changedetection.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
changedetection:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.49:5000
|
||||
passHostHeader: true
|
||||
```
|
28
Servers/Containerization/Docker/Compose/CyberChef.md
Normal file
28
Servers/Containerization/Docker/Compose/CyberChef.md
Normal file
@ -0,0 +1,28 @@
|
||||
**Purpose**: The Cyber Swiss Army Knife - a web app for encryption, encoding, compression and data analysis.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.8"
|
||||
services:
|
||||
app:
|
||||
image: mpepping/cyberchef:latest
|
||||
container_name: cyberchef
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
ports:
|
||||
- 8000:8000
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.55
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
59
Servers/Containerization/Docker/Compose/Dashy.md
Normal file
59
Servers/Containerization/Docker/Compose/Dashy.md
Normal file
@ -0,0 +1,59 @@
|
||||
**Purpose**: A self-hostable personal dashboard built for you. Includes status-checking, widgets, themes, icon packs, a UI editor and tons more!
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.8"
|
||||
services:
|
||||
dashy:
|
||||
container_name: Dashy
|
||||
|
||||
# Pull latest image from DockerHub
|
||||
image: lissy93/dashy
|
||||
|
||||
# Set port that web service will be served on. Keep container port as 80
|
||||
ports:
|
||||
- 4000:80
|
||||
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.dashy.rule=Host(`dashboard.cyberstrawberry.net`)"
|
||||
- "traefik.http.routers.dashy.entrypoints=websecure"
|
||||
- "traefik.http.routers.dashy.tls.certresolver=myresolver"
|
||||
- "traefik.http.services.dashy.loadbalancer.server.port=80"
|
||||
|
||||
# Set any environmental variables
|
||||
environment:
|
||||
- NODE_ENV=production
|
||||
- UID=1000
|
||||
- GID=1000
|
||||
|
||||
# Pass in your config file below, by specifying the path on your host machine
|
||||
volumes:
|
||||
- /srv/Containers/Dashy/conf.yml:/app/public/conf.yml
|
||||
- /srv/Containers/Dashy/item-icons:/app/public/item-icons
|
||||
|
||||
# Specify restart policy
|
||||
restart: unless-stopped
|
||||
|
||||
# Configure healthchecks
|
||||
healthcheck:
|
||||
test: ['CMD', 'node', '/app/services/healthcheck']
|
||||
interval: 1m30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Connect container to Docker_Network
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.57
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
31
Servers/Containerization/Docker/Compose/Document Template.md
Normal file
31
Servers/Containerization/Docker/Compose/Document Template.md
Normal file
@ -0,0 +1,31 @@
|
||||
**Purpose**: PLACEHOLDER
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
PLACEHOLDER
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
PLACEHOLDER
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
PLACEHOLDER:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: myresolver
|
||||
service: PLACEHOLDER
|
||||
rule: Host(`PLACEHOLDER.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
PLACEHOLDER:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://PLACEHOLDER:80
|
||||
passHostHeader: true
|
||||
```
|
34
Servers/Containerization/Docker/Compose/Docusaurus.md
Normal file
34
Servers/Containerization/Docker/Compose/Docusaurus.md
Normal file
@ -0,0 +1,34 @@
|
||||
**Purpose**: An optimized site generator in React. Docusaurus helps you to move fast and write content. Build documentation websites, blogs, marketing pages, and more.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
docusaurus:
|
||||
image: awesometic/docusaurus
|
||||
container_name: docusaurus
|
||||
environment:
|
||||
- TARGET_UID=1000
|
||||
- TARGET_GID=1000
|
||||
- AUTO_UPDATE=true
|
||||
- WEBSITE_NAME=docusaurus
|
||||
- TEMPLATE=classic
|
||||
- TZ=America/Denver
|
||||
restart: always
|
||||
volumes:
|
||||
- /srv/containers/docusaurus:/docusaurus
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
ports:
|
||||
- "80:80"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.72
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
103
Servers/Containerization/Docker/Compose/Firefox.md
Normal file
103
Servers/Containerization/Docker/Compose/Firefox.md
Normal file
@ -0,0 +1,103 @@
|
||||
**Purpose**: Sometimes you just want an instance of Firefox running on an Alpine Linux container, that has persistence (Extensions, bookmarks, history, etc) outside of the container (with bind-mapped folders). This is useful for a number of reasons, but insecure by default, so you have to protect it behind something like a [Keycloak Server](https://docs.bunny-lab.io/Docker%20%2526%20Kubernetes/Docker/Docker%20Compose/Keycloak/) so it is not misused.
|
||||
|
||||
## Keycloak Authentication Sequence
|
||||
``` mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Traefik as Traefik Reverse Proxy
|
||||
participant Keycloak
|
||||
participant RockyLinux as Rocky Linux VM
|
||||
participant FirewallD as FirewallD
|
||||
participant Alpine as Alpine Container
|
||||
|
||||
User->>Traefik: Access https://work-environment.bunny-lab.io
|
||||
Traefik->>Keycloak: Redirect to Authenticate against Work Realm
|
||||
User->>Keycloak: Authenticate
|
||||
Keycloak->>User: Authorization Cookie Stored on Internet Browser
|
||||
User->>Traefik: Pass Authorization Cookie to Traefik
|
||||
Traefik->>RockyLinux: Traefik Forwards Traffic to Rocky Linux VM
|
||||
RockyLinux->>FirewallD: Traffic Passes Local Firewall
|
||||
FirewallD->>RockyLinux: Filter traffic (Port 5800)
|
||||
FirewallD->>Alpine: Allow Traffic from Traefik
|
||||
Alpine->>User: WebUI Access to Firefox Work Environment Granted
|
||||
```
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
services:
|
||||
firefox:
|
||||
image: jlesage/firefox # Docker image for Firefox
|
||||
environment:
|
||||
- TZ=America/Denver # Timezone setting
|
||||
- DARK_MODE=1 # Enable dark mode
|
||||
- WEB_AUDIO=1 # Enable web audio
|
||||
- KEEP_APP_RUNNING=1 # Keep the application running
|
||||
ports:
|
||||
- "5800:5800" # Port mapping for VNC WebUI
|
||||
volumes:
|
||||
- /srv/containers/firefox:/config:rw # Persistent storage for configuration
|
||||
restart: always # Always restart the container in case of failure
|
||||
network_mode: host # Use the host network
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
||||
|
||||
## Local Firewall Hardening
|
||||
It is important, due to how this browser just allows anyone to access it, to lock it down to only allow access to the SSH port and port 5800 to specifically-allowed devices, in this case, the Traefik Reverse Proxy. This ensures that it only allows the proxy to communicate with Firefox's container, keeping it securely protected behind Keycloak's middleware in Traefik.
|
||||
|
||||
These rules will drop all traffic by default, allow port 22, and restrict access to port 5800.
|
||||
|
||||
``` sh
|
||||
# Set the default zone to drop
|
||||
sudo firewall-cmd --set-default-zone=drop
|
||||
|
||||
# Create a new zone named custom-trusted
|
||||
sudo firewall-cmd --permanent --new-zone=traefik-proxy
|
||||
|
||||
# Allow traffic to port 5800 only from 192.168.5.29 in the traefik-proxy zone
|
||||
sudo firewall-cmd --permanent --zone=traefik-proxy --add-source=192.168.5.29
|
||||
sudo firewall-cmd --permanent --zone=traefik-proxy --add-port=5800/tcp
|
||||
|
||||
# Allow SSH traffic on port 22 from any IP in the drop zone
|
||||
sudo firewall-cmd --permanent --zone=drop --add-service=ssh
|
||||
|
||||
# Reload FirewallD to apply the changes
|
||||
sudo firewall-cmd --reload
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
work-environment:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: work-environment
|
||||
rule: Host(`work-environment.bunny-lab.io`)
|
||||
middlewares:
|
||||
- work-environment # Referencing the Keycloak Server
|
||||
|
||||
services:
|
||||
work-environment:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.4:5800
|
||||
passHostHeader: true
|
||||
# # Adding forwardingTimeouts to set the send and read timeouts to 1 hour (3600 seconds)
|
||||
# forwardingTimeouts:
|
||||
# dialTimeout: "3600s"
|
||||
# responseHeaderTimeout: "3600s"
|
||||
```
|
||||
|
||||
## Firefox Special Configurations
|
||||
Due to the nature of how this is deployed, you need to make some additional configurations to the Firefox settings after-the-fact. Some of this could be automated with environment variables at deployment time, but for now will be handled manually.
|
||||
|
||||
- **Install Power Tabs Extension**: This extension is useful for keeping things organized.
|
||||
- **Install Merge All Windows Extension**: At times, you may misclick somewhere in the Firefox environment causing Firefox to open a new instance / window losing all of your tabs, and because there is no window manager, there is no way to alt+tab or switch between the instances of Firefox, effectively breaking your current session forcing you to re-open tabs. With this extension, you can merge all of the windows, collapsing them into one window, resolving the issue.
|
||||
- **Configure New Tab behavior**: If a new tab opens in a new window, it will absolutely throw everything into disarray, that is why all hyperlinks will be forced to open in a new tab instead of a new window. You can do this by navigating to `about:config` and setting the variable `browser.link.open_newwindow.restriction` to a value of `0`. [Original Reference Documentation](https://support.mozilla.org/en-US/questions/1066799)
|
49
Servers/Containerization/Docker/Compose/Frigate.md
Normal file
49
Servers/Containerization/Docker/Compose/Frigate.md
Normal file
@ -0,0 +1,49 @@
|
||||
**Purpose**: A complete and local NVR designed for Home Assistant with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.9"
|
||||
services:
|
||||
frigate:
|
||||
container_name: frigate
|
||||
privileged: true # this may not be necessary for all setups
|
||||
restart: unless-stopped
|
||||
image: blakeblackshear/frigate:stable
|
||||
shm_size: "256mb" # update for your cameras based on calculation above
|
||||
# devices:
|
||||
# - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions
|
||||
# - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
|
||||
# - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /mnt/1TB_STORAGE/frigate/config.yml:/config/config.yml:ro
|
||||
- /mnt/1TB_STORAGE/frigate/media:/media/frigate
|
||||
- type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
|
||||
target: /tmp/cache
|
||||
tmpfs:
|
||||
size: 4000000000
|
||||
ports:
|
||||
- "5000:5000"
|
||||
- "1935:1935" # RTMP feeds
|
||||
environment:
|
||||
FRIGATE_RTSP_PASSWORD: ${FRIGATE_RTSP_PASSWORD}
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.201
|
||||
|
||||
mqtt:
|
||||
container_name: mqtt
|
||||
image: eclipse-mosquitto:1.6
|
||||
ports:
|
||||
- "1883:1883"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.202
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
FRIGATE_RTSP_PASSWORD=SomethingSecure101
|
||||
```
|
68
Servers/Containerization/Docker/Compose/Gatus.md
Normal file
68
Servers/Containerization/Docker/Compose/Gatus.md
Normal file
@ -0,0 +1,68 @@
|
||||
**Purpose**: Gatus Service Status Server.
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.9"
|
||||
services:
|
||||
postgres:
|
||||
image: postgres
|
||||
volumes:
|
||||
- /srv/containers/gatus/db:/var/lib/postgresql/data
|
||||
ports:
|
||||
- "5432:5432"
|
||||
env_file:
|
||||
- stack.env
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.9
|
||||
|
||||
gatus:
|
||||
image: twinproduction/gatus:latest
|
||||
restart: always
|
||||
ports:
|
||||
- "8080:8080"
|
||||
env_file:
|
||||
- stack.env
|
||||
volumes:
|
||||
- /srv/containers/gatus/config:/config
|
||||
depends_on:
|
||||
- postgres
|
||||
dns:
|
||||
- 192.168.3.25
|
||||
- 192.168.3.26
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.8
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
status-bunny-lab:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: status-bunny-lab
|
||||
rule: Host(`status.bunny-lab.io`)
|
||||
middlewares:
|
||||
- "auth-bunny-lab-io" # Referencing the Keycloak Server
|
||||
|
||||
services:
|
||||
status-bunny-lab:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.8:8080
|
||||
passHostHeader: true
|
||||
```
|
95
Servers/Containerization/Docker/Compose/Gitea.md
Normal file
95
Servers/Containerization/Docker/Compose/Gitea.md
Normal file
@ -0,0 +1,95 @@
|
||||
**Purpose**: Gitea is a painless self-hosted all-in-one software development service; it includes Git hosting, code review, team collaboration, package registry and CI/CD. It is similar to GitHub, Bitbucket and GitLab. Gitea was forked from Gogs originally and almost all the code has been changed.
|
||||
|
||||
[Detailed SMTP Configuration Reference](https://docs.gitea.com/administration/config-cheat-sheet)
|
||||
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
server:
|
||||
image: gitea/gitea:latest
|
||||
container_name: gitea
|
||||
privileged: true
|
||||
environment:
|
||||
- USER_UID=1000
|
||||
- USER_GID=1000
|
||||
- TZ=America/Denver
|
||||
- GITEA__mailer__ENABLED=true
|
||||
- GITEA__mailer__FROM=${GITEA__mailer__FROM:?GITEA__mailer__FROM not set}
|
||||
- GITEA__mailer__PROTOCOL=smtp+starttls
|
||||
- GITEA__mailer__HOST=${GITEA__mailer__HOST:?GITEA__mailer__HOST not set}
|
||||
- GITEA__mailer__IS_TLS_ENABLED=true
|
||||
- GITEA__mailer__USER=${GITEA__mailer__USER:-apikey}
|
||||
- GITEA__mailer__PASSWD="""${GITEA__mailer__PASSWD:?GITEA__mailer__PASSWD not set}"""
|
||||
restart: always
|
||||
volumes:
|
||||
- /srv/containers/gitea:/data
|
||||
# - /etc/timezone:/etc/timezone:ro
|
||||
# - /etc/localtime:/etc/localtime:ro
|
||||
ports:
|
||||
- "3000:3000"
|
||||
- "222:22"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.70
|
||||
# labels:
|
||||
# - "traefik.enable=true"
|
||||
# - "traefik.http.routers.gitea.rule=Host(`git.bunny-lab.io`)"
|
||||
# - "traefik.http.routers.gitea.entrypoints=websecure"
|
||||
# - "traefik.http.routers.gitea.tls.certresolver=letsencrypt"
|
||||
# - "traefik.http.services.gitea.loadbalancer.server.port=3000"
|
||||
depends_on:
|
||||
- postgres
|
||||
|
||||
postgres:
|
||||
image: postgres:12-alpine
|
||||
ports:
|
||||
- 5432:5432
|
||||
volumes:
|
||||
- /srv/containers/gitea/db:/var/lib/postgresql/data
|
||||
environment:
|
||||
- POSTGRES_DB=gitea
|
||||
- POSTGRES_USER=gitea
|
||||
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
||||
- TZ=America/Denver
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.71
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
GITEA__mailer__FROM=noreply@bunny-lab.io
|
||||
GITEA__mailer__HOST=mail.bunny-lab.io
|
||||
GITEA__mailer__PASSWD=SecureSMTPPassword
|
||||
GITEA__mailer__USER=noreply@bunny-lab.io
|
||||
POSTGRES_PASSWORD=SomethingSuperSecure
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
git:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: git
|
||||
rule: Host(`git.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
git:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.70:3000
|
||||
passHostHeader: true
|
||||
```
|
37
Servers/Containerization/Docker/Compose/HomeAssistant.md
Normal file
37
Servers/Containerization/Docker/Compose/HomeAssistant.md
Normal file
@ -0,0 +1,37 @@
|
||||
**Purpose**: Open source home automation that puts local control and privacy first. Powered by a worldwide community of tinkerers and DIY enthusiasts.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
services:
|
||||
homeassistant:
|
||||
container_name: homeassistant
|
||||
image: "ghcr.io/home-assistant/home-assistant:stable"
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
volumes:
|
||||
- /srv/containers/Home-Assistant-Core:/config
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
restart: always
|
||||
privileged: true
|
||||
ports:
|
||||
- 8123:8123
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.252
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.homeassistant.rule=Host(`automation.cyberstrawberry.net`)"
|
||||
- "traefik.http.routers.homeassistant.entrypoints=websecure"
|
||||
- "traefik.http.routers.homeassistant.tls.certresolver=myresolver"
|
||||
- "traefik.http.services.homeassistant.loadbalancer.server.port=8123"
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
75
Servers/Containerization/Docker/Compose/Homebox.md
Normal file
75
Servers/Containerization/Docker/Compose/Homebox.md
Normal file
@ -0,0 +1,75 @@
|
||||
**Purpose**: Homebox is the inventory and organization system built for the Home User! With a focus on simplicity and ease of use, Homebox is the perfect solution for your home inventory, organization, and management needs.
|
||||
|
||||
[Reference Documentation](https://hay-kot.github.io/homebox/quick-start/)
|
||||
|
||||
!!! warning "Protect with Keycloak"
|
||||
The GitHub project for this software appears to have been archived in a read-only state in June 2024. There is no default admin credential, so setting the environment variable `HBOX_OPTIONS_ALLOW_REGISTRATION` to `false` will literally make you unable to log into the system. You also cannot change it after-the-fact, so you cannot just register an account then disable it and restart the container; it doesn't work that way.
|
||||
|
||||
Due to this behavior, it is imperative that you deploy this either only internally, or if it's external, put it behind something like [Authentik](https://docs.bunny-lab.io/Docker %26 Kubernetes/Docker/Docker Compose/Authentik/) or [Keycloak](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Docker/Docker%20Compose/Keycloak/).
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
homebox:
|
||||
image: ghcr.io/hay-kot/homebox:latest
|
||||
container_name: homebox
|
||||
restart: always
|
||||
environment:
|
||||
- HBOX_LOG_LEVEL=info
|
||||
- HBOX_LOG_FORMAT=text
|
||||
- HBOX_WEB_MAX_UPLOAD_SIZE=10
|
||||
- HBOX_MODE=production
|
||||
- HBOX_OPTIONS_ALLOW_REGISTRATION=true
|
||||
- HBOX_WEB_MAX_UPLOAD_SIZE=50
|
||||
- HBOX_WEB_READ_TIMEOUT=20
|
||||
- HBOX_WEB_WRITE_TIMEOUT=20
|
||||
- HBOX_WEB_IDLE_TIMEOUT=60
|
||||
- HBOX_MAILER_HOST=${HBOX_MAILER_HOST}
|
||||
- HBOX_MAILER_PORT=${HBOX_MAILER_PORT}
|
||||
- HBOX_MAILER_USERNAME=${HBOX_MAILER_USERNAME}
|
||||
- HBOX_MAILER_PASSWORD=${HBOX_MAILER_PASSWORD}
|
||||
- HBOX_MAILER_FROM=${HBOX_MAILER_FROM}
|
||||
volumes:
|
||||
- /srv/containers/homebox:/data/
|
||||
ports:
|
||||
- 7745:7745
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.25
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
HBOX_MAILER_HOST=mail.bunny-lab.io
|
||||
HBOX_MAILER_PORT=587
|
||||
HBOX_MAILER_USERNAME=noreply@bunny-lab.io
|
||||
HBOX_MAILER_PASSWORD=REDACTED
|
||||
HBOX_MAILER_FROM=noreply@bunny-lab.io
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
homebox:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: homebox
|
||||
rule: Host(`box.bunny-lab.io`)
|
||||
middlewares:
|
||||
- "auth-bunny-lab-io" # Referencing the Keycloak Server
|
||||
services:
|
||||
homebox:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.25:7745
|
||||
passHostHeader: true
|
||||
```
|
41
Servers/Containerization/Docker/Compose/Homepage-Docker.md
Normal file
41
Servers/Containerization/Docker/Compose/Homepage-Docker.md
Normal file
@ -0,0 +1,41 @@
|
||||
**Purpose**: A highly customizable homepage (or startpage / application dashboard) with Docker and service API integrations.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3.8'
|
||||
services:
|
||||
homepage:
|
||||
image: ghcr.io/benphelps/homepage:latest
|
||||
container_name: homepage
|
||||
volumes:
|
||||
- /srv/containers/homepage-docker:/config
|
||||
- /srv/containers/homepage-docker/icons:/app/public/icons
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
- 3000:3000
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=America/Denver
|
||||
dns:
|
||||
- 192.168.3.10
|
||||
- 192.168.3.11
|
||||
restart: unless-stopped
|
||||
extra_hosts:
|
||||
- "rancher.cyberstrawberry.net:192.168.3.21"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.44
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
26
Servers/Containerization/Docker/Compose/IT-Tools.md
Normal file
26
Servers/Containerization/Docker/Compose/IT-Tools.md
Normal file
@ -0,0 +1,26 @@
|
||||
**Purpose**: Collection of handy online tools for developers, with great UX.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
server:
|
||||
image: corentinth/it-tools:latest
|
||||
container_name: it-tools
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
restart: always
|
||||
ports:
|
||||
- "80:80"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.16
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
230
Servers/Containerization/Docker/Compose/Keycloak.md
Normal file
230
Servers/Containerization/Docker/Compose/Keycloak.md
Normal file
@ -0,0 +1,230 @@
|
||||
**Purpose**: Keycloak is an open source identity and access management system for modern applications and services.
|
||||
|
||||
- [Original Reference Compose File](https://github.com/JamesTurland/JimsGarage/blob/main/Keycloak/docker-compose.yaml)
|
||||
- [Original Reference Deployment Video](https://www.youtube.com/watch?v=6ye4lP9EA2Y)
|
||||
- [Theme Customization Documentation](https://www.baeldung.com/spring-keycloak-custom-themes)
|
||||
|
||||
## Keycloak Authentication Sequence
|
||||
``` mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Traefik as Traefik Reverse Proxy
|
||||
participant Keycloak
|
||||
participant Services
|
||||
|
||||
User->>Traefik: Access service URL
|
||||
Traefik->>Keycloak: Redirect to Keycloak for authentication
|
||||
User->>Keycloak: Provide credentials for authentication
|
||||
Keycloak->>User: Return authorization token/cookie
|
||||
User->>Traefik: Send request with authorization token/cookie
|
||||
Traefik->>Keycloak: Validate token/cookie
|
||||
Keycloak->>Traefik: Token/cookie is valid
|
||||
Traefik->>Services: Forward request to services
|
||||
Services->>Traefik: Response back to Traefik
|
||||
Traefik->>User: Return service response
|
||||
```
|
||||
## Docker Configuration
|
||||
|
||||
=== "docker-compose.yml"
|
||||
|
||||
``` yaml
|
||||
version: '3.7'
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16.2
|
||||
volumes:
|
||||
- /srv/containers/keycloak/db:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_DB: ${POSTGRES_DB}
|
||||
POSTGRES_USER: ${POSTGRES_USER}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U keycloak"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
keycloak_internal_network: # Network for internal communication
|
||||
ipv4_address: 172.16.238.3 # Static IP for PostgreSQL in internal network
|
||||
|
||||
keycloak:
|
||||
image: quay.io/keycloak/keycloak:23.0.6
|
||||
command: start
|
||||
volumes:
|
||||
- /srv/containers/keycloak/themes:/opt/keycloak/themes
|
||||
- /srv/containers/keycloak/base-theme:/opt/keycloak/themes/base
|
||||
environment:
|
||||
TZ: America/Denver # (1)
|
||||
KC_PROXY_ADDRESS_FORWARDING: true # (2)
|
||||
KC_HOSTNAME_STRICT: false
|
||||
KC_HOSTNAME: auth.bunny-lab.io # (3)
|
||||
KC_PROXY: edge # (4)
|
||||
KC_HTTP_ENABLED: true
|
||||
KC_DB: postgres
|
||||
KC_DB_USERNAME: ${POSTGRES_USER}
|
||||
KC_DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
KC_DB_URL_HOST: postgres
|
||||
KC_DB_URL_PORT: 5432
|
||||
KC_DB_URL_DATABASE: ${POSTGRES_DB}
|
||||
KC_TRANSACTION_RECOVERY: true
|
||||
KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN}
|
||||
KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
|
||||
KC_HEALTH_ENABLED: true
|
||||
DB_POOL_MAX_SIZE: 20 # (5)
|
||||
DB_POOL_MIN_SIZE: 5 # (6)
|
||||
DB_POOL_ACQUISITION_TIMEOUT: 30 # (7)
|
||||
DB_POOL_IDLE_TIMEOUT: 300 # (8)
|
||||
JDBC_PARAMS: "connectTimeout=30"
|
||||
KC_HOSTNAME_DEBUG: false # (9)
|
||||
ports:
|
||||
- 8080:8080
|
||||
restart: always
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8080/auth"] # Health check for Keycloak
|
||||
interval: 30s # Health check interval
|
||||
timeout: 10s # Health check timeout
|
||||
retries: 3 # Health check retries
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.2
|
||||
keycloak_internal_network: # Network for internal communication
|
||||
ipv4_address: 172.16.238.2 # Static IP for Keycloak in internal network
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
keycloak_internal_network: # Internal network for private communication
|
||||
driver: bridge # Network driver
|
||||
ipam: # IP address management
|
||||
config:
|
||||
- subnet: 172.16.238.0/24 # Subnet for internal network
|
||||
|
||||
```
|
||||
|
||||
1. This sets the timezone of the Keycloak server to your timezone. This is not really necessary according to the official documentation; however, I just like to add it to all of my containers as a baseline environment variable
|
||||
2. This assumes you are running Keycloak behind a reverse proxy, in my particular case, Traefik
|
||||
3. Set this to the FQDN that you are expecting to reach the Keycloak server at behind your reverse proxy
|
||||
4. This assumes you are running Keycloak behind a reverse proxy, in my particular case, Traefik
|
||||
5. Maximum connections in the database pool
|
||||
6. Minimum idle connections in the database pool
|
||||
7. Timeout for acquiring a connection from the database pool
|
||||
8. Timeout for closing idle connections to the database
|
||||
9. If this is enabled, navigate to https://auth.bunny-lab.io/realms/master/hostname-debug to troubleshoot the deployment if you experience any issues logging into the web portal or admin UI
|
||||
|
||||
=== ".env"
|
||||
|
||||
``` yaml
|
||||
POSTGRES_DB=keycloak
|
||||
POSTGRES_USER=keycloak
|
||||
POSTGRES_PASSWORD=SomethingSecure # (1)
|
||||
KEYCLOAK_ADMIN=admin
|
||||
KEYCLOAK_ADMIN_PASSWORD=SomethingSuperSecureToLoginAsAdmin # (2)
|
||||
```
|
||||
|
||||
1. This is used internally by Keycloak to interact with the PostgreSQL database server
|
||||
2. This is used to log into the web admin portal at https://auth.bunny-lab.io
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
auth:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: auth
|
||||
rule: Host(`auth.bunny-lab.io`)
|
||||
middlewares:
|
||||
- auth-headers
|
||||
|
||||
services:
|
||||
auth:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.2:8080
|
||||
passHostHeader: true
|
||||
|
||||
middlewares:
|
||||
auth-headers:
|
||||
headers:
|
||||
sslRedirect: true
|
||||
stsSeconds: 31536000
|
||||
stsIncludeSubdomains: true
|
||||
stsPreload: true
|
||||
forceSTSHeader: true
|
||||
customRequestHeaders:
|
||||
X-Forwarded-Proto: https
|
||||
X-Forwarded-Port: "443"
|
||||
```
|
||||
|
||||
# Traefik Keycloak Middleware
|
||||
At this point, we need to add the official Keycloak plugin to Traefik's main configuration. In this example, it will be assumed you need to configure this in Portainer/Docker Compose, and not via a static yml/toml file. Assume you follow the [Docker Compose based Traefik Deployment](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Docker/Docker%20Compose/Traefik/).
|
||||
|
||||
## Install Keycloak Plugin
|
||||
If you do not already have the following added to the end of your `command:` section of the docker-compose.yml file in Portainer, go ahead and add it:
|
||||
``` yaml
|
||||
# Keycloak plugin configuration
|
||||
- "--experimental.plugins.keycloakopenid.moduleName=github.com/Gwojda/keycloakopenid"
|
||||
- "--experimental.plugins.keycloakopenid.version=v0.1.34"
|
||||
```
|
||||
|
||||
## Add Middleware to Traefik Dynamic Configuration
|
||||
You will want to ensure the following exists in the dynamically-loaded config file folder. You can name the file whatever you want, but it will be a catch-all middleware for any services you want to have communicating as a specific OAuth2 `Client ID`. For example, you might want to have some services exist in a particular realm of Keycloak, or to have different client rules apply to certain services. If this is the case, you can create multiple middlewares in this single yaml file, each handling a different service / realm. It can get pretty complicated if you want to handle a multi-tenant environment, such as one seen in an enterprise environment.
|
||||
|
||||
```jsx title="keycloak-middleware.yml"
|
||||
http:
|
||||
middlewares:
|
||||
auth-bunny-lab-io:
|
||||
plugin:
|
||||
keycloakopenid:
|
||||
KeycloakURL: "https://auth.bunny-lab.io" # <- Also supports complete URL, e.g. https://my-keycloak-url.com/auth
|
||||
ClientID: "traefik-reverse-proxy"
|
||||
ClientSecret: "https://auth.bunny-lab.io > Clients > traefik-reverse-proxy > Credentials > Client Secret"
|
||||
KeycloakRealm: "master"
|
||||
Scope: "openid profile email"
|
||||
TokenCookieName: "AUTH_TOKEN"
|
||||
UseAuthHeader: "false"
|
||||
# IgnorePathPrefixes: "/api,/favicon.ico [comma deliminated] (optional)"
|
||||
```
|
||||
|
||||
## Configure Valid Redirect URLs
|
||||
At this point, within Keycloak, you need to configure domains that you are allowed to visit after authenticating. You can do this with wildcards, but generally you navigate to "**https://auth.bunny-lab.io > Clients > traefik-reverse-proxy > Valid redirect URIs**" A simple example is adding `https://tools.bunny-lab.io/*` to the list of valid redirect URLs. If the site is not in this list, even if it has the middleware configured in Traefik, it will fail to authenticate and not let the user proceed to the website being protected behind Keycloak.
|
||||
|
||||
## Adding Middleware to Dynamic Traefik Service Config Files
|
||||
At this point, you are in the final stretch, you just need to add the middleware to the Traefik dynamic config files to ensure that it routes the traffic to Keycloak when someone attempts to access that service. Put the following middleware section under the `routers:` section of the config file.
|
||||
|
||||
``` yaml
|
||||
middlewares:
|
||||
- auth-bunny-lab-io # Referencing the Keycloak Server
|
||||
```
|
||||
|
||||
A full example config file would look like the following:
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
example:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: example
|
||||
rule: Host(`example.bunny-lab.io`)
|
||||
middlewares:
|
||||
- auth-bunny-lab-io # Referencing the Keycloak Server Traefik Middleware
|
||||
|
||||
services:
|
||||
example:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.16:80
|
||||
passHostHeader: true
|
||||
```
|
43
Servers/Containerization/Docker/Compose/Kopia.md
Normal file
43
Servers/Containerization/Docker/Compose/Kopia.md
Normal file
@ -0,0 +1,43 @@
|
||||
**Purpose**: Cross-platform backup tool for Windows, macOS & Linux with fast, incremental backups, client-side end-to-end encryption, compression and data deduplication. CLI and GUI included.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3.7'
|
||||
services:
|
||||
kopia:
|
||||
image: kopia/kopia:latest
|
||||
hostname: kopia-backup
|
||||
user: root
|
||||
restart: always
|
||||
ports:
|
||||
- 51515:51515
|
||||
environment:
|
||||
- KOPIA_PASSWORD=${KOPIA_ENRYPTION_PASSWORD}
|
||||
- TZ=America/Denver
|
||||
privileged: true
|
||||
volumes:
|
||||
- /srv/containers/kopia/config:/app/config
|
||||
- /srv/containers/kopia/cache:/app/cache
|
||||
- /srv/containers/kopia/logs:/app/logs
|
||||
- /srv:/srv
|
||||
- /usr/share/zoneinfo:/usr/share/zoneinfo
|
||||
entrypoint: ["/bin/kopia", "server", "start", "--insecure", "--timezone=America/Denver", "--address=0.0.0.0:51515", "--override-username=${KOPIA_SERVER_USERNAME}", "--server-username=${KOPIA_SERVER_USERNAME}", "--server-password=${KOPIA_SERVER_PASSWORD}", "--disable-csrf-token-checks"]
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.14
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
!!! note "Credentials"
|
||||
Your username will be `kopia@kopia-backup` and the password will be the value you set for `--server-password` in the entrypoint section of the compose file. The `KOPIA_PASSWORD:` is used by the backup repository, such as Backblaze B2, to encrypt/decrypt the backed-up data, and must be updated in the compose file if the repository is changed / updated.
|
||||
|
||||
|
||||
```jsx title=".env"
|
||||
KOPIA_ENRYPTION_PASSWORD=PasswordUsedToEncryptDataOnBackblazeB2
|
||||
KOPIA_SERVER_PASSWORD=ThisIsUsedToLogIntoKopiaWebUI
|
||||
KOPIA_SERVER_USERNAME=kopia@kopia-backup
|
||||
```
|
191
Servers/Containerization/Docker/Compose/Material MkDocs.md
Normal file
191
Servers/Containerization/Docker/Compose/Material MkDocs.md
Normal file
@ -0,0 +1,191 @@
|
||||
**Purpose**: Documentation that simply works. Write your documentation in Markdown and create a professional static site for your Open Source or commercial project in minutes – searchable, customizable, more than 60 languages, for all devices.
|
||||
|
||||
!!! note
|
||||
This is best deployed in tandem with the [Git Repo Updater](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Docker/Custom%20Containers/Git%20Repo%20Updater/) container in its own stack. Utilizing this will allow you to push commits to a repository to immediately (within 5 seconds) push changes into MKDocs without needing SSH/Portainer access to the server hosting MKDocs. If you don't have a GitHub account, consider deploying a [Gitea](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Docker/Docker%20Compose/Gitea/) container to host your own code repository! This all assumes you have already deployed [Docker and Portainer](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Servers/Docker/Portainer/).
|
||||
|
||||
## Documentation / Pull Sequence
|
||||
``` mermaid
|
||||
sequenceDiagram
|
||||
participant Gitea
|
||||
participant Git_Repo_Updater as Git-Repo-Updater
|
||||
participant MkDocs
|
||||
participant NTFY
|
||||
|
||||
loop Every 5 seconds
|
||||
Git_Repo_Updater->>Gitea: Check for changes in repository
|
||||
alt Changes Detected
|
||||
Gitea->>Git_Repo_Updater: Notify change
|
||||
Git_Repo_Updater->>NTFY: Send change notification
|
||||
Git_Repo_Updater->>Gitea: Download data from repository
|
||||
Git_Repo_Updater->>MkDocs: Copy data to MkDocs
|
||||
MkDocs->>MkDocs: Reload and render webpages
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
## Deploy Material MKDocs
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
mkdocs:
|
||||
container_name: mkdocs
|
||||
image: squidfunk/mkdocs-material
|
||||
restart: always
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
ports:
|
||||
- "8000:8000"
|
||||
volumes:
|
||||
- /srv/containers/material-mkdocs/docs:/docs
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.76
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
||||
|
||||
## Config Example
|
||||
When you deploy MKDocs, you will need to give it a configuration to tell MKDocs how to structure itself. The configuration below is what I used in my deployment. This file is one folder level higher than the `/docs` folder that holds the documentation of the website.
|
||||
```jsx title="/srv/containers/material-mkdocs/docs/mkdocs.yml"
|
||||
# Project information
|
||||
site_name: Homelab Documentation
|
||||
site_url: https://docs.bunny-lab.io
|
||||
site_author: Nicole Rappe
|
||||
site_description: >-
|
||||
Bunny Lab Server, Script, and Container Documentation
|
||||
|
||||
# Configuration
|
||||
theme:
|
||||
name: material
|
||||
custom_dir: material/overrides
|
||||
features:
|
||||
- announce.dismiss
|
||||
- content.action.edit
|
||||
- content.action.view
|
||||
- content.code.annotate
|
||||
- content.code.copy
|
||||
- content.code.select
|
||||
- content.tabs.link
|
||||
- content.tooltips
|
||||
# - header.autohide
|
||||
- navigation.expand
|
||||
# - navigation.footer
|
||||
- navigation.indexes
|
||||
- navigation.instant
|
||||
- navigation.instant.prefetch
|
||||
- navigation.instant.progress
|
||||
- navigation.prune
|
||||
- navigation.sections
|
||||
- navigation.tabs
|
||||
- navigation.tabs.sticky
|
||||
- navigation.top
|
||||
- navigation.tracking
|
||||
- search.highlight
|
||||
- search.share
|
||||
- search.suggest
|
||||
- toc.follow
|
||||
# - toc.integrate ## If this is enabled, the TOC will appear on the left navigation menu.
|
||||
palette:
|
||||
- media: "(prefers-color-scheme)"
|
||||
toggle:
|
||||
icon: material/link
|
||||
name: Switch to light mode
|
||||
- media: "(prefers-color-scheme: light)"
|
||||
scheme: default
|
||||
primary: deep purple
|
||||
accent: deep purple
|
||||
toggle:
|
||||
icon: material/toggle-switch
|
||||
name: Switch to dark mode
|
||||
- media: "(prefers-color-scheme: dark)"
|
||||
scheme: slate
|
||||
primary: black
|
||||
accent: deep purple
|
||||
toggle:
|
||||
icon: material/toggle-switch-off
|
||||
name: Switch to system preference
|
||||
font:
|
||||
text: Roboto
|
||||
code: Roboto Mono
|
||||
favicon: assets/favicon.png
|
||||
icon:
|
||||
logo: logo
|
||||
|
||||
# Plugins
|
||||
plugins:
|
||||
- search:
|
||||
separator: '[\s\u200b\-_,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])'
|
||||
- minify:
|
||||
minify_html: true
|
||||
|
||||
# Hooks
|
||||
hooks:
|
||||
- material/overrides/hooks/shortcodes.py
|
||||
- material/overrides/hooks/translations.py
|
||||
|
||||
# Additional configuration
|
||||
extra:
|
||||
status:
|
||||
new: Recently added
|
||||
deprecated: Deprecated
|
||||
|
||||
# Extensions
|
||||
markdown_extensions:
|
||||
- abbr
|
||||
- admonition
|
||||
- attr_list
|
||||
- def_list
|
||||
- footnotes
|
||||
- md_in_html
|
||||
- toc:
|
||||
permalink: true
|
||||
toc_depth: 3
|
||||
- pymdownx.arithmatex:
|
||||
generic: true
|
||||
- pymdownx.betterem:
|
||||
smart_enable: all
|
||||
- pymdownx.caret
|
||||
- pymdownx.details
|
||||
- pymdownx.emoji:
|
||||
emoji_generator: !!python/name:material.extensions.emoji.to_svg
|
||||
emoji_index: !!python/name:material.extensions.emoji.twemoji
|
||||
- pymdownx.highlight:
|
||||
anchor_linenums: true
|
||||
line_spans: __span
|
||||
pygments_lang_class: true
|
||||
- pymdownx.inlinehilite
|
||||
- pymdownx.keys
|
||||
- pymdownx.magiclink:
|
||||
normalize_issue_symbols: true
|
||||
repo_url_shorthand: true
|
||||
user: squidfunk
|
||||
repo: mkdocs-material
|
||||
- pymdownx.mark
|
||||
- pymdownx.smartsymbols
|
||||
- pymdownx.snippets:
|
||||
auto_append:
|
||||
- includes/mkdocs.md
|
||||
- pymdownx.superfences:
|
||||
custom_fences:
|
||||
- name: mermaid
|
||||
class: mermaid
|
||||
format: !!python/name:pymdownx.superfences.fence_code_format
|
||||
- pymdownx.tabbed:
|
||||
alternate_style: true
|
||||
combine_header_slug: true
|
||||
slugify: !!python/object/apply:pymdownx.slugs.slugify
|
||||
kwds:
|
||||
case: lower
|
||||
- pymdownx.tasklist:
|
||||
custom_checkbox: true
|
||||
- pymdownx.tilde
|
||||
```
|
||||
|
||||
## Cleaning up
|
||||
When the server is deployed, it will come with a bunch of unnecessary documentation that tells you how to use it. You will want to go into the `/docs` folder, and delete everything except `assets/favicon.png`, `schema.json`, and `/schema`. These files are necessary to allow MKDocs to automatically detect and structure the documentation based on the file folder structure under `/docs`.
|
34
Servers/Containerization/Docker/Compose/NGINX.md
Normal file
34
Servers/Containerization/Docker/Compose/NGINX.md
Normal file
@ -0,0 +1,34 @@
|
||||
**Purpose**: NGINX is open source software for web serving, reverse proxying, caching, load balancing, media streaming, and more.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
---
|
||||
version: "2.1"
|
||||
services:
|
||||
nginx:
|
||||
image: lscr.io/linuxserver/nginx:latest
|
||||
container_name: nginx
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=America/Denver
|
||||
volumes:
|
||||
- /srv/containers/nginx-portfolio-website:/config
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.12
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
161
Servers/Containerization/Docker/Compose/Nextcloud-AIO.md
Normal file
161
Servers/Containerization/Docker/Compose/Nextcloud-AIO.md
Normal file
@ -0,0 +1,161 @@
|
||||
**Purpose**:
|
||||
Deploy a Nextcloud AIO Server. [Official Nextcloud All-in-One Documentation](https://github.com/nextcloud/all-in-one).
|
||||
This version of Nextcloud consists of 12 containers that are centrally managed by a single "master" container. It is more orchestrated and automates the implementation of Nextcloud Office, Nextcloud Talk, and other integrations / apps.
|
||||
|
||||
!!! note "Assumptions"
|
||||
It is assumed you are running Rocky Linux 9.3.
|
||||
|
||||
    It is also assumed that you are using Traefik as your reverse proxy in front of Nextcloud AIO. If it isn't, refer to the [reverse proxy documentation](https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md) to configure other reverse proxies such as NGINX.
|
||||
|
||||
=== "Simplified Docker-Compose.yml"
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
services:
|
||||
nextcloud-aio-mastercontainer:
|
||||
image: nextcloud/all-in-one:latest
|
||||
init: true
|
||||
restart: always
|
||||
container_name: nextcloud-aio-mastercontainer
|
||||
volumes:
|
||||
- nextcloud_aio_mastercontainer:/mnt/docker-aio-config
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
ports:
|
||||
- 8080:8080
|
||||
dns:
|
||||
- 1.1.1.1
|
||||
- 1.0.0.1
|
||||
environment:
|
||||
- APACHE_PORT=11000
|
||||
- APACHE_IP_BINDING=0.0.0.0
|
||||
- NEXTCLOUD_MEMORY_LIMIT=4096M
|
||||
- NEXTCLOUD_ADDITIONAL_APKS=imagemagick
|
||||
- NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS=imagick
|
||||
volumes:
|
||||
nextcloud_aio_mastercontainer:
|
||||
name: nextcloud_aio_mastercontainer
|
||||
```
|
||||
|
||||
=== "Extended Docker-Compose.yml"
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
services:
|
||||
nextcloud-aio-mastercontainer:
|
||||
image: nextcloud/all-in-one:latest
|
||||
init: true
|
||||
restart: always
|
||||
container_name: nextcloud-aio-mastercontainer # This line is not allowed to be changed as otherwise AIO will not work correctly
|
||||
volumes:
|
||||
- nextcloud_aio_mastercontainer:/mnt/docker-aio-config # This line is not allowed to be changed as otherwise the built-in backup solution will not work
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro # May be changed on macOS, Windows or docker rootless. See the applicable documentation. If adjusting, don't forget to also set 'WATCHTOWER_DOCKER_SOCKET_PATH'!
|
||||
ports:
|
||||
# - 80:80 # Can be removed when running behind a web server or reverse proxy (like Apache, Nginx, Cloudflare Tunnel and else). See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||
- 8080:8080
|
||||
# - 8443:8443 # Can be removed when running behind a web server or reverse proxy (like Apache, Nginx, Cloudflare Tunnel and else). See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||
dns:
|
||||
- 1.1.1.1
|
||||
- 1.0.0.1
|
||||
environment: # Is needed when using any of the options below
|
||||
# AIO_DISABLE_BACKUP_SECTION: false # Setting this to true allows to hide the backup section in the AIO interface. See https://github.com/nextcloud/all-in-one#how-to-disable-the-backup-section
|
||||
- APACHE_PORT=11000 # Is needed when running behind a web server or reverse proxy (like Apache, Nginx, Cloudflare Tunnel and else). See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||
- APACHE_IP_BINDING=0.0.0.0 # Should be set when running behind a web server or reverse proxy (like Apache, Nginx, Cloudflare Tunnel and else) that is running on the same host. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||
# BORG_RETENTION_POLICY: --keep-within=7d --keep-weekly=4 --keep-monthly=6 # Allows to adjust borgs retention policy. See https://github.com/nextcloud/all-in-one#how-to-adjust-borgs-retention-policy
|
||||
# COLLABORA_SECCOMP_DISABLED: false # Setting this to true allows to disable Collabora's Seccomp feature. See https://github.com/nextcloud/all-in-one#how-to-disable-collaboras-seccomp-feature
|
||||
# NEXTCLOUD_DATADIR: /mnt/ncdata # Allows to set the host directory for Nextcloud's datadir. ⚠️⚠️⚠️ Warning: do not set or adjust this value after the initial Nextcloud installation is done! See https://github.com/nextcloud/all-in-one#how-to-change-the-default-location-of-nextclouds-datadir
|
||||
# NEXTCLOUD_MOUNT: /mnt/ # Allows the Nextcloud container to access the chosen directory on the host. See https://github.com/nextcloud/all-in-one#how-to-allow-the-nextcloud-container-to-access-directories-on-the-host
|
||||
# NEXTCLOUD_UPLOAD_LIMIT: 10G # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-upload-limit-for-nextcloud
|
||||
# NEXTCLOUD_MAX_TIME: 3600 # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-max-execution-time-for-nextcloud
|
||||
- NEXTCLOUD_MEMORY_LIMIT=4096M # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-php-memory-limit-for-nextcloud
|
||||
          # NEXTCLOUD_TRUSTED_CACERTS_DIR: /path/to/my/cacerts # CA certificates in this directory will be trusted by the OS of the nextcloud container (Useful e.g. for LDAPS) See https://github.com/nextcloud/all-in-one#how-to-trust-user-defined-certification-authorities-ca
|
||||
# NEXTCLOUD_STARTUP_APPS="deck twofactor_totp tasks calendar contacts notes" # Allows to modify the Nextcloud apps that are installed on starting AIO the first time. See https://github.com/nextcloud/all-in-one#how-to-change-the-nextcloud-apps-that-are-installed-on-the-first-startup
|
||||
- NEXTCLOUD_ADDITIONAL_APKS=imagemagick # This allows to add additional packages to the Nextcloud container permanently. Default is imagemagick but can be overwritten by modifying this value. See https://github.com/nextcloud/all-in-one#how-to-add-os-packages-permanently-to-the-nextcloud-container
|
||||
- NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS=imagick # This allows to add additional php extensions to the Nextcloud container permanently. Default is imagick but can be overwritten by modifying this value. See https://github.com/nextcloud/all-in-one#how-to-add-php-extensions-permanently-to-the-nextcloud-container
|
||||
# NEXTCLOUD_ENABLE_DRI_DEVICE: true # This allows to enable the /dev/dri device in the Nextcloud container. ⚠️⚠️⚠️ Warning: this only works if the '/dev/dri' device is present on the host! If it should not exist on your host, don't set this to true as otherwise the Nextcloud container will fail to start! See https://github.com/nextcloud/all-in-one#how-to-enable-hardware-transcoding-for-nextcloud
|
||||
# NEXTCLOUD_KEEP_DISABLED_APPS: false # Setting this to true will keep Nextcloud apps that are disabled in the AIO interface and not uninstall them if they should be installed. See https://github.com/nextcloud/all-in-one#how-to-keep-disabled-apps
|
||||
# TALK_PORT: 3478 # This allows to adjust the port that the talk container is using. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-talk-port
|
||||
# WATCHTOWER_DOCKER_SOCKET_PATH: /var/run/docker.sock # Needs to be specified if the docker socket on the host is not located in the default '/var/run/docker.sock'. Otherwise mastercontainer updates will fail. For macos it needs to be '/var/run/docker.sock'
|
||||
# networks: # Is needed when you want to create the nextcloud-aio network with ipv6-support using this file, see the network config at the bottom of the file
|
||||
# - nextcloud-aio # Is needed when you want to create the nextcloud-aio network with ipv6-support using this file, see the network config at the bottom of the file
|
||||
# security_opt: ["label:disable"] # Is needed when using SELinux
|
||||
|
||||
# # Optional: Caddy reverse proxy. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||
# # You can find further examples here: https://github.com/nextcloud/all-in-one/discussions/588
|
||||
# caddy:
|
||||
# image: caddy:alpine
|
||||
# restart: always
|
||||
# container_name: caddy
|
||||
# volumes:
|
||||
# - ./Caddyfile:/etc/caddy/Caddyfile
|
||||
# - ./certs:/certs
|
||||
# - ./config:/config
|
||||
# - ./data:/data
|
||||
# - ./sites:/srv
|
||||
# network_mode: "host"
|
||||
|
||||
volumes: # If you want to store the data on a different drive, see https://github.com/nextcloud/all-in-one#how-to-store-the-filesinstallation-on-a-separate-drive
|
||||
nextcloud_aio_mastercontainer:
|
||||
name: nextcloud_aio_mastercontainer # This line is not allowed to be changed as otherwise the built-in backup solution will not work
|
||||
|
||||
# # Optional: If you need ipv6, follow step 1 and 2 of https://github.com/nextcloud/all-in-one/blob/main/docker-ipv6-support.md first and then uncomment the below config in order to activate ipv6 for the internal nextcloud-aio network.
|
||||
# # Please make sure to uncomment also the networking lines of the mastercontainer above in order to actually create the network with docker-compose
|
||||
# networks:
|
||||
# nextcloud-aio:
|
||||
# name: nextcloud-aio # This line is not allowed to be changed as otherwise the created network will not be used by the other containers of AIO
|
||||
# driver: bridge
|
||||
# enable_ipv6: true
|
||||
# ipam:
|
||||
# driver: default
|
||||
# config:
|
||||
# - subnet: fd12:3456:789a:2::/64 # IPv6 subnet to use
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
```jsx title="cloud.bunny-lab.io.yml"
|
||||
http:
|
||||
routers:
|
||||
nextcloud-aio:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: nextcloud-aio
|
||||
middlewares:
|
||||
- nextcloud-chain
|
||||
rule: Host(`cloud.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
nextcloud-aio:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.3.29:11000
|
||||
|
||||
middlewares:
|
||||
nextcloud-secure-headers:
|
||||
headers:
|
||||
hostsProxyHeaders:
|
||||
- "X-Forwarded-Host"
|
||||
referrerPolicy: "same-origin"
|
||||
|
||||
https-redirect:
|
||||
redirectscheme:
|
||||
scheme: https
|
||||
|
||||
nextcloud-chain:
|
||||
chain:
|
||||
middlewares:
|
||||
# - ... (e.g. rate limiting middleware)
|
||||
- https-redirect
|
||||
- nextcloud-secure-headers
|
||||
```
|
||||
|
||||
## Initial Setup
|
||||
You will need to navigate to https://192.168.3.29:8080 to access the Nextcloud AIO configuration tool. This is where you will get the AIO password, encryption passphrase for backups, and be able to configure the timezone, among other things.
|
||||
|
||||
### Domain Validation
|
||||
It will ask you to provide a domain name. In this example, we will use `cloud.bunny-lab.io`. Assuming you have configured the Traefik reverse proxy as seen above, when you press the "**Validate Domain**" button, Nextcloud will spin up a container named something similar to `domain-validator`. This will spin up a server listening on https://cloud.bunny-lab.io. If you visit that address, it should give you something similar to `f940935260b41691ac2246ba9e7823a301a1605ae8a023ee`. This will confirm that the domain validation will succeed.
|
||||
|
||||
!!! warning "Domain Validation Failing"
|
||||
    If visiting the web server at https://cloud.bunny-lab.io results in an error 502 or 404, try to destroy the domain validation container in Portainer / Docker, then click the validation button in the Nextcloud AIO WebUI to spin up a new container automatically, at which point it should be functional.
|
||||
|
||||
### Configuring Additional Packages
|
||||
At this point, the rest of the setup is fairly straightforward. You just check every checkbox for the apps you want to install automatically, and be patient while Nextcloud deploys about 11 containers. You can track the progress more accurately if you log into Portainer and watch the container listing and logs to follow-along until every container reports "**Healthy**" indicating everything is ready, then press the "**Refresh**" button on the Nextcloud AIO WebUI to confirm it's ready to be used.
|
64
Servers/Containerization/Docker/Compose/Nextcloud.md
Normal file
64
Servers/Containerization/Docker/Compose/Nextcloud.md
Normal file
@ -0,0 +1,64 @@
|
||||
**Purpose**: Deploy a Nextcloud and PostgreSQL database together.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "2.1"
|
||||
services:
|
||||
app:
|
||||
image: nextcloud:apache
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.nextcloud.rule=Host(`files.bunny-lab.io`)"
|
||||
- "traefik.http.routers.nextcloud.entrypoints=websecure"
|
||||
- "traefik.http.routers.nextcloud.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.nextcloud.loadbalancer.server.port=80"
|
||||
environment:
|
||||
- TZ=${TZ}
|
||||
- POSTGRES_DB=${POSTGRES_DB}
|
||||
- POSTGRES_USER=${POSTGRES_USER}
|
||||
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
||||
- POSTGRES_HOST=${POSTGRES_HOST}
|
||||
- OVERWRITEPROTOCOL=https
|
||||
- NEXTCLOUD_ADMIN_USER=${NEXTCLOUD_ADMIN_USER}
|
||||
- NEXTCLOUD_ADMIN_PASSWORD=${NEXTCLOUD_ADMIN_PASSWORD}
|
||||
- NEXTCLOUD_TRUSTED_DOMAINS=${NEXTCLOUD_TRUSTED_DOMAINS}
|
||||
volumes:
|
||||
- /srv/containers/nextcloud/html:/var/www/html
|
||||
ports:
|
||||
- 443:443
|
||||
- 80:80
|
||||
restart: always
|
||||
depends_on:
|
||||
- db
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.17
|
||||
db:
|
||||
image: postgres:12-alpine
|
||||
environment:
|
||||
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
||||
- POSTGRES_USER=${POSTGRES_USER}
|
||||
- POSTGRES_DB=${POSTGRES_DB}
|
||||
volumes:
|
||||
- /srv/containers/nextcloud/db:/var/lib/postgresql/data
|
||||
ports:
|
||||
- 5432:5432
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.18
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
TZ=America/Denver
|
||||
POSTGRES_PASSWORD=SomeSecurePassword
|
||||
POSTGRES_USER=ncadmin
|
||||
POSTGRES_HOST=192.168.5.18
|
||||
POSTGRES_DB=nextcloud
|
||||
NEXTCLOUD_ADMIN_USER=admin
|
||||
NEXTCLOUD_ADMIN_PASSWORD=SomeSuperSecurePassword
|
||||
NEXTCLOUD_TRUSTED_DOMAINS=cloud.bunny-lab.io
|
||||
```
|
45
Servers/Containerization/Docker/Compose/Niltalk.md
Normal file
45
Servers/Containerization/Docker/Compose/Niltalk.md
Normal file
@ -0,0 +1,45 @@
|
||||
**Purpose**: Niltalk is a web based disposable chat server. It allows users to create password protected disposable, ephemeral chatrooms and invite peers to chat rooms.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.7"
|
||||
|
||||
services:
|
||||
redis:
|
||||
image: redis:alpine
|
||||
volumes:
|
||||
- /srv/niltalk
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.196
|
||||
|
||||
niltalk:
|
||||
image: kailashnadh/niltalk:latest
|
||||
ports:
|
||||
- "9000:9000"
|
||||
depends_on:
|
||||
- redis
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.197
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.niltalk.rule=Host(`temp.cyberstrawberry.net`)"
|
||||
- "traefik.http.routers.niltalk.entrypoints=websecure"
|
||||
- "traefik.http.routers.niltalk.tls.certresolver=myresolver"
|
||||
- "traefik.http.services.niltalk.loadbalancer.server.port=9000"
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
niltalk-data:
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
29
Servers/Containerization/Docker/Compose/Node-Red.md
Normal file
29
Servers/Containerization/Docker/Compose/Node-Red.md
Normal file
@ -0,0 +1,29 @@
|
||||
**Purpose**: Node-RED is a programming tool for wiring together hardware devices, APIs and online services in new and interesting ways.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.7"
|
||||
|
||||
services:
|
||||
node-red:
|
||||
image: nodered/node-red:latest
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
ports:
|
||||
- "1880:1880"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.92
|
||||
volumes:
|
||||
- /srv/containers/node-red:/data
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
34
Servers/Containerization/Docker/Compose/Ntfy.md
Normal file
34
Servers/Containerization/Docker/Compose/Ntfy.md
Normal file
@ -0,0 +1,34 @@
|
||||
**Purpose**: ntfy (pronounced notify) is a simple HTTP-based pub-sub notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, and/or using a REST API. It's infinitely flexible, and 100% free software.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "2.1"
|
||||
services:
|
||||
ntfy:
|
||||
image: binwiederhier/ntfy
|
||||
container_name: ntfy
|
||||
command:
|
||||
- serve
|
||||
environment:
|
||||
- TZ=America/Denver # optional: Change to your desired timezone
|
||||
#user: UID:GID # optional: Set custom user/group or uid/gid
|
||||
volumes:
|
||||
- /srv/containers/ntfy/cache:/var/cache/ntfy
|
||||
- /srv/containers/ntfy/etc:/etc/ntfy
|
||||
ports:
|
||||
- 80:80
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.45
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
63
Servers/Containerization/Docker/Compose/OnlyOffice-ee.md
Normal file
63
Servers/Containerization/Docker/Compose/OnlyOffice-ee.md
Normal file
@ -0,0 +1,63 @@
|
||||
**Purpose**: ONLYOFFICE offers a secure online office suite highly compatible with MS Office formats. Generally used with Nextcloud to edit documents directly within the web browser.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
app:
|
||||
image: onlyoffice/documentserver-ee
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
volumes:
|
||||
- /srv/containers/onlyoffice/DocumentServer/logs:/var/log/onlyoffice
|
||||
- /srv/containers/onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data
|
||||
- /srv/containers/onlyoffice/DocumentServer/lib:/var/lib/onlyoffice
|
||||
- /srv/containers/onlyoffice/DocumentServer/db:/var/lib/postgresql
|
||||
- /srv/containers/onlyoffice/DocumentServer/fonts:/usr/share/fonts/truetype/custom
|
||||
- /srv/containers/onlyoffice/DocumentServer/forgotten:/var/lib/onlyoffice/documentserver/App_Data/cache/files/forgotten
|
||||
- /srv/containers/onlyoffice/DocumentServer/rabbitmq:/var/lib/rabbitmq
|
||||
- /srv/containers/onlyoffice/DocumentServer/redis:/var/lib/redis
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.cyberstrawberry-onlyoffice.rule=Host(`office.cyberstrawberry.net`)"
|
||||
- "traefik.http.routers.cyberstrawberry-onlyoffice.entrypoints=websecure"
|
||||
- "traefik.http.routers.cyberstrawberry-onlyoffice.tls.certresolver=myresolver"
|
||||
- "traefik.http.services.cyberstrawberry-onlyoffice.loadbalancer.server.port=80"
|
||||
- "traefik.http.routers.cyberstrawberry-onlyoffice.middlewares=onlyoffice-headers"
|
||||
- "traefik.http.middlewares.onlyoffice-headers.headers.customrequestheaders.X-Forwarded-Proto=https"
|
||||
#- "traefik.http.middlewares.onlyoffice-headers.headers.accessControlAllowOrigin=*"
|
||||
environment:
|
||||
- JWT_ENABLED=true
|
||||
- JWT_SECRET=REDACTED #SET THIS TO SOMETHING SECURE
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.143
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
||||
:::tip
|
||||
If you wish to use this in a non-commercial homelab environment without limits, [this script](https://wiki.muwahhid.ru/ru/Unraid/Docker/Onlyoffice-Document-Server) enables an endless trial without functionality limits.
|
||||
```
|
||||
docker stop office-document-server-ee
|
||||
docker rm office-document-server-ee
|
||||
rm -r /mnt/user/appdata/onlyoffice/DocumentServer
|
||||
sleep 5
|
||||
<USE A PORTAINER WEBHOOK TO RECREATE THE CONTAINER OR REFERENCE THE DOCKER RUN METHOD BELOW>
|
||||
```
|
||||
|
||||
Docker Run Method:
|
||||
```
|
||||
docker run -d --name='office-document-server-ee' --net='bridge' -e TZ="Europe/Moscow" -e HOST_OS="Unraid" -e 'JWT_ENABLED'='true' -e 'JWT_SECRET'='mySecret' -p '8082:80/tcp' -p '4432:443/tcp' -v '/mnt/user/appdata/onlyoffice/DocumentServer/logs':'/var/log/onlyoffice':'rw' -v '/mnt/user/appdata/onlyoffice/DocumentServer/data':'/var/www/onlyoffice/Data':'rw' -v '/mnt/user/appdata/onlyoffice/DocumentServer/lib':'/var/lib/onlyoffice':'rw' -v '/mnt/user/appdata/onlyoffice/DocumentServer/db':'/var/lib/postgresql':'rw' -v '/mnt/user/appdata/onlyoffice/DocumentServer/fonts':'/usr/share/fonts/truetype/custom':'rw' -v '/mnt/user/appdata/onlyoffice/DocumentServer/forgotten':'/var/lib/onlyoffice/documentserver/App_Data/cache/files/forgotten':'rw' -v '/mnt/user/appdata/onlyoffice/DocumentServer/rabbitmq':'/var/lib/rabbitmq':'rw' -v '/mnt/user/appdata/onlyoffice/DocumentServer/redis':'/var/lib/redis':'rw' 'onlyoffice/documentserver-ee'
|
||||
```
|
||||
:::
|
||||
|
82
Servers/Containerization/Docker/Compose/Password Pusher.md
Normal file
82
Servers/Containerization/Docker/Compose/Password Pusher.md
Normal file
@ -0,0 +1,82 @@
|
||||
**Purpose**: An application to securely communicate passwords over the web. Passwords automatically expire after a certain number of views and/or time has passed. Track who, what and when.
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
passwordpusher:
|
||||
image: docker.io/pglombardo/pwpush:release
|
||||
expose:
|
||||
- 5100
|
||||
restart: always
|
||||
environment:
|
||||
# Read Documention on how to generate a master key, then put it below
|
||||
- PWPUSH_MASTER_KEY=${PWPUSH_MASTER_KEY}
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.170
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.passwordpusher.rule=Host(`temp.bunny-lab.io`)"
|
||||
- "traefik.http.routers.passwordpusher.entrypoints=websecure"
|
||||
- "traefik.http.routers.passwordpusher.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.passwordpusher.loadbalancer.server.port=5100"
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
PWPUSH_MASTER_KEY=<PASSWORD>
|
||||
PWP__BRAND__TITLE="Bunny Lab"
|
||||
PWP__BRAND__SHOW_FOOTER_MENU=false
|
||||
PWP__BRAND__LIGHT_LOGO="https://cloud.bunny-lab.io/apps/theming/image/logo?v=22"
|
||||
PWP__BRAND__DARK_LOGO="https://cloud.bunny-lab.io/apps/theming/image/logo?v=22"
|
||||
PWP__BRAND__TAGLINE="Secure Temporary Information Exchange"
|
||||
PWP__MAIL__RAISE_DELIVERY_ERRORS=true
|
||||
PWP__MAIL__SMTP_ADDRESS=mail.bunny-lab.io
|
||||
PWP__MAIL__SMTP_PORT=587
|
||||
PWP__MAIL__SMTP_USER_NAME=noreply@bunny-lab.io
|
||||
PWP__MAIL__SMTP_PASSWORD=<SMTP_CREDENTIALS>
|
||||
PWP__MAIL__SMTP_AUTHENTICATION=plain
|
||||
PWP__MAIL__SMTP_STARTTLS=true
|
||||
PWP__MAIL__SMTP_OPEN_TIMEOUT=10
|
||||
PWP__MAIL__SMTP_READ_TIMEOUT=10
|
||||
PWP__HOST_DOMAIN=bunny-lab.io
|
||||
PWP__HOST_PROTOCOL=https
|
||||
PWP__MAIL__MAILER_SENDER='"noreply" <noreply@bunny-lab.io>'
|
||||
PWP__SHOW_VERSION=false
|
||||
PWP__ENABLE_FILE_PUSHES=true
|
||||
PWP__FILES__EXPIRE_AFTER_DAYS_DEFAULT=2
|
||||
PWP__FILES__EXPIRE_AFTER_DAYS_MAX=7
|
||||
PWP__FILES__EXPIRE_AFTER_VIEWS_DEFAULT=5
|
||||
PWP__FILES__EXPIRE_AFTER_VIEWS_MAX=10
|
||||
PWP__FILES__RETRIEVAL_STEP_DEFAULT=true
|
||||
PWP__ENABLE_URL_PUSHES=true
|
||||
PWP__LOG_LEVEL=info
|
||||
```
|
||||
|
||||
!!! note "PWPUSH_MASTER_KEY"
|
||||
Generate a master key by visiting the [official online key generator](https://pwpush.com/en/pages/generate_key).
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
password-pusher:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: password-pusher
|
||||
rule: Host(`temp.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
password-pusher:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.170:5100
|
||||
passHostHeader: true
|
||||
```
|
41
Servers/Containerization/Docker/Compose/Pi-Hole.md
Normal file
41
Servers/Containerization/Docker/Compose/Pi-Hole.md
Normal file
@ -0,0 +1,41 @@
|
||||
**Purpose**: Pi-hole is a Linux network-level advertisement and Internet tracker blocking application which acts as a DNS sinkhole and optionally a DHCP server, intended for use on a private network.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3"
|
||||
|
||||
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
|
||||
services:
|
||||
pihole:
|
||||
container_name: pihole
|
||||
image: pihole/pihole:latest
|
||||
# For DHCP it is recommended to remove these ports and instead add: network_mode: "host"
|
||||
ports:
|
||||
- "53:53/tcp"
|
||||
- "53:53/udp"
|
||||
- "67:67/udp" # Only required if you are using Pi-hole as your DHCP server
|
||||
- "80:80/tcp"
|
||||
environment:
|
||||
TZ: 'America/Denver'
|
||||
WEBPASSWORD: 'REDACTED' #USE A SECURE PASSWORD HERE
|
||||
# Volumes store your data between container upgrades
|
||||
volumes:
|
||||
- /srv/containers/pihole/app:/etc/pihole
|
||||
- /srv/containers/pihole/etc-dnsmasq.d:/etc/dnsmasq.d
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN # Required if you are using Pi-hole as your DHCP server, else not needed
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.190
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
78
Servers/Containerization/Docker/Compose/Pyload.md
Normal file
78
Servers/Containerization/Docker/Compose/Pyload.md
Normal file
@ -0,0 +1,78 @@
|
||||
**Purpose**: pyLoad-ng is a Free and Open Source download manager written in Python and designed to be extremely lightweight, easily extensible and fully manageable via web.
|
||||
|
||||
[Detailed LinuxServer.io Deployment Info](https://docs.linuxserver.io/images/docker-pyload-ng/)
|
||||
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
pyload-ng:
|
||||
image: lscr.io/linuxserver/pyload-ng:latest
|
||||
container_name: pyload-ng
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=America/Denver
|
||||
volumes:
|
||||
- /srv/containers/pyload-ng/config:/config
|
||||
- nfs-share:/downloads
|
||||
ports:
|
||||
- 8000:8000
|
||||
- 9666:9666 #optional
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.30
|
||||
|
||||
volumes:
|
||||
nfs-share:
|
||||
driver: local
|
||||
driver_opts:
|
||||
type: nfs
|
||||
o: addr=192.168.3.3,nolock,soft,rw # Options for the NFS mount
|
||||
device: ":/mnt/STORAGE/Downloads" # NFS path on the server
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
1. Set this to your own timezone.
|
||||
2. This is optional. Additional documentation needed to convey what this port is used for. Possibly API access.
|
||||
3. This assumes you want your download folder to be a SMB network share, this section allows you to connect to the share so Pyload can download content directly into the network folder. Replace the username and `REDACTED` password with your actual credentials. Remove the `domain` argument if the SMB server is not domain-joined.
|
||||
4. This is the destination network share to target with the given credentials in section 3.
|
||||
|
||||
!!! note "NFS Mount Assumptions"
|
||||
The NFS folder in this example is both exported via NFS on a TrueNAS Core server, while also being exported as an NFS export. `mapall user` and `mapall group` is configured to the user and group owners of the folder set in the permissions of the dataset in TrueNAS Core. In this case, the mapall user is `BUNNY-LAB\nicole.rappe` and the mapall group is `BUNNY-LAB\Domain Admins`.
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
pyload:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: pyload
|
||||
rule: Host(`pyload.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
pyload:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.30:8000
|
||||
passHostHeader: true
|
||||
```
|
||||
|
||||
!!! warning "Change Default Admin Credentials"
|
||||
Pyload ships with the username `pyload` and password `pyload`. Make sure you change the credentials immediately after initial login.
|
||||
Navigate to "**Settings > Users > Pyload:"Change Password"**"
|
100
Servers/Containerization/Docker/Compose/RocketChat.md
Normal file
100
Servers/Containerization/Docker/Compose/RocketChat.md
Normal file
@ -0,0 +1,100 @@
|
||||
**Purpose**: Deploy a RocketChat and MongoDB database together.
|
||||
|
||||
!!! caution "Folder Pre-Creation"
|
||||
You need to make the folders for the Mongo database before launching the container stack for the first time. If you do not make this folder ahead of time, Mongo will give Permission Denied errors for the data directory. You can create the folder as well as adjust permissions with the following commands:
|
||||
``` sh
|
||||
mkdir -p /srv/containers/rocketchat/mongodb/data
|
||||
chmod -R 777 /srv/containers/rocketchat
|
||||
```
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
services:
|
||||
rocketchat:
|
||||
image: registry.rocket.chat/rocketchat/rocket.chat:${RELEASE:-latest}
|
||||
restart: always
|
||||
# labels:
|
||||
# traefik.enable: "true"
|
||||
# traefik.http.routers.rocketchat.rule: Host(`${DOMAIN:-}`)
|
||||
# traefik.http.routers.rocketchat.tls: "true"
|
||||
# traefik.http.routers.rocketchat.entrypoints: https
|
||||
# traefik.http.routers.rocketchat.tls.certresolver: le
|
||||
environment:
|
||||
MONGO_URL: "${MONGO_URL:-\
|
||||
mongodb://${MONGODB_ADVERTISED_HOSTNAME:-rc_mongodb}:${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}/\
|
||||
${MONGODB_DATABASE:-rocketchat}?replicaSet=${MONGODB_REPLICA_SET_NAME:-rs0}}"
|
||||
MONGO_OPLOG_URL: "${MONGO_OPLOG_URL:\
|
||||
-mongodb://${MONGODB_ADVERTISED_HOSTNAME:-rc_mongodb}:${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}/\
|
||||
local?replicaSet=${MONGODB_REPLICA_SET_NAME:-rs0}}"
|
||||
ROOT_URL: ${ROOT_URL:-http://localhost:${HOST_PORT:-3000}}
|
||||
PORT: ${PORT:-3000}
|
||||
DEPLOY_METHOD: docker
|
||||
DEPLOY_PLATFORM: ${DEPLOY_PLATFORM:-}
|
||||
REG_TOKEN: ${REG_TOKEN:-}
|
||||
depends_on:
|
||||
- rc_mongodb
|
||||
expose:
|
||||
- ${PORT:-3000}
|
||||
dns:
|
||||
- 1.1.1.1
|
||||
- 1.0.0.1
|
||||
- 8.8.8.8
|
||||
- 8.8.4.4
|
||||
ports:
|
||||
- "${BIND_IP:-0.0.0.0}:${HOST_PORT:-3000}:${PORT:-3000}"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.2
|
||||
|
||||
rc_mongodb:
|
||||
image: docker.io/bitnami/mongodb:${MONGODB_VERSION:-5.0}
|
||||
restart: always
|
||||
volumes:
|
||||
- /srv/deeptree/rocket.chat/mongodb:/bitnami/mongodb
|
||||
environment:
|
||||
MONGODB_REPLICA_SET_MODE: primary
|
||||
MONGODB_REPLICA_SET_NAME: ${MONGODB_REPLICA_SET_NAME:-rs0}
|
||||
MONGODB_PORT_NUMBER: ${MONGODB_PORT_NUMBER:-27017}
|
||||
MONGODB_INITIAL_PRIMARY_HOST: ${MONGODB_INITIAL_PRIMARY_HOST:-rc_mongodb}
|
||||
MONGODB_INITIAL_PRIMARY_PORT_NUMBER: ${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}
|
||||
MONGODB_ADVERTISED_HOSTNAME: ${MONGODB_ADVERTISED_HOSTNAME:-rc_mongodb}
|
||||
MONGODB_ENABLE_JOURNAL: ${MONGODB_ENABLE_JOURNAL:-true}
|
||||
ALLOW_EMPTY_PASSWORD: ${ALLOW_EMPTY_PASSWORD:-yes}
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.3
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
TZ=America/Denver
|
||||
RELEASE=6.3.0
|
||||
PORT=3000 #Redundant - Can be Removed
|
||||
MONGODB_VERSION=6.0
|
||||
MONGODB_INITIAL_PRIMARY_HOST=rc_mongodb #Redundant - Can be Removed
|
||||
MONGODB_ADVERTISED_HOSTNAME=rc_mongodb #Redundant - Can be Removed
|
||||
```
|
||||
## Reverse Proxy Configuration
|
||||
```jsx title="nginx.conf"
|
||||
# Rocket.Chat Server
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name rocketchat.domain.net;
|
||||
error_log /var/log/nginx/new_rocketchat_error.log;
|
||||
client_max_body_size 500M;
|
||||
location / {
|
||||
proxy_pass http://192.168.5.2:3000;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header X-Nginx-Proxy true;
|
||||
proxy_redirect off;
|
||||
}
|
||||
}
|
||||
```
|
51
Servers/Containerization/Docker/Compose/SearX.md
Normal file
51
Servers/Containerization/Docker/Compose/SearX.md
Normal file
@ -0,0 +1,51 @@
|
||||
**Purpose**: Deploys a SearX Meta Search Engine Server
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
services:
|
||||
searx:
|
||||
image: searx/searx:latest
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- /srv/containers/searx/:/etc/searx
|
||||
restart: always
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.searx.rule=Host(`searx.bunny-lab.io`)"
|
||||
- "traefik.http.routers.searx.entrypoints=websecure"
|
||||
- "traefik.http.routers.searx.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.searx.loadbalancer.server.port=8080"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.124
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
searx:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: searx
|
||||
rule: Host(`searx.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
searx:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.124:8080
|
||||
passHostHeader: true
|
||||
```
|
135
Servers/Containerization/Docker/Compose/Snipe-IT.md
Normal file
135
Servers/Containerization/Docker/Compose/Snipe-IT.md
Normal file
@ -0,0 +1,135 @@
|
||||
**Purpose**: A free open source IT asset/license management system.
|
||||
|
||||
!!! warning
|
||||
The Snipe-IT container will attempt to launch after the MariaDB container starts, but MariaDB takes a while to set itself up before it can accept connections; as a result, Snipe-IT will fail to initialize the database. Just wait about 30 seconds after deploying the stack, then restart the Snipe-IT container to initialize the database. You will know it worked if you see notes about data being `Migrated`.
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3.7'
|
||||
|
||||
services:
|
||||
snipeit:
|
||||
image: snipe/snipe-it
|
||||
ports:
|
||||
- "8000:80"
|
||||
depends_on:
|
||||
- db
|
||||
env_file:
|
||||
- stack.env
|
||||
volumes:
|
||||
- /srv/containers/snipe-it:/var/lib/snipeit
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.50
|
||||
|
||||
redis:
|
||||
image: redis:6.2.5-buster
|
||||
ports:
|
||||
- "6379:6379"
|
||||
env_file:
|
||||
- stack.env
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.51
|
||||
|
||||
db:
|
||||
image: mariadb:10.5
|
||||
ports:
|
||||
- "3306:3306"
|
||||
env_file:
|
||||
- stack.env
|
||||
volumes:
|
||||
- /srv/containers/snipe-it/db:/var/lib/mysql
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.52
|
||||
|
||||
mailhog:
|
||||
image: mailhog/mailhog:v1.0.1
|
||||
ports:
|
||||
# - 1025:1025
|
||||
- "8025:8025"
|
||||
env_file:
|
||||
- stack.env
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.53
|
||||
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
APP_ENV=production
|
||||
APP_DEBUG=false
|
||||
APP_KEY=base64:SomethingSecure
|
||||
APP_URL=https://assets.bunny-lab.io
|
||||
APP_TIMEZONE='America/Denver'
|
||||
APP_LOCALE=en
|
||||
MAX_RESULTS=500
|
||||
PRIVATE_FILESYSTEM_DISK=local
|
||||
PUBLIC_FILESYSTEM_DISK=local_public
|
||||
DB_CONNECTION=mysql
|
||||
DB_HOST=db
|
||||
DB_DATABASE=snipedb
|
||||
DB_USERNAME=snipeuser
|
||||
DB_PASSWORD=SomethingSecure
|
||||
DB_PREFIX=null
|
||||
DB_DUMP_PATH='/usr/bin'
|
||||
DB_CHARSET=utf8mb4
|
||||
DB_COLLATION=utf8mb4_unicode_ci
|
||||
IMAGE_LIB=gd
|
||||
MYSQL_DATABASE=snipedb
|
||||
MYSQL_USER=snipeuser
|
||||
MYSQL_PASSWORD=SomethingSecure
|
||||
MYSQL_ROOT_PASSWORD=SomethingSecure
|
||||
REDIS_HOST=redis
|
||||
REDIS_PASSWORD=SomethingSecure
|
||||
REDIS_PORT=6379
|
||||
MAIL_DRIVER=smtp
|
||||
MAIL_HOST=mail.bunny-lab.io
|
||||
MAIL_PORT=587
|
||||
MAIL_USERNAME=assets@bunny-lab.io
|
||||
MAIL_PASSWORD=SomethingSecure
|
||||
MAIL_ENCRYPTION=starttls
|
||||
MAIL_FROM_ADDR=assets@bunny-lab.io
|
||||
MAIL_FROM_NAME='Bunny Lab Asset Management'
|
||||
MAIL_REPLYTO_ADDR=assets@bunny-lab.io
|
||||
MAIL_REPLYTO_NAME='Bunny Lab Asset Management'
|
||||
MAIL_AUTO_EMBED_METHOD='attachment'
|
||||
DATA_LOCATION=/srv/containers/snipe-it
|
||||
APP_TRUSTED_PROXIES=192.168.5.29
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
assets:
|
||||
entryPoints:
|
||||
- websecure
|
||||
rule: Host(`assets.bunny-lab.io`)
|
||||
service: assets
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- assets
|
||||
|
||||
middlewares:
|
||||
assets:
|
||||
headers:
|
||||
customRequestHeaders:
|
||||
X-Forwarded-Proto: https
|
||||
X-Forwarded-Host: assets.bunny-lab.io
|
||||
customResponseHeaders:
|
||||
X-Custom-Header: CustomValue # Example of a static header
|
||||
|
||||
services:
|
||||
assets:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.50:80
|
||||
passHostHeader: true
|
||||
```
|
64
Servers/Containerization/Docker/Compose/Stirling-PDF.md
Normal file
64
Servers/Containerization/Docker/Compose/Stirling-PDF.md
Normal file
@ -0,0 +1,64 @@
|
||||
**Purpose**: This is a powerful locally hosted web-based PDF manipulation tool using Docker that allows you to perform various operations on PDF files, such as splitting, merging, converting, reorganizing, adding images, rotating, compressing, and more. This locally hosted web application started as a 100% ChatGPT-made application and has evolved to include a wide range of features to handle all your PDF needs.
|
||||
|
||||
## Docker Configuration
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.8"
|
||||
services:
|
||||
app:
|
||||
image: frooodle/s-pdf:latest
|
||||
container_name: stirling-pdf
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
- DOCKER_ENABLE_SECURITY=false
|
||||
volumes:
|
||||
- /srv/containers/stirling-pdf/datastore:/datastore
|
||||
- /srv/containers/stirling-pdf/trainingData:/usr/share/tesseract-ocr/5/tessdata #Required for extra OCR languages
|
||||
- /srv/containers/stirling-pdf/extraConfigs:/configs
|
||||
- /srv/containers/stirling-pdf/customFiles:/customFiles/
|
||||
- /srv/containers/stirling-pdf/logs:/logs/
|
||||
ports:
|
||||
- 8080:8080
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.stirling-pdf.rule=Host(`pdf.bunny-lab.io`)"
|
||||
- "traefik.http.routers.stirling-pdf.entrypoints=websecure"
|
||||
- "traefik.http.routers.stirling-pdf.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.stirling-pdf.loadbalancer.server.port=8080"
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.54
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
||||
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
stirling-pdf:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: stirling-pdf
|
||||
rule: Host(`pdf.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
stirling-pdf:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.54:8080
|
||||
passHostHeader: true
|
||||
```
|
182
Servers/Containerization/Docker/Compose/Traefik.md
Normal file
182
Servers/Containerization/Docker/Compose/Traefik.md
Normal file
@ -0,0 +1,182 @@
|
||||
**Purpose**: A traefik reverse proxy is a server that sits between your network firewall and servers hosting various web services on your private network(s). Traefik automatically handles the creation of Let's Encrypt SSL certificates if you have a domain registrar that is supported by Traefik such as CloudFlare; by leveraging API keys, Traefik can automatically make the DNS records for Let's Encrypt's DNS "challenges" whenever you add a service behind the Traefik reverse proxy.
|
||||
|
||||
!!! info "Assumptions"
|
||||
This Traefik deployment document assumes you have deployed [Portainer](https://docs.bunny-lab.io/Docker %26 Kubernetes/Servers/Docker/Portainer/) to either a Rocky Linux or Ubuntu Server environment. Other docker-compose-friendly operating systems have not been tested, so your mileage may vary regarding successful deployment outside of these two operating systems.
|
||||
|
||||
Portainer makes deploying and updating Traefik so much easier than via a CLI. It's also much more intuitive.
|
||||
|
||||
## Deployment on Portainer
|
||||
- Login to Portainer (e.g. https://<portainer-ip>:9443)
|
||||
- Navigate to "**Environment (usually "local") > Stacks > "+ Add Stack"**"
|
||||
- Enter the following `docker-compose.yml` and `.env` environment variables into the webpage
|
||||
- When you have finished making adjustments to the environment variables (and docker-compose data if needed), click the "**Deploy the Stack**" button
|
||||
|
||||
!!! warning "Get DNS Registrar API Keys BEFORE DEPLOYMENT"
|
||||
When you are deploying this container, you have to be mindful to set valid data for the environment variables related to the DNS registrar. In this example, it is CloudFlare.
|
||||
|
||||
```jsx title="Environment Variables"
|
||||
CF_API_EMAIL=nicole.rappe@bunny-lab.io
|
||||
CF_API_KEY=REDACTED-CLOUDFLARE-DOMAIN-API-KEY
|
||||
```
|
||||
|
||||
If these are not set, Traefik will still work, but SSL certificates will not be issued from Let's Encrypt, and SSL traffic will be terminated using a self-signed Traefik-based certificate, which is only good for local non-production testing.
|
||||
|
||||
If you plan on using HTTP-based challenges, you will need to make the following changes in the docker-compose.yml data:
|
||||
|
||||
- Un-comment `"--certificatesresolvers.myresolver.acme.tlschallenge=true"`
|
||||
- Comment-out `"--certificatesresolvers.letsencrypt.acme.dnschallenge=true"`
|
||||
- Comment-out `"--certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare"`
|
||||
- Lastly, you need to ensure that port 80 on your firewall is opened to the IP of the Traefik Reverse Proxy to allow Let's Encrypt to do TLS-based challenges.
|
||||
|
||||
### Stack Deployment Information
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "3.3"
|
||||
services:
|
||||
traefik:
|
||||
image: "traefik:latest"
|
||||
restart: always
|
||||
container_name: "traefik-bunny-lab-io"
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
labels:
|
||||
- "traefik.http.routers.traefik-proxy.middlewares=my-buffering"
|
||||
- "traefik.http.middlewares.my-buffering.buffering.maxRequestBodyBytes=104857600"
|
||||
- "traefik.http.middlewares.my-buffering.buffering.maxResponseBodyBytes=104857600"
|
||||
- "traefik.http.middlewares.my-buffering.buffering.memRequestBodyBytes=2097152"
|
||||
- "traefik.http.middlewares.my-buffering.buffering.memResponseBodyBytes=2097152"
|
||||
- "traefik.http.middlewares.my-buffering.buffering.retryExpression=IsNetworkError() && Attempts() <= 2"
|
||||
command:
|
||||
# Globals
|
||||
- "--log.level=ERROR"
|
||||
- "--api.insecure=true"
|
||||
- "--global.sendAnonymousUsage=false"
|
||||
# Docker
|
||||
- "--providers.docker=true"
|
||||
- "--providers.docker.exposedbydefault=false"
|
||||
# File Provider
|
||||
- "--providers.file.directory=/etc/traefik/dynamic"
|
||||
- "--providers.file.watch=true"
|
||||
|
||||
# Entrypoints
|
||||
- "--entrypoints.web.address=:80"
|
||||
- "--entrypoints.websecure.address=:443"
|
||||
- "--entrypoints.web.http.redirections.entrypoint.to=websecure" # Redirect HTTP to HTTPS
|
||||
- "--entrypoints.web.http.redirections.entrypoint.scheme=https" # Redirect HTTP to HTTPS
|
||||
- "--entrypoints.web.http.redirections.entrypoint.permanent=true" # Redirect HTTP to HTTPS
|
||||
# LetsEncrypt
|
||||
### - "--certificatesresolvers.myresolver.acme.tlschallenge=true" # Enable if doing Port 80 Let's Encrypt Challenges
|
||||
- "--certificatesresolvers.letsencrypt.acme.dnschallenge=true" # Disable if doing Port 80 Let's Encrypt Challenges
|
||||
- "--certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare" # Disable if doing Port 80 Let's Encrypt Challenges
|
||||
- "--certificatesresolvers.letsencrypt.acme.email=${LETSENCRYPT_EMAIL}"
|
||||
- "--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
|
||||
|
||||
# Keycloak plugin configuration
|
||||
- "--experimental.plugins.keycloakopenid.moduleName=github.com/Gwojda/keycloakopenid" # Optional if you have Keycloak Deployed
|
||||
- "--experimental.plugins.keycloakopenid.version=v0.1.34" # Optional if you have Keycloak Deployed
|
||||
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- "/srv/containers/traefik/letsencrypt:/letsencrypt"
|
||||
- "/srv/containers/traefik/config:/etc/traefik"
|
||||
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||
- "/srv/containers/traefik/cloudflare:/cloudflare"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.29
|
||||
environment:
|
||||
- CF_API_EMAIL=${CF_API_EMAIL}
|
||||
- CF_API_KEY=${CF_API_KEY}
|
||||
extra_hosts:
|
||||
- "mail.bunny-lab.io:192.168.3.13" # Just an Example
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
CF_API_EMAIL=nicole.rappe@bunny-lab.io
|
||||
CF_API_KEY=REDACTED-CLOUDFLARE-DOMAIN-API-KEY
|
||||
LETSENCRYPT_EMAIL=nicole.rappe@bunny-lab.io
|
||||
```
|
||||
|
||||
!!! info
|
||||
There is a distinction between the "Global API Key" and a "Token API Key". The main difference being that the "Global API Key" can change anything in Cloudflare, while the "Token API Key" can only change what it was granted delegated permissions to.
|
||||
|
||||
## Adding Servers / Services to Traefik
|
||||
Traefik operates in two ways: the first is labels, while the second is dynamic configuration files. We will go over each below.
|
||||
|
||||
### Docker-Compose Labels
|
||||
The first is that it reads "labels" from the docker-compose file of any deployed containers on the same host as Traefik. These labels typically look something like the following:
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.gitea.rule=Host(`example.bunny-lab.io`)"
|
||||
- "traefik.http.routers.gitea.entrypoints=websecure"
|
||||
- "traefik.http.routers.gitea.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.gitea.loadbalancer.server.port=8080"
|
||||
```
|
||||
|
||||
By adding these labels to any container on the same server as Traefik, traefik will automatically "adopt" this service and route traffic to it as well as assign an SSL certificate to it from Let's Encrypt. The only downside is as mentioned above, if you are dealing with something that is not just a container, or maybe a container on a different physical server, you need to rely on dynamic configuration files, such as the one seen below.
|
||||
|
||||
### Dynamic Configuration Files
|
||||
Dynamic configuration files exist under the Traefik container located at `/etc/traefik/dynamic`. Any `*.yml` files located in this folder will be hot-loaded anytime they are modified. This makes it convenient to leverage something such as the [Git Repo Updater](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Docker/Custom%20Containers/Git%20Repo%20Updater/) container to leverage [Gitea](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Docker/Docker%20Compose/Gitea/) to push configuration files from Git into the production environment, saving yourself headache and enabling version control over every service behind the reverse proxy.
|
||||
|
||||
An example of a dynamic configuration file would look something like this:
|
||||
|
||||
```jsx title="/etc/traefik/dynamic/example.bunny-lab.io.yml"
|
||||
http:
|
||||
routers:
|
||||
example:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: example
|
||||
rule: Host(`example.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
example:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.70:8080
|
||||
passHostHeader: true
|
||||
```
|
||||
|
||||
You can see the similarities between the labeling method and how you designate the proxy name `example.bunny-lab.io` the internal ip address `192.168.5.70` the protocol to request the data from the service internally `http`, and the port the server is listening on internally `8080`. If you want to know more about the parameters such as `passHostHeader: true` then you will need to do some of your own research into it.
|
||||
|
||||
!!! example "Service Naming Considerations"
|
||||
When you deploy a service into a Traefik-based reverse proxy, the name of the `router` and `service` have to be unique. The router can have the same name as the service, such as `example`, but I recommend naming the services to match the FQDN of the service itself.
|
||||
|
||||
For example, `remote.bunny-lab.io` would be written as `remote-bunny-lab-io`. This keeps things organized and easy to read if you are troubleshooting things in Traefik's logs or webUI. The complete configuration file would look like the example below:
|
||||
|
||||
```jsx title="/etc/traefik/dynamic/remote.bunny-lab.io.yml"
|
||||
http:
|
||||
routers:
|
||||
remote-bunny-lab-io:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: remote-bunny-lab-io
|
||||
rule: Host(`remote.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
remote-bunny-lab-io:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.70:8080
|
||||
passHostHeader: true
|
||||
```
|
49
Servers/Containerization/Docker/Compose/Trilium.md
Normal file
49
Servers/Containerization/Docker/Compose/Trilium.md
Normal file
@ -0,0 +1,49 @@
|
||||
**Purpose**: Build your personal knowledge base with [Trilium Notes](https://github.com/zadam/trilium/tree/master).
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '2.1'
|
||||
services:
|
||||
trilium:
|
||||
image: zadam/trilium
|
||||
restart: always
|
||||
environment:
|
||||
- TRILIUM_DATA_DIR=/home/node/trilium-data
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- /srv/containers/trilium:/home/node/trilium-data
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.11
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
N/A
|
||||
```
|
||||
|
||||
# Traefik Configuration
|
||||
```jsx title="notes.bunny-lab.io.yml"
|
||||
http:
|
||||
routers:
|
||||
notes:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
http2:
|
||||
service: notes
|
||||
rule: Host(`notes.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
notes:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.11:8080
|
||||
passHostHeader: true
|
||||
```
|
41
Servers/Containerization/Docker/Compose/Unifi-Controller.md
Normal file
41
Servers/Containerization/Docker/Compose/Unifi-Controller.md
Normal file
@ -0,0 +1,41 @@
|
||||
**Purpose**: The UniFi® Controller is a wireless network management software solution from Ubiquiti Networks™. It allows you to manage multiple wireless networks using a web browser.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: "2.1"
|
||||
services:
|
||||
controller:
|
||||
image: lscr.io/linuxserver/unifi-controller:latest
|
||||
container_name: controller
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
#- MEM_LIMIT=1024 #optional
|
||||
#- MEM_STARTUP=1024 #optional
|
||||
volumes:
|
||||
- /srv/containers/unifi-controller:/config
|
||||
ports:
|
||||
- 8443:8443
|
||||
- 3478:3478/udp
|
||||
- 10001:10001/udp
|
||||
- 8080:8080
|
||||
- 1900:1900/udp #optional
|
||||
- 8843:8843 #optional
|
||||
- 8880:8880 #optional
|
||||
- 6789:6789 #optional
|
||||
- 5514:5514/udp #optional
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.140
|
||||
# ipv4_address: 192.168.3.140
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
33
Servers/Containerization/Docker/Compose/UptimeKuma.md
Normal file
33
Servers/Containerization/Docker/Compose/UptimeKuma.md
Normal file
@ -0,0 +1,33 @@
|
||||
**Purpose**: Deploy Uptime Kuma uptime monitor to monitor services in the homelab and send notifications to various services.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3'
|
||||
services:
|
||||
uptimekuma:
|
||||
image: louislam/uptime-kuma
|
||||
ports:
|
||||
- 3001:3001
|
||||
volumes:
|
||||
- /mnt/uptimekuma:/app/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
environment:
|
||||
# Allow status page to exist within an iframe
|
||||
- UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN=1
|
||||
restart: always
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.uptime-kuma.rule=Host(`status.cyberstrawberry.net`)"
|
||||
- "traefik.http.routers.uptime-kuma.entrypoints=websecure"
|
||||
- "traefik.http.routers.uptime-kuma.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.uptime-kuma.loadbalancer.server.port=3001"
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.211
|
||||
networks:
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
62
Servers/Containerization/Docker/Compose/VaultWarden.md
Normal file
62
Servers/Containerization/Docker/Compose/VaultWarden.md
Normal file
@ -0,0 +1,62 @@
|
||||
**Purpose**: Unofficial Bitwarden compatible server written in Rust, formerly known as bitwarden_rs.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
---
|
||||
version: "2.1"
|
||||
services:
|
||||
vaultwarden:
|
||||
image: vaultwarden/server:latest
|
||||
container_name: vaultwarden
|
||||
environment:
|
||||
- TZ=America/Denver
|
||||
- INVITATIONS_ALLOWED=false
|
||||
- SIGNUPS_ALLOWED=false
|
||||
- WEBSOCKET_ENABLED=false
|
||||
- ADMIN_TOKEN=REDACTED #PUT A REALLY REALLY REALLY SECURE PASSWORD HERE
|
||||
volumes:
|
||||
- /srv/containers/vaultwarden:/data
|
||||
ports:
|
||||
- 80:80
|
||||
restart: always
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.15
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.bunny-vaultwarden.rule=Host(`vault.bunny-lab.io`)"
|
||||
- "traefik.http.routers.bunny-vaultwarden.entrypoints=websecure"
|
||||
- "traefik.http.routers.bunny-vaultwarden.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.bunny-vaultwarden.loadbalancer.server.port=80"
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
!!! warning "ADMIN_TOKEN"
|
||||
It is **CRITICAL** that you never share the `ADMIN_TOKEN` with anyone. It allows you to log into the instance at https://vault.example.com/admin to add users, delete users, make changes system wide, etc.
|
||||
|
||||
```jsx title=".env"
|
||||
Not Applicable
|
||||
```
|
||||
## Traefik Reverse Proxy Configuration
|
||||
If the container does not run on the same host as Traefik, you will need to manually add configuration to Traefik's dynamic config file, outlined below.
|
||||
``` yaml
|
||||
http:
|
||||
routers:
|
||||
bunny-vaultwarden:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
service: vaultwarden
|
||||
rule: Host(`vault.bunny-lab.io`)
|
||||
|
||||
services:
|
||||
vaultwarden:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: http://192.168.5.15:80
|
||||
passHostHeader: true
|
||||
```
|
49
Servers/Containerization/Docker/Compose/Wordpress.md
Normal file
49
Servers/Containerization/Docker/Compose/Wordpress.md
Normal file
@ -0,0 +1,49 @@
|
||||
**Purpose**: At its core, WordPress is the simplest, most popular way to create your own website or blog. In fact, WordPress powers over 43.3% of all the websites on the Internet. Yes – more than two in five websites that you visit are likely powered by WordPress.
|
||||
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3.7'
|
||||
services:
|
||||
wordpress:
|
||||
image: wordpress:latest
|
||||
restart: always
|
||||
ports:
|
||||
- 80:80
|
||||
environment:
|
||||
WORDPRESS_DB_HOST: 192.168.5.216
|
||||
WORDPRESS_DB_USER: wordpress
|
||||
WORDPRESS_DB_PASSWORD: ${WORDPRESS_DB_PASSWORD}
|
||||
WORDPRESS_DB_NAME: wordpress
|
||||
volumes:
|
||||
- /srv/Containers/WordPress/Server:/var/www/html
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.217
|
||||
depends_on:
|
||||
- db
|
||||
db:
|
||||
image: lscr.io/linuxserver/mariadb
|
||||
restart: always
|
||||
ports:
|
||||
- 3306:3306
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
|
||||
MYSQL_DATABASE: wordpress
|
||||
MYSQL_USER: wordpress
|
||||
REMOTE_SQL: http://URL1/your.sql,https://URL2/your.sql
|
||||
volumes:
|
||||
- /srv/Containers/WordPress/DB:/config
|
||||
networks:
|
||||
docker_network:
|
||||
ipv4_address: 192.168.5.216
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker_network
|
||||
docker_network:
|
||||
external: true
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
WORDPRESS_DB_PASSWORD=SecurePassword101
|
||||
MYSQL_ROOT_PASSWORD=SecurePassword202
|
||||
```
|
@ -0,0 +1,72 @@
|
||||
**Purpose**:
|
||||
This document will outline the general workflow of using Visual Studio Code to author and update custom containers and push them to a container registry hosted in Gitea. This will be referencing the `git-repo-updater` project throughout.
|
||||
|
||||
!!! note "Assumptions"
|
||||
This document assumes you are authoring the containers in Microsoft Windows, and does not include the fine-tuning necessary to work in Linux or MacOS environments. You are on your own if you want to author containers in Linux.
|
||||
|
||||
## Install Visual Studio Code
|
||||
The management of the Gitea repositories, Dockerfile building, and pushing container images to the Gitea container registry will all involve using just Visual Studio Code. You can download Visual Studio Code from this [direct download link](https://code.visualstudio.com/docs/?dv=win64user).
|
||||
|
||||
## Configure Required Docker Extensions
|
||||
You will need to locate and install the `Dev Containers`, `Docker`, and `WSL` extensions in Visual Studio Code to move forward. This may request that you install Docker Desktop onto your computer as part of the installation process. Proceed to do so, then when the Docker "Engine" is running, you can proceed to the next step.
|
||||
|
||||
!!! warning
|
||||
You need to have Docker Desktop "Engine" running whenever working with containers, as it is necessary to build the images. VSCode will complain if it is not running.
|
||||
|
||||
## Add Gitea Container Registry
|
||||
At this point, we need to add a registry to Visual Studio Code so it can proceed with pulling down the repository data.
|
||||
|
||||
- Click the Docker icon on the left-hand toolbar
|
||||
- Under "**Registries**", click "**Connect Registry...**"
|
||||
- In the dropdown menu that appears, click "**Generic Registry V2**"
|
||||
- Enter `https://git.bunny-lab.io/container-registry`
|
||||
- Registry Username: `nicole.rappe`
|
||||
- Registry Password or Personal Access Token: `Personal Access API Token You Generated in Gitea`
|
||||
- You will now see a sub-listing named "**Generic Registry V2**"
|
||||
- If you click the dropdown, you will see "**https://git.bunny-lab.io/container-registry**"
|
||||
- Under this section, you will see any containers in the registry that you have access to, in this case, you will see `container-registry/git-repo-updater`
|
||||
|
||||
## Add Source Control Repository
|
||||
Now it is time to pull down the repository where the container's core elements are stored on Gitea.
|
||||
|
||||
- Click the "**Source Control**" button on the left-hand menu then click the "**Clone Repository**" button
|
||||
- Enter `https://git.bunny-lab.io/container-registry/git-repo-updater.git`
|
||||
- Click the dropdown menu option "**Clone from URL**" then choose a location to locally store the repository on your computer
|
||||
- When prompted with "**Would you like to open the cloned repository**", click the "**Open**" button
|
||||
|
||||
## Making Changes
|
||||
You will be presented with four files in this specific repository. `.env`, `docker-compose.yml`, `Dockerfile`, and `repo_watcher.sh`
|
||||
|
||||
- `.env` is the environment variables passed to the container to tell it which ntfy server to talk to, which credentials to use with Gitea, and which repositories to download and push into production servers
|
||||
- `docker-compose.yml` is an example docker-compose file that can be used in Portainer to deploy the server along with the contents of the `.env` file
|
||||
- `Dockerfile` is the base of the container, telling docker what operating system to use and how to start the script in the container
|
||||
- `repo_watcher.sh` is the script called by the `Dockerfile` which loops checking for updates in Gitea repositories that were configured in the `.env` file
|
||||
|
||||
### Push to Repository
|
||||
When you make any changes, you will need to first commit them to the repository
|
||||
|
||||
- Save all of the edited files
|
||||
- Click the "**Source Control**" button in the toolbar
|
||||
- Write a message about what you changed in the commit description field
|
||||
- Click the "**Commit**" button
|
||||
- Click the "**Sync Changes**" button that appears
|
||||
- You may be presented with various dialogs, just click the equivalent of "**Yes/OK**" to each of them
|
||||
|
||||
### Build the Dockerfile
|
||||
At this point, we need to build the dockerfile, which takes all of the changes and packages it into a container image
|
||||
|
||||
- Navigate back to the file explorer inside of Visual Studio Code
|
||||
- Right-click the `Dockerfile`, then click "**Build Image...**"
|
||||
- In the "Tag Image As..." window, type in `git.bunny-lab.io/container-registry/git-repo-updater:latest`
|
||||
- When you navigate back to the Docker menu, you will see a new image appear under the "**Images**" section
|
||||
- You should see something similar to "Latest - X Seconds Ago" indicating this is the image you just built
|
||||
- Delete the older image(s) by right-clicking on them and selecting "**Remove...**"
|
||||
- Push the image to the container registry in Gitea by right-clicking the latest image, and selecting "**Push...**"
|
||||
- In the dropdown menu that appears, enter `git.bunny-lab.io/container-registry/git-repo-updater:latest`
|
||||
- You can confirm if it was successful by navigating to the [Gitea Container Webpage](https://git.bunny-lab.io/container-registry/-/packages/container/git-repo-updater/latest) and seeing if it says "**Published Now**" or "**Published 1 Minute Ago**"
|
||||
|
||||
!!! warning "CRLF End of Line Sequences"
|
||||
When you are editing files in the container's repository, you need to ensure that Visual Studio Code is editing that file in "**LF**" mode and not "**CRLF**". You can find this toggle at the bottom-right of the VSCode window. Simply clicking on the letters "**CRLF**" will let you toggle the file to "**LF**". If you do not make this change, the container will misunderstand the dockerfile and/or scripts inside of the container and have runtime errors.
|
||||
|
||||
## Deploy the Container
|
||||
You can now use the `.env` file along with the `docker-compose.yml` file inside of Portainer to deploy a stack using the container you just built / updated.
|
@ -0,0 +1,106 @@
|
||||
**Purpose**: Docker container running Alpine Linux that automates and improves upon much of the script mentioned in the [Git Repo Updater](https://docs.bunny-lab.io/Scripts/Bash/Git%20Repo%20Updater) document. It offers the additional benefits of checking for updates every 5 seconds instead of every 60 seconds. It also accepts environment variables to provide credentials and notification settings, and can have an infinite number of monitored repositories.
|
||||
|
||||
### Deployment
|
||||
You can find the current up-to-date Gitea repository that includes the `docker-compose.yml` and `.env` files that you need to deploy everything [here](https://git.bunny-lab.io/container-registry/-/packages/container/git-repo-updater/latest)
|
||||
```jsx title="docker-compose.yml"
|
||||
version: '3.3'
|
||||
services:
|
||||
git-repo-updater:
|
||||
privileged: true
|
||||
container_name: git-repo-updater
|
||||
env_file:
|
||||
- stack.env
|
||||
image: git.bunny-lab.io/container-registry/git-repo-updater:latest
|
||||
volumes:
|
||||
- /srv/containers:/srv/containers
|
||||
- /srv/containers/git-repo-updater/Repo_Cache:/root/Repo_Cache
|
||||
restart: always
|
||||
```
|
||||
|
||||
```jsx title=".env"
|
||||
# Gitea Credentials
|
||||
GIT_USERNAME=nicole.rappe
|
||||
GIT_PASSWORD=USE-AN-APP-PASSWORD
|
||||
|
||||
# NTFY Push Notification Server URL
|
||||
NTFY_URL=https://ntfy.cyberstrawberry.net/git-repo-updater
|
||||
|
||||
# Repository/Destination Pairs (Add as Many as Needed)
|
||||
REPO_01="https://${GIT_USERNAME}:${GIT_PASSWORD}@git.bunny-lab.io/bunny-lab/docs.git,/srv/containers/material-mkdocs/docs/docs"
|
||||
REPO_02="https://${GIT_USERNAME}:${GIT_PASSWORD}@git.bunny-lab.io/GitOps/servers.bunny-lab.io.git,/srv/containers/homepage-docker"
|
||||
```
|
||||
### Build / Development
|
||||
If you want to learn how the container was assembled, the related build files are located [here](https://git.cyberstrawberry.net/container-registry/git-repo-updater)
|
||||
```jsx title="Dockerfile"
|
||||
# Use Alpine as the base image of the container
|
||||
FROM alpine:latest
|
||||
|
||||
# Install necessary packages
|
||||
RUN apk --no-cache add git curl rsync
|
||||
|
||||
# Add script
|
||||
COPY repo_watcher.sh /repo_watcher.sh
|
||||
RUN chmod +x /repo_watcher.sh
|
||||
|
||||
#Create Directory to store Repositories
|
||||
RUN mkdir -p /root/Repo_Cache
|
||||
|
||||
# Start script (Alpine uses /bin/sh instead of /bin/bash)
|
||||
CMD ["/bin/sh", "-c", "/repo_watcher.sh"]
|
||||
```
|
||||
|
||||
```jsx title="repo_watcher.sh"
|
||||
#!/bin/sh
|
||||
|
||||
# Function to process each repo-destination pair
|
||||
process_repo() {
|
||||
FULL_REPO_URL=$1
|
||||
DESTINATION=$2
|
||||
|
||||
# Extract the URL without credentials for logging and notifications
|
||||
CLEAN_REPO_URL=$(echo "$FULL_REPO_URL" | sed 's/https:\/\/[^@]*@/https:\/\//')
|
||||
|
||||
# Directory to hold the repository locally
|
||||
REPO_DIR="/root/Repo_Cache/$(basename $CLEAN_REPO_URL .git)"
|
||||
|
||||
# Clone the repo if it doesn't exist, or navigate to it if it does
|
||||
if [ ! -d "$REPO_DIR" ]; then
|
||||
curl -d "Cloning: $CLEAN_REPO_URL" $NTFY_URL
|
||||
git clone "$FULL_REPO_URL" "$REPO_DIR" > /dev/null 2>&1
|
||||
fi
|
||||
cd "$REPO_DIR" || exit
|
||||
|
||||
# Fetch the latest changes
|
||||
git fetch origin main > /dev/null 2>&1
|
||||
|
||||
# Check if the local repository is behind the remote
|
||||
LOCAL=$(git rev-parse @)
|
||||
REMOTE=$(git rev-parse @{u})
|
||||
|
||||
if [ "$LOCAL" != "$REMOTE" ]; then
|
||||
curl -d "Updating: $CLEAN_REPO_URL" $NTFY_URL
|
||||
git pull origin main > /dev/null 2>&1
|
||||
rsync -av --delete --exclude '.git/' ./ "$DESTINATION" > /dev/null 2>&1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main loop
|
||||
while true; do
|
||||
# Iterate over each environment variable matching 'REPO_[0-9]+'
|
||||
env | grep '^REPO_[0-9]\+=' | while IFS='=' read -r name value; do
|
||||
# Split the value by comma and read into separate variables
|
||||
OLD_IFS="$IFS" # Save the original IFS
|
||||
IFS=',' # Set IFS to comma for splitting
|
||||
set -- $value # Set positional parameters ($1, $2, ...)
|
||||
REPO_URL="$1" # Assign first parameter to REPO_URL
|
||||
DESTINATION="$2" # Assign second parameter to DESTINATION
|
||||
IFS="$OLD_IFS" # Restore original IFS
|
||||
|
||||
process_repo "$REPO_URL" "$DESTINATION"
|
||||
done
|
||||
|
||||
# Wait for 5 seconds before the next iteration
|
||||
sleep 5
|
||||
done
|
||||
|
||||
```
|
55
Servers/Containerization/Docker/Deploy Portainer.md
Normal file
55
Servers/Containerization/Docker/Deploy Portainer.md
Normal file
@ -0,0 +1,55 @@
|
||||
### Update The Package Manager
|
||||
We need to update the server before installing Docker
|
||||
|
||||
=== "Ubuntu Server"
|
||||
|
||||
``` sh
|
||||
sudo apt update
|
||||
sudo apt upgrade -y
|
||||
```
|
||||
|
||||
=== "Rocky Linux"
|
||||
|
||||
``` sh
|
||||
sudo dnf check-update
|
||||
```
|
||||
|
||||
### Deploy Docker
|
||||
Install Docker then deploy Portainer
|
||||
|
||||
Convenience Script:
|
||||
```
|
||||
curl -fsSL https://get.docker.com | sudo sh
|
||||
```
|
||||
|
||||
Alternative Methods:
|
||||
|
||||
=== "Ubuntu Server"
|
||||
|
||||
``` sh
|
||||
sudo apt install docker.io -y
|
||||
docker run -d -p 8000:8000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v /srv/containers/portainer:/data portainer/portainer-ee:latest # (1)
|
||||
```
|
||||
|
||||
1. Be sure to set the `-v /srv/containers/portainer:/data` value to a safe place that gets backed up regularly.
|
||||
|
||||
=== "Rocky Linux"
|
||||
|
||||
``` sh
|
||||
sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
sudo dnf install docker-ce docker-ce-cli containerd.io
|
||||
sudo systemctl start docker
|
||||
sudo systemctl enable docker # (1)
|
||||
docker run -d -p 8000:8000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v /srv/containers/portainer:/data portainer/portainer-ee:latest # (2)
|
||||
```
|
||||
|
||||
1. This is needed to ensure that docker starts automatically every time the server is turned on.
|
||||
2. Be sure to set the `-v /srv/containers/portainer:/data` value to a safe place that gets backed up regularly.
|
||||
|
||||
### Configure Docker Network
|
||||
I highly recommend setting up a [Dedicated Docker MACVLAN Network](https://docs.bunny-lab.io/Docker%20%26%20Kubernetes/Docker/Docker%20Networking/). You can use it to keep your containers on their own subnet.
|
||||
|
||||
### Access Portainer WebUI
|
||||
You will be able to access the Portainer WebUI at the following address: `https://<IP Address>:9443`
|
||||
!!! warning
|
||||
You need to be quick, as there is a timeout period where you won't be able to onboard / provision Portainer and will be forced to restart its container. If this happens, you can find the container using `sudo docker container ls` followed by `sudo docker restart <ID of Portainer Container>`.
|
187
Servers/Containerization/Kubernetes/Deployment/K8S.md
Normal file
187
Servers/Containerization/Kubernetes/Deployment/K8S.md
Normal file
@ -0,0 +1,187 @@
|
||||
# Deploy Generic Kubernetes
|
||||
The instructions outlined below assume you are deploying the environment using Ansible Playbooks either via Ansible's CLI or AWX.
|
||||
|
||||
### Deploy K8S User
|
||||
```jsx title="01-deploy-k8s-user.yml"
|
||||
- hosts: 'controller-nodes, worker-nodes'
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: create the k8sadmin user account
|
||||
user: name=k8sadmin append=yes state=present createhome=yes shell=/bin/bash
|
||||
|
||||
- name: allow 'k8sadmin' to use sudo without needing a password
|
||||
lineinfile:
|
||||
dest: /etc/sudoers
|
||||
line: 'k8sadmin ALL=(ALL) NOPASSWD: ALL'
|
||||
validate: 'visudo -cf %s'
|
||||
|
||||
- name: set up authorized keys for the k8sadmin user
|
||||
authorized_key: user=k8sadmin key="{{item}}"
|
||||
with_file:
|
||||
- ~/.ssh/id_rsa.pub
|
||||
```
|
||||
|
||||
### Install K8S
|
||||
```jsx title="02-install-k8s.yml"
|
||||
---
|
||||
- hosts: "controller-nodes, worker-nodes"
|
||||
remote_user: nicole
|
||||
become: yes
|
||||
become_method: sudo
|
||||
become_user: root
|
||||
gather_facts: yes
|
||||
connection: ssh
|
||||
|
||||
tasks:
|
||||
- name: Create containerd config file
|
||||
file:
|
||||
path: "/etc/modules-load.d/containerd.conf"
|
||||
state: "touch"
|
||||
|
||||
- name: Add conf for containerd
|
||||
blockinfile:
|
||||
path: "/etc/modules-load.d/containerd.conf"
|
||||
block: |
|
||||
overlay
|
||||
br_netfilter
|
||||
|
||||
- name: modprobe
|
||||
shell: |
|
||||
sudo modprobe overlay
|
||||
sudo modprobe br_netfilter
|
||||
|
||||
|
||||
- name: Set system configurations for Kubernetes networking
|
||||
file:
|
||||
path: "/etc/sysctl.d/99-kubernetes-cri.conf"
|
||||
state: "touch"
|
||||
|
||||
- name: Add conf for containerd
|
||||
blockinfile:
|
||||
path: "/etc/sysctl.d/99-kubernetes-cri.conf"
|
||||
block: |
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
net.ipv4.ip_forward = 1
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
|
||||
- name: Apply new settings
|
||||
command: sudo sysctl --system
|
||||
|
||||
- name: install containerd
|
||||
shell: |
|
||||
sudo apt-get update && sudo apt-get install -y containerd
|
||||
sudo mkdir -p /etc/containerd
|
||||
sudo containerd config default | sudo tee /etc/containerd/config.toml
|
||||
sudo systemctl restart containerd
|
||||
|
||||
- name: disable swap
|
||||
shell: |
|
||||
sudo swapoff -a
|
||||
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
|
||||
|
||||
- name: install and configure dependencies
|
||||
shell: |
|
||||
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
|
||||
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
|
||||
|
||||
- name: Create kubernetes repo file
|
||||
file:
|
||||
path: "/etc/apt/sources.list.d/kubernetes.list"
|
||||
state: "touch"
|
||||
|
||||
- name: Add K8s Source
|
||||
blockinfile:
|
||||
path: "/etc/apt/sources.list.d/kubernetes.list"
|
||||
block: |
|
||||
deb https://apt.kubernetes.io/ kubernetes-xenial main
|
||||
|
||||
- name: Install Kubernetes
|
||||
shell: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y kubelet=1.20.1-00 kubeadm=1.20.1-00 kubectl=1.20.1-00
|
||||
sudo apt-mark hold kubelet kubeadm kubectl
|
||||
```
|
||||
|
||||
### Configure ControlPlanes
|
||||
```jsx title="03-configure-controllers.yml"
|
||||
- hosts: controller-nodes
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Initialize the K8S Cluster
|
||||
shell: kubeadm init --pod-network-cidr=10.244.0.0/16
|
||||
args:
|
||||
chdir: $HOME
|
||||
creates: cluster_initialized.txt
|
||||
|
||||
- name: Create .kube directory
|
||||
become: yes
|
||||
become_user: k8sadmin
|
||||
file:
|
||||
path: /home/k8sadmin/.kube
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Copy admin.conf to user's kube config
|
||||
copy:
|
||||
src: /etc/kubernetes/admin.conf
|
||||
dest: /home/k8sadmin/.kube/config
|
||||
remote_src: yes
|
||||
owner: k8sadmin
|
||||
|
||||
- name: Install the Pod Network
|
||||
become: yes
|
||||
become_user: k8sadmin
|
||||
shell: kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
|
||||
args:
|
||||
chdir: $HOME
|
||||
|
||||
- name: Get the token for joining the worker nodes
|
||||
become: yes
|
||||
become_user: k8sadmin
|
||||
shell: kubeadm token create --print-join-command
|
||||
register: kubernetes_join_command
|
||||
|
||||
- name: Output Join Command to the Screen
|
||||
debug:
|
||||
msg: "{{ kubernetes_join_command.stdout }}"
|
||||
|
||||
- name: Copy join command to local file.
|
||||
become: yes
|
||||
local_action: copy content="{{ kubernetes_join_command.stdout_lines[0] }}" dest="/tmp/kubernetes_join_command" mode=0777
|
||||
```
|
||||
|
||||
### Join Worker Node(s)
|
||||
```jsx title="04-join-worker-nodes.yml"
|
||||
- hosts: worker-nodes
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
tasks:
|
||||
- name: Copy join command from Ansible host to the worker nodes.
|
||||
become: yes
|
||||
copy:
|
||||
src: /tmp/kubernetes_join_command
|
||||
dest: /tmp/kubernetes_join_command
|
||||
mode: 0777
|
||||
|
||||
- name: Join the Worker nodes to the cluster.
|
||||
become: yes
|
||||
command: sh /tmp/kubernetes_join_command
|
||||
register: joined_or_not
|
||||
```
|
||||
|
||||
### Host Inventory File Template
|
||||
```jsx title="hosts"
|
||||
[controller-nodes]
|
||||
k8s-ctrlr-01 ansible_host=192.168.3.6 ansible_user=nicole
|
||||
|
||||
[worker-nodes]
|
||||
k8s-node-01 ansible_host=192.168.3.4 ansible_user=nicole
|
||||
k8s-node-02 ansible_host=192.168.3.5 ansible_user=nicole
|
||||
|
||||
[all:vars]
|
||||
ansible_become_user=root
|
||||
ansible_become_method=sudo
|
||||
```
|
224
Servers/Containerization/Kubernetes/Deployment/Rancher RKE2.md
Normal file
224
Servers/Containerization/Kubernetes/Deployment/Rancher RKE2.md
Normal file
@ -0,0 +1,224 @@
|
||||
# Deploy RKE2 Cluster
|
||||
Deploying a Rancher RKE2 Cluster is fairly straightforward. Just run the commands in-order and pay attention to which steps apply to all machines in the cluster, the controlplanes, and the workers.
|
||||
|
||||
!!! note "Prerequisites"
|
||||
This document assumes you are running **Ubuntu Server 20.04** or later.
|
||||
|
||||
## All Cluster Nodes
|
||||
Assume all commands are running as root moving forward. (e.g. `sudo su`)
|
||||
|
||||
### Run Updates
|
||||
You will need to run these commands on every server that participates in the cluster then perform a reboot of the server **PRIOR** to moving onto the next section.
|
||||
``` sh
|
||||
sudo apt update && sudo apt upgrade -y
|
||||
sudo apt install nfs-common iptables nano htop -y
|
||||
echo "Adding 15 Second Delay to Ensure Previous Commands finish running"
|
||||
sleep 15
|
||||
sudo apt autoremove -y
|
||||
sudo reboot
|
||||
```
|
||||
!!! tip
|
||||
If this is a virtual machine, now would be the best time to take a checkpoint / snapshot of the VM before moving forward, in case you need to perform rollbacks of the server(s) if you accidentally misconfigure something.
|
||||
## Initial ControlPlane Node
|
||||
When you are starting a brand new cluster, you need to create what is referred to as the "Initial ControlPlane". This node is responsible for bootstrapping the entire cluster together in the beginning, and will eventually assist in handling container workloads and orchestrating operations in the cluster.
|
||||
!!! warning
|
||||
You only want to follow the instructions for the **initial** controlplane once. Running it on another machine to create additional controlplanes will cause the cluster to try to set up two different clusters, wreaking havoc. Instead, follow the instructions in the next section to add redundant controlplanes.
|
||||
|
||||
### Download the Run Server Deployment Script
|
||||
```
|
||||
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE=server sh -
|
||||
```
|
||||
### Enable & Configure Services
|
||||
``` sh
|
||||
# Make yourself sudo
|
||||
sudo su
|
||||
|
||||
# Start and Enable the Kubernetes Service
|
||||
systemctl enable rke2-server.service
|
||||
systemctl start rke2-server.service
|
||||
|
||||
# Symlink the Kubectl Management Command
|
||||
ln -s $(find /var/lib/rancher/rke2/data/ -name kubectl) /usr/local/bin/kubectl
|
||||
|
||||
# Temporarily Export the Kubeconfig to manage the cluster from CLI
|
||||
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
|
||||
|
||||
# Add a Delay to Allow Cluster to Finish Initializing / Get Ready
|
||||
echo "Adding 60 Second Delay to Ensure Cluster is Ready - Run (kubectl get node) if the server is still not ready to know when to proceed."
|
||||
sleep 60
|
||||
|
||||
# Check that the Cluster Node is Running and Ready
|
||||
kubectl get node
|
||||
```
|
||||
|
||||
!!! example
|
||||
When the cluster is ready, you should see something like this when you run `kubectl get node`
|
||||
|
||||
This may be a good point to step away for 5 minutes, get a cup of coffee, and come back so it has a little extra time to be fully ready before moving on.
|
||||
```
|
||||
root@awx:/home/nicole# kubectl get node
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
awx Ready control-plane,etcd,master 3m21s v1.26.12+rke2r1
|
||||
```
|
||||
|
||||
### Install Helm, Rancher, CertManager, Jetstack, Rancher, and Longhorn
|
||||
``` sh
|
||||
# Install Helm
|
||||
curl -#L https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
|
||||
|
||||
# Install Necessary Helm Repositories
|
||||
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo add longhorn https://charts.longhorn.io
|
||||
helm repo update
|
||||
|
||||
# Install Cert-Manager via Helm
|
||||
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.crds.yaml
|
||||
|
||||
# Install Jetstack via Helm
|
||||
helm upgrade -i cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace
|
||||
|
||||
# Install Rancher via Helm
|
||||
helm upgrade -i rancher rancher-latest/rancher --create-namespace --namespace cattle-system --set hostname=rancher.bunny-lab.io --set bootstrapPassword=bootStrapAllTheThings --set replicas=1
|
||||
|
||||
# Install Longhorn via Helm
|
||||
helm upgrade -i longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
|
||||
```
|
||||
|
||||
!!! example "Be Patient - Come back in 20 Minutes"
|
||||
Rancher is going to take a while to fully set itself up, things will appear broken. Depending on how many resources you gave the cluster, it may take longer or shorter. A good ballpark is giving it at least 20 minutes to deploy itself before attempting to log into the webUI at https://awx.bunny-lab.io.
|
||||
|
||||
If you want to keep an eye on the deployment progress, you need to run the following command: `KUBECONFIG=/etc/rancher/rke2/rke2.yaml kubectl get pods --all-namespaces`
|
||||
The output should look like how it does below:
|
||||
```
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
cattle-fleet-system fleet-controller-59cdb866d7-94r2q 1/1 Running 0 4m31s
|
||||
cattle-fleet-system gitjob-f497866f8-t726l 1/1 Running 0 4m31s
|
||||
cattle-provisioning-capi-system capi-controller-manager-6f87d6bd74-xx22v 1/1 Running 0 55s
|
||||
cattle-system helm-operation-28dcp 0/2 Completed 0 109s
|
||||
cattle-system helm-operation-f9qww 0/2 Completed 0 4m39s
|
||||
cattle-system helm-operation-ft8gq 0/2 Completed 0 26s
|
||||
cattle-system helm-operation-m27tq 0/2 Completed 0 61s
|
||||
cattle-system helm-operation-qrgj8 0/2 Completed 0 5m11s
|
||||
cattle-system rancher-64db9f48c-qm6v4 1/1 Running 3 (8m8s ago) 13m
|
||||
cattle-system rancher-webhook-65f5455d9c-tzbv4 1/1 Running 0 98s
|
||||
cert-manager cert-manager-55cf8685cb-86l4n 1/1 Running 0 14m
|
||||
cert-manager cert-manager-cainjector-fbd548cb8-9fgv4 1/1 Running 0 14m
|
||||
cert-manager cert-manager-webhook-655b4d58fb-s2cjh 1/1 Running 0 14m
|
||||
kube-system cloud-controller-manager-awx 1/1 Running 5 (3m37s ago) 19m
|
||||
kube-system etcd-awx 1/1 Running 0 19m
|
||||
kube-system helm-install-rke2-canal-q9vm6 0/1 Completed 0 19m
|
||||
kube-system helm-install-rke2-coredns-q8w57 0/1 Completed 0 19m
|
||||
kube-system helm-install-rke2-ingress-nginx-54vgk 0/1 Completed 0 19m
|
||||
kube-system helm-install-rke2-metrics-server-87zhw 0/1 Completed 0 19m
|
||||
kube-system helm-install-rke2-snapshot-controller-crd-q6bh6 0/1 Completed 0 19m
|
||||
kube-system helm-install-rke2-snapshot-controller-tjk5f 0/1 Completed 0 19m
|
||||
kube-system helm-install-rke2-snapshot-validation-webhook-r9pcn 0/1 Completed 0 19m
|
||||
kube-system kube-apiserver-awx 1/1 Running 0 19m
|
||||
kube-system kube-controller-manager-awx 1/1 Running 5 (3m37s ago) 19m
|
||||
kube-system kube-proxy-awx 1/1 Running 0 19m
|
||||
kube-system kube-scheduler-awx 1/1 Running 5 (3m35s ago) 19m
|
||||
kube-system rke2-canal-gm45f 2/2 Running 0 19m
|
||||
kube-system rke2-coredns-rke2-coredns-565dfc7d75-qp64p 1/1 Running 0 19m
|
||||
kube-system rke2-coredns-rke2-coredns-autoscaler-6c48c95bf9-fclz5 1/1 Running 0 19m
|
||||
kube-system rke2-ingress-nginx-controller-lhjwq 1/1 Running 0 17m
|
||||
kube-system rke2-metrics-server-c9c78bd66-fnvx8 1/1 Running 0 18m
|
||||
kube-system rke2-snapshot-controller-6f7bbb497d-dw6v4 1/1 Running 4 (6m17s ago) 18m
|
||||
kube-system rke2-snapshot-validation-webhook-65b5675d5c-tdfcf 1/1 Running 0 18m
|
||||
longhorn-system csi-attacher-785fd6545b-6jfss 1/1 Running 1 (6m17s ago) 9m39s
|
||||
longhorn-system csi-attacher-785fd6545b-k7jdh 1/1 Running 0 9m39s
|
||||
longhorn-system csi-attacher-785fd6545b-rr6k4 1/1 Running 0 9m39s
|
||||
longhorn-system csi-provisioner-8658f9bd9c-58dc8 1/1 Running 0 9m38s
|
||||
longhorn-system csi-provisioner-8658f9bd9c-g8cv2 1/1 Running 0 9m38s
|
||||
longhorn-system csi-provisioner-8658f9bd9c-mbwh2 1/1 Running 0 9m38s
|
||||
longhorn-system csi-resizer-68c4c75bf5-d5vdd 1/1 Running 0 9m36s
|
||||
longhorn-system csi-resizer-68c4c75bf5-r96lf 1/1 Running 0 9m36s
|
||||
longhorn-system csi-resizer-68c4c75bf5-tnggs 1/1 Running 0 9m36s
|
||||
longhorn-system csi-snapshotter-7c466dd68f-5szxn 1/1 Running 0 9m30s
|
||||
longhorn-system csi-snapshotter-7c466dd68f-w96lw 1/1 Running 0 9m30s
|
||||
longhorn-system csi-snapshotter-7c466dd68f-xt42z 1/1 Running 0 9m30s
|
||||
longhorn-system engine-image-ei-68f17757-jn986 1/1 Running 0 10m
|
||||
longhorn-system instance-manager-fab02be089480f35c7b2288110eb9441 1/1 Running 0 10m
|
||||
longhorn-system longhorn-csi-plugin-5j77p 3/3 Running 0 9m30s
|
||||
longhorn-system longhorn-driver-deployer-75fff9c757-dps2j 1/1 Running 0 13m
|
||||
longhorn-system longhorn-manager-2vfr4 1/1 Running 4 (10m ago) 13m
|
||||
longhorn-system longhorn-ui-7dc586665c-hzt6k 1/1 Running 0 13m
|
||||
longhorn-system longhorn-ui-7dc586665c-lssfj 1/1 Running 0 13m
|
||||
```
|
||||
|
||||
!!! note
|
||||
Be sure to write down the "*bootstrapPassword*" variable for when you log into Rancher later. In this example, the password is `bootStrapAllTheThings`.
|
||||
Also be sure to adjust the "*hostname*" variable to reflect the FQDN of the cluster. You can leave it default like this and change it upon first login if you want. This is important for the last step where you adjust DNS. The example given is `rancher.bunny-lab.io`.
|
||||
|
||||
### Log into webUI
|
||||
At this point, you can log into the webUI at https://awx.bunny-lab.io using the default `bootStrapAllTheThings` password, or whatever password you configured, you can change the password after logging in if you need to by navigating to **Home > Users & Authentication > "..." > Edit Config > "New Password" > Save**. From here, you can deploy more nodes, or deploy single-node workloads such as an [Ansible AWX Operator](https://docs.bunny-lab.io/Containers/Kubernetes/Rancher%20RKE2/AWX%20Operator/Ansible%20AWX%20Operator/).
|
||||
|
||||
### Rebooting the ControlNode
|
||||
If you ever find yourself needing to reboot the ControlNode, and need to run kubectl CLI commands, you will need to run the command below to import the cluster credentials upon every reboot. Reboots should take much less time to get the cluster ready again as compared to the original deployments.
|
||||
```
|
||||
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
|
||||
```
|
||||
|
||||
## Create Additional ControlPlane Node(s)
|
||||
This is the part where you can add additional controlplane nodes to add additional redundancy to the RKE2 Cluster. This is important for high-availability environments.
|
||||
|
||||
### Download the Server Deployment Script
|
||||
``` sh
|
||||
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE=server sh -
|
||||
```
|
||||
### Configure and Connect to Initial ControlPlane Node
|
||||
``` sh
|
||||
# Symlink the Kubectl Management Command
|
||||
ln -s $(find /var/lib/rancher/rke2/data/ -name kubectl) /usr/local/bin/kubectl
|
||||
|
||||
# Manually Create a Rancher-Kubernetes-Specific Config File
|
||||
mkdir -p /etc/rancher/rke2/
|
||||
|
||||
# Inject IP of Initial ControlPlane Node into Config File
|
||||
echo "server: https://192.168.3.21:9345" > /etc/rancher/rke2/config.yaml
|
||||
|
||||
# Inject the Initial ControlPlane Node trust token into the config file
|
||||
# You can get the token by running the following command on the first node in the cluster: `cat /var/lib/rancher/rke2/server/node-token`
|
||||
echo "token: K10aa0632863da4ae4e2ccede0ca6a179f510a0eee0d6d6eb53dca96050048f055e::server:3b130ceebfbb7ed851cd990fe55e6f3a" >> /etc/rancher/rke2/config.yaml
|
||||
|
||||
# Start and Enable the Kubernetes Service
|
||||
systemctl enable rke2-server.service
|
||||
systemctl start rke2-server.service
|
||||
```
|
||||
!!! note
|
||||
Be sure to change the IP address of the initial controlplane node provided in the example above to match your environment.
|
||||
|
||||
## Add Worker Node(s)
|
||||
Worker nodes are the bread-and-butter of a Kubernetes cluster. They handle running container workloads, and acting as storage for the cluster (this can be configured to varying degrees based on your needs).
|
||||
|
||||
### Download the Server Worker Script
|
||||
``` sh
|
||||
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE=agent sh -
|
||||
```
|
||||
### Configure and Connect to RKE2 Cluster
|
||||
``` sh
|
||||
# Manually Create a Rancher-Kubernetes-Specific Config File
|
||||
mkdir -p /etc/rancher/rke2/
|
||||
|
||||
# Inject IP of Initial ControlPlane Node into Config File
|
||||
echo "server: https://192.168.3.21:9345" > /etc/rancher/rke2/config.yaml
|
||||
|
||||
# Inject the Initial ControlPlane Node trust token into the config file
|
||||
# You can get the token by running the following command on the first node in the cluster: `cat /var/lib/rancher/rke2/server/node-token`
|
||||
echo "token: K10aa0632863da4ae4e2ccede0ca6a179f510a0eee0d6d6eb53dca96050048f055e::server:3b130ceebfbb7ed851cd990fe55e6f3a" >> /etc/rancher/rke2/config.yaml
|
||||
|
||||
# Start and Enable the Kubernetes Service
|
||||
systemctl enable rke2-agent.service
|
||||
systemctl start rke2-agent.service
|
||||
```
|
||||
|
||||
## DNS Server Record
|
||||
You will need to set up some kind of DNS server record to point the FQDN of the cluster (e.g. `rancher.bunny-lab.io`) to the IP address of the Initial ControlPlane. This can be achieved in a number of ways, such as editing the Windows `HOSTS` file, Linux's `/etc/resolv.conf` file, a Windows DNS Server "A" Record, or an NGINX/Traefik Reverse Proxy.
|
||||
|
||||
Once you have added the DNS record, you should be able to access the login page for the Rancher RKE2 Kubernetes cluster. Use the `bootstrapPassword` mentioned previously to log in, then change it immediately from the user management area of Rancher.
|
||||
|
||||
| TYPE OF ACCESS | FQDN | IP ADDRESS |
|
||||
| -------------- | ------------------------------------- | ------------ |
|
||||
| HOST FILE | rancher.bunny-lab.io | 192.168.3.10 |
|
||||
| REVERSE PROXY | http://rancher.bunny-lab.io:80 | 192.168.5.29 |
|
||||
| DNS RECORD | A Record: rancher.bunny-lab.io | 192.168.3.10 |
|
@ -0,0 +1,2 @@
|
||||
awx-operator
|
||||
https://ansible.github.io/awx-operator/
|
@ -0,0 +1,28 @@
|
||||
AWX:
|
||||
enabled: true
|
||||
name: awx
|
||||
postgres:
|
||||
dbName: Unset
|
||||
enabled: false
|
||||
host: Unset
|
||||
password: Unset
|
||||
port: 5678  # NOTE(review): PostgreSQL default is 5432 — confirm this non-standard port is intended
|
||||
sslmode: prefer
|
||||
type: unmanaged
|
||||
username: admin
|
||||
spec:
|
||||
admin_user: admin
|
||||
admin_email: cyberstrawberry101@gmail.com
|
||||
auto_upgrade: true
|
||||
hostname: awx.cyberstrawberry.net
|
||||
ingress_path: /
|
||||
ingress_path_type: Prefix
|
||||
ingress_type: ingress
|
||||
ipv6_disabled: true
|
||||
projects_persistence: true
|
||||
projects_storage_class: longhorn
|
||||
projects_storage_size: 32Gi
|
||||
task_privileged: true
|
||||
global:
|
||||
cattle:
|
||||
systemProjectId: p-78f96
|
@ -0,0 +1,25 @@
|
||||
krb5.conf
|
||||
|
||||
--------------------------------------------
|
||||
|
||||
[libdefaults]
|
||||
default_realm = MOONGATE.LOCAL
|
||||
dns_lookup_realm = true
|
||||
dns_lookup_kdc = true
|
||||
ticket_lifetime = 24h
|
||||
renew_lifetime = 7d
|
||||
forwardable = true
|
||||
default_ccache_name = KEYRING:persistent:%{uid}
|
||||
|
||||
[realms]
|
||||
MOONGATE.LOCAL = {
|
||||
kdc = NEXUS-DC-01.MOONGATE.LOCAL
|
||||
admin_server = NEXUS-DC-01.MOONGATE.LOCAL
|
||||
}
|
||||
|
||||
[domain_realm]
|
||||
.moongate.local = MOONGATE.LOCAL
|
||||
moongate.local = MOONGATE.LOCAL
|
||||
|
||||
--------------------------------------------
|
||||
|
@ -0,0 +1 @@
|
||||
v1.3.0
|
158
Servers/Containerization/Kubernetes/Helm Charts/Gitea/config.yml
Normal file
158
Servers/Containerization/Kubernetes/Helm Charts/Gitea/config.yml
Normal file
@ -0,0 +1,158 @@
|
||||
affinity: {}
|
||||
checkDeprecation: true
|
||||
clusterDomain: cluster.local
|
||||
containerSecurityContext: {}
|
||||
dnsConfig: {}
|
||||
extraContainerVolumeMounts: []
|
||||
extraInitVolumeMounts: []
|
||||
extraVolumeMounts: []
|
||||
extraVolumes: []
|
||||
gitea:
|
||||
additionalConfigFromEnvs:
|
||||
- name: ENV_TO_INI__SERVER__ROOT_URL
|
||||
value: https://git.cyberstrawberry.net
|
||||
additionalConfigSources: []
|
||||
admin:
|
||||
email: cyberstrawberry101@gmail.com
|
||||
existingSecret: null
|
||||
password: SUPER-SECRET-ADMIN-PASSWORD-THAT-NOONE-WILL-GUESS
|
||||
username: nicole.rappe
|
||||
config:
|
||||
APP_NAME: "CyberStrawberry"
|
||||
ldap: []
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
failureThreshold: 10
|
||||
initialDelaySeconds: 200
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
tcpSocket:
|
||||
port: http
|
||||
timeoutSeconds: 1
|
||||
metrics:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
oauth: []
|
||||
podAnnotations: {}
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
tcpSocket:
|
||||
port: http
|
||||
timeoutSeconds: 1
|
||||
ssh:
|
||||
logLevel: INFO
|
||||
startupProbe:
|
||||
enabled: false
|
||||
failureThreshold: 10
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
tcpSocket:
|
||||
port: http
|
||||
timeoutSeconds: 1
|
||||
global:
|
||||
hostAliases: []
|
||||
imagePullSecrets: []
|
||||
imageRegistry: ''
|
||||
storageClass: longhorn
|
||||
image:
|
||||
pullPolicy: Always
|
||||
registry: ''
|
||||
repository: gitea/gitea
|
||||
rootless: false
|
||||
tag: ''
|
||||
imagePullSecrets: []
|
||||
ingress:
|
||||
annotations: {}
|
||||
className: null
|
||||
enabled: false
|
||||
hosts:
|
||||
- host: git.cyberstrawberry.net
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
tls: []
|
||||
initPreScript: ''
|
||||
memcached:
|
||||
enabled: true
|
||||
service:
|
||||
ports:
|
||||
memcached: 11211
|
||||
nodeSelector: {}
|
||||
persistence:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
annotations: {}
|
||||
enabled: true
|
||||
existingClaim: null
|
||||
labels: {}
|
||||
size: 32Gi
|
||||
storageClass: null
|
||||
subPath: null
|
||||
podSecurityContext:
|
||||
fsGroup: 1000
|
||||
postgresql:
|
||||
enabled: true
|
||||
global:
|
||||
postgresql:
|
||||
auth:
|
||||
database: gitea
|
||||
password: gitea
|
||||
username: gitea
|
||||
service:
|
||||
ports:
|
||||
postgresql: 5432
|
||||
primary:
|
||||
persistence:
|
||||
size: 32Gi
|
||||
replicaCount: 1
|
||||
resources: {}
|
||||
schedulerName: ''
|
||||
securityContext: {}
|
||||
service:
|
||||
http:
|
||||
annotations: {}
|
||||
clusterIP: None
|
||||
externalIPs: null
|
||||
externalTrafficPolicy: null
|
||||
ipFamilies: null
|
||||
ipFamilyPolicy: null
|
||||
loadBalancerIP: null
|
||||
loadBalancerSourceRanges: []
|
||||
nodePort: null
|
||||
port: 3000
|
||||
type: ClusterIP
|
||||
ssh:
|
||||
annotations: {}
|
||||
clusterIP: None
|
||||
externalIPs: null
|
||||
externalTrafficPolicy: null
|
||||
hostPort: null
|
||||
ipFamilies: null
|
||||
ipFamilyPolicy: null
|
||||
loadBalancerIP: null
|
||||
loadBalancerSourceRanges: []
|
||||
nodePort: null
|
||||
port: 22
|
||||
type: ClusterIP
|
||||
signing:
|
||||
enabled: false
|
||||
existingSecret: ''
|
||||
gpgHome: /data/git/.gnupg
|
||||
privateKey: ''
|
||||
statefulset:
|
||||
annotations: {}
|
||||
env: []
|
||||
labels: {}
|
||||
terminationGracePeriodSeconds: 60
|
||||
test:
|
||||
enabled: true
|
||||
image:
|
||||
name: busybox
|
||||
tag: latest
|
||||
tolerations: []
|
@ -0,0 +1,194 @@
|
||||
affinity: {}
|
||||
cronjob:
|
||||
enabled: false
|
||||
lifecycle: {}
|
||||
resources: {}
|
||||
securityContext: {}
|
||||
deploymentAnnotations: {}
|
||||
deploymentLabels: {}
|
||||
externalDatabase:
|
||||
database: nextcloud
|
||||
enabled: true
|
||||
existingSecret:
|
||||
enabled: false
|
||||
host: cluster-nextcloud-postgresql
|
||||
password: SecurePasswordGoesHere
|
||||
type: postgresql
|
||||
user: nextcloud
|
||||
fullnameOverride: ''
|
||||
hpa:
|
||||
cputhreshold: 60
|
||||
enabled: false
|
||||
maxPods: 10
|
||||
minPods: 1
|
||||
image:
|
||||
pullPolicy: IfNotPresent
|
||||
repository: nextcloud
|
||||
ingress:
|
||||
annotations: {}
|
||||
enabled: false
|
||||
labels: {}
|
||||
path: /
|
||||
pathType: Prefix
|
||||
internalDatabase:
|
||||
enabled: false
|
||||
name: nextcloud
|
||||
lifecycle: {}
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
mariadb:
|
||||
architecture: standalone
|
||||
auth:
|
||||
database: nextcloud
|
||||
password: changeme
|
||||
username: nextcloud
|
||||
enabled: false
|
||||
primary:
|
||||
persistence:
|
||||
accessMode: ReadWriteOnce
|
||||
enabled: false
|
||||
size: 8Gi
|
||||
metrics:
|
||||
enabled: false
|
||||
https: false
|
||||
image:
|
||||
pullPolicy: IfNotPresent
|
||||
repository: xperimental/nextcloud-exporter
|
||||
tag: 0.6.0
|
||||
replicaCount: 1
|
||||
service:
|
||||
annotations:
|
||||
prometheus.io/port: '9205'
|
||||
prometheus.io/scrape: 'true'
|
||||
labels: {}
|
||||
type: ClusterIP
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
interval: 30s
|
||||
jobLabel: ''
|
||||
labels: {}
|
||||
namespace: ''
|
||||
scrapeTimeout: ''
|
||||
timeout: 5s
|
||||
tlsSkipVerify: false
|
||||
token: ''
|
||||
nameOverride: ''
|
||||
nextcloud:
|
||||
configs: {}
|
||||
datadir: /var/www/html/data
|
||||
defaultConfigs:
|
||||
.htaccess: true
|
||||
apache-pretty-urls.config.php: true
|
||||
apcu.config.php: true
|
||||
apps.config.php: true
|
||||
autoconfig.php: true
|
||||
redis.config.php: true
|
||||
smtp.config.php: true
|
||||
existingSecret:
|
||||
enabled: false
|
||||
extraEnv: null
|
||||
extraInitContainers: []
|
||||
extraSidecarContainers: []
|
||||
extraVolumeMounts: null
|
||||
extraVolumes: null
|
||||
host: storage.cyberstrawberry.net
|
||||
mail:
|
||||
domain: domain.com
|
||||
enabled: false
|
||||
fromAddress: user
|
||||
smtp:
|
||||
authtype: LOGIN
|
||||
host: domain.com
|
||||
name: user
|
||||
password: pass
|
||||
port: 465
|
||||
secure: ssl
|
||||
password: SUPER-SECRET-PASSWORD-FOR-ADMIN
|
||||
persistence:
|
||||
subPath: null
|
||||
phpConfigs: {}
|
||||
podSecurityContext: {}
|
||||
securityContext: {}
|
||||
strategy:
|
||||
type: Recreate
|
||||
update: 0
|
||||
username: Nicole
|
||||
nginx:
|
||||
config:
|
||||
default: true
|
||||
enabled: false
|
||||
image:
|
||||
pullPolicy: IfNotPresent
|
||||
repository: nginx
|
||||
tag: alpine
|
||||
resources: {}
|
||||
securityContext: {}
|
||||
nodeSelector: {}
|
||||
persistence:
|
||||
accessMode: ReadWriteOnce
|
||||
annotations: {}
|
||||
enabled: true
|
||||
nextcloudData:
|
||||
accessMode: ReadWriteOnce
|
||||
annotations: {}
|
||||
enabled: true
|
||||
size: 800Gi
|
||||
subPath: null
|
||||
size: 16Gi
|
||||
phpClientHttpsFix:
|
||||
enabled: true
|
||||
protocol: https
|
||||
podAnnotations: {}
|
||||
postgresql:
|
||||
enabled: true
|
||||
global:
|
||||
postgresql:
|
||||
auth:
|
||||
database: nextcloud
|
||||
password: SUPER-SECRET-PASSWORD-FOR-DB
|
||||
username: nextcloud
|
||||
primary:
|
||||
persistence:
|
||||
enabled: true
|
||||
rbac:
|
||||
enabled: false
|
||||
serviceaccount:
|
||||
annotations: {}
|
||||
create: true
|
||||
name: nextcloud-serviceaccount
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
redis:
|
||||
auth:
|
||||
enabled: true
|
||||
password: changeme
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
resources: {}
|
||||
securityContext: {}
|
||||
service:
|
||||
loadBalancerIP: null  # "nil" parses as the string "nil" in YAML (truthy in Helm templates); use null
|
||||
nodePort: null  # "nil" parses as the string "nil" in YAML; use null
|
||||
port: 8080
|
||||
type: ClusterIP
|
||||
startupProbe:
|
||||
enabled: false
|
||||
failureThreshold: 30
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
tolerations: []
|
||||
global:
|
||||
cattle:
|
||||
systemProjectId: p-78f96
|
Reference in New Issue
Block a user