#!/bin/bash
# vps2-backup v1.1: Portainer, Traefik, Gitea, Authentik → NAS via rsync daemon (WireGuard)
set -euo pipefail
umask 077
# === NAS over WireGuard (rsync daemon) ========================================
RSYNC_USER="backup-master"
NAS_WG_IP="192.168.178.74"             # WireGuard IP of your NAS
NAS_RSYNC_PORT="873"                   # usually 873
NAS_MODULE="rocinante-vps2"            # DSM shared-folder name, exposed as rsync module
RSYNC_PASSFILE="/root/.rsync_pass"     # contains ONLY the password (single line)
# Destination paths are appended to the module as subfolders (created on demand)
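# Setup sketch (assumed one-time step, not run by this script); the password
# must match the rsyncd secrets entry for backup-master on the NAS:
#   printf '%s\n' 'YOUR_RSYNC_PASSWORD' > /root/.rsync_pass
#   chmod 600 /root/.rsync_pass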
# === Logging / Lock ===========================================================
DATE="$(date +'%Y-%m-%d')"
LOGFILE="/var/log/vps2-backup.log"
LOCKFILE="/tmp/vps2-backup.lock"
log(){ echo "[$(date +'%F %T')] $*" | tee -a "$LOGFILE"; }
START_TS="$(date +%s)"
# === Docker (with sudo fallback) ==============================================
SUDO=""
if [[ $EUID -ne 0 ]]; then
  if sudo -n true 2>/dev/null; then SUDO="sudo"
  else log "⚠ No root and no passwordless sudo → Docker/config backups may be limited."; fi
fi
DOCKER="docker"
if ! $DOCKER ps >/dev/null 2>&1; then
  if [[ -n "$SUDO" ]] && $SUDO docker ps >/dev/null 2>&1; then DOCKER="$SUDO docker"
  else log "⚠ Docker not usable → container backups will be skipped."; fi
fi
# === Source paths (bind mounts) ===============================================
PORTAINER_DATA="/opt/portainer/data"
TRAEFIK_DIR="/opt/traefik"               # contains dynamic/ and traefik.yml
TRAEFIK_DYNAMIC="$TRAEFIK_DIR/dynamic"
TRAEFIK_YML="$TRAEFIK_DIR/traefik.yml"
TRAEFIK_ACME="$TRAEFIK_DIR/acme.json"    # mounted as /letsencrypt/acme.json
GITEA_DATA="/opt/gitea/gitea"            # bind mount for /data in the gitea container
# Authentik (if you use bind mounts; otherwise only the DB dump applies)
AUTHENTIK_CFG="/opt/authentik/config"
AUTHENTIK_MEDIA="/opt/authentik/media"
# === DB container names =======================================================
PG_GITEA="gitea-db"                      # postgres:14
PG_AUTHENTIK="authentik-postgresql-1"    # postgres:16-alpine
# === Uptime Kuma ===============================================================
KUMA_CONTAINER="uptime-kuma"
KUMA_DATA="/opt/uptime-kuma"             # host-side data directory (bind mount)
# === rsync helpers (daemon mode) ==============================================
# Note: no SSH; auth via --password-file; encryption is provided by WireGuard.
# -rltH = recurse, symlinks, times, hard links; --delay-updates for near-atomic syncs
RSYNC_COMMON=(-rltH --delete --info=stats1 --delay-updates --no-owner --no-group)
RSYNC_AUTH=(--password-file="$RSYNC_PASSFILE")
PORT_PART=""
[[ -n "${NAS_RSYNC_PORT:-}" && "$NAS_RSYNC_PORT" != "873" ]] && PORT_PART=":${NAS_RSYNC_PORT}"
RSYNC_BASE="rsync://${RSYNC_USER}@${NAS_WG_IP}${PORT_PART}/${NAS_MODULE}"
check_prereqs () {
  [[ -r "$RSYNC_PASSFILE" ]] || { log "❌ Password file $RSYNC_PASSFILE missing/unreadable"; exit 1; }
  if ! ping -c1 -W2 "$NAS_WG_IP" >/dev/null 2>&1; then
    log "❌ WireGuard peer $NAS_WG_IP not reachable"; exit 1
  fi
}
rsync_dir () {
  local SRC="$1" DEST_SUB="$2"
  if [[ -d "$SRC" ]]; then
    log "➡ RSYNC DIR $SRC → ${RSYNC_BASE}/${DEST_SUB}/"
    set +e
    rsync "${RSYNC_COMMON[@]}" "${RSYNC_AUTH[@]}" \
      "$SRC"/ "${RSYNC_BASE}/${DEST_SUB}/" >>"$LOGFILE" 2>&1
    local rc=$?; set -e
    # rc 23 (partial transfer) and 24 (vanished source files) are tolerated
    [[ $rc -eq 0 || $rc -eq 23 || $rc -eq 24 ]] || { log "❌ rsync $SRC rc=$rc"; exit $rc; }
  else
    log "⚠ skipping (not a directory): $SRC"
  fi
}
rsync_file () {
  local SRC="$1" DEST_SUB="$2"
  if [[ -f "$SRC" ]]; then
    log "➡ RSYNC FILE $SRC → ${RSYNC_BASE}/${DEST_SUB}/"
    set +e
    rsync "${RSYNC_COMMON[@]}" "${RSYNC_AUTH[@]}" \
      "$SRC" "${RSYNC_BASE}/${DEST_SUB}/" >>"$LOGFILE" 2>&1
    local rc=$?; set -e
    [[ $rc -eq 0 || $rc -eq 23 || $rc -eq 24 ]] || { log "❌ rsync $SRC rc=$rc"; exit $rc; }
  else
    log "⚠ skipping (not a file): $SRC"
  fi
}
rsync_to_path () {
  local SRC="$1" DEST_PATH="$2"
  log "➡ RSYNC TO $SRC → ${RSYNC_BASE}/${DEST_PATH}"
  set +e
  rsync "${RSYNC_COMMON[@]}" "${RSYNC_AUTH[@]}" \
    "$SRC" "${RSYNC_BASE}/${DEST_PATH}" >>"$LOGFILE" 2>&1
  local rc=$?; set -e
  [[ $rc -eq 0 || $rc -eq 23 || $rc -eq 24 ]] || { log "❌ rsync $SRC rc=$rc"; exit $rc; }
}
# Empty directory used to create remote directories via the rsync daemon
EMPTY_DIR="$(mktemp -d)"
TMP_DIR="$(mktemp -d)"
trap 'rm -rf "$TMP_DIR" "$EMPTY_DIR"' EXIT
# creates ${NAS_MODULE}/<SUBDIR>/ (if it does not exist yet)
ensure_remote_subdir () {
  local SUBDIR="$1"
  # rsync creates subfolders inside the module when we sync an empty directory
  rsync -rl --exclude='*' "${EMPTY_DIR}/" "${RSYNC_BASE}/${SUBDIR}/" \
    "${RSYNC_AUTH[@]}" >>"$LOGFILE" 2>&1 || true
}
# uploads SRC into the module under the target name DEST_REL (incl. subpath)
rsync_put_as () {
  local SRC="$1" DEST_REL="$2"   # e.g. DEST_REL="portainer/portainer-backup.tar.gz"
  log "➡ RSYNC PUT $SRC → ${RSYNC_BASE}/${DEST_REL}"
  set +e
  rsync "${RSYNC_COMMON[@]}" "${RSYNC_AUTH[@]}" \
    "$SRC" "${RSYNC_BASE}/${DEST_REL}" >>"$LOGFILE" 2>&1
  local rc=$?; set -e
  [[ $rc -eq 0 || $rc -eq 23 || $rc -eq 24 ]] || { log "❌ rsync put rc=$rc"; exit $rc; }
}
# === Lock =====================================================================
if [[ -e "$LOCKFILE" ]]; then log "⚠ Already running → aborting."; exit 1; fi
# This EXIT trap replaces the earlier one, so it must clean up the temp dirs too.
trap 'rm -rf "$TMP_DIR" "$EMPTY_DIR"; rm -f "$LOCKFILE"' EXIT
touch "$LOCKFILE"
log "🔁 Backup started: $DATE (user: $(id -un), UID:$(id -u))"
check_prereqs
# ── 1) Portainer backup via API (token, JSON POST) → fixed file name ─────────
log "▶ Stage: Portainer via API"
PORTAINER_TOKEN="$(cat /root/.portainer-token 2>/dev/null || true)"
PORTAINER_BACKUP_PASS="$(cat /root/.portainer-backup-pass 2>/dev/null || true)"
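# Assumed one-time setup (not run here): create an API access token in the
# Portainer UI (under your user account settings) and store it for root:
#   printf '%s' '<api-token>' > /root/.portainer-token && chmod 600 /root/.portainer-token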
OUT="${TMP_DIR}/portainer-backup.tar.gz"
if [ -z "$PORTAINER_TOKEN" ]; then
log "WARN: /root/.portainer-token fehlt überspringe Portainer-Backup."
else
# Kandidaten in Priorität: lokal publishtes 9000/9443 → Container-IP → (optional) Traefik
PORTAINER_URL=""
CURL_INSECURE=""
set +e
# 1) Loopback published (empfohlen)
curl -fsS -m 3 -H "X-API-Key: $PORTAINER_TOKEN" http://127.0.0.1:9000/api/status >/dev/null 2>&1 \
&& { PORTAINER_URL="http://127.0.0.1:9000"; }
if [ -z "$PORTAINER_URL" ]; then
curl -fsS -m 3 -k -H "X-API-Key: $PORTAINER_TOKEN" https://127.0.0.1:9443/api/status >/dev/null 2>&1 \
&& { PORTAINER_URL="https://127.0.0.1:9443"; CURL_INSECURE="-k"; }
fi
# 2) Container-IP direkt (wenn nichts gepublished ist)
if [ -z "$PORTAINER_URL" ]; then
PORTAINER_IP="$($DOCKER inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' portainer 2>/dev/null)"
if [ -n "$PORTAINER_IP" ]; then
curl -fsS -m 3 -H "X-API-Key: $PORTAINER_TOKEN" http://"${PORTAINER_IP}":9000/api/status >/dev/null 2>&1 \
&& { PORTAINER_URL="http://${PORTAINER_IP}:9000"; }
if [ -z "$PORTAINER_URL" ]; then
curl -fsS -m 3 -k -H "X-API-Key: $PORTAINER_TOKEN" https://"${PORTAINER_IP}":9443/api/status >/dev/null 2>&1 \
&& { PORTAINER_URL="https://${PORTAINER_IP}:9443"; CURL_INSECURE="-k"; }
fi
fi
fi
# 3) (Optional) Traefik-Domain nur wenn lokal nicht geht UND resolvbar
if [ -z "$PORTAINER_URL" ] && getent hosts portainer.davidt.cloud >/dev/null 2>&1; then
curl -fsS -m 3 -k -H "X-API-Key: $PORTAINER_TOKEN" https://portainer.davidt.cloud/api/status >/dev/null 2>&1 \
&& { PORTAINER_URL="https://portainer.davidt.cloud"; CURL_INSECURE="-k"; }
fi
set -e
if [ -z "$PORTAINER_URL" ]; then
log "WARN: Keine erreichbare Portainer-URL gefunden überspringe Portainer-Backup."
else
log "[*] Portainer erreichbar unter: $PORTAINER_URL"
JSON_BODY='{}'
[ -n "$PORTAINER_BACKUP_PASS" ] && JSON_BODY=$(printf '{"password":"%s"}' "$PORTAINER_BACKUP_PASS")
if curl -fsS $CURL_INSECURE -X POST \
-H "X-API-Key: ${PORTAINER_TOKEN}" \
-H "Content-Type: application/json" \
-d "${JSON_BODY}" \
-o "${OUT}" \
"${PORTAINER_URL}/api/backup"
then
if file "${OUT}" | grep -qi 'gzip compressed data'; then
ensure_remote_subdir "portainer"
rsync_put_as "${OUT}" "portainer/portainer-backup.tar.gz"
log "[OK] Portainer-Backup gespeichert."
else
log "ERROR: Antwort ist kein gzip (evtl. 401/HTML?) → Token/Berechtigungen prüfen:"
head -c 300 "${OUT}" | sed -e 's/[^[:print:]\t]/./g'
exit 1
fi
else
log "ERROR: Portainer-Backup-Request fehlgeschlagen."
exit 1
fi
fi
fi
# --- Stage: Traefik -----------------------------------------------------------
log "▶ Stage: Traefik"
rsync_dir "$TRAEFIK_DYNAMIC" "traefik/dynamic"
rsync_file "$TRAEFIK_YML" "traefik"
rsync_file "$TRAEFIK_ACME" "traefik"
# --- Stage: Gitea data --------------------------------------------------------
log "▶ Stage: Gitea (/data)"
rsync_dir "$GITEA_DATA" "gitea/data"
# --- Helpers ------------------------------------------------------------------
get_env_from_container(){
  local C="$1" V="$2"
  # list the container's env vars, then print the value for key V
  $DOCKER inspect "$C" --format '{{range .Config.Env}}{{println .}}{{end}}' 2>/dev/null \
    | awk -F= -v k="$V" '$1==k{print substr($0,length(k)+2)}'
}
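# Usage sketch (illustrative): G_DB="$(get_env_from_container gitea GITEA__database__NAME)"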
# --- Stage: Gitea DB (Postgres) dump on the HOST, then rsync -------------------
log "▶ Stage: Gitea (Postgres dump)"
GITEA_APP="gitea"
if $DOCKER ps --format '{{.Names}}' | grep -qx "$GITEA_APP"; then
  G_USER="$(get_env_from_container "$GITEA_APP" 'GITEA__database__USER')"
  G_PASS="$(get_env_from_container "$GITEA_APP" 'GITEA__database__PASSWD')"
  G_DB="$(get_env_from_container "$GITEA_APP" 'GITEA__database__NAME')"
else
  G_USER="gitea"; G_PASS="gitea"; G_DB="gitea"
fi
G_DUMP="/tmp/gitea-$DATE.dump"
if $DOCKER ps --format '{{.Names}}' | grep -qx "$PG_GITEA"; then
  log "🧩 pg_dump (container $PG_GITEA) DB=$G_DB USER=$G_USER → $G_DUMP"
  set +e
  # pg_dump streams directly to the host
  $DOCKER exec -i "$PG_GITEA" sh -lc \
    "export PGPASSWORD='$G_PASS'; pg_dump -U '$G_USER' -h 127.0.0.1 -p 5432 -d '$G_DB' -Fc" \
    > "$G_DUMP" 2>>"$LOGFILE"
  rc=$?; set -e
  if [[ $rc -ne 0 || ! -s "$G_DUMP" ]]; then
    log "❌ pg_dump gitea rc=$rc / file empty?"
    rm -f "$G_DUMP" || true
  else
    rsync_to_path "$G_DUMP" "dumps/gitea/gitea-latest.dump"
    rm -f "$G_DUMP" || true
  fi
else
  log "⚠ $PG_GITEA is not running → skipping dump."
fi
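# Restore sketch (assumed; the dump is Postgres custom format, created with -Fc):
#   pg_restore -U gitea -d gitea --clean --if-exists gitea-latest.dump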
# ── Stage: CryptPad (file-based, as per the maintenance guide) ────────────────
log "▶ Stage: CryptPad (files)"
# Host paths of the bind mounts (from your compose file)
CP_BLOB="/opt/cryptpad/data/blob"
CP_BLOCK="/opt/cryptpad/data/block"
CP_DATA="/opt/cryptpad/data/data"
CP_FILES="/opt/cryptpad/data/files"      # /cryptpad/datastore inside the container
CP_CUSTOM="/opt/cryptpad/customize"
CP_OO_DIST="/opt/cryptpad/onlyoffice-dist"
CP_OO_CONF="/opt/cryptpad/onlyoffice-conf"
CP_CONFIG="/opt/cryptpad/config/config.js"
# Destination subfolders inside the rsync module
ensure_remote_subdir "cryptpad/blob"
ensure_remote_subdir "cryptpad/block"
ensure_remote_subdir "cryptpad/data"
ensure_remote_subdir "cryptpad/files"
ensure_remote_subdir "cryptpad/customize"
ensure_remote_subdir "cryptpad/onlyoffice-dist"
ensure_remote_subdir "cryptpad/onlyoffice-conf"
ensure_remote_subdir "cryptpad/config"
# Mirror the directories
rsync_dir "$CP_BLOB" "cryptpad/blob"
rsync_dir "$CP_BLOCK" "cryptpad/block"
rsync_dir "$CP_DATA" "cryptpad/data"
rsync_dir "$CP_FILES" "cryptpad/files"
rsync_dir "$CP_CUSTOM" "cryptpad/customize"
rsync_dir "$CP_OO_DIST" "cryptpad/onlyoffice-dist"
rsync_dir "$CP_OO_CONF" "cryptpad/onlyoffice-conf"
# config.js separately as a single file (latest copy; snapshots handle versioning)
if [[ -f "$CP_CONFIG" ]]; then
  rsync_to_path "$CP_CONFIG" "cryptpad/config/config.js"   # latest
else
  log "⚠ $CP_CONFIG not found → skipping config.js"
fi
# --- Stage: Authentik config/media (optional) ---------------------------------
log "▶ Stage: Authentik (Files, optional)"
rsync_dir "$AUTHENTIK_CFG" "authentik/config"
rsync_dir "$AUTHENTIK_MEDIA" "authentik/media"
# --- Stage: Authentik (Postgres dump) ------------------------------------------
log "▶ Stage: Authentik (Postgres dump)"
A_DUMP="/tmp/authentik-$DATE.dump"
if $DOCKER ps --format '{{.Names}}' | grep -qx "$PG_AUTHENTIK"; then
  # Read the values from the DB container (defaults if empty)
  A_USER="$($DOCKER inspect "$PG_AUTHENTIK" --format '{{range .Config.Env}}{{println .}}{{end}}' \
    | grep -E '^POSTGRES_USER=' | head -n1 | cut -d= -f2-)"
  A_PASS="$($DOCKER inspect "$PG_AUTHENTIK" --format '{{range .Config.Env}}{{println .}}{{end}}' \
    | grep -E '^POSTGRES_PASSWORD=' | head -n1 | cut -d= -f2-)"
  A_DB="$($DOCKER inspect "$PG_AUTHENTIK" --format '{{range .Config.Env}}{{println .}}{{end}}' \
    | grep -E '^POSTGRES_DB=' | head -n1 | cut -d= -f2-)"
  A_USER="${A_USER:-authentik}"
  A_PASS="${A_PASS:-authentik}"
  A_DB="${A_DB:-authentik}"
  log "🧩 pg_dump (container $PG_AUTHENTIK) DB=$A_DB USER=$A_USER → $A_DUMP"
  set +e
  $DOCKER exec -i "$PG_AUTHENTIK" sh -lc \
    "export PGPASSWORD=\"$A_PASS\"; pg_dump -U \"$A_USER\" -h 127.0.0.1 -p 5432 -d \"$A_DB\" -Fc" \
    > "$A_DUMP" 2>>"$LOGFILE"
  rc=$?; set -e
  if [[ $rc -ne 0 || ! -s "$A_DUMP" ]]; then
    log "❌ pg_dump authentik rc=$rc / file empty?"
    rm -f "$A_DUMP" || true
  else
    rsync_to_path "$A_DUMP" "dumps/authentik/authentik-latest.dump"
    rm -f "$A_DUMP" || true
  fi
else
  log "⚠ $PG_AUTHENTIK is not running → skipping dump."
fi
# --- Stage: Uptime Kuma ---------------------------------------------------------
log "▶ Stage: Uptime Kuma (SQLite + data)"
ONLINE_BKP_OK=0
if $DOCKER ps --format '{{.Names}}' | grep -qx "$KUMA_CONTAINER"; then
  log "🧩 Trying SQLite online backup via .backup inside the container ($KUMA_CONTAINER → kuma-latest.db)"
  set +e
  $DOCKER exec "$KUMA_CONTAINER" sh -lc \
    'if command -v sqlite3 >/dev/null 2>&1; then
       sqlite3 /app/data/kuma.db ".backup \"/app/data/kuma-latest.db\""
     else
       echo "sqlite3 missing in container" >&2; exit 127
     fi'
  rc=$?
  set -e
  if [[ $rc -eq 0 && -f "$KUMA_DATA/kuma-latest.db" ]]; then
    ONLINE_BKP_OK=1
    log "✅ Online backup created: $KUMA_DATA/kuma-latest.db"
    rsync_to_path "$KUMA_DATA/kuma-latest.db" "dumps/uptime-kuma/kuma-latest.db"
  else
    log "⚠ Online backup failed (rc=$rc) → falling back to stop+copy+rsync"
  fi
else
  log "⚠ $KUMA_CONTAINER is not running → backing up the data directory directly (fallback)"
fi
if [[ $ONLINE_BKP_OK -ne 1 ]]; then
  # Consistent snapshot via stop → local copy → rsync → start
  if $DOCKER ps --format '{{.Names}}' | grep -qx "$KUMA_CONTAINER"; then
    log "⏸ Stopping $KUMA_CONTAINER for a consistent DB copy"
    set +e; $DOCKER stop -t 25 "$KUMA_CONTAINER" >>"$LOGFILE" 2>&1; rc=$?; set -e
    if [[ $rc -ne 0 ]]; then log "⚠ stop rc=$rc → continuing anyway"; fi
  fi
  # If the DB exists, create a local "latest" copy and sync it
  if [[ -f "$KUMA_DATA/kuma.db" ]]; then
    cp -a "$KUMA_DATA/kuma.db" "$KUMA_DATA/kuma-latest.db"
    rsync_to_path "$KUMA_DATA/kuma-latest.db" "dumps/uptime-kuma/kuma-latest.db"
  fi
  # (Optional) also mirror the whole data directory → NAS snapshots handle versioning
  rsync_dir "$KUMA_DATA" "uptime-kuma/data"
  if $DOCKER ps -a --format '{{.Names}}' | grep -qx "$KUMA_CONTAINER"; then
    log "▶ Restarting $KUMA_CONTAINER"
    set +e; $DOCKER start "$KUMA_CONTAINER" >>"$LOGFILE" 2>&1; rc=$?; set -e
    if [[ $rc -ne 0 ]]; then log "❌ start rc=$rc → please check"; fi
  fi
fi
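# Verification sketch (assumed sqlite3 on the host, illustrative only):
#   sqlite3 /opt/uptime-kuma/kuma-latest.db "PRAGMA integrity_check;"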
END_TS="$(date +%s)"
DUR=$((END_TS - START_TS))
printf -v DUR_STR '%02dh:%02dm:%02ds' $((DUR/3600)) $((DUR%3600/60)) $((DUR%60))
log "✅ Backup abgeschlossen in $DUR_STR"
exit 0
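# Scheduling sketch (assumed install path and time, adjust as needed), e.g. in /etc/cron.d:
#   30 3 * * * root /usr/local/sbin/vps2-backup.sh >/dev/null 2>&1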