initial commit

This commit is contained in:
root
2025-08-26 18:20:10 +02:00
parent 7d71c405d7
commit 330625f1c5
3 changed files with 400 additions and 0 deletions

25
update-crowdsec-homeip.sh Executable file
View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Resolve the current home IP(s) behind vpn.davidt.de and keep a CrowdSec
# whitelist parser file in sync. Restart crowdsec only when the file changed.
set -euo pipefail

HOST="vpn.davidt.de"
OUT="/etc/crowdsec/parsers/s02-enrich/zz-local-whitelist-home.yaml"

# First A / AAAA record for HOST; empty string when resolution fails.
v4="$(getent ahostsv4 "$HOST" | awk '/STREAM/ {print $1; exit}' || true)"
v6="$(getent ahostsv6 "$HOST" | awk '/STREAM/ {print $1; exit}' || true)"

# Robustness: a DNS outage must not install an empty whitelist (which would
# silently drop the home IP from the allow list and restart crowdsec).
if [[ -z "$v4" && -z "$v6" ]]; then
  echo "ERROR: could not resolve $HOST (no IPv4/IPv6); keeping $OUT untouched" >&2
  exit 1
fi

tmp="$(mktemp)"
# Clean the temp file up on every exit path (incl. set -e aborts).
trap 'rm -f "$tmp"' EXIT

{
  echo 'name: local/home-allow'
  echo 'description: "Whitelist home via vpn.davidt.de (auto)"'
  echo 'whitelist:'
  echo ' reason: "home dyn IP"'
  echo ' ip:'
  # BUG FIX: plain `[[ -n "$v6" ]] && echo …` as the group's last command made
  # the whole brace group return 1 whenever v6 was empty (e.g. IPv4-only
  # hosts), which killed the script under `set -e` before cmp/install ran.
  if [[ -n "$v4" ]]; then echo " - \"$v4\""; fi
  if [[ -n "$v6" ]]; then echo " - \"$v6\""; fi
} > "$tmp"

# Install and restart only on change, to avoid needless crowdsec restarts.
if ! cmp -s "$tmp" "$OUT"; then
  install -m 0644 -o root -g root "$tmp" "$OUT"
  systemctl restart crowdsec
fi

189
vps-healthcheck.sh Executable file
View File

@@ -0,0 +1,189 @@
#!/usr/bin/env bash
# Healthcheck v2 (modular): core checks plus optional modules (e.g. Mailcow).

# --- Helpers / debug flags ---
DEBUG="${DEBUG:-0}"       # 1 = verbose debug output on stderr
NO_PUSH="${NO_PUSH:-0}"   # 1 = dry-run, do not push to Uptime Kuma

# Print a debug line to stderr when DEBUG=1 (no-op, status 1, otherwise).
log() {
  [ "$DEBUG" != "1" ] && return 1
  echo "[dbg] $*" >&2
}

# Collected alert messages; a non-empty array means status "down".
alerts=()

# Append one alert message to the list.
add_alert() {
  alerts+=("$1")
}
# --- Defaults, applied when not set via the environment ---
KUMA_BASE_URL="${KUMA_BASE_URL:-https://status.example/api/push}"
KUMA_PUSH_ID="${KUMA_PUSH_ID:-}"
LOAD_PER_CORE_MAX="${LOAD_PER_CORE_MAX:-1.50}"   # 1-min load average per CPU core
MEM_USED_MAX="${MEM_USED_MAX:-90}"               # RAM usage alert threshold (%)
DISK_USED_MAX="${DISK_USED_MAX:-85}"             # disk usage alert threshold (%)
INODE_USED_MAX="${INODE_USED_MAX:-90}"           # inode usage alert threshold (%)
# Intentionally unquoted: MOUNTS is a space-separated list that must word-split.
MOUNTS=(${MOUNTS:-/ /var /opt /var/lib/docker})
SWAP_USED_MAX="${SWAP_USED_MAX:-60}"             # swap usage alert threshold (%)
SWAP_SI_MIN="${SWAP_SI_MIN:-50}"                 # min swap-in pages/s before alerting
SWAP_SO_MIN="${SWAP_SO_MIN:-50}"                 # min swap-out pages/s before alerting
MEM_PRESSURE_FOR_SWAP_ALERT="${MEM_PRESSURE_FOR_SWAP_ALERT:-95}"  # RAM % that alone triggers the swap alert
# Mailcow module settings
ENABLE_MAILCOW="${ENABLE_MAILCOW:-0}"
COMPOSE_PROJECT_LABEL="${COMPOSE_PROJECT_LABEL:-mailcowdockerized}"
MAILCOW_POSTFIX="${MAILCOW_POSTFIX:-postfix-mailcow}"
QUEUE_MAX="${QUEUE_MAX:-100}"
# --- Real network ping (ICMP/TCP) ---
PING_MODE="${PING_MODE:-icmp}" # icmp | tcp | off
# Default target: the host part of KUMA_BASE_URL (awk field 3 of https://host/...)
PING_TARGET="${PING_TARGET:-$(echo "${KUMA_BASE_URL}" | awk -F/ '{print $3}')}"
PING_COUNT="${PING_COUNT:-3}"
PING_TIMEOUT="${PING_TIMEOUT:-1}" # seconds per reply
PING_DEADLINE="${PING_DEADLINE:-3}" # seconds overall
PING_FAMILY="${PING_FAMILY:-auto}" # 4 | 6 | auto
PING_TCP_URL="${PING_TCP_URL:-https://$(echo "${KUMA_BASE_URL}" | awk -F/ '{print $3}')/}"
PING_TCP_TIMEOUT="${PING_TCP_TIMEOUT:-2}"
# --- Sanitizers for numeric env values (strip inline comments/units) ---
# Env values may arrive as e.g. "85  # percent" or "85%"; reduce them to bare
# numbers so the later -gt / awk comparisons do not blow up.
# printf instead of echo: a value starting with "-" must not be eaten as an
# echo option.
strip_comment() { printf '%s\n' "${1%%#*}"; }
num_int()   { strip_comment "$1" | tr -cd '0-9'; }
num_float() { strip_comment "$1" | tr -cd '0-9.'; }

LOAD_PER_CORE_MAX=$(num_float "${LOAD_PER_CORE_MAX:-1.50}")
MEM_USED_MAX=$(num_int "${MEM_USED_MAX:-90}")
DISK_USED_MAX=$(num_int "${DISK_USED_MAX:-85}")
INODE_USED_MAX=$(num_int "${INODE_USED_MAX:-90}")
SWAP_USED_MAX=$(num_int "${SWAP_USED_MAX:-60}")
SWAP_SI_MIN=$(num_int "${SWAP_SI_MIN:-50}")
SWAP_SO_MIN=$(num_int "${SWAP_SO_MIN:-50}")
MEM_PRESSURE_FOR_SWAP_ALERT=$(num_int "${MEM_PRESSURE_FOR_SWAP_ALERT:-95}")
# --- Sanity: a Kuma push ID is mandatory ---
if [ -z "${KUMA_PUSH_ID}" ]; then
  # FIX: errors belong on stderr so cron/systemd journal flags them properly.
  echo "ERROR: KUMA_PUSH_ID ist leer. Bitte in /etc/vps-healthcheck.env setzen." >&2
  exit 1
fi
# --- Measure real network latency in milliseconds ---
# Strategy: ICMP ping first (when enabled and available), then a TCP/TLS
# connect-time fallback via curl. Prints a whole number of ms, "0" on failure.
measure_ping() {
  local latency=""

  # 1) ICMP: average RTT parsed from iputils or busybox ping output.
  if [ "${PING_MODE}" = "icmp" ] && command -v ping >/dev/null 2>&1 && [ -n "${PING_TARGET}" ]; then
    local family_flag=""
    case "$PING_FAMILY" in
      4) family_flag="-4" ;;
      6) family_flag="-6" ;;
    esac
    local raw
    raw=$(LANG=C ping -n $family_flag -c "$PING_COUNT" -W "$PING_TIMEOUT" -w "$PING_DEADLINE" "$PING_TARGET" 2>&1 || true)
    # iputils: "rtt min/avg/max/mdev = a/b/c/d ms" -> take avg (2nd field)
    latency=$(echo "$raw" | sed -n 's/.*= \([0-9.]\+\)\/\([0-9.]\+\)\/\([0-9.]\+\)\/\([0-9.]\+\) .*$/\2/p' | tail -n1)
    # busybox: "round-trip min/avg/max = a/b/c ms" -> take avg
    if [ -z "$latency" ]; then
      latency=$(echo "$raw" | sed -n 's/.*= \([0-9.]\+\)\/\([0-9.]\+\)\/\([0-9.]\+\) .*$/\2/p' | tail -n1)
    fi
  fi

  # 2) TCP fallback: TLS-handshake or connect time (seconds -> ms).
  if [ -z "$latency" ] && command -v curl >/dev/null 2>&1; then
    local seconds
    case "$PING_TCP_URL" in
      https://*) seconds=$(curl -sS -o /dev/null --max-time "$PING_TCP_TIMEOUT" -w "%{time_appconnect}" "$PING_TCP_URL" || true) ;;
      *)         seconds=$(curl -sS -o /dev/null --max-time "$PING_TCP_TIMEOUT" -w "%{time_connect}" "$PING_TCP_URL" || true) ;;
    esac
    if echo "$seconds" | grep -Eq '^[0-9.]+$'; then
      latency=$(awk -v s="$seconds" 'BEGIN{printf "%.0f", s*1000}')
    fi
  fi

  # Normalize to an integer; "0" when nothing could be measured.
  if echo "$latency" | grep -Eq '^[0-9.]+$'; then
    awk -v a="$latency" 'BEGIN{printf "%.0f", a}'
  else
    echo "0"
  fi
}
# --- Push the result to Uptime Kuma (optionally with ping= latency) ---
# $1 = status (up|down), $2 = message, $3 = latency in ms (may be empty).
push_to_kuma() {
  local status="$1" msg="$2" ping_ms="$3"
  local curl_args=()
  curl_args+=( -fsS --retry 2 --max-time 8 --get "${KUMA_BASE_URL}/${KUMA_PUSH_ID}" )
  curl_args+=( --data-urlencode "status=${status}" )
  curl_args+=( --data-urlencode "msg=${msg}" )
  if [ -n "$ping_ms" ]; then
    curl_args+=( --data-urlencode "ping=${ping_ms}" )
  fi
  if [ "$NO_PUSH" = "1" ]; then
    echo "[dry-run] would push: status=$status ping=${ping_ms:-} msg=$msg"
    return 0
  fi
  # A push failure must never fail the healthcheck itself.
  curl "${curl_args[@]}" >/dev/null || true
}
# --- CPU load per core (1-min load average / nproc) ---
cores=$(nproc 2>/dev/null || echo 1)
load1=$(awk '{print $1}' /proc/loadavg)
load_per_core=$(awk -v l="$load1" -v c="$cores" 'BEGIN{printf "%.2f", (c>0?l/c:l)}')
# awk exits 0 (success) only when the limit is exceeded -> triggers add_alert.
awk -v a="$load_per_core" -v m="$LOAD_PER_CORE_MAX" 'BEGIN{exit (a>m)?0:1}' && \
add_alert "Hohe CPU-Last: ${load1} (=${load_per_core}/Core, Limit ${LOAD_PER_CORE_MAX}/Core)"
# --- RAM usage, based on MemAvailable (reclaimable memory counts as free) ---
mem_used_pct=$(awk '
/MemTotal:/ {t=$2}
/MemAvailable:/ {a=$2}
END { if(t>0){printf "%.0f", (1- a/t)*100} else {print 0} }
' /proc/meminfo)
[ "${mem_used_pct:-0}" -gt "$MEM_USED_MAX" ] && add_alert "RAM hoch: ${mem_used_pct}% (Limit ${MEM_USED_MAX}%)"
# --- Swap: usage + activity (pages/s) ---
swap_used_pct=$(awk '
/SwapTotal:/ {t=$2}
/SwapFree:/ {f=$2}
END { if(t>0) printf "%.0f", (1- f/t)*100; else print 0 }
' /proc/meminfo)
# Two /proc/vmstat samples 1s apart give swap-in/out rates in pages/s.
psin0=$(awk '/pswpin/ {print $2}' /proc/vmstat); psout0=$(awk '/pswpout/ {print $2}' /proc/vmstat)
sleep 1
psin1=$(awk '/pswpin/ {print $2}' /proc/vmstat); psout1=$(awk '/pswpout/ {print $2}' /proc/vmstat)
si=$((psin1-psin0)); so=$((psout1-psout0))
# Alert only when swap is well-used AND there is active swapping or severe
# RAM pressure — plain "swap is full" on an idle box is not actionable.
if [ "$swap_used_pct" -gt "$SWAP_USED_MAX" ] && { [ "$si" -gt "$SWAP_SI_MIN" ] || [ "$so" -gt "$SWAP_SO_MIN" ] || [ "${mem_used_pct:-0}" -gt "$MEM_PRESSURE_FOR_SWAP_ALERT" ]; }; then
add_alert "Swap hoch: ${swap_used_pct}% (si=${si}/s so=${so}/s)"
fi
# --- IO-wait: delta of the cpu line in /proc/stat over a 1s window ---
read u1 n1 s1 i1 w1 _ < <(awk '/^cpu /{print $2,$3,$4,$5,$6}' /proc/stat)
sleep 1
read u2 n2 s2 i2 w2 _ < <(awk '/^cpu /{print $2,$3,$4,$5,$6}' /proc/stat)
total=$(( (u2-u1)+(n2-n1)+(s2-s1)+(i2-i1)+(w2-w1) ))
iow=$(( w2 - w1 ))
iow_pct=$(awk -v i="$iow" -v t="$total" 'BEGIN{ if(t>0) printf "%.0f", (i*100)/t; else print 0 }')
[ "${iow_pct:-0}" -gt 25 ] && add_alert "IO-Wait hoch: ${iow_pct}% (Limit 25%)"
# --- Disks: usage + inodes for each configured mountpoint ---
for m in "${MOUNTS[@]}"; do
if mountpoint -q "$m"; then
used=$(df -P "$m" 2>/dev/null | awk 'NR==2{gsub("%","",$5);print $5}')
[ -n "$used" ] && [ "$used" -gt "$DISK_USED_MAX" ] && add_alert "Disk fast voll: $m = ${used}% (Limit ${DISK_USED_MAX}%)"
iused=$(df -Pi "$m" 2>/dev/null | awk 'NR==2{gsub("%","",$5);print $5}')
[ -n "$iused" ] && [ "$iused" -gt "$INODE_USED_MAX" ] && add_alert "Inodes knapp: $m = ${iused}% (Limit ${INODE_USED_MAX}%)"
fi
done
# --- Optional module loading ---
MODULE_DIR="/usr/local/lib/vps-healthcheck/modules"
# A module <name>.sh is sourced when ENABLE_<NAME>=1 and the file is readable.
for module in mailcow raspi; do
  enable_var="ENABLE_${module^^}"
  if [ "${!enable_var:-0}" = "1" ] && [ -r "${MODULE_DIR}/${module}.sh" ]; then
    log "loading module: ${module}"
    source "${MODULE_DIR}/${module}.sh"
  fi
done
# --- Push the result (with real network latency) ---
NET_PING_MS="$(measure_ping)"
[ "$DEBUG" = "1" ] && echo "[dbg] ping(${PING_MODE}) -> ${NET_PING_MS} ms @ ${PING_TARGET:-$PING_TCP_URL}" >&2
if [ "${#alerts[@]}" -gt 0 ]; then
  # Join all alerts into one message; Kuma message is truncated to 900 chars.
  msg="$(hostname) $(printf '%s; ' "${alerts[@]}")"
  push_to_kuma "down" "${msg:0:900}" "$NET_PING_MS"
  # FIX: printf instead of `echo -e` — alert text must not be subject to
  # backslash-escape interpretation.
  printf '%s\n' "$msg"
else
  push_to_kuma "up" "OK ($(hostname))" "$NET_PING_MS"
  echo "OK"
fi

186
vps2-backup.sh Executable file
View File

@@ -0,0 +1,186 @@
#!/bin/bash
# vps2-backup v1.0 — Portainer, Traefik, Gitea, Authentik → NAS (snapshots)
set -euo pipefail
umask 077
# === NAS / SSH target ===
NAS_USER="backup-master"
NAS_HOST="nas.davidt.de"
NAS_PORT=10022
NAS_PATH="/volume1/rocinante-vps2" # <- adjust if a different share is desired
# === Logging / lock file ===
DATE="$(date +'%Y-%m-%d')"
LOGFILE="/var/log/vps2-backup.log"
LOCKFILE="/tmp/vps2-backup.lock"
# Timestamped log line, written to stdout and appended to the logfile.
log(){ echo "[$(date +'%F %T')] $*" | tee -a "$LOGFILE"; }
# === Docker (with sudo fallback when not running as root) ===
SUDO=""
if [[ $EUID -ne 0 ]]; then
if sudo -n true 2>/dev/null; then SUDO="sudo"
else log " Kein Root & kein passwordless sudo Docker/Configs evtl. eingeschränkt."; fi
fi
# DOCKER ends up as "docker" or "sudo docker"; container stages are skipped
# later when neither works.
DOCKER="docker"
if ! $DOCKER ps >/dev/null 2>&1; then
if [[ -n "$SUDO" ]] && $SUDO docker ps >/dev/null 2>&1; then DOCKER="$SUDO docker"
else log " Docker nicht nutzbar Container-Backups werden übersprungen."; fi
fi
# === Source paths (bind mounts on the host) ===
PORTAINER_DATA="/opt/portainer/data"
TRAEFIK_DIR="/opt/traefik" # contains dynamic/ and traefik.yml
TRAEFIK_DYNAMIC="$TRAEFIK_DIR/dynamic"
TRAEFIK_YML="$TRAEFIK_DIR/traefik.yml"
TRAEFIK_ACME="$TRAEFIK_DIR/acme.json" # mounted as /letsencrypt/acme.json
GITEA_DATA="/opt/gitea/gitea" # bind mount for /data in the gitea container
# Authentik (when bind mounts are used; otherwise only the DB dump applies)
AUTHENTIK_CFG="/opt/authentik/config"
AUTHENTIK_MEDIA="/opt/authentik/media"
# === DB container names (as seen in `docker ps` output) ===
PG_GITEA="gitea-db" # postgres:14
PG_AUTHENTIK="authentik-postgresql-1" # postgres:16-alpine
# === rsync defaults ===
# -aH: archive + hardlinks; --delete: exact mirror; --delay-updates: swap
# files in near-atomically at the end of the transfer.
RSYNC_COMMON=(-aH --delete --numeric-ids --info=stats1 --delay-updates)
RSYNC_SSH=(-e "ssh -p $NAS_PORT")
# Mirror a local directory tree to the NAS, tolerating the usual benign
# rsync exit codes (23 = partial transfer, 24 = source files vanished).
# $1 = local source directory, $2 = destination subdir below $NAS_PATH.
rsync_dir () {
  local SRC="$1" DEST_SUB="$2"
  if [[ -d "$SRC" ]]; then
    # FIX: source and destination were concatenated without a separator.
    log "➡️ RSYNC DIR $SRC -> $NAS_PATH/$DEST_SUB/"
    # Ensure the remote target exists (consistent with rsync_file).
    ssh -p "$NAS_PORT" "$NAS_USER@$NAS_HOST" "mkdir -p '$NAS_PATH/$DEST_SUB'" >/dev/null 2>&1 || true
    set +e
    rsync "${RSYNC_COMMON[@]}" "${RSYNC_SSH[@]}" \
      "$SRC"/ "$NAS_USER@$NAS_HOST:$NAS_PATH/$DEST_SUB/" >>"$LOGFILE" 2>&1
    local rc=$?; set -e
    [[ $rc -eq 0 || $rc -eq 23 || $rc -eq 24 ]] || { log "❌ rsync $SRC rc=$rc"; exit $rc; }
  else
    log " überspringe (kein Verzeichnis): $SRC"
  fi
}
# Copy a single local file to the NAS, tolerating benign rsync exit codes
# (23 = partial transfer, 24 = source vanished).
# $1 = local source file, $2 = destination subdir below $NAS_PATH.
rsync_file () {
  local SRC="$1" DEST_SUB="$2"
  if [[ -f "$SRC" ]]; then
    # FIX: source and destination were concatenated without a separator.
    log "➡️ RSYNC FILE $SRC -> $NAS_PATH/$DEST_SUB/"
    ssh -p "$NAS_PORT" "$NAS_USER@$NAS_HOST" "mkdir -p '$NAS_PATH/$DEST_SUB'" >/dev/null 2>&1 || true
    set +e
    rsync -a "${RSYNC_SSH[@]}" "$SRC" "$NAS_USER@$NAS_HOST:$NAS_PATH/$DEST_SUB/" >>"$LOGFILE" 2>&1
    local rc=$?; set -e
    [[ $rc -eq 0 || $rc -eq 23 || $rc -eq 24 ]] || { log "❌ rsync $SRC rc=$rc"; exit $rc; }
  else
    log " überspringe (keine Datei): $SRC"
  fi
}
# === Lock ===
# FIX: the previous "check then touch" was racy (TOCTOU); creating the lock
# file with noclobber makes acquisition atomic.
if ! ( set -o noclobber; : > "$LOCKFILE" ) 2>/dev/null; then
  log "⚠️ Läuft bereits Abbruch."
  exit 1
fi
# Remove the lock on every exit path (set -e aborts included).
trap 'rm -f "$LOCKFILE"' EXIT
log "🔁 Backup gestartet: $DATE (User: $(id -un), UID:$(id -u))"
# === Create the remote directory structure (best effort) ===
ssh -p "$NAS_PORT" "$NAS_USER@$NAS_HOST" "mkdir -p \
  '$NAS_PATH/portainer' \
  '$NAS_PATH/traefik' \
  '$NAS_PATH/gitea' \
  '$NAS_PATH/authentik' \
  '$NAS_PATH/dumps/gitea' \
  '$NAS_PATH/dumps/authentik' \
  '$NAS_PATH/system'" >/dev/null 2>&1 || true
# --- Stage: Portainer ---------------------------------------------------------
log "▶️ Stage: Portainer"
rsync_dir "$PORTAINER_DATA" "portainer"
# --- Stage: Traefik -----------------------------------------------------------
log "▶️ Stage: Traefik"
rsync_dir "$TRAEFIK_DYNAMIC" "traefik/dynamic"
rsync_file "$TRAEFIK_YML" "traefik"
# acme.json: Traefik's ACME/Let's Encrypt state (see TRAEFIK_ACME above).
rsync_file "$TRAEFIK_ACME" "traefik"
# --- Stage: Gitea data --------------------------------------------------------
log "▶️ Stage: Gitea (/data)"
rsync_dir "$GITEA_DATA" "gitea/data"
# --- Stage: Gitea DB (Postgres) ----------------------------------------------
log "▶️ Stage: Gitea (Postgres Dump)"
# Read a single env var from a container's configured environment.
get_env_from_container(){
  local C="$1" V="$2"
  $DOCKER inspect "$C" --format '{{range .Config.Env}}{{println .}}{{end}}' 2>/dev/null \
    | awk -F= -v k="$V" '$1==k{print substr($0,length(k)+2)}'
}
GITEA_APP="gitea"
# DB credentials come from the Gitea app container; fall back to defaults.
if $DOCKER ps --format '{{.Names}}' | grep -qx "$GITEA_APP"; then
  G_USER="$(get_env_from_container "$GITEA_APP" 'GITEA__database__USER')"
  G_PASS="$(get_env_from_container "$GITEA_APP" 'GITEA__database__PASSWD')"
  G_DB="$(get_env_from_container "$GITEA_APP" 'GITEA__database__NAME')"
else
  G_USER="gitea"; G_PASS="gitea"; G_DB="gitea"
fi
G_DUMP="/tmp/gitea-$DATE.dump"
if $DOCKER ps --format '{{.Names}}' | grep -qx "$PG_GITEA"; then
  log "🧩 pg_dump (in Container $PG_GITEA) DB=$G_DB USER=$G_USER"
  set +e
  $DOCKER exec "$PG_GITEA" sh -lc \
    "export PGPASSWORD='$G_PASS'; pg_dump -U '$G_USER' -h 127.0.0.1 -p 5432 -d '$G_DB' -Fc -f '$G_DUMP'" >>"$LOGFILE" 2>&1
  rc=$?; set -e
  if [[ $rc -ne 0 ]]; then log "❌ pg_dump gitea rc=$rc"; else
    # BUG FIX: the dump file exists INSIDE the container; the old code rsynced
    # the path from the HOST filesystem, backing up nothing. Copy the dump to
    # a host staging dir via `docker cp` first, then rsync that copy.
    G_STAGE="$(mktemp -d)"
    if $DOCKER cp "$PG_GITEA:$G_DUMP" "$G_STAGE/" >>"$LOGFILE" 2>&1; then
      rsync -a "${RSYNC_SSH[@]}" "$G_STAGE/$(basename "$G_DUMP")" \
        "$NAS_USER@$NAS_HOST:$NAS_PATH/dumps/gitea/" >>"$LOGFILE" 2>&1 || true
    else
      log "❌ docker cp gitea dump fehlgeschlagen"
    fi
    rm -rf "$G_STAGE"
    $DOCKER exec "$PG_GITEA" sh -lc "rm -f '$G_DUMP'" >/dev/null 2>&1 || true
  fi
else
  log " $PG_GITEA läuft nicht überspringe Dump."
fi
# --- Stage: Authentik config/media (when bind mounts exist) -------------------
log "▶️ Stage: Authentik (Files, optional)"
rsync_dir "$AUTHENTIK_CFG" "authentik/config"
rsync_dir "$AUTHENTIK_MEDIA" "authentik/media"
# --- Stage: Authentik DB (Postgres) ------------------------------------------
log "▶️ Stage: Authentik (Postgres Dump)"
A_DUMP="/tmp/authentik-$DATE.dump"
if $DOCKER ps --format '{{.Names}}' | grep -qx "$PG_AUTHENTIK"; then
  # Read POSTGRES_* from the DB container's env; fall back to defaults.
  get_env_db(){
    local C="$1" V="$2" D="$3"
    local val; val="$($DOCKER inspect "$C" --format '{{range .Config.Env}}{{println .}}{{end}}' 2>/dev/null \
      | awk -F= -v k="$V" '$1==k{print substr($0,length(k)+2)}')"
    echo "${val:-$D}"
  }
  A_USER="$(get_env_db "$PG_AUTHENTIK" POSTGRES_USER "authentik")"
  A_PASS="$(get_env_db "$PG_AUTHENTIK" POSTGRES_PASSWORD "authentik")"
  A_DB="$(get_env_db "$PG_AUTHENTIK" POSTGRES_DB "authentik")"
  log "🧩 pg_dump (in Container $PG_AUTHENTIK) DB=$A_DB USER=$A_USER"
  set +e
  $DOCKER exec "$PG_AUTHENTIK" sh -lc \
    "export PGPASSWORD='$A_PASS'; pg_dump -U '$A_USER' -h 127.0.0.1 -p 5432 -d '$A_DB' -Fc -f '$A_DUMP'" >>"$LOGFILE" 2>&1
  rc=$?; set -e
  if [[ $rc -ne 0 ]]; then log "❌ pg_dump authentik rc=$rc"; else
    # BUG FIX: the dump exists INSIDE the container only; `docker cp` it to a
    # host staging dir first, then rsync that host copy to the NAS.
    A_STAGE="$(mktemp -d)"
    if $DOCKER cp "$PG_AUTHENTIK:$A_DUMP" "$A_STAGE/" >>"$LOGFILE" 2>&1; then
      rsync -a "${RSYNC_SSH[@]}" "$A_STAGE/$(basename "$A_DUMP")" \
        "$NAS_USER@$NAS_HOST:$NAS_PATH/dumps/authentik/" >>"$LOGFILE" 2>&1 || true
    else
      log "❌ docker cp authentik dump fehlgeschlagen"
    fi
    rm -rf "$A_STAGE"
    $DOCKER exec "$PG_AUTHENTIK" sh -lc "rm -f '$A_DUMP'" >/dev/null 2>&1 || true
  fi
else
  log " $PG_AUTHENTIK läuft nicht überspringe Dump."
fi
# --- Stage: System-Minidump ---------------------------------------------------
# Small snapshot of cron + docker inventory for disaster recovery.
log "▶️ Stage: System-Minidump"
TMP="$(mktemp -d)"
( crontab -l 2>/dev/null || true ) > "$TMP/crontab.txt"
$DOCKER ps --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}' > "$TMP/docker-ps.txt" 2>>"$LOGFILE" || true
$DOCKER volume ls > "$TMP/docker-volumes.txt" 2>>"$LOGFILE" || true
$DOCKER network ls > "$TMP/docker-networks.txt" 2>>"$LOGFILE" || true
# FIX: the unguarded rsync aborted the script under set -e before TMP was
# removed; tolerate the same benign codes (23/24) as the helpers and always
# clean up the staging dir.
set +e
rsync -a "${RSYNC_SSH[@]}" "$TMP"/ "$NAS_USER@$NAS_HOST:$NAS_PATH/system/" >>"$LOGFILE" 2>&1
rc=$?; set -e
rm -rf "$TMP"
[[ $rc -eq 0 || $rc -eq 23 || $rc -eq 24 ]] || { log "❌ rsync system rc=$rc"; exit $rc; }
log "✅ Backup abgeschlossen: $DATE"
log "-------------------------------------------"