#!/bin/bash
###############################################################################
# Layer 4: Core Monitoring & Notifications
#
# This script deploys ntfy and Uptime Kuma on watchtower.
# Must be run after Layers 1A, 2, and 3 are complete.
###############################################################################

# Exit on error; pipefail makes a pipeline fail when any stage fails (not
# just the last), so `grep | awk || fallback` constructs work as intended.
set -e -o pipefail

# ANSI color codes used by the print_* helpers below.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Resolve the project layout relative to this script's real location so it
# can be invoked from any working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
ANSIBLE_DIR="$PROJECT_ROOT/ansible"
readonly SCRIPT_DIR PROJECT_ROOT ANSIBLE_DIR

###############################################################################
# Helper Functions
###############################################################################

# Print a section banner: blank line, then the title framed by '=' rules.
print_header() {
    local title="$1"
    local rule="========================================"
    echo -e "\n${BLUE}${rule}${NC}"
    echo -e "${BLUE}${title}${NC}"
    echo -e "${BLUE}${rule}${NC}\n"
}
# Print a green check mark followed by the given message.
print_success() {
    local msg="$1"
    echo -e "${GREEN}✓${NC} ${msg}"
}
# Print a red cross followed by the given message.
print_error() {
    local msg="$1"
    echo -e "${RED}✗${NC} ${msg}"
}
# Print a yellow warning sign followed by the given message.
print_warning() {
    local msg="$1"
    echo -e "${YELLOW}⚠${NC} ${msg}"
}
# Print a blue info marker followed by the given message.
print_info() {
    local msg="$1"
    echo -e "${BLUE}ℹ${NC} ${msg}"
}
# Ask a yes/no question; returns 0 only on an explicit 'y' or 'Y' answer
# (anything else, including just pressing Enter, counts as "No").
# Arguments: $1 - prompt text, shown in yellow.
confirm_action() {
    local prompt="$1"
    local response

    # -r keeps backslashes in the answer literal; the command substitution is
    # quoted so the prompt is not word-split or glob-expanded.
    read -r -p "$(echo -e "${YELLOW}${prompt}${NC} [y/N]: ")" response
    [[ "$response" =~ ^[Yy]$ ]]
}

###############################################################################
# Verification Functions
###############################################################################

# Verify the environment needed for Layer 4: an activated virtualenv, the
# ansible CLI, inventory.ini, and a [watchtower] group in the inventory.
# Exits with status 1 if anything is missing.
check_prerequisites() {
    print_header "Verifying Prerequisites"

    local errors=0

    if [ -z "$VIRTUAL_ENV" ]; then
        print_error "Virtual environment not activated"
        echo "Run: source venv/bin/activate"
        # NOTE: '((errors++))' returns non-zero when errors is 0, which would
        # abort the whole script under 'set -e'; use arithmetic expansion.
        errors=$((errors + 1))
    else
        print_success "Virtual environment activated"
    fi

    if ! command -v ansible &> /dev/null; then
        print_error "Ansible not found"
        errors=$((errors + 1))
    else
        print_success "Ansible found"
    fi

    if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then
        print_error "inventory.ini not found"
        errors=$((errors + 1))
    else
        print_success "inventory.ini exists"
    fi

    # Layer 4 deploys onto the watchtower host, so the inventory must have it.
    if ! grep -q "^\[watchtower\]" "$ANSIBLE_DIR/inventory.ini"; then
        print_error "watchtower not configured in inventory.ini"
        print_info "Layer 4 requires watchtower VPS"
        errors=$((errors + 1))
    else
        print_success "watchtower configured in inventory"
    fi

    if [ $errors -gt 0 ]; then
        print_error "Prerequisites not met"
        exit 1
    fi

    print_success "Prerequisites verified"
}
# Ensure services_config.yml exists and report the configured subdomains.
# Exits with status 1 when the config file is missing.
check_vars_files() {
    print_header "Checking Configuration Files"

    if [ ! -f "$ANSIBLE_DIR/services_config.yml" ]; then
        print_error "services_config.yml not found"
        print_info "This file should have been created in Layer 0"
        exit 1
    fi

    print_success "services_config.yml exists"

    # Read the subdomains, falling back to defaults when a key is absent.
    # NOTE: the previous 'grep | awk || echo default' fallback never fired:
    # awk exits 0 even when grep matched nothing. awk alone always exits 0,
    # so this is also safe under 'set -e' with pipefail.
    local ntfy_sub uptime_sub
    ntfy_sub=$(awk '/^[[:space:]]+ntfy:/ {print $2; exit}' "$ANSIBLE_DIR/services_config.yml" 2>/dev/null)
    uptime_sub=$(awk '/^[[:space:]]+uptime_kuma:/ {print $2; exit}' "$ANSIBLE_DIR/services_config.yml" 2>/dev/null)
    ntfy_sub="${ntfy_sub:-ntfy}"
    uptime_sub="${uptime_sub:-uptime}"

    print_info "Configured subdomains:"
    echo " • ntfy: $ntfy_sub"
    echo " • uptime_kuma: $uptime_sub"
    echo ""
}
# Validate that the ntfy and Uptime Kuma DNS records resolve to the
# watchtower IP taken from the Ansible inventory. On failure, offers to
# continue anyway (SSL certificate issuance will fail until DNS is correct).
# Returns 1 when prerequisites for the check are missing (no IP, no domain,
# no dig); exits 1 if the user declines to continue after a DNS mismatch.
check_dns_configuration() {
    print_header "Validating DNS Configuration"

    cd "$ANSIBLE_DIR" || exit 1

    # Watchtower IP: first host of the [watchtower] inventory group.
    local watchtower_ip
    watchtower_ip=$(ansible-inventory -i inventory.ini --list | python3 -c "import sys, json; data=json.load(sys.stdin); hosts=data.get('watchtower', {}).get('hosts', []); print(hosts[0] if hosts else '')" 2>/dev/null || true)

    if [ -z "$watchtower_ip" ]; then
        print_error "Could not determine watchtower IP from inventory"
        return 1
    fi

    print_info "Watchtower IP: $watchtower_ip"
    echo ""

    # Root domain from infra_vars.yml (awk alone is safe under set -e).
    local root_domain
    root_domain=$(awk '/^root_domain:/ {print $2; exit}' "$ANSIBLE_DIR/infra_vars.yml" 2>/dev/null)

    if [ -z "$root_domain" ]; then
        print_error "Could not determine root_domain from infra_vars.yml"
        return 1
    fi

    # Subdomains from the centralized config, with defaults.
    local ntfy_subdomain="ntfy"
    local uptime_subdomain="uptime"

    if [ -f "$ANSIBLE_DIR/services_config.yml" ]; then
        local sub
        sub=$(awk '/^[[:space:]]+ntfy:/ {print $2; exit}' "$ANSIBLE_DIR/services_config.yml" 2>/dev/null)
        if [ -n "$sub" ]; then ntfy_subdomain="$sub"; fi
        sub=$(awk '/^[[:space:]]+uptime_kuma:/ {print $2; exit}' "$ANSIBLE_DIR/services_config.yml" 2>/dev/null)
        if [ -n "$sub" ]; then uptime_subdomain="$sub"; fi
    fi

    local ntfy_fqdn="${ntfy_subdomain}.${root_domain}"
    local uptime_fqdn="${uptime_subdomain}.${root_domain}"

    print_info "Checking DNS records..."
    echo ""

    # Check dig availability once up front (previously tested twice).
    if ! command -v dig &> /dev/null; then
        print_warning "dig command not found, skipping DNS validation"
        print_info "Install dnsutils/bind-tools to enable DNS validation"
        return 1
    fi

    local dns_ok=true
    local fqdn resolved

    # Same resolve-and-compare logic for both records.
    for fqdn in "$ntfy_fqdn" "$uptime_fqdn"; do
        print_info "Checking $fqdn..."
        # '|| true' keeps a resolver/network error from aborting under set -e.
        resolved=$(dig +short "$fqdn" | head -n1 || true)
        if [ "$resolved" = "$watchtower_ip" ]; then
            print_success "$fqdn → $resolved ✓"
        elif [ -n "$resolved" ]; then
            print_error "$fqdn → $resolved (expected $watchtower_ip)"
            dns_ok=false
        else
            print_error "$fqdn does not resolve"
            dns_ok=false
        fi
    done

    echo ""

    if [ "$dns_ok" = false ]; then
        print_error "DNS validation failed"
        print_info "Please configure DNS records:"
        echo " • $ntfy_fqdn → $watchtower_ip"
        echo " • $uptime_fqdn → $watchtower_ip"
        echo ""
        print_warning "DNS changes can take time to propagate (up to 24-48 hours)"
        echo ""
        if ! confirm_action "Continue anyway? (SSL certificates will fail without proper DNS)"; then
            exit 1
        fi
    else
        print_success "DNS validation passed"
    fi
}

###############################################################################
# ntfy Deployment
###############################################################################

# Deploy the ntfy notification service on watchtower via Ansible.
# Admin credentials come from the NTFY_USER / NTFY_PASSWORD environment
# variables, or are prompted for interactively and exported for the playbook.
# Returns 0 on success, 1 when skipped or when the deployment fails.
deploy_ntfy() {
    print_header "Deploying ntfy (Notification Service)"

    cd "$ANSIBLE_DIR" || return 1

    print_info "ntfy requires admin credentials for authentication"
    echo ""

    # Prompt only when the credentials are not already in the environment.
    if [ -z "$NTFY_USER" ] || [ -z "$NTFY_PASSWORD" ]; then
        print_warning "NTFY_USER and NTFY_PASSWORD environment variables not set"
        echo ""
        print_info "Please enter credentials for ntfy admin user:"
        echo ""

        local ntfy_user ntfy_password

        echo -e -n "${BLUE}ntfy admin username${NC} [admin]: "
        # -r keeps backslashes in the input literal.
        read -r ntfy_user
        ntfy_user="${ntfy_user:-admin}"

        echo -e -n "${BLUE}ntfy admin password${NC}: "
        # -s avoids echoing the password to the terminal.
        read -r -s ntfy_password
        echo ""

        if [ -z "$ntfy_password" ]; then
            print_error "Password cannot be empty"
            return 1
        fi

        # Exported so the Ansible playbook can read them.
        export NTFY_USER="$ntfy_user"
        export NTFY_PASSWORD="$ntfy_password"
    else
        print_success "Using NTFY_USER and NTFY_PASSWORD from environment"
    fi

    echo ""
    print_info "This will:"
    echo " • Install ntfy from official repositories"
    echo " • Configure ntfy with authentication (deny-all by default)"
    echo " • Create admin user: $NTFY_USER"
    echo " • Set up Caddy reverse proxy"
    echo ""

    if ! confirm_action "Proceed with ntfy deployment?"; then
        print_warning "Skipped ntfy deployment"
        return 1
    fi

    print_info "Running: ansible-playbook -i inventory.ini services/ntfy/deploy_ntfy_playbook.yml"
    echo ""

    if ansible-playbook -i inventory.ini services/ntfy/deploy_ntfy_playbook.yml; then
        print_success "ntfy deployment complete"
        echo ""
        print_info "ntfy is now available at your configured subdomain"
        print_info "Admin user: $NTFY_USER"
        return 0
    else
        print_error "ntfy deployment failed"
        return 1
    fi
}

###############################################################################
# Uptime Kuma Deployment
###############################################################################

# Deploy Uptime Kuma on watchtower via its Ansible playbook.
# Returns 0 on success, 1 when the user skips or the playbook fails.
deploy_uptime_kuma() {
    local playbook="services/uptime_kuma/deploy_uptime_kuma_playbook.yml"

    print_header "Deploying Uptime Kuma (Monitoring Platform)"

    cd "$ANSIBLE_DIR"

    print_info "This will:"
    echo " • Deploy Uptime Kuma via Docker"
    echo " • Configure Caddy reverse proxy"
    echo " • Set up data persistence"
    echo ""

    if ! confirm_action "Proceed with Uptime Kuma deployment?"; then
        print_warning "Skipped Uptime Kuma deployment"
        return 1
    fi

    print_info "Running: ansible-playbook -i inventory.ini $playbook"
    echo ""

    if ! ansible-playbook -i inventory.ini "$playbook"; then
        print_error "Uptime Kuma deployment failed"
        return 1
    fi

    print_success "Uptime Kuma deployment complete"
    echo ""
    # Uptime Kuma creates its admin account on first web visit, not via
    # the playbook, so the user has to finish setup manually.
    print_warning "IMPORTANT: First-time setup required"
    echo " 1. Access Uptime Kuma at your configured subdomain"
    echo " 2. Create admin user on first visit"
    echo " 3. Update ansible/infra_secrets.yml with credentials"
    return 0
}

###############################################################################
# Backup Configuration
###############################################################################

# Optionally configure automated Uptime Kuma backups from watchtower to lapy.
# Skipping (by choice, or because rsync is missing) returns 0; only an actual
# playbook failure returns 1.
setup_uptime_kuma_backup() {
    local playbook="services/uptime_kuma/setup_backup_uptime_kuma_to_lapy.yml"

    print_header "Setting Up Uptime Kuma Backup (Optional)"

    cd "$ANSIBLE_DIR"

    print_info "This will set up automated backups to lapy"
    echo ""

    if ! confirm_action "Set up Uptime Kuma backup to lapy?"; then
        print_warning "Skipped backup setup"
        return 0
    fi

    # rsync is installed by Layer 2; without it the backup job cannot run.
    print_info "Verifying rsync is installed on watchtower and lapy..."
    if ! ansible watchtower -i inventory.ini -m shell -a "command -v rsync" &>/dev/null; then
        print_error "rsync not found on watchtower"
        print_info "Run Layer 2 to install rsync"
        print_warning "Backup setup skipped - rsync not available"
        return 0
    fi

    print_info "Running: ansible-playbook -i inventory.ini $playbook"
    echo ""

    if ! ansible-playbook -i inventory.ini "$playbook"; then
        print_error "Backup setup failed"
        return 1
    fi

    print_success "Uptime Kuma backup configured"
    print_info "Backups will run periodically via cron"
    return 0
}

###############################################################################
# Post-Deployment Configuration
###############################################################################

# Configure ntfy as a notification provider inside Uptime Kuma (optional).
# Requires the Uptime Kuma admin account to exist and its credentials to be
# present in infra_secrets.yml. Every "soft" outcome (skip, missing/empty
# credentials, playbook failure) returns 0 so the overall flow keeps going.
setup_ntfy_notification() {
    print_header "Setting Up ntfy Notification in Uptime Kuma (Optional)"

    cd "$ANSIBLE_DIR"

    print_info "This will automatically configure ntfy as a notification method in Uptime Kuma"
    print_warning "Prerequisites:"
    echo " • Uptime Kuma admin account must be created first"
    echo " • infra_secrets.yml must have Uptime Kuma credentials"
    echo ""

    if ! confirm_action "Set up ntfy notification in Uptime Kuma?"; then
        print_warning "Skipped ntfy notification setup"
        print_info "You can set this up manually or run this script again later"
        return 0
    fi

    # Anchor the keys at line start (matching verify_final_setup) so
    # commented-out entries don't falsely pass the presence check.
    if ! grep -q "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null || \
       ! grep -q "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null; then
        print_error "Uptime Kuma credentials not found in infra_secrets.yml"
        print_info "Please complete Step 1 and 2 of post-deployment steps first:"
        echo " 1. Create admin user in Uptime Kuma web UI"
        echo " 2. Add credentials to ansible/infra_secrets.yml"
        print_warning "Skipped - you can run this script again after completing those steps"
        return 0
    fi

    # Extract the values and strip surrounding quotes; '|| true' keeps a
    # non-matching grep from aborting the script when pipefail is in effect.
    local uk_user uk_pass
    uk_user=$(grep "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'" || true)
    uk_pass=$(grep "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'" || true)

    if [ -z "$uk_user" ] || [ -z "$uk_pass" ]; then
        print_error "Uptime Kuma credentials are empty in infra_secrets.yml"
        print_info "Please update ansible/infra_secrets.yml with your credentials"
        return 0
    fi

    print_success "Found Uptime Kuma credentials in infra_secrets.yml"

    print_info "Running playbook to configure ntfy notification..."
    echo ""

    if ansible-playbook -i inventory.ini services/ntfy/setup_ntfy_uptime_kuma_notification.yml; then
        print_success "ntfy notification configured in Uptime Kuma"
        print_info "You can now use ntfy for all your monitors!"
    else
        print_error "Failed to configure ntfy notification"
        print_info "You can set this up manually or run the playbook again later:"
        echo " ansible-playbook -i inventory.ini services/ntfy/setup_ntfy_uptime_kuma_notification.yml"
    fi
    return 0
}

###############################################################################
# Verification Functions
###############################################################################

# Spot-check the deployed services on watchtower over SSH: the ntfy systemd
# unit, the Uptime Kuma container, and both Caddy site configs. Only prints
# successes/warnings; never fails the script.
verify_deployments() {
    print_header "Verifying Deployments"

    cd "$ANSIBLE_DIR"

    # Pull the SSH key path out of the inventory and expand a leading '~'.
    local ssh_key
    ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/' || true)
    ssh_key="${ssh_key/#\~/$HOME}"

    local watchtower_host
    watchtower_host=$(ansible-inventory -i inventory.ini --list | python3 -c "import sys, json; data=json.load(sys.stdin); print(' '.join(data.get('watchtower', {}).get('hosts', [])))" 2>/dev/null || true)

    if [ -z "$watchtower_host" ]; then
        print_error "Could not determine watchtower host"
        return
    fi

    print_info "Checking services on watchtower ($watchtower_host)..."
    echo ""

    # Shared non-interactive SSH invocation; BatchMode prevents password
    # prompts, and the target is quoted to avoid word splitting.
    local -a ssh_cmd=(timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes "counterweight@$watchtower_host")

    if "${ssh_cmd[@]}" "systemctl is-active ntfy" &>/dev/null; then
        print_success "ntfy service running"
    else
        print_warning "ntfy service not running or not installed"
    fi

    if "${ssh_cmd[@]}" "docker ps | grep uptime-kuma" &>/dev/null; then
        print_success "Uptime Kuma container running"
    else
        print_warning "Uptime Kuma container not running"
    fi

    if "${ssh_cmd[@]}" "test -f /etc/caddy/sites-enabled/ntfy.conf" &>/dev/null; then
        print_success "ntfy Caddy config exists"
    else
        print_warning "ntfy Caddy config not found"
    fi

    if "${ssh_cmd[@]}" "test -f /etc/caddy/sites-enabled/uptime-kuma.conf" &>/dev/null; then
        print_success "Uptime Kuma Caddy config exists"
    else
        print_warning "Uptime Kuma Caddy config not found"
    fi

    echo ""
}
# Final verification of the manual post-deployment steps:
#   1. Uptime Kuma credentials present (and non-empty) in infra_secrets.yml
#   2. The Uptime Kuma API is reachable with those credentials
#   3. An ntfy notification provider exists in Uptime Kuma
# Returns 0 when everything checks out, 1 otherwise.
verify_final_setup() {
    print_header "Final Verification - Post-Deployment Steps"

    cd "$ANSIBLE_DIR"

    print_info "Checking if all post-deployment steps were completed..."
    echo ""

    local all_ok=true
    local uk_user=""
    local uk_pass=""

    # --- Check 1: credentials present in infra_secrets.yml ---
    print_info "Checking infra_secrets.yml..."
    if grep -q "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null && \
       grep -q "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null; then
        uk_user=$(grep "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'" || true)
        uk_pass=$(grep "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'" || true)

        if [ -n "$uk_user" ] && [ -n "$uk_pass" ] && [ "$uk_user" != '""' ] && [ "$uk_pass" != '""' ]; then
            print_success "Uptime Kuma credentials configured in infra_secrets.yml"
        else
            print_error "Uptime Kuma credentials are empty in infra_secrets.yml"
            print_info "Please complete Step 2: Update infra_secrets.yml"
            all_ok=false
        fi
    else
        print_error "Uptime Kuma credentials not found in infra_secrets.yml"
        print_info "Please complete Step 2: Update infra_secrets.yml"
        all_ok=false
    fi

    echo ""

    # --- Check 2: the Uptime Kuma API accepts the credentials ---
    print_info "Checking Uptime Kuma API access..."

    if [ -n "$uk_user" ] && [ -n "$uk_pass" ]; then
        # Probe the API with a throwaway Python script (uptime_kuma_api is
        # installed in the project's virtualenv).
        local api_probe
        api_probe=$(mktemp)
        cat > "$api_probe" << 'EOFPYTHON'
import sys
import yaml
from uptime_kuma_api import UptimeKumaApi

try:
    # Load config
    with open('infra_vars.yml', 'r') as f:
        infra_vars = yaml.safe_load(f)

    with open('services/uptime_kuma/uptime_kuma_vars.yml', 'r') as f:
        uk_vars = yaml.safe_load(f)

    with open('infra_secrets.yml', 'r') as f:
        secrets = yaml.safe_load(f)

    root_domain = infra_vars.get('root_domain')
    subdomain = uk_vars.get('uptime_kuma_subdomain', 'uptime')
    url = f"https://{subdomain}.{root_domain}"

    username = secrets.get('uptime_kuma_username')
    password = secrets.get('uptime_kuma_password')

    # Try to connect
    api = UptimeKumaApi(url)
    api.login(username, password)

    # Check if we can get monitors
    monitors = api.get_monitors()

    print(f"SUCCESS:{len(monitors)}")
    api.disconnect()
    sys.exit(0)

except Exception as e:
    print(f"ERROR:{str(e)}", file=sys.stderr)
    sys.exit(1)
EOFPYTHON

        # '|| true': the probe exits 1 on failure and set -e must not fire.
        local result
        result=$(cd "$ANSIBLE_DIR" && python3 "$api_probe" 2>&1 || true)
        rm -f "$api_probe"

        if echo "$result" | grep -q "^SUCCESS:"; then
            local monitor_count
            monitor_count=$(echo "$result" | grep "^SUCCESS:" | cut -d: -f2)
            print_success "Successfully connected to Uptime Kuma API"
            print_info "Current monitors: $monitor_count"
        else
            print_error "Cannot connect to Uptime Kuma API"
            print_warning "This usually means:"
            echo " • Admin account not created yet (Step 1)"
            echo " • Wrong credentials in infra_secrets.yml (Step 2)"
            echo " • Uptime Kuma not accessible"
            all_ok=false
        fi
    else
        print_warning "Skipping API check - credentials not configured"
        all_ok=false
    fi

    echo ""

    # --- Check 3: an ntfy notification provider exists in Uptime Kuma ---
    print_info "Checking ntfy notification configuration..."

    if [ -n "$uk_user" ] && [ -n "$uk_pass" ]; then
        local notif_probe
        notif_probe=$(mktemp)
        cat > "$notif_probe" << 'EOFPYTHON'
import sys
import yaml
from uptime_kuma_api import UptimeKumaApi

try:
    # Load config
    with open('infra_vars.yml', 'r') as f:
        infra_vars = yaml.safe_load(f)

    with open('services/uptime_kuma/uptime_kuma_vars.yml', 'r') as f:
        uk_vars = yaml.safe_load(f)

    with open('infra_secrets.yml', 'r') as f:
        secrets = yaml.safe_load(f)

    root_domain = infra_vars.get('root_domain')
    subdomain = uk_vars.get('uptime_kuma_subdomain', 'uptime')
    url = f"https://{subdomain}.{root_domain}"

    username = secrets.get('uptime_kuma_username')
    password = secrets.get('uptime_kuma_password')

    # Connect
    api = UptimeKumaApi(url)
    api.login(username, password)

    # Check for ntfy notification
    notifications = api.get_notifications()
    ntfy_found = any(n.get('type') == 'ntfy' for n in notifications)

    if ntfy_found:
        print("SUCCESS:ntfy notification configured")
    else:
        print("NOTFOUND:No ntfy notification found")

    api.disconnect()
    sys.exit(0)

except Exception as e:
    print(f"ERROR:{str(e)}", file=sys.stderr)
    sys.exit(1)
EOFPYTHON

        local notif_result
        notif_result=$(cd "$ANSIBLE_DIR" && python3 "$notif_probe" 2>&1 || true)
        rm -f "$notif_probe"

        if echo "$notif_result" | grep -q "^SUCCESS:"; then
            print_success "ntfy notification is configured in Uptime Kuma"
        elif echo "$notif_result" | grep -q "^NOTFOUND:"; then
            print_warning "ntfy notification not yet configured"
            print_info "Run the script again and choose 'yes' for ntfy notification setup"
            print_info "Or complete Step 3 manually"
            all_ok=false
        else
            print_warning "Could not verify ntfy notification (API access issue)"
        fi
    else
        print_warning "Skipping ntfy check - credentials not configured"
    fi

    echo ""

    # --- Summary ---
    if [ "$all_ok" = true ]; then
        print_success "All post-deployment steps completed! ✓"
        echo ""
        print_info "Layer 4 is fully configured and ready to use"
        print_info "You can now proceed to Layer 6 (infrastructure monitoring)"
        return 0
    else
        print_warning "Some post-deployment steps are incomplete"
        echo ""
        print_info "Complete these steps:"
        echo " 1. Access Uptime Kuma web UI and create admin account"
        echo " 2. Update ansible/infra_secrets.yml with credentials"
        echo " 3. Run this script again to configure ntfy notification"
        echo ""
        print_info "You can also complete manually and verify with:"
        echo " ./scripts/setup_layer_4_monitoring.sh"
        return 1
    fi
}

###############################################################################
# Summary Functions
###############################################################################

# Print the end-of-run summary: what was deployed, the required manual
# post-deployment steps, and pointers to the next layers. Output only.
print_summary() {
    print_header "Layer 4 Setup Complete! 🎉"

    # What the script configured.
    echo "Summary of what was configured:"
    echo ""
    print_success "ntfy notification service deployed"
    print_success "Uptime Kuma monitoring platform deployed"
    print_success "Caddy reverse proxy configured for both services"
    echo ""

    # Steps the operator must still perform by hand.
    print_warning "REQUIRED POST-DEPLOYMENT STEPS:"
    echo ""
    echo "MANUAL (do these first):"
    echo " 1. Access Uptime Kuma Web UI and create admin account"
    echo " 2. Update ansible/infra_secrets.yml with credentials"
    echo ""
    echo "AUTOMATED (script can do these):"
    echo " 3. Configure ntfy notification - script will offer to set this up"
    echo " 4. Final verification - script will check everything"
    echo ""
    print_info "After completing steps 1 & 2, the script will:"
    echo " • Automatically configure ntfy in Uptime Kuma"
    echo " • Verify all post-deployment steps"
    echo " • Tell you if anything is missing"
    echo ""
    print_warning "You MUST complete steps 1 & 2 before proceeding to Layer 6!"
    echo ""

    # Why these services matter.
    print_info "What these services enable:"
    echo " • ntfy: Push notifications to your devices"
    echo " • Uptime Kuma: Monitor all services and infrastructure"
    echo " • Together: Complete monitoring and alerting solution"
    echo ""

    # Where to go from here.
    print_info "Next steps:"
    echo " 1. Complete the post-deployment steps above"
    echo " 2. Test ntfy: Send a test notification"
    echo " 3. Test Uptime Kuma: Create a test monitor"
    echo " 4. Proceed to Layer 5: ./scripts/setup_layer_5_headscale.sh (optional)"
    echo " OR Layer 6: ./scripts/setup_layer_6_infra_monitoring.sh"
    echo ""
}

###############################################################################
# Main Execution
###############################################################################

# Entry point: orchestrates the full Layer 4 flow - prerequisite checks,
# DNS validation, deployments, backup setup, verification, and the optional
# ntfy-in-Uptime-Kuma configuration.
main() {
    clear

    print_header "📊 Layer 4: Core Monitoring & Notifications"

    echo "This script will deploy ntfy and Uptime Kuma on watchtower."
    echo ""
    print_info "Services to deploy:"
    echo " • ntfy (notification service)"
    echo " • Uptime Kuma (monitoring platform)"
    echo ""

    if ! confirm_action "Continue with Layer 4 setup?"; then
        echo "Setup cancelled."
        exit 0
    fi

    # Hard gates: abort early if the environment or config is wrong.
    check_prerequisites
    check_vars_files
    check_dns_configuration

    # Deployments may be skipped interactively; don't let that abort the run.
    deploy_ntfy || true
    echo ""
    deploy_uptime_kuma || true
    echo ""
    setup_uptime_kuma_backup || true

    echo ""
    verify_deployments

    # The summary is always shown, regardless of what was deployed.
    print_summary
    echo ""

    # Offer ntfy notification setup even if deployments were skipped above.
    print_header "Configure ntfy Notification in Uptime Kuma"
    print_info "After creating your Uptime Kuma admin account and updating infra_secrets.yml,"
    print_info "the script can automatically configure ntfy as a notification method."
    echo ""
    print_warning "Prerequisites:"
    echo " 1. Access Uptime Kuma web UI and create admin account"
    echo " 2. Update ansible/infra_secrets.yml with your credentials"
    echo ""

    setup_ntfy_notification

    # Final verification of the manual post-deployment steps.
    echo ""
    verify_final_setup
}
# Kick off the workflow, forwarding any command-line arguments.
main "$@"