From c14d61d090afd686d484b9a3f2ad3beb6da99597 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Sun, 7 Dec 2025 19:02:50 +0100
Subject: [PATCH] Replace layered setup scripts with playbooks; deploy
 headscale-ui; monitor ntfy-emergency-app in Uptime Kuma

---
 SCRIPT_PLAYBOOK_MAPPING.md                    |  59 --
 ansible/backup.infra_vars.yml                 |   4 -
 ansible/infra/920_join_headscale_mesh.yml     |   1 +
 ansible/infra_secrets.yml.example             |  13 +
 ansible/infra_vars.yml                        |   3 -
 ...le.inventory.ini => inventory.ini.example} |   0
 .../deploy_headscale_ui_playbook.yml          | 142 +++
 .../deploy_ntfy_emergency_app_playbook.yml    | 111 +++
 ansible/services_config.yml                   |   2 +-
 ansible/services_config.yml.example           |  32 -
 backup.inventory.ini                          |  16 -
 scripts/setup_layer_0.sh                      | 488 -----------
 scripts/setup_layer_1a_vps.sh                 | 393 ---------
 scripts/setup_layer_1b_nodito.sh              | 411 ---------
 scripts/setup_layer_2.sh                      | 407 ---------
 scripts/setup_layer_3_caddy.sh                | 355 --------
 scripts/setup_layer_4_monitoring.sh           | 806 ------------------
 scripts/setup_layer_5_headscale.sh            | 524 ------------
 scripts/setup_layer_6_infra_monitoring.sh     | 473 ----------
 scripts/setup_layer_7_services.sh             | 524 ------------
 scripts/setup_layer_8_secondary_services.sh   | 384 ---------
 21 files changed, 268 insertions(+), 4880 deletions(-)
 delete mode 100644 SCRIPT_PLAYBOOK_MAPPING.md
 delete mode 100644 ansible/backup.infra_vars.yml
 rename ansible/{example.inventory.ini => inventory.ini.example} (100%)
 create mode 100644 ansible/services/headscale/deploy_headscale_ui_playbook.yml
 delete mode 100644 ansible/services_config.yml.example
 delete mode 100644 backup.inventory.ini
 delete mode 100755 scripts/setup_layer_0.sh
 delete mode 100755 scripts/setup_layer_1a_vps.sh
 delete mode 100755 scripts/setup_layer_1b_nodito.sh
 delete mode 100755 scripts/setup_layer_2.sh
 delete mode 100755 scripts/setup_layer_3_caddy.sh
 delete mode 100755 scripts/setup_layer_4_monitoring.sh
 delete mode 100755 scripts/setup_layer_5_headscale.sh
 delete mode 100755 scripts/setup_layer_6_infra_monitoring.sh
 delete mode 100755 scripts/setup_layer_7_services.sh
 delete mode 100755 scripts/setup_layer_8_secondary_services.sh

diff --git a/SCRIPT_PLAYBOOK_MAPPING.md b/SCRIPT_PLAYBOOK_MAPPING.md
deleted file mode 100644
index 38189ab..0000000
--- a/SCRIPT_PLAYBOOK_MAPPING.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Script to Playbook Mapping
-
-This document describes which playbooks each setup script applies to which machines.
-
-## Table
-
-| Script | Playbook | Target Machines/Groups | Notes |
-|--------|----------|------------------------|-------|
-| **setup_layer_0.sh** | None | N/A | Initial setup script - creates venv, config files |
-| **setup_layer_1a_vps.sh** | `infra/01_user_and_access_setup_playbook.yml` | `vps` (vipy, watchtower, spacey) | Creates counterweight user, configures SSH |
-| **setup_layer_1a_vps.sh** | `infra/02_firewall_and_fail2ban_playbook.yml` | `vps` (vipy, watchtower, spacey) | Configures UFW firewall and fail2ban |
-| **setup_layer_1b_nodito.sh** | `infra/nodito/30_proxmox_bootstrap_playbook.yml` | `nodito_host` (nodito) | Initial Proxmox bootstrap |
-| **setup_layer_1b_nodito.sh** | `infra/nodito/31_proxmox_community_repos_playbook.yml` | `nodito_host` (nodito) | Configures Proxmox community repositories |
-| **setup_layer_1b_nodito.sh** | `infra/nodito/32_zfs_pool_setup_playbook.yml` | `nodito_host` (nodito) | Sets up ZFS pool on Proxmox |
-| **setup_layer_1b_nodito.sh** | `infra/nodito/33_proxmox_debian_cloud_template.yml` | `nodito_host` (nodito) | Creates Debian cloud template for VMs |
-| **setup_layer_2.sh** | `infra/900_install_rsync.yml` | `all` (vipy, watchtower, spacey, nodito) | Installs rsync on all machines |
-| **setup_layer_2.sh** | `infra/910_docker_playbook.yml` | `all` (vipy, watchtower, spacey, nodito) | Installs Docker on all machines |
-| **setup_layer_3_caddy.sh** | `services/caddy_playbook.yml` | `vps` (vipy, watchtower, spacey) | Installs and configures Caddy reverse proxy |
-| **setup_layer_4_monitoring.sh** | `services/ntfy/deploy_ntfy_playbook.yml` | `watchtower` | Deploys ntfy notification service |
-| **setup_layer_4_monitoring.sh** | `services/uptime_kuma/deploy_uptime_kuma_playbook.yml` | `watchtower` | Deploys Uptime Kuma monitoring |
-| **setup_layer_4_monitoring.sh** | `services/uptime_kuma/setup_backup_uptime_kuma_to_lapy.yml` | `lapy` (localhost) | Configures backup of Uptime Kuma to laptop |
-| **setup_layer_4_monitoring.sh** | `services/ntfy/setup_ntfy_uptime_kuma_notification.yml` | `watchtower` | Configures ntfy notifications for Uptime Kuma |
-| **setup_layer_5_headscale.sh** | `services/headscale/deploy_headscale_playbook.yml` | `spacey` | Deploys Headscale mesh VPN server |
-| **setup_layer_5_headscale.sh** | `infra/920_join_headscale_mesh.yml` | `all` (vipy, watchtower, spacey, nodito) | Joins all machines to Headscale mesh (with --limit) |
-| **setup_layer_5_headscale.sh** | `services/headscale/setup_backup_headscale_to_lapy.yml` | `lapy` (localhost) | Configures backup of Headscale to laptop |
-| **setup_layer_6_infra_monitoring.sh** | `infra/410_disk_usage_alerts.yml` | `all` (vipy, watchtower, spacey, nodito, lapy) | Sets up disk usage monitoring alerts |
-| **setup_layer_6_infra_monitoring.sh** | `infra/420_system_healthcheck.yml` | `all` (vipy, watchtower, spacey, nodito, lapy) | Sets up system health checks |
-| **setup_layer_6_infra_monitoring.sh** | `infra/430_cpu_temp_alerts.yml` | `nodito_host` (nodito) | Sets up CPU temperature alerts for Proxmox |
-| **setup_layer_7_services.sh** | `services/vaultwarden/deploy_vaultwarden_playbook.yml` | `vipy` | Deploys Vaultwarden password manager |
-| **setup_layer_7_services.sh** | `services/forgejo/deploy_forgejo_playbook.yml` | `vipy` | Deploys Forgejo Git server |
-| **setup_layer_7_services.sh** | `services/lnbits/deploy_lnbits_playbook.yml` | `vipy` | Deploys LNbits Lightning wallet |
-| **setup_layer_7_services.sh** | `services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml` | `lapy` (localhost) | Configures backup of Vaultwarden to laptop |
-| **setup_layer_7_services.sh** | `services/lnbits/setup_backup_lnbits_to_lapy.yml` | `lapy` (localhost) | Configures backup of LNbits to laptop |
-| **setup_layer_8_secondary_services.sh** | `services/ntfy-emergency-app/deploy_ntfy_emergency_app_playbook.yml` | `vipy` | Deploys emergency ntfy app |
-| **setup_layer_8_secondary_services.sh** | `services/memos/deploy_memos_playbook.yml` | `memos-box` (VM on nodito) | Deploys Memos note-taking service |
-
-## Machine Groups Reference
-
-- **vps**: vipy, watchtower, spacey (VPS servers)
-- **nodito_host**: nodito (Proxmox server)
-- **nodito_vms**: memos-box and other VMs created on nodito
-- **lapy**: localhost (your laptop)
-- **all**: All machines in inventory
-- **watchtower**: Single VPS for monitoring services
-- **vipy**: Single VPS for main services
-- **spacey**: Single VPS for Headscale
-- **memos-box**: VM on nodito for Memos service
-
-## Notes
-
-- Scripts use `--limit` flag to restrict playbooks that target `all` to specific hosts
-- Backup playbooks run on `lapy` (localhost) to configure backup jobs
-- Some playbooks are optional and may be skipped if hosts aren't configured
-- Layer 0 is a prerequisite for all other layers
-
-
-
-
-
diff --git a/ansible/backup.infra_vars.yml b/ansible/backup.infra_vars.yml
deleted file mode 100644
index 952df93..0000000
--- a/ansible/backup.infra_vars.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-new_user: counterweight
-ssh_port: 22
-allow_ssh_from: "any"
-root_domain: contrapeso.xyz
diff --git a/ansible/infra/920_join_headscale_mesh.yml b/ansible/infra/920_join_headscale_mesh.yml
index a0c3b5a..425036c 100644
--- a/ansible/infra/920_join_headscale_mesh.yml
+++ b/ansible/infra/920_join_headscale_mesh.yml
@@ -99,6 +99,7 @@
           --login-server {{ headscale_domain }}
           --authkey {{ auth_key }}
           --accept-dns=true
+          --advertise-tags "tag:{{ inventory_hostname }}"
       register: tailscale_up_result
       changed_when: "'already authenticated' not in tailscale_up_result.stdout"
       failed_when: tailscale_up_result.rc != 0 and 'already authenticated' not in tailscale_up_result.stdout
diff --git a/ansible/infra_secrets.yml.example b/ansible/infra_secrets.yml.example
index 07ee552..80b740b 100644
--- a/ansible/infra_secrets.yml.example
+++ b/ansible/infra_secrets.yml.example
@@ -9,3 +9,16 @@ uptime_kuma_password: "your_password_here"
 
 ntfy_username: "your_ntfy_username"
 ntfy_password: "your_ntfy_password"
+
+# headscale-ui credentials
+# Used for HTTP basic authentication via Caddy
+# Provide either:
+#   - headscale_ui_password: plain text password (will be hashed automatically)
+#   - headscale_ui_password_hash: pre-hashed bcrypt password (more secure; use caddy hash-password to generate)
+
+headscale_ui_username: "admin"
+headscale_ui_password: "your_secure_password_here"
+# headscale_ui_password_hash: "$2a$14$..."  # Optional: pre-hashed password
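+# Example (assuming Caddy is already installed on the host): generate the hash with
+#   caddy hash-password --plaintext 'your_secure_password_here'
+# then paste the resulting "$2a$14$..." value into headscale_ui_password_hash.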
diff --git a/ansible/infra_vars.yml b/ansible/infra_vars.yml
index a719e68..952df93 100644
--- a/ansible/infra_vars.yml
+++ b/ansible/infra_vars.yml
@@ -1,6 +1,3 @@
-# Infrastructure Variables
-# Generated by setup_layer_0.sh
-
 new_user: counterweight
 ssh_port: 22
 allow_ssh_from: "any"
diff --git a/ansible/example.inventory.ini b/ansible/inventory.ini.example
similarity index 100%
rename from ansible/example.inventory.ini
rename to ansible/inventory.ini.example
diff --git a/ansible/services/headscale/deploy_headscale_ui_playbook.yml b/ansible/services/headscale/deploy_headscale_ui_playbook.yml
new file mode 100644
index 0000000..3be792c
--- /dev/null
+++ b/ansible/services/headscale/deploy_headscale_ui_playbook.yml
@@ -0,0 +1,142 @@
+- name: Deploy headscale-ui with Docker and configure Caddy reverse proxy
+  hosts: spacey
+  become: yes
+  vars_files:
+    - ../../infra_vars.yml
+    - ../../services_config.yml
+    - ../../infra_secrets.yml
+    - ./headscale_vars.yml
+  vars:
+    headscale_subdomain: "{{ subdomains.headscale }}"
+    # caddy_sites_dir is provided by services_config.yml; redefining it here as
+    # "{{ caddy_sites_dir }}" would create a recursive template loop
+    headscale_domain: "{{ headscale_subdomain }}.{{ root_domain }}"
+    headscale_ui_version: "2025.08.23"
+    headscale_ui_dir: /opt/headscale-ui
+    headscale_ui_http_port: 18080
+    headscale_ui_https_port: 18443
+
+  tasks:
+    - name: Check if Docker is installed
+      command: docker --version
+      register: docker_check
+      changed_when: false
+      failed_when: false
+
+    - name: Fail if Docker is not installed
+      fail:
+        msg: "Docker is not installed. Please run the docker_playbook.yml first."
+      when: docker_check.rc != 0
+
+    - name: Ensure Docker service is running
+      systemd:
+        name: docker
+        state: started
+        enabled: yes
+
+    - name: Create headscale-ui directory
+      file:
+        path: "{{ headscale_ui_dir }}"
+        state: directory
+        owner: root
+        group: root
+        mode: '0755'
+
+    - name: Create docker-compose.yml for headscale-ui
+      copy:
+        dest: "{{ headscale_ui_dir }}/docker-compose.yml"
+        content: |
+          version: "3"
+          services:
+            headscale-ui:
+              image: ghcr.io/gurucomputing/headscale-ui:{{ headscale_ui_version }}
+              container_name: headscale-ui
+              restart: unless-stopped
+              ports:
+                - "{{ headscale_ui_http_port }}:8080"
+                - "{{ headscale_ui_https_port }}:8443"
+        owner: root
+        group: root
+        mode: '0644'
+
+    - name: Deploy headscale-ui container with docker compose
+      command: docker compose up -d
+      args:
+        chdir: "{{ headscale_ui_dir }}"
+      register: docker_compose_result
+      changed_when: "'Creating' in docker_compose_result.stdout or 'Starting' in docker_compose_result.stdout or docker_compose_result.rc != 0"
+
+    - name: Wait for headscale-ui to be ready
+      uri:
+        url: "http://localhost:{{ headscale_ui_http_port }}"
+        status_code: [200, 404]
+      register: headscale_ui_ready
+      until: headscale_ui_ready.status in [200, 404]
+      retries: 30
+      delay: 2
+      ignore_errors: yes
+
+    - name: Ensure Caddy sites-enabled directory exists
+      file:
+        path: "{{ caddy_sites_dir }}"
+        state: directory
+        owner: root
+        group: root
+        mode: '0755'
+
+    - name: Ensure Caddyfile includes import directive for sites-enabled
+      lineinfile:
+        path: /etc/caddy/Caddyfile
+        line: 'import sites-enabled/*'
+        insertafter: EOF
+        state: present
+        backup: yes
+
+    - name: Fail if username is not provided
+      fail:
+        msg: "headscale_ui_username must be set in infra_secrets.yml"
+      when: headscale_ui_username is not defined
+
+    - name: Fail if neither password nor password hash is provided
+      fail:
+        msg: "Either headscale_ui_password or headscale_ui_password_hash must be set in infra_secrets.yml"
infra_secrets.yml" + when: headscale_ui_password is not defined and headscale_ui_password_hash is not defined + + - name: Generate bcrypt hash for headscale-ui password + become: yes + command: caddy hash-password --plaintext "{{ headscale_ui_password }}" + register: headscale_ui_password_hash_result + changed_when: false + no_log: true + when: headscale_ui_password is defined and headscale_ui_password_hash is not defined + + - name: Set headscale-ui password hash from generated value + set_fact: + headscale_ui_password_hash: "{{ headscale_ui_password_hash_result.stdout.strip() }}" + when: headscale_ui_password is defined and headscale_ui_password_hash is not defined + + - name: Update headscale Caddy config to include headscale-ui /web route with authentication + become: yes + copy: + dest: "{{ caddy_sites_dir }}/headscale.conf" + content: | + {{ headscale_domain }} { + @headscale_ui { + path /web* + } + handle @headscale_ui { + basicauth { + {{ headscale_ui_username }} {{ headscale_ui_password_hash }} + } + reverse_proxy http://localhost:{{ headscale_ui_http_port }} + } + # Headscale API is protected by its own API key authentication + # All API operations require a valid Bearer token in the Authorization header + reverse_proxy * http://localhost:{{ headscale_port }} + } + owner: root + group: root + mode: '0644' + + - name: Reload Caddy to apply new config + command: systemctl reload caddy + diff --git a/ansible/services/ntfy-emergency-app/deploy_ntfy_emergency_app_playbook.yml b/ansible/services/ntfy-emergency-app/deploy_ntfy_emergency_app_playbook.yml index 18a3b72..b8c0064 100644 --- a/ansible/services/ntfy-emergency-app/deploy_ntfy_emergency_app_playbook.yml +++ b/ansible/services/ntfy-emergency-app/deploy_ntfy_emergency_app_playbook.yml @@ -14,6 +14,7 @@ ntfy_emergency_app_ntfy_url: "https://{{ ntfy_service_domain }}" ntfy_emergency_app_ntfy_user: "{{ ntfy_username | default('') }}" ntfy_emergency_app_ntfy_password: "{{ ntfy_password | default('') }}" + uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}" tasks: - name: Create ntfy-emergency-app directory @@ -77,3 +78,113 @@ - name: Reload Caddy to apply new config command: systemctl reload caddy + + - name: Create Uptime Kuma monitor setup script for ntfy-emergency-app + delegate_to: localhost + become: no + copy: + dest: /tmp/setup_ntfy_emergency_app_monitor.py + content: | + #!/usr/bin/env python3 + import sys + import traceback + import yaml + from uptime_kuma_api import UptimeKumaApi, MonitorType + + try: + # Load configs + with open('/tmp/ansible_config.yml', 'r') as f: + config = yaml.safe_load(f) + + url = config['uptime_kuma_url'] + username = config['username'] + password = config['password'] + monitor_url = config['monitor_url'] + monitor_name = config['monitor_name'] + + # Connect to Uptime Kuma + api = UptimeKumaApi(url, timeout=30) + api.login(username, password) + + # Get all monitors + monitors = api.get_monitors() + + # Find or create "services" group + group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None) + if not group: + group_result = api.add_monitor(type='group', name='services') + # Refresh to get the group with id + monitors = api.get_monitors() + group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None) + + # Check if monitor already exists + existing_monitor = None + for monitor in monitors: + if monitor.get('name') == monitor_name: + existing_monitor = monitor + break + + # Get ntfy 
+              notifications = api.get_notifications()
+              ntfy_notification_id = None
+              for notif in notifications:
+                  if notif.get('type') == 'ntfy':
+                      ntfy_notification_id = notif.get('id')
+                      break
+
+              if existing_monitor:
+                  print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
+                  print("Skipping - monitor already configured")
+              else:
+                  print(f"Creating monitor '{monitor_name}'...")
+                  api.add_monitor(
+                      type=MonitorType.HTTP,
+                      name=monitor_name,
+                      url=monitor_url,
+                      parent=group['id'],
+                      interval=60,
+                      maxretries=3,
+                      retryInterval=60,
+                      notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
+                  )
+
+              api.disconnect()
+              print("SUCCESS")
+
+          except Exception as e:
+              error_msg = str(e) if str(e) else repr(e)
+              print(f"ERROR: {error_msg}", file=sys.stderr)
+              traceback.print_exc(file=sys.stderr)
+              sys.exit(1)
+        mode: '0755'
+
+    - name: Create temporary config for monitor setup
+      delegate_to: localhost
+      become: no
+      copy:
+        dest: /tmp/ansible_config.yml
+        content: |
+          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
+          username: "{{ uptime_kuma_username }}"
+          password: "{{ uptime_kuma_password }}"
+          monitor_url: "https://{{ ntfy_emergency_app_domain }}"
+          monitor_name: "ntfy-emergency-app"
+        mode: '0644'
+
+    - name: Run Uptime Kuma monitor setup
+      command: python3 /tmp/setup_ntfy_emergency_app_monitor.py
+      delegate_to: localhost
+      become: no
+      register: monitor_setup
+      changed_when: "'SUCCESS' in monitor_setup.stdout"
+      ignore_errors: yes
+
+    - name: Clean up temporary files
+      delegate_to: localhost
+      become: no
+      file:
+        path: "{{ item }}"
+        state: absent
+      loop:
+        - /tmp/setup_ntfy_emergency_app_monitor.py
+        - /tmp/ansible_config.yml
diff --git a/ansible/services_config.yml b/ansible/services_config.yml
index 5c0dcbd..b497c51 100644
--- a/ansible/services_config.yml
+++ b/ansible/services_config.yml
@@ -16,7 +16,7 @@ subdomains:
   lnbits: wallet
 
   # Secondary Services (on vipy)
-  ntfy_emergency_app: emergency
+  ntfy_emergency_app: avisame
   personal_blog: pablohere
 
   # Memos (on memos-box)
diff --git a/ansible/services_config.yml.example b/ansible/services_config.yml.example
deleted file mode 100644
index 972b685..0000000
--- a/ansible/services_config.yml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# Centralized Services Configuration
-# Copy this to services_config.yml and customize
-
-# Edit these subdomains to match your preferences
-subdomains:
-  # Monitoring Services (on watchtower)
-  ntfy: ntfy
-  uptime_kuma: uptime
-
-  # VPN Infrastructure (on spacey)
-  headscale: headscale
-
-  # Core Services (on vipy)
-  vaultwarden: vault
-  forgejo: git
-  lnbits: lnbits
-
-  # Secondary Services (on vipy)
-  ntfy_emergency_app: emergency
-
-  # Memos (on memos-box)
-  memos: memos
-
-# Caddy configuration
-caddy_sites_dir: /etc/caddy/sites-enabled
-
-# Service-specific settings shared across playbooks
-service_settings:
-  ntfy:
-    topic: alerts
-  headscale:
-    namespace: counter-net
diff --git a/backup.inventory.ini b/backup.inventory.ini
deleted file mode 100644
index dec2de3..0000000
--- a/backup.inventory.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-[vps]
-vipy ansible_host=207.154.226.192 ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/counterganzua
-watchtower ansible_host=206.189.63.167 ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/counterganzua
-spacey ansible_host=165.232.73.4 ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/counterganzua
-
-[nodito_host]
-nodito ansible_host=192.168.1.139 ansible_user=counterweight ansible_port=22 ansible_ssh_pass=noesfacilvivirenunmundocentralizado ansible_ssh_private_key_file=~/.ssh/counterganzua
-
-[nodito_vms]
-memos-box ansible_host=192.168.1.149 ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/counterganzua
-
-
-# Local connection to laptop: this assumes you're running ansible commands from your personal laptop
-# Make sure to adjust the username
-[lapy]
-localhost ansible_connection=local ansible_user=counterweight gpg_recipient=counterweightoperator@protonmail.com gpg_key_id=883EDBAA726BD96C
\ No newline at end of file
diff --git a/scripts/setup_layer_0.sh b/scripts/setup_layer_0.sh
deleted file mode 100755
index f994f98..0000000
--- a/scripts/setup_layer_0.sh
+++ /dev/null
@@ -1,488 +0,0 @@
-#!/bin/bash
-
-###############################################################################
-# Layer 0: Foundation Setup
-#
-# This script sets up your laptop (lapy) as the Ansible control node.
-# It prepares all the prerequisites needed for the infrastructure deployment.
-###############################################################################
-
-set -e # Exit on error
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-# Project root directory
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
-
-###############################################################################
-# Helper Functions
-###############################################################################
-
-print_header() {
-    echo -e "\n${BLUE}========================================${NC}"
-    echo -e "${BLUE}$1${NC}"
-    echo -e "${BLUE}========================================${NC}\n"
-}
-
-print_success() {
-    echo -e "${GREEN}✓${NC} $1"
-}
-
-print_error() {
-    echo -e "${RED}✗${NC} $1"
-}
-
-print_warning() {
-    echo -e "${YELLOW}⚠${NC} $1"
-}
-
-print_info() {
-    echo -e "${BLUE}ℹ${NC} $1"
-}
-
-prompt_user() {
-    local prompt="$1"
-    local default="$2"
-    local result
-
-    if [ -n "$default" ]; then
-        read -p "$(echo -e ${BLUE}${prompt}${NC} [${default}]: )" result
-        result="${result:-$default}"
-    else
-        read -p "$(echo -e ${BLUE}${prompt}${NC}: )" result
-    fi
-
-    echo "$result"
-}
-
-confirm_action() {
-    local prompt="$1"
-    local response
-
-    read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response
-    [[ "$response" =~ ^[Yy]$ ]]
-}
-
-###############################################################################
-# Main Setup Functions
-###############################################################################
-
-check_prerequisites() {
-    print_header "Checking Prerequisites"
-
-    # Check if we're in the right directory
-    if [ ! -f "$PROJECT_ROOT/README.md" ] || [ ! -d "$PROJECT_ROOT/ansible" ]; then
-        print_error "Not in the correct project directory"
-        echo "Expected: $PROJECT_ROOT"
-        exit 1
-    fi
-    print_success "Running from correct directory: $PROJECT_ROOT"
-
-    # Check if Python 3 is installed
-    if ! command -v python3 &> /dev/null; then
-        print_error "Python 3 is not installed. Please install Python 3 first."
-        exit 1
-    fi
-    print_success "Python 3 found: $(python3 --version)"
-
-    # Check if git is installed
-    if ! command -v git &> /dev/null; then
-        print_warning "Git is not installed. Some features may not work."
-    else
-        print_success "Git found: $(git --version | head -n1)"
-    fi
-}
-
-setup_python_venv() {
-    print_header "Setting Up Python Virtual Environment"
-
-    cd "$PROJECT_ROOT"
-
-    if [ -d "venv" ]; then
-        print_info "Virtual environment already exists"
-        if confirm_action "Recreate virtual environment?"; then
-            rm -rf venv
-            python3 -m venv venv
-            print_success "Virtual environment recreated"
-        else
-            print_success "Using existing virtual environment"
-        fi
-    else
-        python3 -m venv venv
-        print_success "Virtual environment created"
-    fi
-
-    # Activate venv
-    source venv/bin/activate
-    print_success "Virtual environment activated"
-
-    # Upgrade pip
-    print_info "Upgrading pip..."
-    pip install --upgrade pip > /dev/null 2>&1
-    print_success "pip upgraded"
-}
-
-install_python_requirements() {
-    print_header "Installing Python Requirements"
-
-    cd "$PROJECT_ROOT"
-
-    if [ ! -f "requirements.txt" ]; then
-        print_error "requirements.txt not found"
-        exit 1
-    fi
-
-    print_info "Installing packages from requirements.txt..."
-    pip install -r requirements.txt
-    print_success "Python requirements installed"
-
-    # Verify Ansible installation
-    if ! command -v ansible &> /dev/null; then
-        print_error "Ansible installation failed"
-        exit 1
-    fi
-    print_success "Ansible installed: $(ansible --version | head -n1)"
-}
-
-install_ansible_collections() {
-    print_header "Installing Ansible Galaxy Collections"
-
-    cd "$PROJECT_ROOT/ansible"
-
-    if [ ! -f "requirements.yml" ]; then
-        print_warning "requirements.yml not found, skipping Ansible collections"
-        return
-    fi
-
-    print_info "Installing Ansible Galaxy collections..."
-    ansible-galaxy collection install -r requirements.yml
-    print_success "Ansible Galaxy collections installed"
-}
-
-setup_inventory_file() {
-    print_header "Setting Up Inventory File"
-
-    cd "$PROJECT_ROOT/ansible"
-
-    if [ -f "inventory.ini" ]; then
-        print_info "inventory.ini already exists"
-        cat inventory.ini
-        echo ""
-        if ! confirm_action "Do you want to update it?"; then
confirm_action "Do you want to update it?"; then - print_success "Using existing inventory.ini" - return - fi - fi - - print_info "Let's configure your infrastructure hosts" - echo "" - - # Collect information - echo -e -n "${BLUE}SSH key path${NC} [~/.ssh/counterganzua]: " - read ssh_key - ssh_key="${ssh_key:-~/.ssh/counterganzua}" - - echo "" - echo "Enter the IP addresses for your infrastructure (VMs will be added later):" - echo "" - - echo -e -n "${BLUE}vipy${NC} (main VPS) IP: " - read vipy_ip - echo -e -n "${BLUE}watchtower${NC} (monitoring VPS) IP: " - read watchtower_ip - echo -e -n "${BLUE}spacey${NC} (headscale VPS) IP: " - read spacey_ip - echo -e -n "${BLUE}nodito${NC} (Proxmox server) IP [optional]: " - read nodito_ip - - echo "" - echo -e -n "${BLUE}Your username on lapy${NC} [$(whoami)]: " - read lapy_user - lapy_user="${lapy_user:-$(whoami)}" - - echo -e -n "${BLUE}GPG recipient email${NC} [optional, for encrypted backups]: " - read gpg_email - echo -e -n "${BLUE}GPG key ID${NC} [optional, for encrypted backups]: " - read gpg_key - - # Generate inventory.ini - cat > inventory.ini << EOF -# Ansible Inventory File -# Generated by setup_layer_0.sh - -EOF - - vps_entries="" - if [ -n "$vipy_ip" ]; then - vps_entries+="vipy ansible_host=$vipy_ip ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=$ssh_key\n" - fi - if [ -n "$watchtower_ip" ]; then - vps_entries+="watchtower ansible_host=$watchtower_ip ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=$ssh_key\n" - fi - if [ -n "$spacey_ip" ]; then - vps_entries+="spacey ansible_host=$spacey_ip ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=$ssh_key\n" - fi - - if [ -n "$vps_entries" ]; then - cat >> inventory.ini << EOF -[vps] -${vps_entries} -EOF - fi - - if [ -n "$nodito_ip" ]; then - cat >> inventory.ini << EOF -[nodito_host] -nodito ansible_host=$nodito_ip ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=$ssh_key - -EOF - fi - - # Add nodito_vms placeholder for VMs that will be created later - cat >> inventory.ini << EOF -# Nodito VMs - These don't exist yet and will be created on the Proxmox server -# Add them here once you create VMs on nodito (e.g., memos-box, etc.) -[nodito_vms] -# Example: -# memos_box ansible_host=192.168.1.150 ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=$ssh_key - -EOF - - # Add lapy - cat >> inventory.ini << EOF -# Local connection to laptop: this assumes you're running ansible commands from your personal laptop -[lapy] -localhost ansible_connection=local ansible_user=$lapy_user -EOF - - if [ -n "$gpg_email" ] && [ -n "$gpg_key" ]; then - echo " gpg_recipient=$gpg_email gpg_key_id=$gpg_key" >> inventory.ini - fi - - print_success "inventory.ini created" - echo "" - print_info "Review your inventory file:" - cat inventory.ini - echo "" -} - -setup_infra_vars() { - print_header "Setting Up Infrastructure Variables" - - cd "$PROJECT_ROOT/ansible" - - if [ -f "infra_vars.yml" ]; then - print_info "infra_vars.yml already exists" - cat infra_vars.yml - echo "" - if ! 
confirm_action "Do you want to update it?"; then - print_success "Using existing infra_vars.yml" - return - fi - fi - - echo "" - echo -e -n "${BLUE}Your root domain${NC} (e.g., contrapeso.xyz): " - read domain - - while [ -z "$domain" ]; do - print_warning "Domain cannot be empty" - echo -e -n "${BLUE}Your root domain${NC}: " - read domain - done - - cat > infra_vars.yml << EOF -# Infrastructure Variables -# Generated by setup_layer_0.sh - -new_user: counterweight -ssh_port: 22 -allow_ssh_from: "any" -root_domain: $domain -EOF - - print_success "infra_vars.yml created" - echo "" - print_info "Contents:" - cat infra_vars.yml - echo "" -} - -setup_services_config() { - print_header "Setting Up Services Configuration" - - cd "$PROJECT_ROOT/ansible" - - if [ -f "services_config.yml" ]; then - print_info "services_config.yml already exists" - if ! confirm_action "Do you want to recreate it from template?"; then - print_success "Using existing services_config.yml" - return - fi - fi - - if [ ! -f "services_config.yml.example" ]; then - print_error "services_config.yml.example not found" - return - fi - - cp services_config.yml.example services_config.yml - - print_success "services_config.yml created" - echo "" - print_info "This file centralizes all service subdomains and Caddy settings" - print_info "Customize subdomains in: ansible/services_config.yml" - echo "" -} - -setup_infra_secrets() { - print_header "Setting Up Infrastructure Secrets" - - cd "$PROJECT_ROOT/ansible" - - if [ -f "infra_secrets.yml" ]; then - print_warning "infra_secrets.yml already exists" - if ! confirm_action "Do you want to recreate the template?"; then - print_success "Using existing infra_secrets.yml" - return - fi - fi - - cat > infra_secrets.yml << EOF -# Infrastructure Secrets -# Generated by setup_layer_0.sh -# -# IMPORTANT: This file contains sensitive credentials -# It is already in .gitignore - DO NOT commit it to git -# -# You'll need to fill in the Uptime Kuma credentials after Layer 4 -# when you deploy Uptime Kuma - -# Uptime Kuma Credentials (fill these in after deploying Uptime Kuma in Layer 4) -uptime_kuma_username: "" -uptime_kuma_password: "" -EOF - - print_success "infra_secrets.yml template created" - print_warning "You'll need to fill in Uptime Kuma credentials after Layer 4" - echo "" -} - -validate_ssh_key() { - print_header "Validating SSH Key" - - cd "$PROJECT_ROOT/ansible" - - # Extract SSH key path from inventory - if [ -f "inventory.ini" ]; then - ssh_key=$(grep "ansible_ssh_private_key_file" inventory.ini | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - - # Expand tilde - ssh_key="${ssh_key/#\~/$HOME}" - - if [ -f "$ssh_key" ]; then - print_success "SSH key found: $ssh_key" - - # Check permissions - perms=$(stat -c "%a" "$ssh_key" 2>/dev/null || stat -f "%OLp" "$ssh_key" 2>/dev/null) - if [ "$perms" != "600" ]; then - print_warning "SSH key permissions are $perms (should be 600)" - if confirm_action "Fix permissions?"; then - chmod 600 "$ssh_key" - print_success "Permissions fixed" - fi - else - print_success "SSH key permissions are correct (600)" - fi - else - print_error "SSH key not found: $ssh_key" - print_warning "Make sure to create your SSH key before proceeding to Layer 1" - echo "" - echo "To generate a new SSH key:" - echo " ssh-keygen -t ed25519 -f $ssh_key -C \"your-email@example.com\"" - fi - else - print_warning "inventory.ini not found, skipping SSH key validation" - fi -} - -print_summary() { - print_header "Layer 0 Setup Complete! 
🎉" - - echo "Summary of what was configured:" - echo "" - print_success "Python virtual environment created and activated" - print_success "Ansible and dependencies installed" - print_success "Ansible Galaxy collections installed" - print_success "inventory.ini configured with your hosts" - print_success "infra_vars.yml configured with your domain" - print_success "services_config.yml created with subdomain settings" - print_success "infra_secrets.yml template created" - echo "" - - print_info "Before proceeding to Layer 1:" - echo " 1. Ensure your SSH key is added to all VPS root users" - echo " 2. Verify you can SSH into each machine manually" - echo " 3. Configure DNS nameservers for your domain (if not done)" - echo "" - - print_info "Note about inventory groups:" - echo " • [nodito_vms] group created as placeholder" - echo " • These VMs will be created later on Proxmox" - echo " • Add their host entries to inventory.ini once created" - echo "" - - print_info "To test SSH access to a host:" - echo " ssh -i ~/.ssh/counterganzua root@" - echo "" - - print_info "Next steps:" - echo " 1. Review the files in ansible/" - echo " 2. Test SSH connections to your hosts" - echo " 3. Proceed to Layer 1: ./scripts/setup_layer_1.sh" - echo "" - - print_warning "Remember to activate the venv before running other commands:" - echo " source venv/bin/activate" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "🚀 Layer 0: Foundation Setup" - - echo "This script will set up your laptop (lapy) as the Ansible control node." - echo "It will install all prerequisites and configure basic settings." - echo "" - - if ! confirm_action "Continue with Layer 0 setup?"; then - echo "Setup cancelled." - exit 0 - fi - - check_prerequisites - setup_python_venv - install_python_requirements - install_ansible_collections - setup_inventory_file - setup_infra_vars - setup_services_config - setup_infra_secrets - validate_ssh_key - print_summary -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_1a_vps.sh b/scripts/setup_layer_1a_vps.sh deleted file mode 100755 index f60452f..0000000 --- a/scripts/setup_layer_1a_vps.sh +++ /dev/null @@ -1,393 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 1A: VPS Basic Setup -# -# This script configures users, SSH, firewall, and fail2ban on VPS machines. -# Runs independently - can be executed without Nodito setup. -############################################################################### - -set -e # Exit on error - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Project root directory -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -############################################################################### -# Helper Functions -############################################################################### - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -############################################################################### -# Verification Functions -############################################################################### - -check_layer_0_complete() { - print_header "Verifying Layer 0 Prerequisites" - - local errors=0 - - # Check if venv exists - if [ ! -d "$PROJECT_ROOT/venv" ]; then - print_error "Python venv not found. Run Layer 0 first." - ((errors++)) - else - print_success "Python venv exists" - fi - - # Check if we're in a venv - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) - else - print_success "Virtual environment activated" - fi - - # Check if Ansible is installed - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found: $(ansible --version | head -n1)" - fi - - # Check if inventory.ini exists - if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - # Check if infra_vars.yml exists - if [ ! -f "$ANSIBLE_DIR/infra_vars.yml" ]; then - print_error "infra_vars.yml not found" - ((errors++)) - else - print_success "infra_vars.yml exists" - fi - - if [ $errors -gt 0 ]; then - print_error "Layer 0 is not complete. 
-        exit 1
-    fi
-
-    print_success "Layer 0 prerequisites verified"
-}
-
-get_hosts_from_inventory() {
-    local target="$1"
-    cd "$ANSIBLE_DIR"
-
-    # Parse inventory.ini directly - more reliable than ansible-inventory
-    if [ -f "$ANSIBLE_DIR/inventory.ini" ]; then
-        # Look for the group section [target]
-        local in_section=false
-        local hosts=""
-        while IFS= read -r line; do
-            # Remove comments and whitespace
-            line=$(echo "$line" | sed 's/#.*$//' | xargs)
-            [ -z "$line" ] && continue
-
-            # Check if we're entering the target section
-            if [[ "$line" =~ ^\[$target\]$ ]]; then
-                in_section=true
-                continue
-            fi
-
-            # Check if we're entering a different section
-            if [[ "$line" =~ ^\[.*\]$ ]]; then
-                in_section=false
-                continue
-            fi
-
-            # If we're in the target section, extract hostname
-            if [ "$in_section" = true ]; then
-                local hostname=$(echo "$line" | awk '{print $1}')
-                if [ -n "$hostname" ]; then
-                    hosts="$hosts $hostname"
-                fi
-            fi
-        done < "$ANSIBLE_DIR/inventory.ini"
-        echo "$hosts" | xargs
-    fi
-}
-
-check_vps_configured() {
-    print_header "Checking VPS Configuration"
-
-    # Get all hosts from the vps group
-    local vps_hosts=$(get_hosts_from_inventory "vps")
-    local has_vps=false
-
-    # Check for expected VPS hostnames
-    for expected_host in vipy watchtower spacey; do
-        if echo "$vps_hosts" | grep -q "\b$expected_host\b"; then
-            print_success "$expected_host configured"
-            has_vps=true
-        else
-            print_info "$expected_host not configured (skipping)"
-        fi
-    done
-
-    if [ "$has_vps" = false ]; then
-        print_error "No VPSs configured in inventory.ini"
-        print_info "Add at least one VPS (vipy, watchtower, or spacey) to the [vps] group to proceed"
-        exit 1
-    fi
-
-    echo ""
-}
-
-check_ssh_connectivity() {
-    print_header "Testing SSH Connectivity as Root"
-
-    local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/')
-    ssh_key="${ssh_key/#\~/$HOME}"
-
-    print_info "Using SSH key: $ssh_key"
-    echo ""
-
-    local all_good=true
-
-    # Get all hosts from the vps group
-    local vps_hosts=$(get_hosts_from_inventory "vps")
-
-    # Test VPSs (vipy, watchtower, spacey)
-    for expected_host in vipy watchtower spacey; do
-        if echo "$vps_hosts" | grep -q "\b$expected_host\b"; then
-            print_info "Testing SSH to $expected_host as root..."
-            if timeout 10 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes root@$expected_host "echo 'SSH OK'" &>/dev/null; then
-                print_success "SSH to $expected_host as root: OK"
-            else
-                print_error "Cannot SSH to $expected_host as root"
-                print_warning "Make sure your SSH key is added to root on $expected_host"
-                all_good=false
-            fi
-        fi
-    done
-
-    if [ "$all_good" = false ]; then
-        echo ""
-        print_error "SSH connectivity test failed"
-        print_info "To fix this:"
-        echo " 1. Ensure your VPS provider has added your SSH key to root"
-        echo " 2. Test manually: ssh -i $ssh_key root@"
-        echo ""
-        if ! confirm_action "Continue anyway?"; then
confirm_action "Continue anyway?"; then - exit 1 - fi - fi - - echo "" - print_success "SSH connectivity verified" -} - -############################################################################### -# VPS Setup Functions -############################################################################### - -setup_vps_users_and_access() { - print_header "Setting Up Users and SSH Access on VPSs" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Create the 'counterweight' user with sudo access" - echo " • Configure SSH key authentication" - echo " • Disable root login (optional, configured in playbook)" - echo "" - print_info "Running: ansible-playbook -i inventory.ini infra/01_user_and_access_setup_playbook.yml" - echo "" - - if ! confirm_action "Proceed with user and access setup?"; then - print_warning "Skipped user and access setup" - return 1 - fi - - # Run the playbook with -e 'ansible_user="root"' to use root for this first run - if ansible-playbook -i inventory.ini infra/01_user_and_access_setup_playbook.yml -e 'ansible_user="root"'; then - print_success "User and access setup complete" - return 0 - else - print_error "User and access setup failed" - return 1 - fi -} - -setup_vps_firewall_and_fail2ban() { - print_header "Setting Up Firewall and Fail2ban on VPSs" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Configure UFW firewall with SSH access" - echo " • Install and configure fail2ban for brute force protection" - echo " • Install and configure auditd for security logging" - echo "" - print_info "Running: ansible-playbook -i inventory.ini infra/02_firewall_and_fail2ban_playbook.yml" - echo "" - - if ! confirm_action "Proceed with firewall and fail2ban setup?"; then - print_warning "Skipped firewall setup" - return 1 - fi - - # Now use the default counterweight user - if ansible-playbook -i inventory.ini infra/02_firewall_and_fail2ban_playbook.yml; then - print_success "Firewall and fail2ban setup complete" - return 0 - else - print_error "Firewall setup failed" - return 1 - fi -} - -############################################################################### -# Verification Functions -############################################################################### - -verify_layer_1a() { - print_header "Verifying Layer 1A Completion" - - cd "$ANSIBLE_DIR" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - # Test SSH as counterweight user - print_info "Testing SSH as counterweight user..." 
- echo "" - - local all_good=true - - # Get all hosts from the vps group - local vps_hosts=$(get_hosts_from_inventory "vps") - - for expected_host in vipy watchtower spacey; do - if echo "$vps_hosts" | grep -q "\b$expected_host\b"; then - if timeout 10 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$expected_host "echo 'SSH OK'" &>/dev/null; then - print_success "SSH to $expected_host as counterweight: OK" - else - print_error "Cannot SSH to $expected_host as counterweight" - all_good=false - fi - fi - done - - echo "" - if [ "$all_good" = true ]; then - print_success "All SSH connectivity verified" - else - print_warning "Some SSH tests failed - manual verification recommended" - print_info "Test manually: ssh -i $ssh_key counterweight@" - fi -} - -############################################################################### -# Summary Functions -############################################################################### - -print_summary() { - print_header "Layer 1A: VPS Setup Complete! 🎉" - - echo "Summary of what was configured:" - echo "" - print_success "counterweight user created on all VPSs" - print_success "SSH key authentication configured" - print_success "UFW firewall active and configured" - print_success "fail2ban protecting against brute force attacks" - print_success "auditd logging security events" - echo "" - - print_warning "Important Security Changes:" - echo " • Root SSH login is now disabled (by design)" - echo " • Always use 'counterweight' user for SSH access" - echo " • Firewall is active - only SSH allowed by default" - echo "" - - print_info "Next steps:" - echo " 1. Test SSH access: ssh -i ~/.ssh/counterganzua counterweight@" - echo " 2. (Optional) Set up Nodito: ./scripts/setup_layer_1b_nodito.sh" - echo " 3. Proceed to Layer 2: ./scripts/setup_layer_2.sh" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "🔧 Layer 1A: VPS Basic Setup" - - echo "This script will configure users, SSH, firewall, and fail2ban on VPS machines." - echo "" - print_info "Targets: vipy, watchtower, spacey" - echo "" - - if ! confirm_action "Continue with Layer 1A setup?"; then - echo "Setup cancelled." - exit 0 - fi - - check_layer_0_complete - check_vps_configured - check_ssh_connectivity - - # VPS Setup - local setup_failed=false - setup_vps_users_and_access || setup_failed=true - setup_vps_firewall_and_fail2ban || setup_failed=true - - verify_layer_1a - - if [ "$setup_failed" = true ]; then - print_warning "Some steps failed - please review errors above" - fi - - print_summary -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_1b_nodito.sh b/scripts/setup_layer_1b_nodito.sh deleted file mode 100755 index 5ebb243..0000000 --- a/scripts/setup_layer_1b_nodito.sh +++ /dev/null @@ -1,411 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 1B: Nodito (Proxmox) Setup -# -# This script configures the Nodito Proxmox server. -# Runs independently - can be executed without VPS setup. 
-###############################################################################
-
-set -e # Exit on error
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-# Project root directory
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
-ANSIBLE_DIR="$PROJECT_ROOT/ansible"
-
-###############################################################################
-# Helper Functions
-###############################################################################
-
-print_header() {
-    echo -e "\n${BLUE}========================================${NC}"
-    echo -e "${BLUE}$1${NC}"
-    echo -e "${BLUE}========================================${NC}\n"
-}
-
-print_success() {
-    echo -e "${GREEN}✓${NC} $1"
-}
-
-print_error() {
-    echo -e "${RED}✗${NC} $1"
-}
-
-print_warning() {
-    echo -e "${YELLOW}⚠${NC} $1"
-}
-
-print_info() {
-    echo -e "${BLUE}ℹ${NC} $1"
-}
-
-confirm_action() {
-    local prompt="$1"
-    local response
-
-    read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response
-    [[ "$response" =~ ^[Yy]$ ]]
-}
-
-###############################################################################
-# Verification Functions
-###############################################################################
-
-check_layer_0_complete() {
-    print_header "Verifying Layer 0 Prerequisites"
-
-    local errors=0
-
-    # Check if venv exists
-    if [ ! -d "$PROJECT_ROOT/venv" ]; then
-        print_error "Python venv not found. Run Layer 0 first."
-        ((errors++))
-    else
-        print_success "Python venv exists"
-    fi
-
-    # Check if we're in a venv
-    if [ -z "$VIRTUAL_ENV" ]; then
-        print_error "Virtual environment not activated"
-        echo "Run: source venv/bin/activate"
-        ((errors++))
-    else
-        print_success "Virtual environment activated"
-    fi
-
-    # Check if Ansible is installed
-    if ! command -v ansible &> /dev/null; then
-        print_error "Ansible not found"
-        ((errors++))
-    else
-        print_success "Ansible found: $(ansible --version | head -n1)"
-    fi
-
-    # Check if inventory.ini exists
-    if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then
-        print_error "inventory.ini not found"
-        ((errors++))
-    else
-        print_success "inventory.ini exists"
-    fi
-
-    if [ $errors -gt 0 ]; then
-        print_error "Layer 0 is not complete. Please run ./scripts/setup_layer_0.sh first"
-        exit 1
-    fi
-
-    print_success "Layer 0 prerequisites verified"
-}
-
-get_hosts_from_inventory() {
-    local target="$1"
-    cd "$ANSIBLE_DIR"
-    ansible-inventory -i inventory.ini --list | \
-        python3 - "$target" <<'PY' 2>/dev/null || echo ""
-import json, sys
-data = json.load(sys.stdin)
-target = sys.argv[1]
-if target in data:
-    print(' '.join(data[target].get('hosts', [])))
-else:
-    hostvars = data.get('_meta', {}).get('hostvars', {})
-    if target in hostvars:
-        print(target)
-PY
-}
-
-check_nodito_configured() {
-    print_header "Checking Nodito Configuration"
-
-    local nodito_hosts=$(get_hosts_from_inventory "nodito_host")
-
-    if [ -z "$nodito_hosts" ]; then
-        print_error "No nodito host configured in inventory.ini"
-        print_info "Add the nodito host to the [nodito_host] group in inventory.ini to proceed"
-        exit 1
-    fi
-
-    print_success "Nodito configured: $nodito_hosts"
-    echo ""
-}
-
-###############################################################################
-# Nodito Setup Functions
-###############################################################################
-
-setup_nodito_bootstrap() {
-    print_header "Bootstrapping Nodito (Proxmox Server)"
-
-    cd "$ANSIBLE_DIR"
-
-    print_info "This will:"
-    echo " • Set up SSH key access for root"
-    echo " • Create the counterweight user with SSH keys"
-    echo " • Update and secure the system"
-    echo " • Disable root login and password authentication"
-    echo ""
-    print_info "Running: ansible-playbook -i inventory.ini infra/nodito/30_proxmox_bootstrap_playbook.yml"
-    print_warning "You will be prompted for the root password"
-    echo ""
-
-    if ! confirm_action "Proceed with nodito bootstrap?"; then
-        print_warning "Skipped nodito bootstrap"
-        return 1
-    fi
-
-    # Run with root user and ask for password
-    if ansible-playbook -i inventory.ini infra/nodito/30_proxmox_bootstrap_playbook.yml -e 'ansible_user=root' --ask-pass; then
-        print_success "Nodito bootstrap complete"
-        return 0
-    else
-        print_error "Nodito bootstrap failed"
-        return 1
-    fi
-}
-
-setup_nodito_community_repos() {
-    print_header "Switching Nodito to Community Repositories"
-
-    cd "$ANSIBLE_DIR"
-
-    print_info "This will:"
-    echo " • Remove enterprise repository files"
-    echo " • Add community repository files"
-    echo " • Disable subscription nag messages"
-    echo " • Update Proxmox packages"
-    echo ""
-    print_info "Running: ansible-playbook -i inventory.ini infra/nodito/31_proxmox_community_repos_playbook.yml"
-    echo ""
-
-    if ! confirm_action "Proceed with community repos setup?"; then
-        print_warning "Skipped community repos setup"
-        return 1
-    fi
-
-    if ansible-playbook -i inventory.ini infra/nodito/31_proxmox_community_repos_playbook.yml; then
-        print_success "Community repositories configured"
-        print_warning "Clear browser cache before using Proxmox web UI (Ctrl+Shift+R)"
-        return 0
-    else
-        print_error "Community repos setup failed"
-        return 1
-    fi
-}
-
-setup_nodito_zfs() {
-    print_header "Setting Up ZFS Storage Pool on Nodito (Optional)"
-
-    cd "$ANSIBLE_DIR"
-
-    print_warning "⚠️ ZFS setup will DESTROY ALL DATA on the specified disks!"
-    echo ""
-    print_info "Before proceeding, you must:"
-    echo " 1. SSH into nodito: ssh root@"
-    echo " 2. List disks: ls -la /dev/disk/by-id/ | grep -E '(ata-|scsi-|nvme-)'"
-    echo " 3. Identify the two disk IDs you want to use for RAID 1"
-    echo " 4. Edit ansible/infra/nodito/nodito_vars.yml"
-    echo " 5. Set zfs_disk_1 and zfs_disk_2 to your disk IDs"
-    echo ""
-    print_info "Example nodito_vars.yml content:"
-    echo '  zfs_disk_1: "/dev/disk/by-id/ata-WDC_WD40EFRX-68N32N0_WD-WCC7K1234567"'
-    echo '  zfs_disk_2: "/dev/disk/by-id/ata-WDC_WD40EFRX-68N32N0_WD-WCC7K7654321"'
-    echo ""
-
-    if [ ! -f "$ANSIBLE_DIR/infra/nodito/nodito_vars.yml" ]; then
-        print_warning "nodito_vars.yml not found"
-        if confirm_action "Create nodito_vars.yml template?"; then
-            cat > "$ANSIBLE_DIR/infra/nodito/nodito_vars.yml" << 'EOF'
-# Nodito Variables
-# Configure these before running ZFS setup
-
-# ZFS Storage Pool Configuration
-# Uncomment and configure these lines after identifying your disk IDs:
-# zfs_disk_1: "/dev/disk/by-id/ata-YOUR-DISK-1-ID-HERE"
-# zfs_disk_2: "/dev/disk/by-id/ata-YOUR-DISK-2-ID-HERE"
-# zfs_pool_name: "proxmox-storage"
-
-# CPU Temperature Monitoring
-monitoring_script_dir: /opt/cpu-temp-monitor
-monitoring_script_path: "{{ monitoring_script_dir }}/cpu_temp_monitor.sh"
-log_file: "{{ monitoring_script_dir }}/cpu_temp_monitor.log"
-temp_threshold_celsius: 80
-EOF
-            print_success "Created nodito_vars.yml template"
-            print_info "Edit this file and configure ZFS disks, then re-run this script"
-        fi
-        return 1
-    fi
-
-    # Check if ZFS disks are configured
-    if ! grep -q "^zfs_disk_1:" "$ANSIBLE_DIR/infra/nodito/nodito_vars.yml" 2>/dev/null; then
-        print_info "ZFS disks not configured in nodito_vars.yml"
-        print_info "Edit ansible/infra/nodito/nodito_vars.yml to configure disk IDs"
-        if ! confirm_action "Skip ZFS setup for now?"; then
-            print_info "Please configure ZFS disks first"
-            return 1
-        fi
-        print_warning "Skipped ZFS setup"
-        return 1
-    fi
-
-    print_info "Running: ansible-playbook -i inventory.ini infra/nodito/32_zfs_pool_setup_playbook.yml"
-    echo ""
-
-    if ! confirm_action "⚠️ Proceed with ZFS setup? (THIS WILL DESTROY DATA ON CONFIGURED DISKS)"; then
-        print_warning "Skipped ZFS setup"
-        return 1
-    fi
-
-    if ansible-playbook -i inventory.ini infra/nodito/32_zfs_pool_setup_playbook.yml; then
-        print_success "ZFS storage pool configured"
-        return 0
-    else
-        print_error "ZFS setup failed"
-        return 1
-    fi
-}
-
-setup_nodito_cloud_template() {
-    print_header "Creating Debian Cloud Template on Nodito (Optional)"
-
-    cd "$ANSIBLE_DIR"
-
-    print_info "This will:"
-    echo " • Download Debian cloud image"
-    echo " • Create a VM template (ID 9000)"
-    echo " • Configure cloud-init for easy VM creation"
-    echo ""
-    print_info "Running: ansible-playbook -i inventory.ini infra/nodito/33_proxmox_debian_cloud_template.yml"
-    echo ""
-
-    if ! confirm_action "Proceed with cloud template creation?"; then
-        print_warning "Skipped cloud template creation"
-        return 1
-    fi
-
-    if ansible-playbook -i inventory.ini infra/nodito/33_proxmox_debian_cloud_template.yml; then
-        print_success "Debian cloud template created (VM ID 9000)"
-        return 0
-    else
-        print_error "Cloud template creation failed"
-        return 1
-    fi
-}
-
-###############################################################################
-# Verification Functions
-###############################################################################
-
-verify_layer_1b() {
-    print_header "Verifying Layer 1B Completion"
-
-    cd "$ANSIBLE_DIR"
-
-    local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/')
-    ssh_key="${ssh_key/#\~/$HOME}"
-
-    local nodito_hosts=$(get_hosts_from_inventory "nodito")
-
-    print_info "Testing SSH as counterweight user..."
- echo "" - - for host in $nodito_hosts; do - if timeout 10 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "echo 'SSH OK'" &>/dev/null; then - print_success "SSH to $host as counterweight: OK" - else - print_error "Cannot SSH to $host as counterweight" - print_info "Test manually: ssh -i $ssh_key counterweight@$host" - fi - done - - echo "" -} - -############################################################################### -# Summary Functions -############################################################################### - -print_summary() { - print_header "Layer 1B: Nodito Setup Complete! 🎉" - - echo "Summary of what was configured:" - echo "" - print_success "Nodito bootstrapped with SSH keys" - print_success "counterweight user created" - print_success "Community repositories configured" - print_success "Root login and password auth disabled" - - if grep -q "^zfs_disk_1:" "$ANSIBLE_DIR/infra/nodito/nodito_vars.yml" 2>/dev/null; then - print_success "ZFS storage pool configured (if you ran it)" - fi - echo "" - - print_warning "Important Security Changes:" - echo " • Root SSH login is now disabled" - echo " • Always use 'counterweight' user for SSH access" - echo " • Password authentication is disabled" - echo "" - - print_info "Proxmox Web UI:" - local nodito_hosts=$(get_hosts_from_inventory "nodito") - echo " • Access at: https://$nodito_hosts:8006" - echo " • Clear browser cache (Ctrl+Shift+R) to avoid UI issues" - echo "" - - print_info "Next steps:" - echo " 1. Test SSH: ssh -i ~/.ssh/counterganzua counterweight@" - echo " 2. Access Proxmox web UI and verify community repos" - echo " 3. Create VMs on Proxmox (if needed)" - echo " 4. Proceed to Layer 2: ./scripts/setup_layer_2.sh" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "🖥️ Layer 1B: Nodito (Proxmox) Setup" - - echo "This script will configure your Nodito Proxmox server." - echo "" - print_info "Target: nodito (Proxmox server)" - echo "" - - if ! confirm_action "Continue with Layer 1B setup?"; then - echo "Setup cancelled." - exit 0 - fi - - check_layer_0_complete - check_nodito_configured - - # Nodito Setup - local setup_failed=false - setup_nodito_bootstrap || setup_failed=true - setup_nodito_community_repos || setup_failed=true - setup_nodito_zfs || setup_failed=true - setup_nodito_cloud_template || setup_failed=true - - verify_layer_1b - - if [ "$setup_failed" = true ]; then - print_warning "Some optional steps were skipped - this is normal" - fi - - print_summary -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_2.sh b/scripts/setup_layer_2.sh deleted file mode 100755 index 1f35431..0000000 --- a/scripts/setup_layer_2.sh +++ /dev/null @@ -1,407 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 2: General Infrastructure Tools -# -# This script installs rsync and docker on the machines that need them. -# Must be run after Layer 1A (VPS) or Layer 1B (Nodito) is complete. 
-############################################################################### - -set -e # Exit on error - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Project root directory -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -############################################################################### -# Helper Functions -############################################################################### - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -############################################################################### -# Verification Functions -############################################################################### - -check_layer_0_complete() { - print_header "Verifying Layer 0 Prerequisites" - - local errors=0 - - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) - else - print_success "Virtual environment activated" - fi - - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found" - fi - - if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - if [ $errors -gt 0 ]; then - print_error "Layer 0 is not complete" - exit 1 - fi - - print_success "Layer 0 prerequisites verified" -} - -get_hosts_from_inventory() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -if target in data: - print(' '.join(data[target].get('hosts', []))) -else: - hostvars = data.get('_meta', {}).get('hostvars', {}) - if target in hostvars: - print(target) -PY -} - -check_ssh_connectivity() { - print_header "Testing SSH Connectivity" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - local all_good=true - - for group in vipy watchtower spacey nodito; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - for host in $hosts; do - print_info "Testing SSH to $host as counterweight..." - if timeout 10 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "echo 'SSH OK'" &>/dev/null; then - print_success "SSH to $host: OK" - else - print_error "Cannot SSH to $host as counterweight" - print_warning "Make sure Layer 1A or 1B is complete for this host" - all_good=false - fi - done - fi - done - - if [ "$all_good" = false ]; then - echo "" - print_error "SSH connectivity test failed" - print_info "Ensure Layer 1A (VPS) or Layer 1B (Nodito) is complete" - echo "" - if ! 
confirm_action "Continue anyway?"; then - exit 1 - fi - fi - - echo "" - print_success "SSH connectivity verified" -} - -############################################################################### -# rsync Installation -############################################################################### - -install_rsync() { - print_header "Installing rsync" - - cd "$ANSIBLE_DIR" - - print_info "rsync is needed for backup operations" - print_info "Recommended hosts: vipy, watchtower, lapy" - echo "" - - # Show available hosts - echo "Available hosts in inventory:" - for group in vipy watchtower spacey nodito lapy; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - echo " [$group]: $hosts" - fi - done - echo "" - - print_info "Installation options:" - echo " 1. Install on recommended hosts (vipy, watchtower, lapy)" - echo " 2. Install on all hosts" - echo " 3. Custom selection (specify groups)" - echo " 4. Skip rsync installation" - echo "" - - echo -e -n "${BLUE}Choose option${NC} [1-4]: " - read option - - local limit_hosts="" - case "$option" in - 1) - limit_hosts="vipy,watchtower,lapy" - print_info "Installing rsync on: vipy, watchtower, lapy" - ;; - 2) - limit_hosts="all" - print_info "Installing rsync on: all hosts" - ;; - 3) - echo -e -n "${BLUE}Enter groups (comma-separated, e.g., vipy,watchtower,nodito)${NC}: " - read limit_hosts - print_info "Installing rsync on: $limit_hosts" - ;; - 4) - print_warning "Skipping rsync installation" - return 1 - ;; - *) - print_error "Invalid option" - return 1 - ;; - esac - - echo "" - if ! confirm_action "Proceed with rsync installation?"; then - print_warning "Skipped rsync installation" - return 1 - fi - - print_info "Running: ansible-playbook -i inventory.ini infra/900_install_rsync.yml --limit $limit_hosts" - echo "" - - if ansible-playbook -i inventory.ini infra/900_install_rsync.yml --limit "$limit_hosts"; then - print_success "rsync installation complete" - return 0 - else - print_error "rsync installation failed" - return 1 - fi -} - -############################################################################### -# Docker Installation -############################################################################### - -install_docker() { - print_header "Installing Docker and Docker Compose" - - cd "$ANSIBLE_DIR" - - print_info "Docker is needed for containerized services" - print_info "Recommended hosts: vipy, watchtower" - echo "" - - # Show available hosts (exclude lapy - docker on laptop is optional) - echo "Available hosts in inventory:" - for group in vipy watchtower spacey nodito; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - echo " [$group]: $hosts" - fi - done - echo "" - - print_info "Installation options:" - echo " 1. Install on recommended hosts (vipy, watchtower)" - echo " 2. Install on all hosts" - echo " 3. Custom selection (specify groups)" - echo " 4. 
Skip docker installation" - echo "" - - echo -e -n "${BLUE}Choose option${NC} [1-4]: " - read option - - local limit_hosts="" - case "$option" in - 1) - limit_hosts="vipy,watchtower" - print_info "Installing Docker on: vipy, watchtower" - ;; - 2) - limit_hosts="all" - print_info "Installing Docker on: all hosts" - ;; - 3) - echo -e -n "${BLUE}Enter groups (comma-separated, e.g., vipy,watchtower,nodito)${NC}: " - read limit_hosts - print_info "Installing Docker on: $limit_hosts" - ;; - 4) - print_warning "Skipping Docker installation" - return 1 - ;; - *) - print_error "Invalid option" - return 1 - ;; - esac - - echo "" - if ! confirm_action "Proceed with Docker installation?"; then - print_warning "Skipped Docker installation" - return 1 - fi - - print_info "Running: ansible-playbook -i inventory.ini infra/910_docker_playbook.yml --limit $limit_hosts" - echo "" - - if ansible-playbook -i inventory.ini infra/910_docker_playbook.yml --limit "$limit_hosts"; then - print_success "Docker installation complete" - print_warning "You may need to log out and back in for docker group to take effect" - return 0 - else - print_error "Docker installation failed" - return 1 - fi -} - -############################################################################### -# Verification Functions -############################################################################### - -verify_installations() { - print_header "Verifying Installations" - - cd "$ANSIBLE_DIR" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - echo "Checking installed tools on hosts..." - echo "" - - # Check all remote hosts - for group in vipy watchtower spacey nodito; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - for host in $hosts; do - print_info "Checking $host..." - - # Check rsync - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "command -v rsync" &>/dev/null; then - print_success "$host: rsync installed" - else - print_warning "$host: rsync not found (may not be needed)" - fi - - # Check docker - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "command -v docker" &>/dev/null; then - print_success "$host: docker installed" - - # Check docker service - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "sudo systemctl is-active docker" &>/dev/null; then - print_success "$host: docker service running" - else - print_warning "$host: docker service not running" - fi - else - print_warning "$host: docker not found (may not be needed)" - fi - - echo "" - done - fi - done -} - -############################################################################### -# Summary Functions -############################################################################### - -print_summary() { - print_header "Layer 2 Setup Complete! 🎉" - - echo "Summary:" - echo "" - print_success "Infrastructure tools installed on specified hosts" - echo "" - - print_info "What was installed:" - echo " • rsync - for backup operations" - echo " • docker + docker compose - for containerized services" - echo "" - - print_info "Next steps:" - echo " 1. 
Proceed to Layer 3: ./scripts/setup_layer_3_caddy.sh" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "🔧 Layer 2: General Infrastructure Tools" - - echo "This script will install rsync and docker on your infrastructure." - echo "" - - if ! confirm_action "Continue with Layer 2 setup?"; then - echo "Setup cancelled." - exit 0 - fi - - check_layer_0_complete - check_ssh_connectivity - - # Install tools - install_rsync - echo "" - install_docker - - verify_installations - print_summary -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_3_caddy.sh b/scripts/setup_layer_3_caddy.sh deleted file mode 100755 index 2ce0f6d..0000000 --- a/scripts/setup_layer_3_caddy.sh +++ /dev/null @@ -1,355 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 3: Reverse Proxy (Caddy) -# -# This script deploys Caddy reverse proxy on VPS machines. -# Must be run after Layer 1A (VPS setup) is complete. -############################################################################### - -set -e # Exit on error - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Project root directory -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -############################################################################### -# Helper Functions -############################################################################### - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -############################################################################### -# Verification Functions -############################################################################### - -check_layer_0_complete() { - print_header "Verifying Layer 0 Prerequisites" - - local errors=0 - - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) - else - print_success "Virtual environment activated" - fi - - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found" - fi - - if [ ! 
-f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - if [ $errors -gt 0 ]; then - print_error "Layer 0 is not complete" - exit 1 - fi - - print_success "Layer 0 prerequisites verified" -} - -get_hosts_from_inventory() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -if target in data: - print(' '.join(data[target].get('hosts', []))) -else: - hostvars = data.get('_meta', {}).get('hostvars', {}) - if target in hostvars: - print(target) -PY -} - -check_target_hosts() { - print_header "Checking Target Hosts" - - local has_hosts=false - - print_info "Caddy will be deployed to these hosts:" - echo "" - - for group in vipy watchtower spacey; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - echo " [$group]: $hosts" - has_hosts=true - else - print_warning "[$group]: not configured (skipping)" - fi - done - - echo "" - - if [ "$has_hosts" = false ]; then - print_error "No target hosts configured for Caddy" - print_info "Caddy needs vipy, watchtower, or spacey in inventory.ini" - exit 1 - fi - - print_success "Target hosts verified" -} - -check_ssh_connectivity() { - print_header "Testing SSH Connectivity" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - local all_good=true - - for group in vipy watchtower spacey; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - for host in $hosts; do - print_info "Testing SSH to $host as counterweight..." - if timeout 10 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "echo 'SSH OK'" &>/dev/null; then - print_success "SSH to $host: OK" - else - print_error "Cannot SSH to $host as counterweight" - print_warning "Make sure Layer 1A is complete for this host" - all_good=false - fi - done - fi - done - - if [ "$all_good" = false ]; then - echo "" - print_error "SSH connectivity test failed" - print_info "Ensure Layer 1A (VPS setup) is complete" - echo "" - if ! confirm_action "Continue anyway?"; then - exit 1 - fi - fi - - echo "" - print_success "SSH connectivity verified" -} - -############################################################################### -# Caddy Deployment -############################################################################### - -deploy_caddy() { - print_header "Deploying Caddy" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Install Caddy from official repositories" - echo " • Configure Caddy service" - echo " • Open firewall ports 80/443" - echo " • Create sites-enabled directory structure" - echo " • Enable automatic HTTPS with Let's Encrypt" - echo "" - - print_info "Target hosts: vipy, watchtower, spacey (if configured)" - echo "" - - print_warning "Important:" - echo " • Caddy will start with empty configuration" - echo " • Services will add their own config files in later layers" - echo " • Ports 80/443 must be available on the VPSs" - echo "" - - if ! 
confirm_action "Proceed with Caddy deployment?"; then - print_warning "Skipped Caddy deployment" - return 1 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/caddy_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/caddy_playbook.yml; then - print_success "Caddy deployment complete" - return 0 - else - print_error "Caddy deployment failed" - return 1 - fi -} - -############################################################################### -# Verification Functions -############################################################################### - -verify_caddy() { - print_header "Verifying Caddy Installation" - - cd "$ANSIBLE_DIR" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - echo "Checking Caddy on each host..." - echo "" - - for group in vipy watchtower spacey; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - for host in $hosts; do - print_info "Checking $host..." - - # Check if caddy is installed - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "command -v caddy" &>/dev/null; then - print_success "$host: Caddy installed" - else - print_error "$host: Caddy not found" - continue - fi - - # Check if caddy service is running - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "sudo systemctl is-active caddy" &>/dev/null; then - print_success "$host: Caddy service running" - else - print_error "$host: Caddy service not running" - fi - - # Check if sites-enabled directory exists - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "test -d /etc/caddy/sites-enabled" &>/dev/null; then - print_success "$host: sites-enabled directory exists" - else - print_warning "$host: sites-enabled directory not found" - fi - - # Check if ports 80/443 are open - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$host "sudo ufw status | grep -E '80|443'" &>/dev/null; then - print_success "$host: Firewall ports 80/443 open" - else - print_warning "$host: Could not verify firewall ports" - fi - - echo "" - done - fi - done -} - -############################################################################### -# Summary Functions -############################################################################### - -print_summary() { - print_header "Layer 3 Setup Complete! 
🎉"
-
-    echo "Summary of what was configured:"
-    echo ""
-    print_success "Caddy installed on VPS hosts"
-    print_success "Caddy service running"
-    print_success "Firewall ports 80/443 opened"
-    print_success "Sites-enabled directory structure created"
-    echo ""
-
-    print_info "What Caddy provides:"
-    echo "  • Automatic HTTPS with Let's Encrypt"
-    echo "  • Reverse proxy for all web services"
-    echo "  • HTTP/2 support"
-    echo "  • Simple per-service configuration"
-    echo ""
-
-    print_info "How services use Caddy:"
-    echo "  • Each service adds a config file to /etc/caddy/sites-enabled/"
-    echo "  • Main Caddyfile imports all configs"
-    echo "  • Caddy automatically manages SSL certificates"
-    echo ""
-
-    print_warning "Important Notes:"
-    echo "  • Caddy is currently running with default/empty config"
-    echo "  • Services deployed in later layers will add their configs"
-    echo "  • DNS must point to your VPS IPs for SSL to work"
-    echo ""
-
-    print_info "Next steps:"
-    echo "  1. Verify Caddy is accessible (optional): curl http://<vps-ip>"
-    echo "  2. Proceed to Layer 4: ./scripts/setup_layer_4_monitoring.sh"
-    echo ""
-}
-
-###############################################################################
-# Main Execution
-###############################################################################
-
-main() {
-    clear
-
-    print_header "🌐 Layer 3: Reverse Proxy (Caddy)"
-
-    echo "This script will deploy Caddy reverse proxy on your VPS machines."
-    echo ""
-    print_info "Targets: vipy, watchtower, spacey"
-    echo ""
-
-    if ! confirm_action "Continue with Layer 3 setup?"; then
-        echo "Setup cancelled."
-        exit 0
-    fi
-
-    check_layer_0_complete
-    check_target_hosts
-    check_ssh_connectivity
-
-    # Deploy Caddy
-    if deploy_caddy; then
-        verify_caddy
-        print_summary
-    else
-        print_error "Caddy deployment failed"
-        exit 1
-    fi
-}
-
-# Run main function
-main "$@"
-
diff --git a/scripts/setup_layer_4_monitoring.sh b/scripts/setup_layer_4_monitoring.sh
deleted file mode 100755
index d82ad41..0000000
--- a/scripts/setup_layer_4_monitoring.sh
+++ /dev/null
@@ -1,806 +0,0 @@
-#!/bin/bash
-
-###############################################################################
-# Layer 4: Core Monitoring & Notifications
-#
-# This script deploys ntfy and Uptime Kuma on watchtower.
-# Must be run after Layers 1A, 2, and 3 are complete.
-###############################################################################
-
-set -e  # Exit on error
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-# Project root directory
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-PROJECT_ROOT="$(cd "$SCRIPT_DIR/.."
&& pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -############################################################################### -# Helper Functions -############################################################################### - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -get_hosts_from_inventory() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -if target in data: - print(' '.join(data[target].get('hosts', []))) -else: - hostvars = data.get('_meta', {}).get('hostvars', {}) - if target in hostvars: - print(target) -PY -} - -get_host_ip() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -hostvars = data.get('_meta', {}).get('hostvars', {}) -if target in hostvars: - print(hostvars[target].get('ansible_host', target)) -else: - hosts = data.get(target, {}).get('hosts', []) - if hosts: - first = hosts[0] - hv = hostvars.get(first, {}) - print(hv.get('ansible_host', first)) -PY -} - -############################################################################### -# Verification Functions -############################################################################### - -check_prerequisites() { - print_header "Verifying Prerequisites" - - local errors=0 - - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) - else - print_success "Virtual environment activated" - fi - - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found" - fi - - if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - # Check if watchtower is configured - if [ -z "$(get_hosts_from_inventory "watchtower")" ]; then - print_error "watchtower not configured in inventory.ini" - print_info "Layer 4 requires watchtower VPS" - ((errors++)) - else - print_success "watchtower configured in inventory" - fi - - if [ $errors -gt 0 ]; then - print_error "Prerequisites not met" - exit 1 - fi - - print_success "Prerequisites verified" -} - -check_vars_files() { - print_header "Checking Configuration Files" - - # Check services_config.yml - if [ ! 
-f "$ANSIBLE_DIR/services_config.yml" ]; then - print_error "services_config.yml not found" - print_info "This file should have been created in Layer 0" - exit 1 - fi - - print_success "services_config.yml exists" - - # Show configured subdomains - local ntfy_sub=$(grep "^ ntfy:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "ntfy") - local uptime_sub=$(grep "^ uptime_kuma:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "uptime") - - print_info "Configured subdomains:" - echo " • ntfy: $ntfy_sub" - echo " • uptime_kuma: $uptime_sub" - echo "" -} - -check_dns_configuration() { - print_header "Validating DNS Configuration" - - cd "$ANSIBLE_DIR" - - # Get watchtower IP - local watchtower_ip=$(get_host_ip "watchtower") - - if [ -z "$watchtower_ip" ]; then - print_error "Could not determine watchtower IP from inventory" - return 1 - fi - - print_info "Watchtower IP: $watchtower_ip" - echo "" - - # Get domain from infra_vars.yml - local root_domain=$(grep "^root_domain:" "$ANSIBLE_DIR/infra_vars.yml" | awk '{print $2}' 2>/dev/null) - - if [ -z "$root_domain" ]; then - print_error "Could not determine root_domain from infra_vars.yml" - return 1 - fi - - # Get subdomains from centralized config - local ntfy_subdomain="ntfy" - local uptime_subdomain="uptime" - - if [ -f "$ANSIBLE_DIR/services_config.yml" ]; then - ntfy_subdomain=$(grep "^ ntfy:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "ntfy") - uptime_subdomain=$(grep "^ uptime_kuma:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "uptime") - fi - - local ntfy_fqdn="${ntfy_subdomain}.${root_domain}" - local uptime_fqdn="${uptime_subdomain}.${root_domain}" - - print_info "Checking DNS records..." - echo "" - - local dns_ok=true - - # Check ntfy DNS - print_info "Checking $ntfy_fqdn..." - if command -v dig &> /dev/null; then - local ntfy_resolved=$(dig +short "$ntfy_fqdn" | head -n1) - if [ "$ntfy_resolved" = "$watchtower_ip" ]; then - print_success "$ntfy_fqdn → $ntfy_resolved ✓" - elif [ -n "$ntfy_resolved" ]; then - print_error "$ntfy_fqdn → $ntfy_resolved (expected $watchtower_ip)" - dns_ok=false - else - print_error "$ntfy_fqdn does not resolve" - dns_ok=false - fi - else - print_warning "dig command not found, skipping DNS validation" - print_info "Install dnsutils/bind-tools to enable DNS validation" - return 1 - fi - - # Check Uptime Kuma DNS - print_info "Checking $uptime_fqdn..." - if command -v dig &> /dev/null; then - local uptime_resolved=$(dig +short "$uptime_fqdn" | head -n1) - if [ "$uptime_resolved" = "$watchtower_ip" ]; then - print_success "$uptime_fqdn → $uptime_resolved ✓" - elif [ -n "$uptime_resolved" ]; then - print_error "$uptime_fqdn → $uptime_resolved (expected $watchtower_ip)" - dns_ok=false - else - print_error "$uptime_fqdn does not resolve" - dns_ok=false - fi - fi - - echo "" - - if [ "$dns_ok" = false ]; then - print_error "DNS validation failed" - print_info "Please configure DNS records:" - echo " • $ntfy_fqdn → $watchtower_ip" - echo " • $uptime_fqdn → $watchtower_ip" - echo "" - print_warning "DNS changes can take time to propagate (up to 24-48 hours)" - echo "" - if ! confirm_action "Continue anyway? 
(SSL certificates will fail without proper DNS)"; then - exit 1 - fi - else - print_success "DNS validation passed" - fi -} - -############################################################################### -# ntfy Deployment -############################################################################### - -deploy_ntfy() { - print_header "Deploying ntfy (Notification Service)" - - cd "$ANSIBLE_DIR" - - print_info "ntfy requires admin credentials for authentication" - echo "" - - # Check if env vars are set - if [ -z "$NTFY_USER" ] || [ -z "$NTFY_PASSWORD" ]; then - print_warning "NTFY_USER and NTFY_PASSWORD environment variables not set" - echo "" - print_info "Please enter credentials for ntfy admin user:" - echo "" - - echo -e -n "${BLUE}ntfy admin username${NC} [admin]: " - read ntfy_user - ntfy_user="${ntfy_user:-admin}" - - echo -e -n "${BLUE}ntfy admin password${NC}: " - read -s ntfy_password - echo "" - - if [ -z "$ntfy_password" ]; then - print_error "Password cannot be empty" - return 1 - fi - - export NTFY_USER="$ntfy_user" - export NTFY_PASSWORD="$ntfy_password" - else - print_success "Using NTFY_USER and NTFY_PASSWORD from environment" - fi - - echo "" - print_info "This will:" - echo " • Install ntfy from official repositories" - echo " • Configure ntfy with authentication (deny-all by default)" - echo " • Create admin user: $NTFY_USER" - echo " • Set up Caddy reverse proxy" - echo "" - - if ! confirm_action "Proceed with ntfy deployment?"; then - print_warning "Skipped ntfy deployment" - return 1 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/ntfy/deploy_ntfy_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/ntfy/deploy_ntfy_playbook.yml; then - print_success "ntfy deployment complete" - echo "" - print_info "ntfy is now available at your configured subdomain" - print_info "Admin user: $NTFY_USER" - return 0 - else - print_error "ntfy deployment failed" - return 1 - fi -} - -############################################################################### -# Uptime Kuma Deployment -############################################################################### - -deploy_uptime_kuma() { - print_header "Deploying Uptime Kuma (Monitoring Platform)" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Deploy Uptime Kuma via Docker" - echo " • Configure Caddy reverse proxy" - echo " • Set up data persistence" - echo "" - - if ! confirm_action "Proceed with Uptime Kuma deployment?"; then - print_warning "Skipped Uptime Kuma deployment" - return 1 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/uptime_kuma/deploy_uptime_kuma_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/uptime_kuma/deploy_uptime_kuma_playbook.yml; then - print_success "Uptime Kuma deployment complete" - echo "" - print_warning "IMPORTANT: First-time setup required" - echo " 1. Access Uptime Kuma at your configured subdomain" - echo " 2. Create admin user on first visit" - echo " 3. Update ansible/infra_secrets.yml with credentials" - return 0 - else - print_error "Uptime Kuma deployment failed" - return 1 - fi -} - -############################################################################### -# Backup Configuration -############################################################################### - -setup_uptime_kuma_backup() { - print_header "Setting Up Uptime Kuma Backup (Optional)" - - cd "$ANSIBLE_DIR" - - print_info "This will set up automated backups to lapy" - echo "" - - if ! 
confirm_action "Set up Uptime Kuma backup to lapy?"; then - print_warning "Skipped backup setup" - return 0 - fi - - # Check if rsync is available - print_info "Verifying rsync is installed on watchtower and lapy..." - if ! ansible watchtower -i inventory.ini -m shell -a "command -v rsync" &>/dev/null; then - print_error "rsync not found on watchtower" - print_info "Run Layer 2 to install rsync" - print_warning "Backup setup skipped - rsync not available" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/uptime_kuma/setup_backup_uptime_kuma_to_lapy.yml" - echo "" - - if ansible-playbook -i inventory.ini services/uptime_kuma/setup_backup_uptime_kuma_to_lapy.yml; then - print_success "Uptime Kuma backup configured" - print_info "Backups will run periodically via cron" - return 0 - else - print_error "Backup setup failed" - return 1 - fi -} - -############################################################################### -# Post-Deployment Configuration -############################################################################### - -setup_ntfy_notification() { - print_header "Setting Up ntfy Notification in Uptime Kuma (Optional)" - - cd "$ANSIBLE_DIR" - - print_info "This will automatically configure ntfy as a notification method in Uptime Kuma" - print_warning "Prerequisites:" - echo " • Uptime Kuma admin account must be created first" - echo " • infra_secrets.yml must have Uptime Kuma credentials" - echo "" - - if ! confirm_action "Set up ntfy notification in Uptime Kuma?"; then - print_warning "Skipped ntfy notification setup" - print_info "You can set this up manually or run this script again later" - return 0 - fi - - # Check if infra_secrets.yml has Uptime Kuma credentials - if ! grep -q "uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null || \ - ! grep -q "uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null; then - print_error "Uptime Kuma credentials not found in infra_secrets.yml" - print_info "Please complete Step 1 and 2 of post-deployment steps first:" - echo " 1. Create admin user in Uptime Kuma web UI" - echo " 2. Add credentials to ansible/infra_secrets.yml" - print_warning "Skipped - you can run this script again after completing those steps" - return 0 - fi - - # Check credentials are not empty - local uk_user=$(grep "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'") - local uk_pass=$(grep "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'") - - if [ -z "$uk_user" ] || [ -z "$uk_pass" ]; then - print_error "Uptime Kuma credentials are empty in infra_secrets.yml" - print_info "Please update ansible/infra_secrets.yml with your credentials" - return 0 - fi - - print_success "Found Uptime Kuma credentials in infra_secrets.yml" - - print_info "Running playbook to configure ntfy notification..." - echo "" - - if ansible-playbook -i inventory.ini services/ntfy/setup_ntfy_uptime_kuma_notification.yml; then - print_success "ntfy notification configured in Uptime Kuma" - print_info "You can now use ntfy for all your monitors!" 
- return 0 - else - print_error "Failed to configure ntfy notification" - print_info "You can set this up manually or run the playbook again later:" - echo " ansible-playbook -i inventory.ini services/ntfy/setup_ntfy_uptime_kuma_notification.yml" - return 0 - fi -} - -############################################################################### -# Verification Functions -############################################################################### - -verify_deployments() { - print_header "Verifying Deployments" - - cd "$ANSIBLE_DIR" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - local watchtower_host - watchtower_host=$(get_hosts_from_inventory "watchtower") - - if [ -z "$watchtower_host" ]; then - print_error "Could not determine watchtower host" - return - fi - - print_info "Checking services on watchtower ($watchtower_host)..." - echo "" - - # Check ntfy - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$watchtower_host "systemctl is-active ntfy" &>/dev/null; then - print_success "ntfy service running" - else - print_warning "ntfy service not running or not installed" - fi - - # Check Uptime Kuma docker container - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$watchtower_host "docker ps | grep uptime-kuma" &>/dev/null; then - print_success "Uptime Kuma container running" - else - print_warning "Uptime Kuma container not running" - fi - - # Check Caddy configs - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$watchtower_host "test -f /etc/caddy/sites-enabled/ntfy.conf" &>/dev/null; then - print_success "ntfy Caddy config exists" - else - print_warning "ntfy Caddy config not found" - fi - - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$watchtower_host "test -f /etc/caddy/sites-enabled/uptime-kuma.conf" &>/dev/null; then - print_success "Uptime Kuma Caddy config exists" - else - print_warning "Uptime Kuma Caddy config not found" - fi - - echo "" -} - -verify_final_setup() { - print_header "Final Verification - Post-Deployment Steps" - - cd "$ANSIBLE_DIR" - - print_info "Checking if all post-deployment steps were completed..." - echo "" - - local all_ok=true - - # Check 1: infra_secrets.yml has Uptime Kuma credentials - print_info "Checking infra_secrets.yml..." 
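-    # The greps below expect two top-level keys in infra_secrets.yml, e.g.
-    # (values are illustrative placeholders):
-    #   uptime_kuma_username: "admin"
-    #   uptime_kuma_password: "<your-password>"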
- if grep -q "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null && \ - grep -q "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null; then - local uk_user=$(grep "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'") - local uk_pass=$(grep "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'") - - if [ -n "$uk_user" ] && [ -n "$uk_pass" ] && [ "$uk_user" != '""' ] && [ "$uk_pass" != '""' ]; then - print_success "Uptime Kuma credentials configured in infra_secrets.yml" - else - print_error "Uptime Kuma credentials are empty in infra_secrets.yml" - print_info "Please complete Step 2: Update infra_secrets.yml" - all_ok=false - fi - else - print_error "Uptime Kuma credentials not found in infra_secrets.yml" - print_info "Please complete Step 2: Update infra_secrets.yml" - all_ok=false - fi - - echo "" - - # Check 2: Can connect to Uptime Kuma API - print_info "Checking Uptime Kuma API access..." - - if [ -n "$uk_user" ] && [ -n "$uk_pass" ]; then - # Create a test Python script to check API access - local test_script=$(mktemp) - cat > "$test_script" << 'EOFPYTHON' -import sys -import yaml -from uptime_kuma_api import UptimeKumaApi - -try: - # Load config - with open('infra_vars.yml', 'r') as f: - infra_vars = yaml.safe_load(f) - - with open('services/uptime_kuma/uptime_kuma_vars.yml', 'r') as f: - uk_vars = yaml.safe_load(f) - - with open('infra_secrets.yml', 'r') as f: - secrets = yaml.safe_load(f) - - root_domain = infra_vars.get('root_domain') - subdomain = uk_vars.get('uptime_kuma_subdomain', 'uptime') - url = f"https://{subdomain}.{root_domain}" - - username = secrets.get('uptime_kuma_username') - password = secrets.get('uptime_kuma_password') - - # Try to connect - api = UptimeKumaApi(url) - api.login(username, password) - - # Check if we can get monitors - monitors = api.get_monitors() - - print(f"SUCCESS:{len(monitors)}") - api.disconnect() - sys.exit(0) - -except Exception as e: - print(f"ERROR:{str(e)}", file=sys.stderr) - sys.exit(1) -EOFPYTHON - - local result=$(cd "$ANSIBLE_DIR" && python3 "$test_script" 2>&1) - rm -f "$test_script" - - if echo "$result" | grep -q "^SUCCESS:"; then - local monitor_count=$(echo "$result" | grep "^SUCCESS:" | cut -d: -f2) - print_success "Successfully connected to Uptime Kuma API" - print_info "Current monitors: $monitor_count" - else - print_error "Cannot connect to Uptime Kuma API" - print_warning "This usually means:" - echo " • Admin account not created yet (Step 1)" - echo " • Wrong credentials in infra_secrets.yml (Step 2)" - echo " • Uptime Kuma not accessible" - all_ok=false - fi - else - print_warning "Skipping API check - credentials not configured" - all_ok=false - fi - - echo "" - - # Check 3: ntfy notification configured in Uptime Kuma - print_info "Checking ntfy notification configuration..." 
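-    # Same connection pattern as the API check above; the heredoc that follows
-    # additionally calls api.get_notifications() and looks for an entry whose
-    # 'type' field is 'ntfy'.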
- - if [ -n "$uk_user" ] && [ -n "$uk_pass" ]; then - local test_notif=$(mktemp) - cat > "$test_notif" << 'EOFPYTHON' -import sys -import yaml -from uptime_kuma_api import UptimeKumaApi - -try: - # Load config - with open('infra_vars.yml', 'r') as f: - infra_vars = yaml.safe_load(f) - - with open('services/uptime_kuma/uptime_kuma_vars.yml', 'r') as f: - uk_vars = yaml.safe_load(f) - - with open('infra_secrets.yml', 'r') as f: - secrets = yaml.safe_load(f) - - root_domain = infra_vars.get('root_domain') - subdomain = uk_vars.get('uptime_kuma_subdomain', 'uptime') - url = f"https://{subdomain}.{root_domain}" - - username = secrets.get('uptime_kuma_username') - password = secrets.get('uptime_kuma_password') - - # Connect - api = UptimeKumaApi(url) - api.login(username, password) - - # Check for ntfy notification - notifications = api.get_notifications() - ntfy_found = any(n.get('type') == 'ntfy' for n in notifications) - - if ntfy_found: - print("SUCCESS:ntfy notification configured") - else: - print("NOTFOUND:No ntfy notification found") - - api.disconnect() - sys.exit(0) - -except Exception as e: - print(f"ERROR:{str(e)}", file=sys.stderr) - sys.exit(1) -EOFPYTHON - - local notif_result=$(cd "$ANSIBLE_DIR" && python3 "$test_notif" 2>&1) - rm -f "$test_notif" - - if echo "$notif_result" | grep -q "^SUCCESS:"; then - print_success "ntfy notification is configured in Uptime Kuma" - elif echo "$notif_result" | grep -q "^NOTFOUND:"; then - print_warning "ntfy notification not yet configured" - print_info "Run the script again and choose 'yes' for ntfy notification setup" - print_info "Or complete Step 3 manually" - all_ok=false - else - print_warning "Could not verify ntfy notification (API access issue)" - fi - else - print_warning "Skipping ntfy check - credentials not configured" - fi - - echo "" - - # Summary - if [ "$all_ok" = true ]; then - print_success "All post-deployment steps completed! ✓" - echo "" - print_info "Layer 4 is fully configured and ready to use" - print_info "You can now proceed to Layer 6 (infrastructure monitoring)" - return 0 - else - print_warning "Some post-deployment steps are incomplete" - echo "" - print_info "Complete these steps:" - echo " 1. Access Uptime Kuma web UI and create admin account" - echo " 2. Update ansible/infra_secrets.yml with credentials" - echo " 3. Run this script again to configure ntfy notification" - echo "" - print_info "You can also complete manually and verify with:" - echo " ./scripts/setup_layer_4_monitoring.sh" - return 1 - fi -} - -############################################################################### -# Summary Functions -############################################################################### - -print_summary() { - print_header "Layer 4 Setup Complete! 🎉" - - echo "Summary of what was configured:" - echo "" - print_success "ntfy notification service deployed" - print_success "Uptime Kuma monitoring platform deployed" - print_success "Caddy reverse proxy configured for both services" - echo "" - - print_warning "REQUIRED POST-DEPLOYMENT STEPS:" - echo "" - echo "MANUAL (do these first):" - echo " 1. Access Uptime Kuma Web UI and create admin account" - echo " 2. Update ansible/infra_secrets.yml with credentials" - echo "" - echo "AUTOMATED (script can do these):" - echo " 3. Configure ntfy notification - script will offer to set this up" - echo " 4. 
Final verification - script will check everything" - echo "" - print_info "After completing steps 1 & 2, the script will:" - echo " • Automatically configure ntfy in Uptime Kuma" - echo " • Verify all post-deployment steps" - echo " • Tell you if anything is missing" - echo "" - print_warning "You MUST complete steps 1 & 2 before proceeding to Layer 6!" - echo "" - - print_info "What these services enable:" - echo " • ntfy: Push notifications to your devices" - echo " • Uptime Kuma: Monitor all services and infrastructure" - echo " • Together: Complete monitoring and alerting solution" - echo "" - - print_info "Next steps:" - echo " 1. Complete the post-deployment steps above" - echo " 2. Test ntfy: Send a test notification" - echo " 3. Test Uptime Kuma: Create a test monitor" - echo " 4. Proceed to Layer 5: ./scripts/setup_layer_5_headscale.sh (optional)" - echo " OR Layer 6: ./scripts/setup_layer_6_infra_monitoring.sh" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "📊 Layer 4: Core Monitoring & Notifications" - - echo "This script will deploy ntfy and Uptime Kuma on watchtower." - echo "" - print_info "Services to deploy:" - echo " • ntfy (notification service)" - echo " • Uptime Kuma (monitoring platform)" - echo "" - - if ! confirm_action "Continue with Layer 4 setup?"; then - echo "Setup cancelled." - exit 0 - fi - - check_prerequisites - check_vars_files - check_dns_configuration - - # Deploy services (don't fail if skipped) - deploy_ntfy || true - echo "" - deploy_uptime_kuma || true - echo "" - setup_uptime_kuma_backup || true - - echo "" - verify_deployments - - # Always show summary and offer ntfy configuration - print_summary - echo "" - - # Always ask about ntfy notification setup (regardless of deployment status) - print_header "Configure ntfy Notification in Uptime Kuma" - print_info "After creating your Uptime Kuma admin account and updating infra_secrets.yml," - print_info "the script can automatically configure ntfy as a notification method." - echo "" - print_warning "Prerequisites:" - echo " 1. Access Uptime Kuma web UI and create admin account" - echo " 2. Update ansible/infra_secrets.yml with your credentials" - echo "" - - # Always offer to set up ntfy notification - setup_ntfy_notification - - # Final verification - echo "" - verify_final_setup -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_5_headscale.sh b/scripts/setup_layer_5_headscale.sh deleted file mode 100755 index 0c89745..0000000 --- a/scripts/setup_layer_5_headscale.sh +++ /dev/null @@ -1,524 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 5: VPN Infrastructure (Headscale) -# -# This script deploys Headscale and optionally joins machines to the mesh. -# Must be run after Layers 0, 1A, and 3 are complete. -# THIS LAYER IS OPTIONAL - skip to Layer 6 if you don't need VPN. -############################################################################### - -set -e # Exit on error - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Project root directory -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -############################################################################### -# Helper Functions -############################################################################### - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -############################################################################### -# Verification Functions -############################################################################### - -check_prerequisites() { - print_header "Verifying Prerequisites" - - local errors=0 - - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) - else - print_success "Virtual environment activated" - fi - - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found" - fi - - if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - # Check if spacey is configured - if [ -z "$(get_hosts_from_inventory "spacey")" ]; then - print_error "spacey not configured in inventory.ini" - print_info "Layer 5 requires spacey VPS for Headscale server" - ((errors++)) - else - print_success "spacey configured in inventory" - fi - - if [ $errors -gt 0 ]; then - print_error "Prerequisites not met" - exit 1 - fi - - print_success "Prerequisites verified" -} - -get_hosts_from_inventory() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -if target in data: - print(' '.join(data[target].get('hosts', []))) -else: - hostvars = data.get('_meta', {}).get('hostvars', {}) - if target in hostvars: - print(target) -PY -} - -get_host_ip() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -hostvars = data.get('_meta', {}).get('hostvars', {}) -if target in hostvars: - print(hostvars[target].get('ansible_host', target)) -else: - hosts = data.get(target, {}).get('hosts', []) - if hosts: - first = hosts[0] - hv = hostvars.get(first, {}) - print(hv.get('ansible_host', first)) -PY -} - -check_vars_files() { - print_header "Checking Configuration Files" - - # Check services_config.yml - if [ ! 
-f "$ANSIBLE_DIR/services_config.yml" ]; then - print_error "services_config.yml not found" - print_info "This file should have been created in Layer 0" - exit 1 - fi - - print_success "services_config.yml exists" - - # Show configured subdomain - local hs_sub=$(grep "^ headscale:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "headscale") - print_info "Configured subdomain: headscale: $hs_sub" - echo "" -} - -check_dns_configuration() { - print_header "Validating DNS Configuration" - - cd "$ANSIBLE_DIR" - - # Get spacey IP - local spacey_ip=$(get_host_ip "spacey") - - if [ -z "$spacey_ip" ]; then - print_error "Could not determine spacey IP from inventory" - return 1 - fi - - print_info "Spacey IP: $spacey_ip" - echo "" - - # Get domain from infra_vars.yml - local root_domain=$(grep "^root_domain:" "$ANSIBLE_DIR/infra_vars.yml" | awk '{print $2}' 2>/dev/null) - - if [ -z "$root_domain" ]; then - print_error "Could not determine root_domain from infra_vars.yml" - return 1 - fi - - # Get subdomain from centralized config - local headscale_subdomain="headscale" - - if [ -f "$ANSIBLE_DIR/services_config.yml" ]; then - headscale_subdomain=$(grep "^ headscale:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "headscale") - fi - - local headscale_fqdn="${headscale_subdomain}.${root_domain}" - - print_info "Checking DNS record..." - echo "" - - # Check Headscale DNS - print_info "Checking $headscale_fqdn..." - if command -v dig &> /dev/null; then - local resolved=$(dig +short "$headscale_fqdn" | head -n1) - if [ "$resolved" = "$spacey_ip" ]; then - print_success "$headscale_fqdn → $resolved ✓" - elif [ -n "$resolved" ]; then - print_error "$headscale_fqdn → $resolved (expected $spacey_ip)" - print_warning "DNS changes can take time to propagate (up to 24-48 hours)" - echo "" - if ! confirm_action "Continue anyway? (SSL certificates will fail without proper DNS)"; then - exit 1 - fi - else - print_error "$headscale_fqdn does not resolve" - print_warning "DNS changes can take time to propagate" - echo "" - if ! confirm_action "Continue anyway? (SSL certificates will fail without proper DNS)"; then - exit 1 - fi - fi - else - print_warning "dig command not found, skipping DNS validation" - print_info "Install dnsutils/bind-tools to enable DNS validation" - fi - - echo "" - print_success "DNS validation complete" -} - -############################################################################### -# Headscale Deployment -############################################################################### - -deploy_headscale() { - print_header "Deploying Headscale Server" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Install Headscale on spacey" - echo " • Configure with deny-all ACL policy (you customize later)" - echo " • Create namespace for your network" - echo " • Set up Caddy reverse proxy" - echo " • Configure embedded DERP server" - echo "" - - print_warning "After deployment, you MUST configure ACL policies for machines to communicate" - echo "" - - if ! 
confirm_action "Proceed with Headscale deployment?"; then - print_warning "Skipped Headscale deployment" - return 1 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/headscale/deploy_headscale_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/headscale/deploy_headscale_playbook.yml; then - print_success "Headscale deployment complete" - return 0 - else - print_error "Headscale deployment failed" - return 1 - fi -} - -############################################################################### -# Join Machines to Mesh -############################################################################### - -join_machines_to_mesh() { - print_header "Join Machines to Mesh (Optional)" - - cd "$ANSIBLE_DIR" - - print_info "This will install Tailscale client and join machines to your Headscale mesh" - echo "" - - # Show available hosts - echo "Available hosts to join:" - for group in vipy watchtower nodito lapy; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - echo " [$group]: $hosts" - fi - done - echo "" - - print_info "Join options:" - echo " 1. Join recommended machines (vipy, watchtower, nodito)" - echo " 2. Join all machines" - echo " 3. Custom selection (specify groups)" - echo " 4. Skip - join machines later manually" - echo "" - - echo -e -n "${BLUE}Choose option${NC} [1-4]: " - read option - - local limit_hosts="" - case "$option" in - 1) - limit_hosts="vipy,watchtower,nodito" - print_info "Joining: vipy, watchtower, nodito" - ;; - 2) - limit_hosts="all" - print_info "Joining: all hosts" - ;; - 3) - echo -e -n "${BLUE}Enter groups (comma-separated, e.g., vipy,watchtower)${NC}: " - read limit_hosts - print_info "Joining: $limit_hosts" - ;; - 4) - print_warning "Skipping machine join - you can join manually later" - print_info "To join manually:" - echo " ansible-playbook -i inventory.ini infra/920_join_headscale_mesh.yml --limit " - return 0 - ;; - *) - print_error "Invalid option" - return 0 - ;; - esac - - echo "" - if ! confirm_action "Proceed with joining machines?"; then - print_warning "Skipped joining machines" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini infra/920_join_headscale_mesh.yml --limit $limit_hosts" - echo "" - - if ansible-playbook -i inventory.ini infra/920_join_headscale_mesh.yml --limit "$limit_hosts"; then - print_success "Machines joined to mesh" - return 0 - else - print_error "Failed to join some machines" - print_info "You can retry or join manually later" - return 0 - fi -} - -############################################################################### -# Backup Configuration -############################################################################### - -setup_headscale_backup() { - print_header "Setting Up Headscale Backup (Optional)" - - cd "$ANSIBLE_DIR" - - print_info "This will set up automated backups to lapy" - echo "" - - if ! confirm_action "Set up Headscale backup to lapy?"; then - print_warning "Skipped backup setup" - return 0 - fi - - # Check if rsync is available - print_info "Verifying rsync is installed on spacey and lapy..." - if ! 
ansible spacey -i inventory.ini -m shell -a "command -v rsync" &>/dev/null; then - print_error "rsync not found on spacey" - print_info "Run Layer 2 to install rsync" - print_warning "Backup setup skipped - rsync not available" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/headscale/setup_backup_headscale_to_lapy.yml" - echo "" - - if ansible-playbook -i inventory.ini services/headscale/setup_backup_headscale_to_lapy.yml; then - print_success "Headscale backup configured" - print_info "Backups will run periodically via cron" - return 0 - else - print_error "Backup setup failed" - return 0 - fi -} - -############################################################################### -# Verification Functions -############################################################################### - -verify_deployment() { - print_header "Verifying Headscale Deployment" - - cd "$ANSIBLE_DIR" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - local spacey_host=$(get_hosts_from_inventory "spacey") - - if [ -z "$spacey_host" ]; then - print_error "Could not determine spacey host" - return - fi - - print_info "Checking Headscale on spacey ($spacey_host)..." - echo "" - - # Check Headscale service - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$spacey_host "systemctl is-active headscale" &>/dev/null; then - print_success "Headscale service running" - else - print_warning "Headscale service not running" - fi - - # Check Caddy config - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$spacey_host "test -f /etc/caddy/sites-enabled/headscale.conf" &>/dev/null; then - print_success "Headscale Caddy config exists" - else - print_warning "Headscale Caddy config not found" - fi - - # Check ACL file - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$spacey_host "test -f /etc/headscale/acl.json" &>/dev/null; then - print_success "ACL policy file exists" - else - print_warning "ACL policy file not found" - fi - - # List nodes - print_info "Attempting to list connected nodes..." - local nodes_output=$(timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$spacey_host "sudo headscale nodes list" 2>/dev/null || echo "") - - if [ -n "$nodes_output" ]; then - echo "$nodes_output" - else - print_warning "Could not list nodes (this is normal if no machines joined yet)" - fi - - echo "" -} - -############################################################################### -# Summary Functions -############################################################################### - -print_summary() { - print_header "Layer 5 Setup Complete! 🎉" - - echo "Summary of what was configured:" - echo "" - print_success "Headscale VPN server deployed on spacey" - print_success "Caddy reverse proxy configured" - print_success "Namespace created for your network" - echo "" - - print_warning "CRITICAL POST-DEPLOYMENT STEPS:" - echo "" - echo "1. Configure ACL Policies (REQUIRED for machines to communicate):" - echo " • SSH to spacey: ssh counterweight@" - echo " • Edit ACL: sudo nano /etc/headscale/acl.json" - echo " • Add rules to allow communication" - echo " • Restart: sudo systemctl restart headscale" - echo "" - echo "2. 
Verify machines joined (if you selected that option):" - echo " • SSH to spacey: ssh counterweight@<spacey_ip>" - echo " • List nodes: sudo headscale nodes list" - echo "" - echo "3. Join additional machines (mobile, desktop):" - echo " • Generate key: sudo headscale preauthkeys create --user <namespace> --reusable" - echo " • On device: tailscale up --login-server https://<headscale_domain> --authkey <key>" - echo "" - - print_info "What Headscale enables:" - echo " • Secure mesh networking between all machines" - echo " • Magic DNS - access machines by hostname" - echo " • NAT traversal - works behind firewalls" - echo " • Self-hosted Tailscale alternative" - echo "" - - print_info "Next steps:" - echo " 1. Configure ACL policies on spacey" - echo " 2. Verify nodes are connected" - echo " 3. Proceed to Layer 6: ./scripts/setup_layer_6_infra_monitoring.sh" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "🔐 Layer 5: VPN Infrastructure (Headscale)" - - echo "This script will deploy Headscale for secure mesh networking." - echo "" - print_warning "THIS LAYER IS OPTIONAL" - print_info "Skip to Layer 6 if you don't need VPN mesh networking" - echo "" - - if ! confirm_action "Continue with Layer 5 setup?"; then - echo "Setup skipped - proceeding to Layer 6 is fine!" - exit 0 - fi - - check_prerequisites - check_vars_files - check_dns_configuration - - # Deploy Headscale - if deploy_headscale; then - echo "" - join_machines_to_mesh - echo "" - setup_headscale_backup - echo "" - verify_deployment - print_summary - else - print_error "Headscale deployment failed" - exit 1 - fi -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_6_infra_monitoring.sh b/scripts/setup_layer_6_infra_monitoring.sh deleted file mode 100755 index 7c12780..0000000 --- a/scripts/setup_layer_6_infra_monitoring.sh +++ /dev/null @@ -1,473 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 6: Infrastructure Monitoring -# -# This script deploys disk usage, healthcheck, and CPU temp monitoring. -# Must be run after Layer 4 (Uptime Kuma) is complete with credentials set. -############################################################################### - -set -e # Exit on error - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Project root directory -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.."
&& pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -############################################################################### -# Helper Functions -############################################################################### - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -############################################################################### -# Verification Functions -############################################################################### - -check_prerequisites() { - print_header "Verifying Prerequisites" - - local errors=0 - - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) - else - print_success "Virtual environment activated" - fi - - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found" - fi - - if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - # Check Python uptime-kuma-api - if ! python3 -c "import uptime_kuma_api" 2>/dev/null; then - print_error "uptime-kuma-api Python package not found" - print_info "Install with: pip install -r requirements.txt" - ((errors++)) - else - print_success "uptime-kuma-api package found" - fi - - if [ $errors -gt 0 ]; then - print_error "Prerequisites not met" - exit 1 - fi - - print_success "Prerequisites verified" -} - -check_uptime_kuma_credentials() { - print_header "Verifying Uptime Kuma Configuration" - - cd "$ANSIBLE_DIR" - - # Check if infra_secrets.yml has credentials - if ! grep -q "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null || \ - ! grep -q "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" 2>/dev/null; then - print_error "Uptime Kuma credentials not found in infra_secrets.yml" - print_info "You must complete Layer 4 post-deployment steps first:" - echo " 1. Create admin user in Uptime Kuma web UI" - echo " 2. Add credentials to ansible/infra_secrets.yml" - exit 1 - fi - - local uk_user=$(grep "^uptime_kuma_username:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'") - local uk_pass=$(grep "^uptime_kuma_password:" "$ANSIBLE_DIR/infra_secrets.yml" | awk '{print $2}' | tr -d '"' | tr -d "'") - - if [ -z "$uk_user" ] || [ -z "$uk_pass" ]; then - print_error "Uptime Kuma credentials are empty in infra_secrets.yml" - exit 1 - fi - - print_success "Uptime Kuma credentials found" - - # Test API connection - print_info "Testing Uptime Kuma API connection..." 
- - local test_script=$(mktemp) - cat > "$test_script" << 'EOFPYTHON' -import sys -import yaml -from uptime_kuma_api import UptimeKumaApi - -try: - with open('infra_vars.yml', 'r') as f: - infra_vars = yaml.safe_load(f) - - with open('services_config.yml', 'r') as f: - services_config = yaml.safe_load(f) - - with open('infra_secrets.yml', 'r') as f: - secrets = yaml.safe_load(f) - - root_domain = infra_vars.get('root_domain') - subdomain = services_config.get('subdomains', {}).get('uptime_kuma', 'uptime') - url = f"https://{subdomain}.{root_domain}" - - username = secrets.get('uptime_kuma_username') - password = secrets.get('uptime_kuma_password') - - api = UptimeKumaApi(url) - api.login(username, password) - - monitors = api.get_monitors() - print(f"SUCCESS:{len(monitors)}") - api.disconnect() - -except Exception as e: - print(f"ERROR:{str(e)}", file=sys.stderr) - sys.exit(1) -EOFPYTHON - - local result=$(cd "$ANSIBLE_DIR" && python3 "$test_script" 2>&1) - rm -f "$test_script" - - if echo "$result" | grep -q "^SUCCESS:"; then - local monitor_count=$(echo "$result" | grep "^SUCCESS:" | cut -d: -f2) - print_success "Successfully connected to Uptime Kuma API" - print_info "Current monitors: $monitor_count" - else - print_error "Cannot connect to Uptime Kuma API" - print_info "Error: $result" - echo "" - print_info "Make sure:" - echo " • Uptime Kuma is running (Layer 4)" - echo " • Credentials are correct in infra_secrets.yml" - echo " • Uptime Kuma is accessible" - exit 1 - fi - - echo "" - print_success "Uptime Kuma configuration verified" -} - -get_hosts_from_inventory() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -if target in data: - print(' '.join(data[target].get('hosts', []))) -else: - hostvars = data.get('_meta', {}).get('hostvars', {}) - if target in hostvars: - print(target) -PY -} - -############################################################################### -# Disk Usage Monitoring -############################################################################### - -deploy_disk_usage_monitoring() { - print_header "Deploying Disk Usage Monitoring" - - cd "$ANSIBLE_DIR" - - print_info "This will deploy disk usage monitoring on selected hosts" - print_info "Default settings:" - echo " • Threshold: 80%" - echo " • Check interval: 15 minutes" - echo " • Mount point: /" - echo "" - - # Show available hosts - echo "Available hosts:" - for group in vipy watchtower spacey nodito lapy; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - echo " [$group]: $hosts" - fi - done - echo "" - - print_info "Deployment options:" - echo " 1. Deploy on all remote hosts (vipy, watchtower, spacey, nodito)" - echo " 2. Deploy on all hosts (including lapy)" - echo " 3. Custom selection (specify groups)" - echo " 4. Skip disk monitoring" - echo "" - - echo -e -n "${BLUE}Choose option${NC} [1-4]: " - read option - - local limit_hosts="" - case "$option" in - 1) - limit_hosts="vipy,watchtower,spacey,nodito" - print_info "Deploying to remote hosts" - ;; - 2) - limit_hosts="all" - print_info "Deploying to all hosts" - ;; - 3) - echo -e -n "${BLUE}Enter groups (comma-separated)${NC}: " - read limit_hosts - print_info "Deploying to: $limit_hosts" - ;; - 4) - print_warning "Skipping disk usage monitoring" - return 0 - ;; - *) - print_error "Invalid option" - return 0 - ;; - esac - - echo "" - if ! 
confirm_action "Proceed with disk usage monitoring deployment?"; then - print_warning "Skipped" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini infra/410_disk_usage_alerts.yml --limit $limit_hosts" - echo "" - - if ansible-playbook -i inventory.ini infra/410_disk_usage_alerts.yml --limit "$limit_hosts"; then - print_success "Disk usage monitoring deployed" - return 0 - else - print_error "Deployment failed" - return 0 - fi -} - -############################################################################### -# System Healthcheck Monitoring -############################################################################### - -deploy_system_healthcheck() { - print_header "Deploying System Healthcheck Monitoring" - - cd "$ANSIBLE_DIR" - - print_info "This will deploy system healthcheck monitoring on selected hosts" - print_info "Default settings:" - echo " • Heartbeat interval: 60 seconds" - echo " • Upside-down mode (no news is good news)" - echo "" - - # Show available hosts - echo "Available hosts:" - for group in vipy watchtower spacey nodito lapy; do - local hosts=$(get_hosts_from_inventory "$group") - if [ -n "$hosts" ]; then - echo " [$group]: $hosts" - fi - done - echo "" - - print_info "Deployment options:" - echo " 1. Deploy on all remote hosts (vipy, watchtower, spacey, nodito)" - echo " 2. Deploy on all hosts (including lapy)" - echo " 3. Custom selection (specify groups)" - echo " 4. Skip healthcheck monitoring" - echo "" - - echo -e -n "${BLUE}Choose option${NC} [1-4]: " - read option - - local limit_hosts="" - case "$option" in - 1) - limit_hosts="vipy,watchtower,spacey,nodito" - print_info "Deploying to remote hosts" - ;; - 2) - limit_hosts="all" - print_info "Deploying to all hosts" - ;; - 3) - echo -e -n "${BLUE}Enter groups (comma-separated)${NC}: " - read limit_hosts - print_info "Deploying to: $limit_hosts" - ;; - 4) - print_warning "Skipping healthcheck monitoring" - return 0 - ;; - *) - print_error "Invalid option" - return 0 - ;; - esac - - echo "" - if ! confirm_action "Proceed with healthcheck monitoring deployment?"; then - print_warning "Skipped" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini infra/420_system_healthcheck.yml --limit $limit_hosts" - echo "" - - if ansible-playbook -i inventory.ini infra/420_system_healthcheck.yml --limit "$limit_hosts"; then - print_success "System healthcheck monitoring deployed" - return 0 - else - print_error "Deployment failed" - return 0 - fi -} - -############################################################################### -# CPU Temperature Monitoring (Nodito) -############################################################################### - -deploy_cpu_temp_monitoring() { - print_header "Deploying CPU Temperature Monitoring (Nodito)" - - cd "$ANSIBLE_DIR" - - # Check if nodito is configured - local nodito_hosts=$(get_hosts_from_inventory "nodito") - if [ -z "$nodito_hosts" ]; then - print_info "Nodito not configured in inventory, skipping CPU temp monitoring" - return 0 - fi - - print_info "This will deploy CPU temperature monitoring on nodito (Proxmox)" - print_info "Default settings:" - echo " • Threshold: 80°C" - echo " • Check interval: 60 seconds" - echo "" - - echo "" - if ! 
confirm_action "Proceed with CPU temp monitoring deployment?"; then - print_warning "Skipped" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini infra/430_cpu_temp_alerts.yml" - echo "" - - if ansible-playbook -i inventory.ini infra/430_cpu_temp_alerts.yml; then - print_success "CPU temperature monitoring deployed" - return 0 - else - print_error "Deployment failed" - return 0 - fi -} - -############################################################################### -# Summary -############################################################################### - -print_summary() { - print_header "Layer 6 Setup Complete! 🎉" - - echo "Summary of what was deployed:" - echo "" - print_success "Infrastructure monitoring configured" - print_success "Monitors created in Uptime Kuma" - print_success "Systemd services and timers running" - echo "" - - print_info "What you have now:" - echo " • Disk usage monitoring on selected hosts" - echo " • System healthcheck monitoring" - echo " • CPU temperature monitoring (if nodito configured)" - echo " • All organized in host-specific groups" - echo "" - - print_info "Verify your monitoring:" - echo " 1. Open Uptime Kuma web UI" - echo " 2. Check monitors organized by host groups" - echo " 3. Verify monitors are receiving data" - echo " 4. Configure notification rules" - echo " 5. Watch for alerts via ntfy" - echo "" - - print_info "Next steps:" - echo " 1. Customize thresholds if needed" - echo " 2. Proceed to Layer 7: Core Services deployment" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "📊 Layer 6: Infrastructure Monitoring" - - echo "This script will deploy automated monitoring for your infrastructure." - echo "" - - if ! confirm_action "Continue with Layer 6 setup?"; then - echo "Setup cancelled." - exit 0 - fi - - check_prerequisites - check_uptime_kuma_credentials - - # Deploy monitoring - deploy_disk_usage_monitoring - echo "" - deploy_system_healthcheck - echo "" - deploy_cpu_temp_monitoring - - echo "" - print_summary -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_7_services.sh b/scripts/setup_layer_7_services.sh deleted file mode 100755 index 27c3c8d..0000000 --- a/scripts/setup_layer_7_services.sh +++ /dev/null @@ -1,524 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 7: Core Services -# -# This script deploys Vaultwarden, Forgejo, and LNBits on vipy. -# Must be run after Layers 0, 1A, 2, and 3 are complete. -############################################################################### - -set -e # Exit on error - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Project root directory -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -############################################################################### -# Helper Functions -############################################################################### - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -############################################################################### -# Verification Functions -############################################################################### - -check_prerequisites() { - print_header "Verifying Prerequisites" - - local errors=0 - - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) - else - print_success "Virtual environment activated" - fi - - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found" - fi - - if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - # Check if vipy is configured - if [ -z "$(get_hosts_from_inventory "vipy")" ]; then - print_error "vipy not configured in inventory.ini" - print_info "Layer 7 requires vipy VPS" - ((errors++)) - else - print_success "vipy configured in inventory" - fi - - if [ $errors -gt 0 ]; then - print_error "Prerequisites not met" - exit 1 - fi - - print_success "Prerequisites verified" -} - -get_hosts_from_inventory() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -if target in data: - print(' '.join(data[target].get('hosts', []))) -else: - hostvars = data.get('_meta', {}).get('hostvars', {}) - if target in hostvars: - print(target) -PY -} - -get_host_ip() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -hostvars = data.get('_meta', {}).get('hostvars', {}) -if target in hostvars: - print(hostvars[target].get('ansible_host', target)) -else: - hosts = data.get(target, {}).get('hosts', []) - if hosts: - first = hosts[0] - hv = hostvars.get(first, {}) - print(hv.get('ansible_host', first)) -PY -} - -check_dns_configuration() { - print_header "Validating DNS Configuration" - - cd "$ANSIBLE_DIR" - - # Get vipy IP - local vipy_ip=$(get_host_ip "vipy") - - if [ -z "$vipy_ip" ]; then - print_error "Could not determine vipy IP from inventory" - return 1 - fi - - print_info "Vipy IP: $vipy_ip" - echo "" - - # Get domain from infra_vars.yml - local root_domain=$(grep "^root_domain:" "$ANSIBLE_DIR/infra_vars.yml" | awk '{print $2}' 2>/dev/null) - - if [ -z "$root_domain" ]; then - print_error "Could not determine root_domain from infra_vars.yml" - return 1 - fi - - # Get subdomains from centralized config - local vw_subdomain="vault" - local fg_subdomain="git" - 
local ln_subdomain="lnbits" - - if [ -f "$ANSIBLE_DIR/services_config.yml" ]; then - vw_subdomain=$(grep "^ vaultwarden:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "vault") - fg_subdomain=$(grep "^ forgejo:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "git") - ln_subdomain=$(grep "^ lnbits:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "lnbits") - fi - - print_info "Checking DNS records..." - echo "" - - local dns_ok=true - - if command -v dig &> /dev/null; then - # Check each subdomain - for service in "vaultwarden:$vw_subdomain" "forgejo:$fg_subdomain" "lnbits:$ln_subdomain"; do - local name=$(echo "$service" | cut -d: -f1) - local subdomain=$(echo "$service" | cut -d: -f2) - local fqdn="${subdomain}.${root_domain}" - - print_info "Checking $fqdn..." - local resolved=$(dig +short "$fqdn" | head -n1) - - if [ "$resolved" = "$vipy_ip" ]; then - print_success "$fqdn → $resolved" - elif [ -n "$resolved" ]; then - print_error "$fqdn → $resolved (expected $vipy_ip)" - dns_ok=false - else - print_error "$fqdn does not resolve" - dns_ok=false - fi - done - else - print_warning "dig command not found, skipping DNS validation" - print_info "Install dnsutils/bind-tools to enable DNS validation" - return 0 - fi - - echo "" - - if [ "$dns_ok" = false ]; then - print_error "DNS validation failed" - print_info "Please configure DNS records for all services" - echo "" - print_warning "DNS changes can take time to propagate" - echo "" - if ! confirm_action "Continue anyway? (SSL certificates will fail without proper DNS)"; then - exit 1 - fi - else - print_success "DNS validation passed" - fi -} - -############################################################################### -# Service Deployment -############################################################################### - -deploy_vaultwarden() { - print_header "Deploying Vaultwarden (Password Manager)" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Deploy Vaultwarden via Docker" - echo " • Configure Caddy reverse proxy" - echo " • Set up fail2ban protection" - echo " • Enable sign-ups (disable after first user)" - echo "" - - if ! confirm_action "Proceed with Vaultwarden deployment?"; then - print_warning "Skipped Vaultwarden deployment" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/vaultwarden/deploy_vaultwarden_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/vaultwarden/deploy_vaultwarden_playbook.yml; then - print_success "Vaultwarden deployed" - echo "" - print_warning "POST-DEPLOYMENT:" - echo " 1. Visit your Vaultwarden subdomain" - echo " 2. Create your first user account" - echo " 3. Run: ansible-playbook -i inventory.ini services/vaultwarden/disable_vaultwarden_sign_ups_playbook.yml" - return 0 - else - print_error "Vaultwarden deployment failed" - return 0 - fi -} - -deploy_forgejo() { - print_header "Deploying Forgejo (Git Server)" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Install Forgejo binary" - echo " • Create git user and directories" - echo " • Configure Caddy reverse proxy" - echo " • Enable SSH cloning on port 22" - echo "" - - if !
confirm_action "Proceed with Forgejo deployment?"; then - print_warning "Skipped Forgejo deployment" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/forgejo/deploy_forgejo_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/forgejo/deploy_forgejo_playbook.yml; then - print_success "Forgejo deployed" - echo "" - print_warning "POST-DEPLOYMENT:" - echo " 1. Visit your Forgejo subdomain" - echo " 2. Create admin account on first visit" - echo " 3. Add your SSH key for git cloning" - return 0 - else - print_error "Forgejo deployment failed" - return 0 - fi -} - -deploy_lnbits() { - print_header "Deploying LNBits (Lightning Wallet)" - - cd "$ANSIBLE_DIR" - - print_info "This will:" - echo " • Install system dependencies and uv (Python 3.12 tooling)" - echo " • Clone LNBits repository (version v1.3.1)" - echo " • Sync dependencies with uv targeting Python 3.12" - echo " • Configure with FakeWallet (testing)" - echo " • Create systemd service" - echo " • Configure Caddy reverse proxy" - echo "" - - if ! confirm_action "Proceed with LNBits deployment?"; then - print_warning "Skipped LNBits deployment" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/lnbits/deploy_lnbits_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/lnbits/deploy_lnbits_playbook.yml; then - print_success "LNBits deployed" - echo "" - print_warning "POST-DEPLOYMENT:" - echo " 1. Visit your LNBits subdomain" - echo " 2. Create superuser on first visit" - echo " 3. Configure real Lightning backend (FakeWallet is for testing only)" - echo " 4. Disable new user registration" - return 0 - else - print_error "LNBits deployment failed" - return 0 - fi -} - -############################################################################### -# Backup Configuration -############################################################################### - -setup_backups() { - print_header "Setting Up Backups (Optional)" - - cd "$ANSIBLE_DIR" - - print_info "Configure automated backups to lapy" - echo "" - - # Vaultwarden backup - if confirm_action "Set up Vaultwarden backup to lapy?"; then - print_info "Running: ansible-playbook -i inventory.ini services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml" - if ansible-playbook -i inventory.ini services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml; then - print_success "Vaultwarden backup configured" - else - print_error "Vaultwarden backup setup failed" - fi - echo "" - fi - - # LNBits backup - if confirm_action "Set up LNBits backup to lapy (GPG encrypted)?"; then - print_info "Running: ansible-playbook -i inventory.ini services/lnbits/setup_backup_lnbits_to_lapy.yml" - if ansible-playbook -i inventory.ini services/lnbits/setup_backup_lnbits_to_lapy.yml; then - print_success "LNBits backup configured" - else - print_error "LNBits backup setup failed" - fi - echo "" - fi - - print_warning "Forgejo backups are not automated - set up manually if needed" -} - -############################################################################### -# Verification -############################################################################### - -verify_services() { - print_header "Verifying Service Deployments" - - cd "$ANSIBLE_DIR" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - local vipy_host=$(get_hosts_from_inventory "vipy") - - if [ -z "$vipy_host" 
]; then - print_error "Could not determine vipy host" - return - fi - - print_info "Checking services on vipy ($vipy_host)..." - echo "" - - # Check Vaultwarden - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$vipy_host "docker ps | grep vaultwarden" &>/dev/null; then - print_success "Vaultwarden container running" - else - print_warning "Vaultwarden container not running (may not be deployed)" - fi - - # Check Forgejo - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$vipy_host "systemctl is-active forgejo" &>/dev/null; then - print_success "Forgejo service running" - else - print_warning "Forgejo service not running (may not be deployed)" - fi - - # Check LNBits - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$vipy_host "systemctl is-active lnbits" &>/dev/null; then - print_success "LNBits service running" - else - print_warning "LNBits service not running (may not be deployed)" - fi - - # Check Caddy configs - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$vipy_host "ls /etc/caddy/sites-enabled/*.conf 2>/dev/null" &>/dev/null; then - print_success "Caddy configs exist" - local configs=$(timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$vipy_host "ls /etc/caddy/sites-enabled/*.conf 2>/dev/null" | xargs -n1 basename) - print_info "Configured services:" - echo "$configs" | sed 's/^/ /' - else - print_warning "No Caddy configs found" - fi - - echo "" -} - -############################################################################### -# Summary -############################################################################### - -print_summary() { - print_header "Layer 7 Setup Complete! 🎉" - - echo "Summary of what was deployed:" - echo "" - print_success "Core services deployed on vipy" - echo "" - - print_warning "CRITICAL POST-DEPLOYMENT STEPS:" - echo "" - echo "For each service you deployed, you MUST:" - echo "" - - echo "1. Vaultwarden (if deployed):" - echo " • Visit web UI and create first user" - echo " • Disable sign-ups: ansible-playbook -i inventory.ini services/vaultwarden/disable_vaultwarden_sign_ups_playbook.yml" - echo " • Optional: Set up backup" - echo "" - - echo "2. Forgejo (if deployed):" - echo " • Visit web UI and create admin account" - echo " • Add your SSH public key for git operations" - echo " • Test cloning: git clone git@<forgejo_subdomain>.<root_domain>:username/repo.git" - echo "" - - echo "3. LNBits (if deployed):" - echo " • Visit web UI and create superuser" - echo " • Configure real Lightning backend (currently FakeWallet)" - echo " • Disable new user registration" - echo " • Optional: Set up encrypted backup" - echo "" - - print_info "Services are now accessible:" - echo " • Vaultwarden: https://<vaultwarden_subdomain>.<root_domain>" - echo " • Forgejo: https://<forgejo_subdomain>.<root_domain>" - echo " • LNBits: https://<lnbits_subdomain>.<root_domain>" - echo "" - - print_success "Uptime Kuma monitors automatically created:" - echo " • Check Uptime Kuma web UI" - echo " • Look in 'services' monitor group" - echo " • Monitors for Vaultwarden, Forgejo, LNBits should appear" - echo "" - - print_info "Next steps:" - echo " 1. Complete post-deployment steps above" - echo " 2. Test each service" - echo " 3. Check Uptime Kuma monitors are working" - echo " 4.
Proceed to Layer 8: ./scripts/setup_layer_8_secondary_services.sh" - echo "" -} - -############################################################################### -# Main Execution -############################################################################### - -main() { - clear - - print_header "🚀 Layer 7: Core Services" - - echo "This script will deploy core services on vipy:" - echo " • Vaultwarden (password manager)" - echo " • Forgejo (git server)" - echo " • LNBits (Lightning wallet)" - echo "" - - if ! confirm_action "Continue with Layer 7 setup?"; then - echo "Setup cancelled." - exit 0 - fi - - check_prerequisites - check_dns_configuration - - # Deploy services - deploy_vaultwarden - echo "" - deploy_forgejo - echo "" - deploy_lnbits - - echo "" - verify_services - - echo "" - setup_backups - - print_summary -} - -# Run main function -main "$@" - diff --git a/scripts/setup_layer_8_secondary_services.sh b/scripts/setup_layer_8_secondary_services.sh deleted file mode 100755 index fccaad8..0000000 --- a/scripts/setup_layer_8_secondary_services.sh +++ /dev/null @@ -1,384 +0,0 @@ -#!/bin/bash - -############################################################################### -# Layer 8: Secondary Services -# -# This script deploys the ntfy-emergency-app and memos services. -# Must be run after Layers 0-7 are complete. -############################################################################### - -set -e # Exit on error - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Project directories -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" -ANSIBLE_DIR="$PROJECT_ROOT/ansible" - -declare -a LAYER_SUMMARY=() - -print_header() { - echo -e "\n${BLUE}========================================${NC}" - echo -e "${BLUE}$1${NC}" - echo -e "${BLUE}========================================${NC}\n" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -confirm_action() { - local prompt="$1" - local response - - read -p "$(echo -e ${YELLOW}${prompt}${NC} [y/N]: )" response - [[ "$response" =~ ^[Yy]$ ]] -} - -record_summary() { - LAYER_SUMMARY+=("$1") -} - -get_hosts_from_inventory() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -if target in data: - print(' '.join(data[target].get('hosts', []))) -else: - hostvars = data.get('_meta', {}).get('hostvars', {}) - if target in hostvars: - print(target) -PY -} - -get_primary_host_ip() { - local target="$1" - cd "$ANSIBLE_DIR" - ansible-inventory -i inventory.ini --list | \ - python3 - "$target" <<'PY' 2>/dev/null || echo "" -import json, sys -data = json.load(sys.stdin) -target = sys.argv[1] -hostvars = data.get('_meta', {}).get('hostvars', {}) -if target in hostvars: - print(hostvars[target].get('ansible_host', target)) -else: - hosts = data.get(target, {}).get('hosts', []) - if hosts: - first = hosts[0] - hv = hostvars.get(first, {}) - print(hv.get('ansible_host', first)) -PY -} - -check_prerequisites() { - print_header "Verifying Prerequisites" - - local errors=0 - - if [ -z "$VIRTUAL_ENV" ]; then - print_error "Virtual environment not activated" - echo "Run: source venv/bin/activate" - ((errors++)) 
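- - # NOTE: under set -e, ((errors++)) aborts the script the first time it runs with errors=0: the post-increment expression evaluates to 0, so the arithmetic command returns status 1. A safer spelling for counters is: - # - #   errors=$((errors + 1))   # plain assignment, always returns status 0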
- else - print_success "Virtual environment activated" - fi - - if ! command -v ansible &> /dev/null; then - print_error "Ansible not found" - ((errors++)) - else - print_success "Ansible found" - fi - - if [ ! -f "$ANSIBLE_DIR/inventory.ini" ]; then - print_error "inventory.ini not found" - ((errors++)) - else - print_success "inventory.ini exists" - fi - - if [ ! -f "$ANSIBLE_DIR/infra_vars.yml" ]; then - print_error "infra_vars.yml not found" - ((errors++)) - else - print_success "infra_vars.yml exists" - fi - - if [ ! -f "$ANSIBLE_DIR/services_config.yml" ]; then - print_error "services_config.yml not found" - ((errors++)) - else - print_success "services_config.yml exists" - fi - - if [ -z "$(get_hosts_from_inventory "vipy")" ]; then - print_error "vipy not configured in inventory.ini" - ((errors++)) - else - print_success "vipy configured in inventory" - fi - - if [ -z "$(get_hosts_from_inventory "memos-box")" ]; then - print_warning "memos-box not configured in inventory.ini (memos deployment will be skipped)" - else - print_success "memos-box configured in inventory" - fi - - if [ $errors -gt 0 ]; then - print_error "Prerequisites not met. Resolve the issues above and re-run the script." - exit 1 - fi - - print_success "Prerequisites verified" - - # Display configured subdomains - local emergency_subdomain=$(grep "^ ntfy_emergency_app:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "emergency") - local memos_subdomain=$(grep "^ memos:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "memos") - - print_info "Configured subdomains:" - echo " • ntfy_emergency_app: $emergency_subdomain" - echo " • memos: $memos_subdomain" - echo "" -} - -check_dns_configuration() { - print_header "Validating DNS Configuration" - - if ! command -v dig &> /dev/null; then - print_warning "dig command not found. Skipping DNS validation." - print_info "Install dnsutils/bind-tools to enable DNS validation." - return 0 - fi - - cd "$ANSIBLE_DIR" - - local root_domain - root_domain=$(grep "^root_domain:" "$ANSIBLE_DIR/infra_vars.yml" | awk '{print $2}' 2>/dev/null) - - if [ -z "$root_domain" ]; then - print_error "Could not determine root_domain from infra_vars.yml" - return 1 - fi - - local emergency_subdomain=$(grep "^ ntfy_emergency_app:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "emergency") - local memos_subdomain=$(grep "^ memos:" "$ANSIBLE_DIR/services_config.yml" | awk '{print $2}' 2>/dev/null || echo "memos") - - local vipy_ip - vipy_ip=$(get_primary_host_ip "vipy") - - if [ -z "$vipy_ip" ]; then - print_error "Unable to determine vipy IP from inventory" - return 1 - fi - - local memos_ip="" - local memos_host=$(get_hosts_from_inventory "memos-box") - if [ -n "$memos_host" ]; then - memos_ip=$(get_primary_host_ip "$memos_host") - fi - - local dns_ok=true - - local emergency_fqdn="${emergency_subdomain}.${root_domain}" - local memos_fqdn="${memos_subdomain}.${root_domain}" - - print_info "Expected DNS:" - echo " • $emergency_fqdn → $vipy_ip" - if [ -n "$memos_ip" ]; then - echo " • $memos_fqdn → $memos_ip" - else - echo " • $memos_fqdn → (skipped - memos-box not in inventory)" - fi - echo "" - - local resolved - - print_info "Checking $emergency_fqdn..." 
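- - # dig +short prints only the resolved answers; head -n1 keeps the first record when the name resolves to several. Equivalent manual spot-check (the domain here is a placeholder): - # - #   dig +short emergency.example.com | head -n1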
- resolved=$(dig +short "$emergency_fqdn" | head -n1) - if [ "$resolved" = "$vipy_ip" ]; then - print_success "$emergency_fqdn resolves to $resolved" - elif [ -n "$resolved" ]; then - print_error "$emergency_fqdn resolves to $resolved (expected $vipy_ip)" - dns_ok=false - else - print_error "$emergency_fqdn does not resolve" - dns_ok=false - fi - - if [ -n "$memos_ip" ]; then - print_info "Checking $memos_fqdn..." - resolved=$(dig +short "$memos_fqdn" | head -n1) - if [ "$resolved" = "$memos_ip" ]; then - print_success "$memos_fqdn resolves to $resolved" - elif [ -n "$resolved" ]; then - print_error "$memos_fqdn resolves to $resolved (expected $memos_ip)" - dns_ok=false - else - print_error "$memos_fqdn does not resolve" - dns_ok=false - fi - fi - - echo "" - - if [ "$dns_ok" = false ]; then - print_error "DNS validation failed." - print_info "Update DNS records as shown above and wait for propagation." - echo "" - if ! confirm_action "Continue anyway? (SSL certificates will fail without correct DNS)"; then - exit 1 - fi - else - print_success "DNS validation passed" - fi -} - -deploy_ntfy_emergency_app() { - print_header "Deploying ntfy-emergency-app" - - cd "$ANSIBLE_DIR" - - print_info "This deploys the emergency notification interface pointing at ntfy." - echo "" - - if ! confirm_action "Deploy / update the ntfy-emergency-app?"; then - print_warning "Skipped ntfy-emergency-app deployment" - record_summary "${YELLOW}• ntfy-emergency-app${NC}: skipped" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/ntfy-emergency-app/deploy_ntfy_emergency_app_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/ntfy-emergency-app/deploy_ntfy_emergency_app_playbook.yml; then - print_success "ntfy-emergency-app deployed successfully" - record_summary "${GREEN}• ntfy-emergency-app${NC}: deployed" - else - print_error "ntfy-emergency-app deployment failed" - record_summary "${RED}• ntfy-emergency-app${NC}: failed" - fi -} - -deploy_memos() { - print_header "Deploying Memos" - - if [ -z "$(get_hosts_from_inventory "memos-box")" ]; then - print_warning "memos-box not in inventory. Skipping memos deployment." - record_summary "${YELLOW}• memos${NC}: skipped (memos-box missing)" - return 0 - fi - - cd "$ANSIBLE_DIR" - - if ! confirm_action "Deploy / update memos on memos-box?"; then - print_warning "Skipped memos deployment" - record_summary "${YELLOW}• memos${NC}: skipped" - return 0 - fi - - print_info "Running: ansible-playbook -i inventory.ini services/memos/deploy_memos_playbook.yml" - echo "" - - if ansible-playbook -i inventory.ini services/memos/deploy_memos_playbook.yml; then - print_success "Memos deployed successfully" - record_summary "${GREEN}• memos${NC}: deployed" - else - print_error "Memos deployment failed" - record_summary "${RED}• memos${NC}: failed" - fi -} - -verify_services() { - print_header "Verifying Deployments" - - cd "$ANSIBLE_DIR" - - local ssh_key=$(grep "ansible_ssh_private_key_file" "$ANSIBLE_DIR/inventory.ini" | head -n1 | sed 's/.*ansible_ssh_private_key_file=\([^ ]*\).*/\1/') - ssh_key="${ssh_key/#\~/$HOME}" - - local vipy_host - vipy_host=$(get_hosts_from_inventory "vipy") - - if [ -n "$vipy_host" ]; then - print_info "Checking services on vipy ($vipy_host)..." 
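- - # These probes use non-interactive SSH so an unreachable host or missing key fails fast instead of hanging: BatchMode=yes disables password prompts, and timeout bounds the whole attempt at 5 seconds. Manual equivalent (IP and key path are placeholders): - # - #   timeout 5 ssh -i ~/.ssh/id_ed25519 -o BatchMode=yes counterweight@203.0.113.10 "docker ps | grep ntfy-emergency-app"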
- - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$vipy_host "docker ps | grep ntfy-emergency-app" &>/dev/null; then - print_success "ntfy-emergency-app container running" - else - print_warning "ntfy-emergency-app container not running" - fi - - echo "" - fi - - local memos_host - memos_host=$(get_hosts_from_inventory "memos-box") - if [ -n "$memos_host" ]; then - print_info "Checking memos on memos-box ($memos_host)..." - if timeout 5 ssh -i "$ssh_key" -o StrictHostKeyChecking=no -o BatchMode=yes counterweight@$memos_host "systemctl is-active memos" &>/dev/null; then - print_success "memos systemd service running" - else - print_warning "memos systemd service not running" - fi - echo "" - fi -} - -print_summary() { - print_header "Layer 8 Summary" - - if [ ${#LAYER_SUMMARY[@]} -eq 0 ]; then - print_info "No actions were performed." - return - fi - - for entry in "${LAYER_SUMMARY[@]}"; do - echo -e "$entry" - done - - echo "" - print_info "Next steps:" - echo " • Visit each service's subdomain to complete any manual setup." - echo " • Configure backups for new services if applicable." - echo " • Update Uptime Kuma monitors if additional endpoints are desired." -} - -main() { - print_header "Layer 8: Secondary Services" - - check_prerequisites - check_dns_configuration - - deploy_ntfy_emergency_app - deploy_memos - - verify_services - print_summary -} - -main "$@" -