diff --git a/ansible/infra/920_join_headscale_mesh.yml b/ansible/infra/920_join_headscale_mesh.yml
index 8d06d44..cd6464c 100644
--- a/ansible/infra/920_join_headscale_mesh.yml
+++ b/ansible/infra/920_join_headscale_mesh.yml
@@ -44,7 +44,7 @@
       shell: >
         ssh {{ ssh_args }}
         {{ headscale_user }}@{{ headscale_host }}
-        "sudo headscale preauthkeys create --user {{ headscale_user_id }} --expiration 10m --output json"
+        "sudo headscale preauthkeys create --user {{ headscale_user_id }} --expiration 1m --output json"
       register: preauth_key_result
       changed_when: true
      failed_when: preauth_key_result.rc != 0
@@ -77,7 +77,7 @@

     - name: Add Tailscale repository
       apt_repository:
-        repo: "deb [signed-by=/etc/apt/keyrings/tailscale.gpg] https://pkgs.tailscale.com/stable/debian {{ ansible_distribution_release }} main"
+        repo: "deb [signed-by=/etc/apt/keyrings/tailscale.gpg] https://pkgs.tailscale.com/stable/debian {{ ansible_lsb.codename }} main"
         state: present
         update_cache: yes
@@ -99,8 +99,6 @@
         --login-server {{ headscale_domain }}
         --authkey {{ auth_key }}
         --accept-dns=true
-        --hostname={{ ansible_hostname }}
-        --reset
       register: tailscale_up_result
       changed_when: "'already authenticated' not in tailscale_up_result.stdout"
       failed_when: tailscale_up_result.rc != 0 and 'already authenticated' not in tailscale_up_result.stdout
@@ -109,37 +107,6 @@
       pause:
         seconds: 2

-    - name: Get node ID from headscale server
-      delegate_to: "{{ groups['lapy'][0] }}"
-      become: no
-      vars:
-        ssh_args: "{{ ('-i ' + headscale_key + ' ' if headscale_key else '') + '-p ' + headscale_port|string }}"
-      shell: >
-        ssh {{ ssh_args }}
-        {{ headscale_user }}@{{ headscale_host }}
-        "sudo headscale nodes list -o json"
-      register: nodes_list_result
-      changed_when: false
-      failed_when: nodes_list_result.rc != 0
-
-    - name: Extract node ID for this host
-      set_fact:
-        headscale_node_id: "{{ (nodes_list_result.stdout | from_json) | selectattr('given_name', 'equalto', ansible_hostname) | map(attribute='id') | first }}"
-      failed_when: headscale_node_id is not defined or headscale_node_id == ''
-
-    - name: Tag node with its hostname
-      delegate_to: "{{ groups['lapy'][0] }}"
-      become: no
-      vars:
-        ssh_args: "{{ ('-i ' + headscale_key + ' ' if headscale_key else '') + '-p ' + headscale_port|string }}"
-      shell: >
-        ssh {{ ssh_args }}
-        {{ headscale_user }}@{{ headscale_host }}
-        "sudo headscale nodes tag --tags tag:{{ ansible_hostname }} -i {{ headscale_node_id }}"
-      register: tag_result
-      changed_when: true
-      failed_when: tag_result.rc != 0
-
     - name: Display Tailscale status
       command: tailscale status
       register: tailscale_status
@@ -148,3 +115,10 @@
     - name: Show Tailscale connection status
       debug:
         msg: "{{ tailscale_status.stdout_lines }}"
+
+    - name: Deny all inbound traffic from Tailscale network interface
+      ufw:
+        rule: deny
+        direction: in
+        interface: tailscale0
+
diff --git a/ansible/infra_secrets.yml.example b/ansible/infra_secrets.yml.example
index cddc58a..2482160 100644
--- a/ansible/infra_secrets.yml.example
+++ b/ansible/infra_secrets.yml.example
@@ -22,7 +22,3 @@
 headscale_ui_password: "your_secure_password_here"
 bitcoin_rpc_user: "bitcoinrpc"
 bitcoin_rpc_password: "CHANGE_ME_TO_SECURE_PASSWORD"
-
-# Mempool MariaDB credentials
-# Used by: services/mempool/deploy_mempool_playbook.yml
-mariadb_mempool_password: "CHANGE_ME_TO_SECURE_PASSWORD"
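
Review note: with the pre-auth key expiry tightened from 10m to 1m, the key has to be consumed in the same play run that creates it. A minimal sketch of the consuming side, assuming the JSON printed by `headscale preauthkeys create --output json` carries the key in a top-level `key` field (task names here are illustrative, not part of the patch):

```yaml
# Sketch only - assumes preauth_key_result.stdout is the JSON object emitted by
# `headscale preauthkeys create --output json`, with the key under a `key` field.
- name: Extract pre-auth key from the create output
  set_fact:
    auth_key: "{{ (preauth_key_result.stdout | from_json).key }}"

# The join must follow immediately: a 1m expiration leaves no room for slow
# intermediate tasks between key creation and `tailscale up`.
- name: Join the mesh before the key expires
  command: >
    tailscale up
    --login-server {{ headscale_domain }}
    --authkey {{ auth_key }}
    --accept-dns=true
```
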
diff --git a/ansible/services/bitcoin-knots/bitcoin_knots_vars.yml b/ansible/services/bitcoin-knots/bitcoin_knots_vars.yml
index c9bd7ca..fc38c75 100644
--- a/ansible/services/bitcoin-knots/bitcoin_knots_vars.yml
+++ b/ansible/services/bitcoin-knots/bitcoin_knots_vars.yml
@@ -14,7 +14,8 @@ bitcoin_conf_dir: /etc/bitcoin
 # Network
 bitcoin_rpc_port: 8332
 bitcoin_p2p_port: 8333
-bitcoin_rpc_bind: "0.0.0.0"
+bitcoin_rpc_bind: "127.0.0.1"  # Security: localhost only
+bitcoin_tailscale_interface: tailscale0  # Tailscale interface for UFW rules

 # Build options
 bitcoin_build_jobs: 4  # Parallel build jobs (-j flag), adjust based on CPU cores
@@ -22,17 +23,10 @@ bitcoin_build_prefix: /usr/local

 # Configuration options
 bitcoin_enable_txindex: true  # Set to true if transaction index needed (REQUIRED for Electrum servers like Electrs/ElectrumX)
+bitcoin_enable_prune: false  # Set to prune amount (e.g., 550) to enable pruning, false for full node (MUST be false for Electrum servers)
 bitcoin_max_connections: 125
 # dbcache will be calculated as 90% of host RAM automatically in playbook

-# ZMQ Configuration
-bitcoin_zmq_enabled: true
-bitcoin_zmq_bind: "tcp://0.0.0.0"
-bitcoin_zmq_port_rawblock: 28332
-bitcoin_zmq_port_rawtx: 28333
-bitcoin_zmq_port_hashblock: 28334
-bitcoin_zmq_port_hashtx: 28335
-
 # Service user
 bitcoin_user: bitcoin
 bitcoin_group: bitcoin
diff --git a/ansible/services/bitcoin-knots/deploy_bitcoin_knots_playbook.yml b/ansible/services/bitcoin-knots/deploy_bitcoin_knots_playbook.yml
index 47c4f58..2c7cebb 100644
--- a/ansible/services/bitcoin-knots/deploy_bitcoin_knots_playbook.yml
+++ b/ansible/services/bitcoin-knots/deploy_bitcoin_knots_playbook.yml
@@ -137,6 +137,7 @@
       changed_when: false
       when: not bitcoind_binary_exists.stat.exists

+
     - name: Download SHA256SUMS file
       get_url:
         url: "https://bitcoinknots.org/files/{{ bitcoin_version_major }}.x/{{ bitcoin_knots_version_short }}/SHA256SUMS"
@@ -155,7 +156,7 @@
       command: gpg --verify /tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS.asc /tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS
       register: sha256sums_verification
       changed_when: false
-      failed_when: false  # Don't fail here - check for 'Good signature' in next task
+      failed_when: sha256sums_verification.rc != 0
       when: not bitcoind_binary_exists.stat.exists
@@ -259,7 +260,6 @@
         -DCMAKE_INSTALL_PREFIX={{ bitcoin_build_prefix }}
         -DBUILD_BITCOIN_WALLET=OFF
         -DCMAKE_BUILD_TYPE=Release
-        -DWITH_ZMQ=ON
         ..
       args:
         chdir: "{{ bitcoin_knots_source_dir }}/build"
@@ -267,15 +267,6 @@
       register: configure_result
       changed_when: true

-    - name: Verify CMake enabled ZMQ
-      shell: |
-        set -e
-        cd "{{ bitcoin_knots_source_dir }}/build"
-        cmake -LAH .. | grep -iE 'ZMQ|WITH_ZMQ|ENABLE_ZMQ|USE_ZMQ'
-      when: not bitcoind_binary_exists.stat.exists and cmake_exists.stat.exists | default(false)
-      register: zmq_check
-      changed_when: false
-
     - name: Fail if CMakeLists.txt not found
       fail:
         msg: "CMakeLists.txt not found in {{ bitcoin_knots_source_dir }}. Cannot build Bitcoin Knots."
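
Review note: the new `bitcoin_enable_prune` toggle can silently conflict with `bitcoin_enable_txindex` — bitcoind refuses to start with both `prune=N` and `txindex=1`. A hedged guard task like the following could fail fast at deploy time (task name is illustrative; the two variables are the ones defined in `bitcoin_knots_vars.yml` above):

```yaml
# Sketch only - bitcoin_enable_prune is either false or a prune target in MiB
# (e.g., 550), so a plain truthiness check is enough here.
- name: Fail early if pruning and txindex are both enabled
  assert:
    that:
      - not (bitcoin_enable_prune and bitcoin_enable_txindex)
    fail_msg: "txindex=1 is incompatible with prune=N; disable one of them."
```
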
@@ -345,7 +336,7 @@
         rpcpassword={{ bitcoin_rpc_password }}
         rpcbind={{ bitcoin_rpc_bind }}
         rpcport={{ bitcoin_rpc_port }}
-        rpcallowip=0.0.0.0/0
+        rpcallowip=127.0.0.1

         # Network Configuration
         listen=1
@@ -360,18 +351,15 @@
         txindex=1
         {% endif %}

-        # Logging (to journald via systemd)
-        logtimestamps=1
-        printtoconsole=1
-
-        # ZMQ Configuration
-        {% if bitcoin_zmq_enabled | default(false) %}
-        zmqpubrawblock={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_rawblock }}
-        zmqpubrawtx={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_rawtx }}
-        zmqpubhashblock={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_hashblock }}
-        zmqpubhashtx={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_hashtx }}
+        # Pruning (optional)
+        {% if bitcoin_enable_prune %}
+        prune={{ bitcoin_enable_prune }}
         {% endif %}

+        # Logging
+        logtimestamps=1
+        logfile={{ bitcoin_data_dir }}/debug.log
+
         # Security
         disablewallet=1
       owner: "{{ bitcoin_user }}"
@@ -439,6 +427,33 @@
       debug:
         msg: "Bitcoin Knots RPC is {{ 'available' if rpc_check.status == 200 else 'not yet available' }}"

+    - name: Allow Bitcoin P2P port on Tailscale interface only
+      ufw:
+        rule: allow
+        direction: in
+        port: "{{ bitcoin_p2p_port }}"
+        proto: tcp
+        interface: "{{ bitcoin_tailscale_interface }}"
+        comment: "Bitcoin Knots P2P (Tailscale only)"
+
+    - name: Allow Bitcoin P2P port (UDP) on Tailscale interface only
+      ufw:
+        rule: allow
+        direction: in
+        port: "{{ bitcoin_p2p_port }}"
+        proto: udp
+        interface: "{{ bitcoin_tailscale_interface }}"
+        comment: "Bitcoin Knots P2P UDP (Tailscale only)"
+
+    - name: Verify UFW rules for Bitcoin Knots
+      command: ufw status numbered
+      register: ufw_status
+      changed_when: false
+
+    - name: Display UFW status
+      debug:
+        msg: "{{ ufw_status.stdout_lines }}"
+
     - name: Create Bitcoin Knots health check and push script
       copy:
         dest: /usr/local/bin/bitcoin-knots-healthcheck-push.sh
@@ -465,12 +480,11 @@
             "http://${RPC_HOST}:${RPC_PORT}" 2>&1)

          if [ $? -eq 0 ]; then
-            # Check if response contains a non-null error
-            # Successful responses have "error": null, failures have "error": {...}
-            if echo "$response" | grep -q '"error":null\|"error": null'; then
-              return 0
-            else
+            # Check for a non-null error object (successful responses carry "error":null,
+            # so matching a bare '"error"' would flag every response as unhealthy)
+            if echo "$response" | grep -q '"error": *{'; then
               return 1
+            else
+              return 0
             fi
           else
             return 1
@@ -487,14 +501,11 @@
             return 1
           fi

-          # URL encode spaces in message
-          local encoded_msg="${msg// /%20}"
+          # URL encode the full message (curl prints the effective URL "/?<encoded>"; cut strips the leading "/?")
+          local encoded_msg=$(curl -Gso /dev/null -w '%{url_effective}' --data-urlencode "=$msg" "" | cut -c 3-)

-          if ! curl -s --max-time 10 --retry 2 -o /dev/null \
-            "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${encoded_msg}&ping="; then
-            echo "ERROR: Failed to push to Uptime Kuma"
-            return 1
-          fi
+          curl -s --max-time 10 --retry 2 -o /dev/null \
+            "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${encoded_msg}&ping=" || true
         }

         # Main health check
@@ -619,14 +630,14 @@
              if existing_monitor:
                  print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
-                  push_token = existing_monitor.get('pushToken') or existing_monitor.get('push_token')
-                  if not push_token:
-                      raise ValueError("Could not find push token for monitor")
-                  push_url = f"{url}/api/push/{push_token}"
+                  # Get push URL from the existing monitor; falling back to the
+                  # monitor id would build an invalid push URL, so keep the token
+                  push_id = existing_monitor.get('pushToken') or existing_monitor.get('push_token')
+                  push_url = f"{url}/api/push/{push_id}"
                  print(f"Push URL: {push_url}")
+                  print("Skipping - monitor already configured")
              else:
                  print(f"Creating push monitor '{monitor_name}'...")
-                  api.add_monitor(
+                  result = api.add_monitor(
                      type=MonitorType.PUSH,
                      name=monitor_name,
                      parent=group['id'],
@@ -635,13 +646,12 @@
                      retryInterval=60,
                      notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
                  )
+                  # Get push URL from the created monitor
                  monitors = api.get_monitors()
                  new_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
                  if new_monitor:
-                      push_token = new_monitor.get('pushToken') or new_monitor.get('push_token')
-                      if not push_token:
-                          raise ValueError("Could not find push token for new monitor")
-                      push_url = f"{url}/api/push/{push_token}"
+                      push_id = new_monitor.get('pushToken') or new_monitor.get('push_token')
+                      push_url = f"{url}/api/push/{push_id}"
                      print(f"Push URL: {push_url}")

              api.disconnect()
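
Review note: since the push now swallows curl failures (`|| true`), a one-shot manual run is the easiest way to validate the whole chain after deployment. A sketch, assuming the script path installed by the task above; the push URL shown is a placeholder, injected the same way the systemd unit does it:

```yaml
# Sketch only - exercises the health-check script once, as the timer would.
- name: Run the health-check script once and capture its output
  command: /usr/local/bin/bitcoin-knots-healthcheck-push.sh
  environment:
    UPTIME_KUMA_PUSH_URL: "https://uptime.example.org/api/push/PLACEHOLDER"
  register: healthcheck_run
  changed_when: false
  failed_when: false

- name: Show health-check output
  debug:
    msg: "{{ healthcheck_run.stdout_lines + healthcheck_run.stderr_lines }}"
```

diff --git a/ansible/services/fulcrum/deploy_fulcrum_playbook.yml b/ansible/services/fulcrum/deploy_fulcrum_playbook.yml
deleted file mode 100644
index 1f7f73d..0000000
--- a/ansible/services/fulcrum/deploy_fulcrum_playbook.yml
+++ /dev/null
@@ -1,475 +0,0 @@
-- name: Deploy Fulcrum Electrum Server
-  hosts: fulcrum_box_local
-  become: yes
-  vars_files:
-    - ../../infra_vars.yml
-    - ../../services_config.yml
-    - ../../infra_secrets.yml
-    - ./fulcrum_vars.yml
-  vars:
-    uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
-
-  tasks:
-    - name: Calculate 75% of system RAM for db_mem
-      set_fact:
-        fulcrum_db_mem_mb: "{{ (ansible_memtotal_mb | float * fulcrum_db_mem_percent) | int }}"
-      changed_when: false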
-
-    - name: Display calculated db_mem value
-      debug:
-        msg: "Setting db_mem to {{ fulcrum_db_mem_mb }} MB ({{ (fulcrum_db_mem_percent * 100) | int }}% of {{ ansible_memtotal_mb }} MB total RAM)"
-
-    - name: Display Fulcrum version to install
-      debug:
-        msg: "Installing Fulcrum version {{ fulcrum_version }}"
-
-    - name: Install required packages
-      apt:
-        name:
-          - curl
-          - wget
-          - openssl
-        state: present
-        update_cache: yes
-
-    - name: Create fulcrum group
-      group:
-        name: "{{ fulcrum_group }}"
-        system: yes
-        state: present
-
-    - name: Create fulcrum user
-      user:
-        name: "{{ fulcrum_user }}"
-        group: "{{ fulcrum_group }}"
-        system: yes
-        shell: /usr/sbin/nologin
-        home: /home/{{ fulcrum_user }}
-        create_home: yes
-        state: present
-
-    - name: Create Fulcrum database directory (heavy data on special mount)
-      file:
-        path: "{{ fulcrum_db_dir }}"
-        state: directory
-        owner: "{{ fulcrum_user }}"
-        group: "{{ fulcrum_group }}"
-        mode: '0755'
-
-    - name: Create Fulcrum config directory
-      file:
-        path: "{{ fulcrum_config_dir }}"
-        state: directory
-        owner: root
-        group: "{{ fulcrum_group }}"
-        mode: '0755'
-
-    - name: Create Fulcrum lib directory (for banner and other data files)
-      file:
-        path: "{{ fulcrum_lib_dir }}"
-        state: directory
-        owner: "{{ fulcrum_user }}"
-        group: "{{ fulcrum_group }}"
-        mode: '0755'
-
-    - name: Check if Fulcrum binary already exists
-      stat:
-        path: "{{ fulcrum_binary_path }}"
-      register: fulcrum_binary_exists
-      changed_when: false
-
-    - name: Download Fulcrum binary tarball
-      get_url:
-        url: "https://github.com/cculianu/Fulcrum/releases/download/v{{ fulcrum_version }}/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
-        dest: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
-        mode: '0644'
-      when: not fulcrum_binary_exists.stat.exists
-
-    - name: Extract Fulcrum binary
-      unarchive:
-        src: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
-        dest: "/tmp"
-        remote_src: yes
-      when: not fulcrum_binary_exists.stat.exists
-
-    - name: Install Fulcrum binary
-      copy:
-        src: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux/Fulcrum"
-        dest: "{{ fulcrum_binary_path }}"
-        owner: root
-        group: root
-        mode: '0755'
-        remote_src: yes
-      when: not fulcrum_binary_exists.stat.exists
-
-    - name: Verify Fulcrum binary installation
-      command: "{{ fulcrum_binary_path }} --version"
-      register: fulcrum_version_check
-      changed_when: false
-
-    - name: Display Fulcrum version
-      debug:
-        msg: "{{ fulcrum_version_check.stdout_lines }}"
-
-    - name: Create Fulcrum banner file
-      copy:
-        dest: "{{ fulcrum_lib_dir }}/fulcrum-banner.txt"
-        content: |
-          counterinfra
-
-          PER ASPERA AD ASTRA
-        owner: "{{ fulcrum_user }}"
-        group: "{{ fulcrum_group }}"
-        mode: '0644'
-
-    - name: Create Fulcrum configuration file
-      copy:
-        dest: "{{ fulcrum_config_dir }}/fulcrum.conf"
-        content: |
-          # Fulcrum Configuration
-          # Generated by Ansible
-
-          # Bitcoin Core/Knots RPC settings
-          bitcoind = {{ bitcoin_rpc_host }}:{{ bitcoin_rpc_port }}
-          rpcuser = {{ bitcoin_rpc_user }}
-          rpcpassword = {{ bitcoin_rpc_password }}
-
-          # Fulcrum server general settings
-          datadir = {{ fulcrum_db_dir }}
-          tcp = {{ fulcrum_tcp_bind }}:{{ fulcrum_tcp_port }}
-          peering = {{ 'true' if fulcrum_peering else 'false' }}
-          zmq_allow_hashtx = {{ 'true' if fulcrum_zmq_allow_hashtx else 'false' }}
-
-          # Anonymize client IP addresses and TxIDs in logs
-          anon_logs = {{ 'true' if fulcrum_anon_logs else 'false' }}
-
-          # Max RocksDB Memory in MiB
-          db_mem = {{ fulcrum_db_mem_mb }}.0
-
-          # Banner
-          banner = {{ fulcrum_lib_dir }}/fulcrum-banner.txt
-        owner: "{{ fulcrum_user }}"
-        group: "{{ fulcrum_group }}"
-        mode: '0640'
-      notify: Restart fulcrum
-
-    - name: Create systemd service file for Fulcrum
-      copy:
-        dest: /etc/systemd/system/fulcrum.service
-        content: |
-          # MiniBolt: systemd unit for Fulcrum
-          # /etc/systemd/system/fulcrum.service
-
-          [Unit]
-          Description=Fulcrum
-          After=network.target
-
-          StartLimitBurst=2
-          StartLimitIntervalSec=20
-
-          [Service]
-          ExecStart={{ fulcrum_binary_path }} {{ fulcrum_config_dir }}/fulcrum.conf
-
-          User={{ fulcrum_user }}
-          Group={{ fulcrum_group }}
-
-          # Process management
-          ####################
-          Type=simple
-          KillSignal=SIGINT
-          TimeoutStopSec=300
-
-          [Install]
-          WantedBy=multi-user.target
-        owner: root
-        group: root
-        mode: '0644'
-      notify: Restart fulcrum
-
-    - name: Reload systemd daemon
-      systemd:
-        daemon_reload: yes
-
-    - name: Enable and start Fulcrum service
-      systemd:
-        name: fulcrum
-        enabled: yes
-        state: started
-
-    - name: Wait for Fulcrum to start
-      wait_for:
-        port: "{{ fulcrum_tcp_port }}"
-        host: "{{ fulcrum_tcp_bind }}"
-        delay: 5
-        timeout: 30
-      ignore_errors: yes
-
-    - name: Check Fulcrum service status
-      systemd:
-        name: fulcrum
-      register: fulcrum_service_status
-      changed_when: false
-
-    - name: Display Fulcrum service status
-      debug:
-        msg: "Fulcrum service is {{ 'running' if fulcrum_service_status.status.ActiveState == 'active' else 'not running' }}"
-
-    - name: Create Fulcrum health check and push script
-      copy:
-        dest: /usr/local/bin/fulcrum-healthcheck-push.sh
-        content: |
-          #!/bin/bash
-          #
-          # Fulcrum Health Check and Push to Uptime Kuma
-          # Checks if Fulcrum TCP port is responding and pushes status to Uptime Kuma
-          #
-
-          FULCRUM_HOST="{{ fulcrum_tcp_bind }}"
-          FULCRUM_PORT={{ fulcrum_tcp_port }}
-          UPTIME_KUMA_PUSH_URL="${UPTIME_KUMA_PUSH_URL}"
-
-          # Check if Fulcrum TCP port is responding
-          check_fulcrum() {
-            # Try to connect to TCP port
-            timeout 5 bash -c "echo > /dev/tcp/${FULCRUM_HOST}/${FULCRUM_PORT}" 2>/dev/null
-            return $?
-          }
-
-          # Push status to Uptime Kuma
-          push_to_uptime_kuma() {
-            local status=$1
-            local msg=$2
-
-            if [ -z "$UPTIME_KUMA_PUSH_URL" ]; then
-              echo "ERROR: UPTIME_KUMA_PUSH_URL not set"
-              return 1
-            fi
-
-            # URL encode the message
-            local encoded_msg=$(echo -n "$msg" | curl -Gso /dev/null -w %{url_effective} --data-urlencode "msg=$msg" "" | cut -c 3-)
-
-            curl -s --max-time 10 --retry 2 -o /dev/null \
-              "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${encoded_msg}&ping=" || true
-          }
-
-          # Main health check
-          if check_fulcrum; then
-            push_to_uptime_kuma "up" "OK"
-            exit 0
-          else
-            push_to_uptime_kuma "down" "Fulcrum TCP port not responding"
-            exit 1
-          fi
-        owner: root
-        group: root
-        mode: '0755'
-
-    - name: Create systemd timer for Fulcrum health check
-      copy:
-        dest: /etc/systemd/system/fulcrum-healthcheck.timer
-        content: |
-          [Unit]
-          Description=Fulcrum Health Check Timer
-          Requires=fulcrum.service
-
-          [Timer]
-          OnBootSec=1min
-          OnUnitActiveSec=1min
-          Persistent=true
-
-          [Install]
-          WantedBy=timers.target
-        owner: root
-        group: root
-        mode: '0644'
-
-    - name: Create systemd service for Fulcrum health check
-      copy:
-        dest: /etc/systemd/system/fulcrum-healthcheck.service
-        content: |
-          [Unit]
-          Description=Fulcrum Health Check and Push to Uptime Kuma
-          After=network.target fulcrum.service
-
-          [Service]
-          Type=oneshot
-          User=root
-          ExecStart=/usr/local/bin/fulcrum-healthcheck-push.sh
-          Environment=UPTIME_KUMA_PUSH_URL=
-          StandardOutput=journal
-          StandardError=journal
-
-          [Install]
-          WantedBy=multi-user.target
-        owner: root
-        group: root
-        mode: '0644'
-
-    - name: Reload systemd daemon for health check
-      systemd:
-        daemon_reload: yes
-
-    - name: Enable and start Fulcrum health check timer
-      systemd:
-        name: fulcrum-healthcheck.timer
-        enabled: yes
-        state: started
-
-    - name: Create Uptime Kuma push monitor setup script for Fulcrum
-      delegate_to: localhost
-      become: no
-      copy:
-        dest: /tmp/setup_fulcrum_monitor.py
-        content: |
-          #!/usr/bin/env python3
-          import sys
-          import traceback
-          import yaml
-          from uptime_kuma_api import UptimeKumaApi, MonitorType
-
-          try:
-              # Load configs
-              with open('/tmp/ansible_config.yml', 'r') as f:
-                  config = yaml.safe_load(f)
-
-              url = config['uptime_kuma_url']
-              username = config['username']
-              password = config['password']
-              monitor_name = config['monitor_name']
-
-              # Connect to Uptime Kuma
-              api = UptimeKumaApi(url, timeout=30)
-              api.login(username, password)
-
-              # Get all monitors
-              monitors = api.get_monitors()
-
-              # Find or create "services" group
-              group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
-              if not group:
-                  group_result = api.add_monitor(type='group', name='services')
-                  # Refresh to get the group with id
-                  monitors = api.get_monitors()
-                  group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
-
-              # Check if monitor already exists
-              existing_monitor = None
-              for monitor in monitors:
-                  if monitor.get('name') == monitor_name:
-                      existing_monitor = monitor
-                      break
-
-              # Get ntfy notification ID
-              notifications = api.get_notifications()
-              ntfy_notification_id = None
-              for notif in notifications:
-                  if notif.get('type') == 'ntfy':
-                      ntfy_notification_id = notif.get('id')
-                      break
-
-              if existing_monitor:
-                  print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
-                  # Get push URL from existing monitor
-                  push_id = existing_monitor.get('push_token', existing_monitor.get('id'))
-                  push_url = f"{url}/api/push/{push_id}"
-                  print(f"Push URL: {push_url}")
-                  print("Skipping - monitor already configured")
-              else:
-                  print(f"Creating push monitor '{monitor_name}'...")
-                  result = api.add_monitor(
-                      type=MonitorType.PUSH,
-                      name=monitor_name,
-                      parent=group['id'],
-                      interval=60,
-                      maxretries=3,
-                      retryInterval=60,
-                      notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
-                  )
-                  # Get push URL from created monitor
-                  monitors = api.get_monitors()
-                  new_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
-                  if new_monitor:
-                      push_id = new_monitor.get('push_token', new_monitor.get('id'))
-                      push_url = f"{url}/api/push/{push_id}"
-                      print(f"Push URL: {push_url}")
-
-              api.disconnect()
-              print("SUCCESS")
-
-          except Exception as e:
-              error_msg = str(e) if str(e) else repr(e)
-              print(f"ERROR: {error_msg}", file=sys.stderr)
-              traceback.print_exc(file=sys.stderr)
-              sys.exit(1)
-        mode: '0755'
-
-    - name: Create temporary config for monitor setup
-      delegate_to: localhost
-      become: no
-      copy:
-        dest: /tmp/ansible_config.yml
-        content: |
-          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
-          username: "{{ uptime_kuma_username }}"
-          password: "{{ uptime_kuma_password }}"
-          monitor_name: "Fulcrum"
-        mode: '0644'
-
-    - name: Run Uptime Kuma push monitor setup
-      command: python3 /tmp/setup_fulcrum_monitor.py
-      delegate_to: localhost
-      become: no
-      register: monitor_setup
-      changed_when: "'SUCCESS' in monitor_setup.stdout"
-      ignore_errors: yes
-
-    - name: Extract push URL from monitor setup output
-      set_fact:
-        uptime_kuma_push_url: "{{ monitor_setup.stdout | regex_search('Push URL: (https?://[^\\s]+)', '\\1') | first | default('') }}"
-      delegate_to: localhost
-      become: no
-      when: monitor_setup.stdout is defined
-
-    - name: Display extracted push URL
-      debug:
-        msg: "Uptime Kuma Push URL: {{ uptime_kuma_push_url }}"
-      when: uptime_kuma_push_url | default('') != ''
-
-    - name: Set push URL in systemd service environment
-      lineinfile:
-        path: /etc/systemd/system/fulcrum-healthcheck.service
-        regexp: '^Environment=UPTIME_KUMA_PUSH_URL='
-        line: "Environment=UPTIME_KUMA_PUSH_URL={{ uptime_kuma_push_url }}"
-        state: present
-        insertafter: '^\[Service\]'
-      when: uptime_kuma_push_url | default('') != ''
-
-    - name: Reload systemd daemon after push URL update
-      systemd:
-        daemon_reload: yes
-      when: uptime_kuma_push_url | default('') != ''
-
-    - name: Restart health check timer to pick up new environment
-      systemd:
-        name: fulcrum-healthcheck.timer
-        state: restarted
-      when: uptime_kuma_push_url | default('') != ''
-
-    - name: Clean up temporary files
-      delegate_to: localhost
-      become: no
-      file:
-        path: "{{ item }}"
-        state: absent
-      loop:
-        - /tmp/setup_fulcrum_monitor.py
-        - /tmp/ansible_config.yml
-        - /tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz
-        - /tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux
-
-  handlers:
-    - name: Restart fulcrum
-      systemd:
-        name: fulcrum
-        state: restarted
-
diff --git a/ansible/services/fulcrum/fulcrum_vars.yml b/ansible/services/fulcrum/fulcrum_vars.yml
deleted file mode 100644
index 2be35d6..0000000
--- a/ansible/services/fulcrum/fulcrum_vars.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-# Fulcrum Configuration Variables
-
-# Version - Pinned to specific release
-fulcrum_version: "2.1.0"  # Fulcrum version to install
-
-# Directories
-fulcrum_db_dir: /mnt/fulcrum_data/fulcrum_db  # Database directory (heavy data on special mount)
-fulcrum_config_dir: /etc/fulcrum  # Config file location (standard OS path)
-fulcrum_lib_dir: /var/lib/fulcrum  # Other data files (banner, etc.) on OS disk
-fulcrum_binary_path: /usr/local/bin/Fulcrum
-
-# Network - Bitcoin RPC connection
-# Bitcoin Knots is on a different host (knots_box_local)
-# Using RPC user/password authentication (credentials from infra_secrets.yml)
-bitcoin_rpc_host: "192.168.1.140"  # Bitcoin Knots RPC host (IP of knots_box_local)
-bitcoin_rpc_port: 8332  # Bitcoin Knots RPC port
-# Note: bitcoin_rpc_user and bitcoin_rpc_password are loaded from infra_secrets.yml
-
-# Network - Fulcrum server
-fulcrum_tcp_port: 50001
-# Binding address for Fulcrum TCP server:
-#   - "127.0.0.1" = localhost only (use when Caddy is on the same box)
-#   - "0.0.0.0" = all interfaces (use when Caddy is on a different box)
-#   - Specific IP = bind to specific network interface
-fulcrum_tcp_bind: "0.0.0.0"  # Default: localhost (change to "0.0.0.0" if Caddy is on different box)
-# If Caddy is on a different box, set this to the IP address that Caddy will use to connect
-
-# Performance
-# db_mem will be calculated as 75% of available RAM automatically in playbook
-fulcrum_db_mem_percent: 0.75  # 75% of RAM for database cache
-
-# Configuration options
-fulcrum_anon_logs: true  # Anonymize client IPs and TxIDs in logs
-fulcrum_peering: false  # Disable peering with other Fulcrum servers
-fulcrum_zmq_allow_hashtx: true  # Allow ZMQ hashtx notifications
-
-# Service user
-fulcrum_user: fulcrum
-fulcrum_group: fulcrum
-
diff --git a/ansible/services/headscale/deploy_headscale_playbook.yml b/ansible/services/headscale/deploy_headscale_playbook.yml
index 1bcf5bf..e8a2b37 100644
--- a/ansible/services/headscale/deploy_headscale_playbook.yml
+++ b/ansible/services/headscale/deploy_headscale_playbook.yml
@@ -90,7 +90,13 @@
       copy:
         dest: /etc/headscale/acl.json
         content: |
-          {}
+          {
+            "ACLs": [],
+            "Groups": {},
+            "Hosts": {},
+            "TagOwners": {},
+            "Tests": []
+          }
         owner: headscale
         group: headscale
         mode: '0640'
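
Review note: an empty `"ACLs"` list is not a no-op — once headscale enforces the policy file, it denies all traffic between nodes. If the intent is explicit structure with permissive behaviour, a minimal allow-all rule could look like the sketch below (same copy task as above, mirroring the patch's own key names; the rule itself is an assumption about intent, not part of the patch):

```yaml
# Sketch only - one allow-all ACL entry so the mesh stays reachable while the
# policy file gains explicit structure.
- name: Create headscale ACL policy (allow-all)
  copy:
    dest: /etc/headscale/acl.json
    content: |
      {
        "ACLs": [
          {"action": "accept", "src": ["*"], "dst": ["*:*"]}
        ],
        "Groups": {},
        "Hosts": {},
        "TagOwners": {},
        "Tests": []
      }
    owner: headscale
    group: headscale
    mode: '0640'
```
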
diff --git a/ansible/services/headscale/deploy_headscale_ui_playbook.yml b/ansible/services/headscale/deploy_headscale_ui_playbook.yml
new file mode 100644
index 0000000..3be792c
--- /dev/null
+++ b/ansible/services/headscale/deploy_headscale_ui_playbook.yml
@@ -0,0 +1,142 @@
+- name: Deploy headscale-ui with Docker and configure Caddy reverse proxy
+  hosts: spacey
+  become: yes
+  vars_files:
+    - ../../infra_vars.yml
+    - ../../services_config.yml
+    - ../../infra_secrets.yml
+    - ./headscale_vars.yml
+  vars:
+    headscale_subdomain: "{{ subdomains.headscale }}"
+    # caddy_sites_dir is inherited from infra_vars.yml; re-declaring it here as
+    # caddy_sites_dir: "{{ caddy_sites_dir }}" would be a recursive definition
+    headscale_domain: "{{ headscale_subdomain }}.{{ root_domain }}"
+    headscale_ui_version: "2025.08.23"
+    headscale_ui_dir: /opt/headscale-ui
+    headscale_ui_http_port: 18080
+    headscale_ui_https_port: 18443
+
+  tasks:
+    - name: Check if Docker is installed
+      command: docker --version
+      register: docker_check
+      changed_when: false
+      failed_when: false
+
+    - name: Fail if Docker is not installed
+      fail:
+        msg: "Docker is not installed. Please run the docker_playbook.yml first."
+      when: docker_check.rc != 0
+
+    - name: Ensure Docker service is running
+      systemd:
+        name: docker
+        state: started
+        enabled: yes
+
+    - name: Create headscale-ui directory
+      file:
+        path: "{{ headscale_ui_dir }}"
+        state: directory
+        owner: root
+        group: root
+        mode: '0755'
+
+    - name: Create docker-compose.yml for headscale-ui
+      copy:
+        dest: "{{ headscale_ui_dir }}/docker-compose.yml"
+        content: |
+          version: "3"
+          services:
+            headscale-ui:
+              image: ghcr.io/gurucomputing/headscale-ui:{{ headscale_ui_version }}
+              container_name: headscale-ui
+              restart: unless-stopped
+              ports:
+                - "{{ headscale_ui_http_port }}:8080"
+                - "{{ headscale_ui_https_port }}:8443"
+        owner: root
+        group: root
+        mode: '0644'
+
+    - name: Deploy headscale-ui container with docker compose
+      command: docker compose up -d
+      args:
+        chdir: "{{ headscale_ui_dir }}"
+      register: docker_compose_result
+      changed_when: "'Creating' in docker_compose_result.stdout or 'Starting' in docker_compose_result.stdout or docker_compose_result.rc != 0"
+
+    - name: Wait for headscale-ui to be ready
+      uri:
+        url: "http://localhost:{{ headscale_ui_http_port }}"
+        status_code: [200, 404]
+      register: headscale_ui_ready
+      until: headscale_ui_ready.status in [200, 404]
+      retries: 30
+      delay: 2
+      ignore_errors: yes
+
+    - name: Ensure Caddy sites-enabled directory exists
+      file:
+        path: "{{ caddy_sites_dir }}"
+        state: directory
+        owner: root
+        group: root
+        mode: '0755'
+
+    - name: Ensure Caddyfile includes import directive for sites-enabled
+      lineinfile:
+        path: /etc/caddy/Caddyfile
+        line: 'import sites-enabled/*'
+        insertafter: EOF
+        state: present
+        backup: yes
+
+    - name: Fail if username is not provided
+      fail:
+        msg: "headscale_ui_username must be set in infra_secrets.yml"
+      when: headscale_ui_username is not defined
+
+    - name: Fail if neither password nor password hash is provided
+      fail:
+        msg: "Either headscale_ui_password or headscale_ui_password_hash must be set in infra_secrets.yml"
+      when: headscale_ui_password is not defined and headscale_ui_password_hash is not defined
+
+    - name: Generate bcrypt hash for headscale-ui password
+      become: yes
+      command: caddy hash-password --plaintext "{{ headscale_ui_password }}"
+      register: headscale_ui_password_hash_result
+      changed_when: false
+      no_log: true
+      when: headscale_ui_password is defined and headscale_ui_password_hash is not defined
+
+    - name: Set headscale-ui password hash from generated value
+      set_fact:
+        headscale_ui_password_hash: "{{ headscale_ui_password_hash_result.stdout.strip() }}"
+      when: headscale_ui_password is defined and headscale_ui_password_hash is not defined
+
+    - name: Update headscale Caddy config to include headscale-ui /web route with authentication
+      become: yes
+      copy:
+        dest: "{{ caddy_sites_dir }}/headscale.conf"
+        content: |
+          {{ headscale_domain }} {
+            @headscale_ui {
+              path /web*
+            }
+            handle @headscale_ui {
+              basicauth {
+                {{ headscale_ui_username }} {{ headscale_ui_password_hash }}
+              }
+              reverse_proxy http://localhost:{{ headscale_ui_http_port }}
+            }
+            # Headscale API is protected by its own API key authentication
+            # All API operations require a valid Bearer token in the Authorization header
+            reverse_proxy * http://localhost:{{ headscale_port }}
+          }
+        owner: root
+        group: root
+        mode: '0644'
+
+    - name: Reload Caddy to apply new config
+      command: systemctl reload caddy
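
Review note: the playbook reloads Caddy but never verifies that the `/web` route actually enforces basic auth. A post-deploy smoke test could look like this sketch — it reuses the playbook's own variables and assumes the controller can resolve `headscale_domain`:

```yaml
# Sketch only - expects Caddy to answer 401 without credentials and succeed
# with the configured user/password pair.
- name: Expect 401 on /web without credentials
  uri:
    url: "https://{{ headscale_domain }}/web/"
    status_code: 401

- name: Expect success on /web with credentials
  uri:
    url: "https://{{ headscale_domain }}/web/"
    user: "{{ headscale_ui_username }}"
    password: "{{ headscale_ui_password }}"
    force_basic_auth: yes
    status_code: [200, 301, 302]
```
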