This commit is contained in:
counterweight 2025-12-08 10:34:04 +01:00
parent c14d61d090
commit 0b578ee738
Signed by: counterweight
GPG key ID: 883EDBAA726BD96C
11 changed files with 779 additions and 929 deletions

View file

@@ -1,5 +1,5 @@
- name: Secure Debian VPS
hosts: vps
- name: Secure Debian
hosts: all
vars_files:
- ../infra_vars.yml
become: true

View file

@@ -1,5 +1,5 @@
- name: Secure Debian VPS
hosts: vps
- name: Secure Debian
hosts: all
vars_files:
- ../infra_vars.yml
become: true

View file

@@ -99,7 +99,6 @@
--login-server {{ headscale_domain }}
--authkey {{ auth_key }}
--accept-dns=true
--advertise-tags "tag:{{ inventory_hostname }}"
register: tailscale_up_result
changed_when: "'already authenticated' not in tailscale_up_result.stdout"
failed_when: tailscale_up_result.rc != 0 and 'already authenticated' not in tailscale_up_result.stdout
@@ -117,3 +116,9 @@
debug:
msg: "{{ tailscale_status.stdout_lines }}"
- name: Deny all inbound traffic from Tailscale network interface
ufw:
rule: deny
direction: in
interface: tailscale0

View file

@@ -19,3 +19,6 @@ ntfy_password: "your_ntfy_password"
headscale_ui_username: "admin"
headscale_ui_password: "your_secure_password_here"
# headscale_ui_password_hash: "$2a$14$..." # Optional: pre-hashed password
bitcoin_rpc_user: "bitcoinrpc"
bitcoin_rpc_password: "CHANGE_ME_TO_SECURE_PASSWORD"

View file

@@ -0,0 +1,32 @@
# Bitcoin Knots Configuration Variables
# Version - REQUIRED: Specify exact version/tag to build
bitcoin_knots_version: "v29.2.knots20251110" # Must specify exact version/tag
bitcoin_knots_version_short: "29.2.knots20251110" # Version without 'v' prefix (for tarball URLs)
# Directories
bitcoin_knots_dir: /opt/bitcoin-knots
bitcoin_knots_source_dir: "{{ bitcoin_knots_dir }}/source"
bitcoin_data_dir: /var/lib/bitcoin # Standard location for config, logs, wallets
bitcoin_large_data_dir: /mnt/knots_data # Custom location for blockchain data (blocks, chainstate)
bitcoin_conf_dir: /etc/bitcoin
# Network
bitcoin_rpc_port: 8332
bitcoin_p2p_port: 8333
bitcoin_rpc_bind: "127.0.0.1" # Security: localhost only
bitcoin_tailscale_interface: tailscale0 # Tailscale interface for UFW rules
# Build options
bitcoin_build_jobs: 4 # Parallel build jobs (-j flag), adjust based on CPU cores
bitcoin_build_prefix: /usr/local
# Configuration options
bitcoin_enable_txindex: true # Set to true if transaction index needed (REQUIRED for Electrum servers like Electrs/ElectrumX)
bitcoin_enable_prune: false # Set to prune amount (e.g., 550) to enable pruning, false for full node (MUST be false for Electrum servers)
bitcoin_max_connections: 125
# dbcache will be calculated as 90% of host RAM automatically in playbook
# Service user
bitcoin_user: bitcoin
bitcoin_group: bitcoin

View file

@@ -0,0 +1,734 @@
- name: Build and Deploy Bitcoin Knots from Source
hosts: knots_box_local
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./bitcoin_knots_vars.yml
vars:
bitcoin_repo_url: "https://github.com/bitcoinknots/bitcoin.git"
bitcoin_sigs_base_url: "https://raw.githubusercontent.com/bitcoinknots/guix.sigs/knots"
bitcoin_version_major: "{{ bitcoin_knots_version_short | regex_replace('^(\\d+)\\..*', '\\1') }}"
bitcoin_source_tarball_url: "https://bitcoinknots.org/files/{{ bitcoin_version_major }}.x/{{ bitcoin_knots_version_short }}/bitcoin-{{ bitcoin_knots_version_short }}.tar.gz"
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
tasks:
- name: Calculate 90% of system RAM for dbcache
set_fact:
bitcoin_dbcache_mb: "{{ (ansible_memtotal_mb | float * 0.9) | int }}"
changed_when: false
- name: Display calculated dbcache value
debug:
msg: "Setting dbcache to {{ bitcoin_dbcache_mb }} MB (90% of {{ ansible_memtotal_mb }} MB total RAM)"
- name: Install build dependencies
apt:
name:
- build-essential
- libtool
- autotools-dev
- automake
- pkg-config
- bsdmainutils
- python3
- python3-pip
- libevent-dev
- libboost-system-dev
- libboost-filesystem-dev
- libboost-test-dev
- libboost-thread-dev
- libboost-chrono-dev
- libboost-program-options-dev
- libboost-dev
- libssl-dev
- libdb-dev
- libminiupnpc-dev
- libzmq3-dev
- libnatpmp-dev
- libsqlite3-dev
- git
- curl
- wget
- cmake
state: present
update_cache: yes
- name: Create bitcoin group
group:
name: "{{ bitcoin_group }}"
system: yes
state: present
- name: Create bitcoin user
user:
name: "{{ bitcoin_user }}"
group: "{{ bitcoin_group }}"
system: yes
shell: /usr/sbin/nologin
home: "{{ bitcoin_data_dir }}"
create_home: yes
state: present
- name: Create bitcoin-knots directory
file:
path: "{{ bitcoin_knots_dir }}"
state: directory
owner: root
group: root
mode: '0755'
- name: Create bitcoin-knots source directory
file:
path: "{{ bitcoin_knots_source_dir }}"
state: directory
owner: root
group: root
mode: '0755'
- name: Create bitcoin data directory (for config, logs, wallets)
file:
path: "{{ bitcoin_data_dir }}"
state: directory
owner: "{{ bitcoin_user }}"
group: "{{ bitcoin_group }}"
mode: '0750'
- name: Create bitcoin large data directory (for blockchain)
file:
path: "{{ bitcoin_large_data_dir }}"
state: directory
owner: "{{ bitcoin_user }}"
group: "{{ bitcoin_group }}"
mode: '0750'
- name: Create bitcoin config directory
file:
path: "{{ bitcoin_conf_dir }}"
state: directory
owner: root
group: root
mode: '0755'
- name: Check if bitcoind binary already exists
stat:
path: "{{ bitcoin_build_prefix }}/bin/bitcoind"
register: bitcoind_binary_exists
changed_when: false
- name: Install gnupg for signature verification
apt:
name: gnupg
state: present
when: not bitcoind_binary_exists.stat.exists
- name: Import Luke Dashjr's Bitcoin Knots signing key
command: gpg --keyserver hkps://keyserver.ubuntu.com --recv-keys 90C8019E36C2E964
register: key_import
changed_when: "'already in secret keyring' not in key_import.stdout and 'already in public keyring' not in key_import.stdout"
when: not bitcoind_binary_exists.stat.exists
failed_when: key_import.rc != 0
- name: Display imported key fingerprint
command: gpg --fingerprint 90C8019E36C2E964
register: key_fingerprint
changed_when: false
when: not bitcoind_binary_exists.stat.exists
- name: Download SHA256SUMS file
get_url:
url: "https://bitcoinknots.org/files/{{ bitcoin_version_major }}.x/{{ bitcoin_knots_version_short }}/SHA256SUMS"
dest: "/tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS"
mode: '0644'
when: not bitcoind_binary_exists.stat.exists
- name: Download SHA256SUMS.asc signature file
get_url:
url: "https://bitcoinknots.org/files/{{ bitcoin_version_major }}.x/{{ bitcoin_knots_version_short }}/SHA256SUMS.asc"
dest: "/tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS.asc"
mode: '0644'
when: not bitcoind_binary_exists.stat.exists
- name: Verify PGP signature on SHA256SUMS file
command: gpg --verify /tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS.asc /tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS
register: sha256sums_verification
changed_when: false
failed_when: sha256sums_verification.rc != 0
when: not bitcoind_binary_exists.stat.exists
- name: Display SHA256SUMS verification result
debug:
msg: "{{ sha256sums_verification.stdout_lines + sha256sums_verification.stderr_lines }}"
when: not bitcoind_binary_exists.stat.exists
- name: Fail if SHA256SUMS signature verification failed
fail:
msg: "SHA256SUMS signature verification failed. Aborting build."
when: not bitcoind_binary_exists.stat.exists and ('Good signature' not in sha256sums_verification.stdout and 'Good signature' not in sha256sums_verification.stderr)
- name: Remove any existing tarball to force fresh download
file:
path: /tmp/bitcoin-{{ bitcoin_knots_version_short }}.tar.gz
state: absent
when: not bitcoind_binary_exists.stat.exists
- name: Download Bitcoin Knots source tarball
get_url:
url: "{{ bitcoin_source_tarball_url }}"
dest: "/tmp/bitcoin-{{ bitcoin_knots_version_short }}.tar.gz"
mode: '0644'
validate_certs: yes
force: yes
when: not bitcoind_binary_exists.stat.exists
- name: Calculate SHA256 checksum of downloaded tarball
command: sha256sum /tmp/bitcoin-{{ bitcoin_knots_version_short }}.tar.gz
register: tarball_checksum
changed_when: false
when: not bitcoind_binary_exists.stat.exists
- name: Extract expected checksum from SHA256SUMS file
shell: grep "bitcoin-{{ bitcoin_knots_version_short }}.tar.gz" /tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS | awk '{print $1}'
register: expected_checksum
changed_when: false
when: not bitcoind_binary_exists.stat.exists
failed_when: expected_checksum.stdout == ""
- name: Display checksum comparison
debug:
msg:
- "Expected: {{ expected_checksum.stdout | trim }}"
- "Actual: {{ tarball_checksum.stdout.split()[0] }}"
when: not bitcoind_binary_exists.stat.exists
- name: Verify tarball checksum matches SHA256SUMS
fail:
msg: "Tarball checksum mismatch! Expected {{ expected_checksum.stdout | trim }}, got {{ tarball_checksum.stdout.split()[0] }}"
when: not bitcoind_binary_exists.stat.exists and expected_checksum.stdout | trim != tarball_checksum.stdout.split()[0]
- name: Remove existing source directory if it exists (to force fresh extraction)
file:
path: "{{ bitcoin_knots_source_dir }}"
state: absent
when: not bitcoind_binary_exists.stat.exists
- name: Remove extracted directory if it exists (from previous runs)
file:
path: "{{ bitcoin_knots_dir }}/bitcoin-{{ bitcoin_knots_version_short }}"
state: absent
when: not bitcoind_binary_exists.stat.exists
- name: Extract verified source tarball
unarchive:
src: /tmp/bitcoin-{{ bitcoin_knots_version_short }}.tar.gz
dest: "{{ bitcoin_knots_dir }}"
remote_src: yes
when: not bitcoind_binary_exists.stat.exists
- name: Check if extracted directory exists
stat:
path: "{{ bitcoin_knots_dir }}/bitcoin-{{ bitcoin_knots_version_short }}"
register: extracted_dir_stat
changed_when: false
when: not bitcoind_binary_exists.stat.exists
- name: Rename extracted directory to expected name
command: mv "{{ bitcoin_knots_dir }}/bitcoin-{{ bitcoin_knots_version_short }}" "{{ bitcoin_knots_source_dir }}"
when: not bitcoind_binary_exists.stat.exists and extracted_dir_stat.stat.exists
- name: Check if CMakeLists.txt exists
stat:
path: "{{ bitcoin_knots_source_dir }}/CMakeLists.txt"
register: cmake_exists
changed_when: false
when: not bitcoind_binary_exists.stat.exists
- name: Create CMake build directory
file:
path: "{{ bitcoin_knots_source_dir }}/build"
state: directory
mode: '0755'
when: not bitcoind_binary_exists.stat.exists and cmake_exists.stat.exists | default(false)
- name: Configure Bitcoin Knots build with CMake
command: >
cmake
-DCMAKE_INSTALL_PREFIX={{ bitcoin_build_prefix }}
-DBUILD_BITCOIN_WALLET=OFF
-DCMAKE_BUILD_TYPE=Release
..
args:
chdir: "{{ bitcoin_knots_source_dir }}/build"
when: not bitcoind_binary_exists.stat.exists and cmake_exists.stat.exists | default(false)
register: configure_result
changed_when: true
- name: Fail if CMakeLists.txt not found
fail:
msg: "CMakeLists.txt not found in {{ bitcoin_knots_source_dir }}. Cannot build Bitcoin Knots."
when: not bitcoind_binary_exists.stat.exists and not (cmake_exists.stat.exists | default(false))
- name: Build Bitcoin Knots with CMake (this may take 30-60+ minutes)
command: cmake --build . -j{{ bitcoin_build_jobs }}
args:
chdir: "{{ bitcoin_knots_source_dir }}/build"
when: not bitcoind_binary_exists.stat.exists and cmake_exists.stat.exists | default(false)
async: 3600
poll: 0
register: build_result
changed_when: true
- name: Check build status
async_status:
jid: "{{ build_result.ansible_job_id }}"
register: build_job_result
until: build_job_result.finished
retries: 120
delay: 60
when: not bitcoind_binary_exists.stat.exists and build_result.ansible_job_id is defined
- name: Fail if build failed
fail:
msg: "Bitcoin Knots build failed: {{ build_job_result.msg }}"
when: not bitcoind_binary_exists.stat.exists and build_result.ansible_job_id is defined and build_job_result.failed | default(false)
- name: Install Bitcoin Knots binaries
command: cmake --install .
args:
chdir: "{{ bitcoin_knots_source_dir }}/build"
when: not bitcoind_binary_exists.stat.exists and cmake_exists.stat.exists | default(false)
changed_when: true
- name: Verify bitcoind binary exists
stat:
path: "{{ bitcoin_build_prefix }}/bin/bitcoind"
register: bitcoind_installed
changed_when: false
- name: Verify bitcoin-cli binary exists
stat:
path: "{{ bitcoin_build_prefix }}/bin/bitcoin-cli"
register: bitcoin_cli_installed
changed_when: false
- name: Fail if binaries not found
fail:
msg: "Bitcoin Knots binaries not found after installation"
when: not bitcoind_installed.stat.exists or not bitcoin_cli_installed.stat.exists
- name: Create bitcoin.conf configuration file
copy:
dest: "{{ bitcoin_conf_dir }}/bitcoin.conf"
content: |
# Bitcoin Knots Configuration
# Generated by Ansible
# Data directory (blockchain storage)
datadir={{ bitcoin_large_data_dir }}
# RPC Configuration
server=1
rpcuser={{ bitcoin_rpc_user }}
rpcpassword={{ bitcoin_rpc_password }}
rpcbind={{ bitcoin_rpc_bind }}
rpcport={{ bitcoin_rpc_port }}
rpcallowip=127.0.0.1
# Network Configuration
listen=1
port={{ bitcoin_p2p_port }}
maxconnections={{ bitcoin_max_connections }}
# Performance
dbcache={{ bitcoin_dbcache_mb }}
# Transaction Index (optional)
{% if bitcoin_enable_txindex %}
txindex=1
{% endif %}
# Pruning (optional)
{% if bitcoin_enable_prune %}
prune={{ bitcoin_enable_prune }}
{% endif %}
# Logging
logtimestamps=1
logfile={{ bitcoin_data_dir }}/debug.log
# Security
disablewallet=1
owner: "{{ bitcoin_user }}"
group: "{{ bitcoin_group }}"
mode: '0640'
notify: Restart bitcoind
- name: Create systemd service file for bitcoind
copy:
dest: /etc/systemd/system/bitcoind.service
content: |
[Unit]
Description=Bitcoin Knots daemon
After=network.target
[Service]
Type=simple
User={{ bitcoin_user }}
Group={{ bitcoin_group }}
ExecStart={{ bitcoin_build_prefix }}/bin/bitcoind -conf={{ bitcoin_conf_dir }}/bitcoin.conf
Restart=always
RestartSec=10
TimeoutStopSec=600
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
owner: root
group: root
mode: '0644'
notify: Restart bitcoind
- name: Reload systemd daemon
systemd:
daemon_reload: yes
- name: Enable and start bitcoind service
systemd:
name: bitcoind
enabled: yes
state: started
- name: Wait for bitcoind RPC to be available
uri:
url: "http://{{ bitcoin_rpc_bind }}:{{ bitcoin_rpc_port }}"
method: POST
body_format: json
body:
jsonrpc: "1.0"
id: "healthcheck"
method: "getblockchaininfo"
params: []
user: "{{ bitcoin_rpc_user }}"
password: "{{ bitcoin_rpc_password }}"
status_code: 200
timeout: 10
register: rpc_check
until: rpc_check.status == 200
retries: 30
delay: 5
ignore_errors: yes
- name: Display RPC connection status
debug:
msg: "Bitcoin Knots RPC is {{ 'available' if rpc_check.status == 200 else 'not yet available' }}"
- name: Allow Bitcoin P2P port on Tailscale interface only
ufw:
rule: allow
direction: in
port: "{{ bitcoin_p2p_port }}"
proto: tcp
interface: "{{ bitcoin_tailscale_interface }}"
comment: "Bitcoin Knots P2P (Tailscale only)"
- name: Allow Bitcoin P2P port (UDP) on Tailscale interface only
ufw:
rule: allow
direction: in
port: "{{ bitcoin_p2p_port }}"
proto: udp
interface: "{{ bitcoin_tailscale_interface }}"
comment: "Bitcoin Knots P2P UDP (Tailscale only)"
- name: Verify UFW rules for Bitcoin Knots
command: ufw status numbered
register: ufw_status
changed_when: false
- name: Display UFW status
debug:
msg: "{{ ufw_status.stdout_lines }}"
- name: Create Bitcoin Knots health check and push script
copy:
dest: /usr/local/bin/bitcoin-knots-healthcheck-push.sh
content: |
#!/bin/bash
#
# Bitcoin Knots Health Check and Push to Uptime Kuma
# Checks if bitcoind RPC is responding and pushes status to Uptime Kuma
#
RPC_HOST="{{ bitcoin_rpc_bind }}"
RPC_PORT={{ bitcoin_rpc_port }}
RPC_USER="{{ bitcoin_rpc_user }}"
RPC_PASSWORD="{{ bitcoin_rpc_password }}"
UPTIME_KUMA_PUSH_URL="${UPTIME_KUMA_PUSH_URL}"  # injected via the systemd unit's Environment= line
# Check if bitcoind RPC is responding
check_bitcoind() {
local response
response=$(curl -s --max-time 5 \
--user "${RPC_USER}:${RPC_PASSWORD}" \
--data-binary '{"jsonrpc":"1.0","id":"healthcheck","method":"getblockchaininfo","params":[]}' \
--header 'Content-Type: application/json' \
"http://${RPC_HOST}:${RPC_PORT}" 2>&1)
if [ $? -eq 0 ]; then
# Check if response contains error
if echo "$response" | grep -q '"error"'; then
return 1
else
return 0
fi
else
return 1
fi
}
# Push status to Uptime Kuma
push_to_uptime_kuma() {
local status=$1
local msg=$2
if [ -z "$UPTIME_KUMA_PUSH_URL" ]; then
echo "ERROR: UPTIME_KUMA_PUSH_URL not set"
return 1
fi
# Let curl URL-encode and append the parameters as a query string (-G turns --data-urlencode into ?key=value pairs)
curl -s --max-time 10 --retry 2 -o /dev/null -G \
--data-urlencode "status=${status}" \
--data-urlencode "msg=${msg}" \
--data-urlencode "ping=" \
"${UPTIME_KUMA_PUSH_URL}" || true
}
# Main health check
if check_bitcoind; then
push_to_uptime_kuma "up" "OK"
exit 0
else
push_to_uptime_kuma "down" "bitcoind RPC not responding"
exit 1
fi
owner: root
group: root
mode: '0755'
- name: Install curl for health check script
apt:
name: curl
state: present
- name: Create systemd timer for Bitcoin Knots health check
copy:
dest: /etc/systemd/system/bitcoin-knots-healthcheck.timer
content: |
[Unit]
Description=Bitcoin Knots Health Check Timer
Requires=bitcoind.service
[Timer]
OnBootSec=1min
OnUnitActiveSec=1min
Persistent=true
[Install]
WantedBy=timers.target
owner: root
group: root
mode: '0644'
- name: Create systemd service for Bitcoin Knots health check
copy:
dest: /etc/systemd/system/bitcoin-knots-healthcheck.service
content: |
[Unit]
Description=Bitcoin Knots Health Check and Push to Uptime Kuma
After=network.target bitcoind.service
[Service]
Type=oneshot
User=root
ExecStart=/usr/local/bin/bitcoin-knots-healthcheck-push.sh
Environment=UPTIME_KUMA_PUSH_URL=
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
owner: root
group: root
mode: '0644'
- name: Reload systemd daemon for health check
systemd:
daemon_reload: yes
- name: Enable and start Bitcoin Knots health check timer
systemd:
name: bitcoin-knots-healthcheck.timer
enabled: yes
state: started
- name: Create Uptime Kuma push monitor setup script for Bitcoin Knots
delegate_to: localhost
become: no
copy:
dest: /tmp/setup_bitcoin_knots_monitor.py
content: |
#!/usr/bin/env python3
import sys
import traceback
import yaml
from uptime_kuma_api import UptimeKumaApi, MonitorType
try:
# Load configs
with open('/tmp/ansible_config.yml', 'r') as f:
config = yaml.safe_load(f)
url = config['uptime_kuma_url']
username = config['username']
password = config['password']
monitor_name = config['monitor_name']
# Connect to Uptime Kuma
api = UptimeKumaApi(url, timeout=30)
api.login(username, password)
# Get all monitors
monitors = api.get_monitors()
# Find or create "services" group
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if not group:
group_result = api.add_monitor(type='group', name='services')
# Refresh to get the group with id
monitors = api.get_monitors()
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
# Check if monitor already exists
existing_monitor = None
for monitor in monitors:
if monitor.get('name') == monitor_name:
existing_monitor = monitor
break
# Get ntfy notification ID
notifications = api.get_notifications()
ntfy_notification_id = None
for notif in notifications:
if notif.get('type') == 'ntfy':
ntfy_notification_id = notif.get('id')
break
if existing_monitor:
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
# Get push URL from existing monitor
push_id = existing_monitor.get('push_token', existing_monitor.get('id'))
push_url = f"{url}/api/push/{push_id}"
print(f"Push URL: {push_url}")
print("Skipping - monitor already configured")
else:
print(f"Creating push monitor '{monitor_name}'...")
result = api.add_monitor(
type=MonitorType.PUSH,
name=monitor_name,
parent=group['id'],
interval=60,
maxretries=3,
retryInterval=60,
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
)
# Get push URL from created monitor
monitors = api.get_monitors()
new_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
if new_monitor:
push_id = new_monitor.get('push_token', new_monitor.get('id'))
push_url = f"{url}/api/push/{push_id}"
print(f"Push URL: {push_url}")
api.disconnect()
print("SUCCESS")
except Exception as e:
error_msg = str(e) if str(e) else repr(e)
print(f"ERROR: {error_msg}", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
sys.exit(1)
mode: '0755'
- name: Create temporary config for monitor setup
delegate_to: localhost
become: no
copy:
dest: /tmp/ansible_config.yml
content: |
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
username: "{{ uptime_kuma_username }}"
password: "{{ uptime_kuma_password }}"
monitor_name: "Bitcoin Knots"
mode: '0644'
- name: Run Uptime Kuma push monitor setup
command: python3 /tmp/setup_bitcoin_knots_monitor.py
delegate_to: localhost
become: no
register: monitor_setup
changed_when: "'SUCCESS' in monitor_setup.stdout"
ignore_errors: yes
- name: Extract push URL from monitor setup output
set_fact:
uptime_kuma_push_url: "{{ monitor_setup.stdout | regex_search('Push URL: (https?://[^\\s]+)', '\\1') | first | default('') }}"
delegate_to: localhost
become: no
when: monitor_setup.stdout is defined
- name: Display extracted push URL
debug:
msg: "Uptime Kuma Push URL: {{ uptime_kuma_push_url }}"
when: uptime_kuma_push_url | default('') != ''
- name: Set push URL in systemd service environment
lineinfile:
path: /etc/systemd/system/bitcoin-knots-healthcheck.service
regexp: '^Environment=UPTIME_KUMA_PUSH_URL='
line: "Environment=UPTIME_KUMA_PUSH_URL={{ uptime_kuma_push_url }}"
state: present
insertafter: '^\[Service\]'
when: uptime_kuma_push_url | default('') != ''
- name: Reload systemd daemon after push URL update
systemd:
daemon_reload: yes
when: uptime_kuma_push_url | default('') != ''
- name: Restart health check timer to pick up new environment
systemd:
name: bitcoin-knots-healthcheck.timer
state: restarted
when: uptime_kuma_push_url | default('') != ''
- name: Clean up temporary files
delegate_to: localhost
become: no
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/setup_bitcoin_knots_monitor.py
- /tmp/ansible_config.yml
handlers:
- name: Restart bitcoind
systemd:
name: bitcoind
state: restarted

View file

@@ -1,897 +0,0 @@
# Personal Infrastructure Setup Guide
This guide walks you through setting up your complete personal infrastructure, layer by layer. Each layer must be completed before moving to the next one.
**Automated Setup:** Each layer has a bash script that handles the setup process. The scripts will:
- Check prerequisites
- Prompt for required variables
- Set up configuration files
- Execute playbooks
- Verify completion
## Prerequisites
Before starting:
- You have a domain name
- You have VPS accounts ready
- You have nodito ready with Proxmox installed and your SSH key in place
- You have SSH access to all machines
- You're running this from your laptop (lapy)
---
## Layer 0: Foundation Setup
**Goal:** Set up your laptop (lapy) as the Ansible control node and configure basic settings.
**Script:** `./scripts/setup_layer_0.sh`
### What This Layer Does:
1. Creates Python virtual environment
2. Installs Ansible and required Python packages
3. Installs Ansible Galaxy collections
4. Guides you through creating `inventory.ini` with your machine IPs
5. Guides you through creating `infra_vars.yml` with your domain
6. Creates `services_config.yml` with centralized subdomain settings
7. Creates `infra_secrets.yml` template for Uptime Kuma credentials
8. Validates SSH keys exist
9. Verifies everything is ready for Layer 1
### Required Information:
- Your domain name (e.g., `contrapeso.xyz`)
- SSH key path (default: `~/.ssh/counterganzua`)
- IP addresses for your infrastructure (see the example inventory sketched after this list):
- vipy (main VPS)
- watchtower (monitoring VPS)
- spacey (headscale VPS)
- nodito (Proxmox server) - optional
- **Note:** VMs (like memos-box) will be created later on Proxmox and added to the `nodito_vms` group
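For orientation, a minimal `inventory.ini` might look roughly like the sketch below. The hostnames and the `vps` / `nodito_vms` group names come from this guide; the placeholder IPs, the ungrouped `nodito` entry, and the key path are illustrative and may differ from what the Layer 0 script actually generates:
```ini
# illustrative sketch only - the Layer 0 script builds the real file interactively
nodito ansible_host=192.168.1.20
lapy   ansible_host=127.0.0.1 ansible_connection=local

[vps]
vipy       ansible_host=203.0.113.10
watchtower ansible_host=203.0.113.11
spacey     ansible_host=203.0.113.12

[nodito_vms]
# VMs created later on Proxmox (e.g. memos-box) get added here

[all:vars]
ansible_ssh_private_key_file=~/.ssh/counterganzua
```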
### Manual Steps:
After running the script, you'll need to:
1. Ensure your SSH key is added to all VPS root users (usually done by VPS provider)
2. Ensure DNS is configured for your domain (nameservers pointing to your DNS provider)
### Centralized Configuration:
The script creates `ansible/services_config.yml` which contains all service subdomains in one place:
- Easy to review all subdomains at a glance
- No need to edit multiple vars files
- Consistent Caddy settings across all services
- **Edit this file to customize your subdomains before deploying services**
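For reference, the `subdomains:` section that later layers point you to might look roughly like this. The keys shown are the services mentioned in this guide; the real key names and defaults come from the file the script generates:
```yaml
# illustrative shape only - edit the generated ansible/services_config.yml, not this snippet
subdomains:
  ntfy: "ntfy"
  uptime_kuma: "uptime"
  headscale: "headscale"
  vaultwarden: "vault"
  forgejo: "git"
  lnbits: "lnbits"
  ntfy_emergency_app: "emergency"
  memos: "memos"
```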
### Verification:
The script will verify:
- ✓ Python venv exists and activated
- ✓ Ansible installed
- ✓ Required Python packages installed
- ✓ Ansible Galaxy collections installed
- ✓ `inventory.ini` exists and formatted correctly
- ✓ `infra_vars.yml` exists with domain configured
- ✓ `services_config.yml` created with subdomain settings
- ✓ `infra_secrets.yml` template created
- ✓ SSH key file exists
### Run the Script:
```bash
cd /home/counterweight/personal_infra
./scripts/setup_layer_0.sh
```
---
## Layer 1A: VPS Basic Setup
**Goal:** Configure users, SSH access, firewall, and fail2ban on VPS machines.
**Script:** `./scripts/setup_layer_1a_vps.sh`
**Can be run independently** - doesn't require Nodito setup.
### What This Layer Does:
For VPSs (vipy, watchtower, spacey):
1. Creates the `counterweight` user with sudo access
2. Configures SSH key authentication
3. Disables root login (by design for security)
4. Sets up UFW firewall with SSH access
5. Installs and configures fail2ban
6. Installs and configures auditd for security logging
### Prerequisites:
- ✅ Layer 0 complete
- ✅ SSH key added to all VPS root users
- ✅ Root access to VPSs
### Verification:
The script will verify:
- ✓ Can SSH to all VPSs as root
- ✓ VPS playbooks complete successfully
- ✓ Can SSH to all VPSs as `counterweight` user
- ✓ Firewall is active and configured
- ✓ fail2ban is running
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_1a_vps.sh
```
**Note:** After this layer, you will no longer be able to SSH as root to VPSs (by design for security).
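If you want to spot-check the hardening by hand, the following commands (assuming the `counterweight` user and the default `sshd` fail2ban jail) should line up with the verification list above:
```bash
ssh counterweight@<vps-ip>
sudo ufw status verbose            # firewall active, SSH allowed
sudo fail2ban-client status sshd   # sshd jail enabled and running
sudo systemctl status auditd       # audit logging active
```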
---
## Layer 1B: Nodito (Proxmox) Setup
**Goal:** Configure the Nodito Proxmox server.
**Script:** `./scripts/setup_layer_1b_nodito.sh`
**Can be run independently** - doesn't require VPS setup.
### What This Layer Does:
For Nodito (Proxmox server):
1. Bootstraps SSH key access for root
2. Creates the `counterweight` user
3. Updates and secures the system
4. Disables root login and password authentication
5. Switches to Proxmox community repositories
6. Optionally sets up ZFS storage pool (if disks configured)
7. Optionally creates Debian cloud template
### Prerequisites:
- ✅ Layer 0 complete
- ✅ Root password for nodito
- ✅ Nodito configured in inventory.ini
### Optional: ZFS Setup
For ZFS storage pool (optional):
1. SSH into nodito: `ssh root@<nodito-ip>`
2. List disk IDs: `ls -la /dev/disk/by-id/ | grep -E "(ata-|scsi-|nvme-)"`
3. Note the disk IDs you want to use
4. The script will help you create `ansible/infra/nodito/nodito_vars.yml` with disk configuration
⚠️ **Warning:** ZFS setup will DESTROY ALL DATA on specified disks!
### Verification:
The script will verify:
- ✓ Nodito bootstrap successful
- ✓ Community repos configured
- ✓ Can SSH to nodito as `counterweight` user
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_1b_nodito.sh
```
**Note:** After this layer, you will no longer be able to SSH as root to nodito (by design for security).
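A quick manual check after the playbooks finish (skip the `zpool` line if you did not opt into ZFS):
```bash
ssh counterweight@<nodito-ip>
pveversion            # confirms the Proxmox node responds
sudo zpool status     # only if the ZFS storage pool was created
```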
---
## Layer 2: General Infrastructure Tools
**Goal:** Install common utilities needed by various services.
**Script:** `./scripts/setup_layer_2.sh`
### What This Layer Does:
Installs essential tools on machines that need them:
#### rsync
- **Purpose:** Required for backup operations
- **Deployed to:** vipy, watchtower, lapy (and optionally other hosts)
- **Playbook:** `infra/900_install_rsync.yml`
#### Docker + Docker Compose
- **Purpose:** Required for containerized services
- **Deployed to:** vipy, watchtower (and optionally other hosts)
- **Playbook:** `infra/910_docker_playbook.yml`
### Prerequisites:
- ✅ Layer 0 complete
- ✅ Layer 1A complete (for VPSs) OR Layer 1B complete (for nodito)
- ✅ SSH access as counterweight user
### Services That Need These Tools:
- **rsync:** All backup operations (Uptime Kuma, Vaultwarden, LNBits, etc.)
- **docker:** Uptime Kuma, Vaultwarden, ntfy-emergency-app
### Verification:
The script will verify:
- ✓ rsync installed on specified hosts
- ✓ Docker and Docker Compose installed on specified hosts
- ✓ counterweight user added to docker group
- ✓ Docker service running
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_2.sh
```
**Note:** This script is interactive and will let you choose which hosts get which tools.
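To double-check the tooling by hand on any host you selected:
```bash
rsync --version | head -n 1
docker --version
docker compose version
groups counterweight       # should include the docker group
systemctl is-active docker
```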
---
## Layer 3: Reverse Proxy (Caddy)
**Goal:** Deploy Caddy reverse proxy for HTTPS termination and routing.
**Script:** `./scripts/setup_layer_3_caddy.sh`
### What This Layer Does:
Installs and configures Caddy web server on VPS machines:
- Installs Caddy from official repositories
- Configures Caddy to listen on ports 80/443
- Opens firewall ports for HTTP/HTTPS
- Creates `/etc/caddy/sites-enabled/` directory structure
- Sets up automatic HTTPS with Let's Encrypt
**Deployed to:** vipy, watchtower, spacey
### Why Caddy is Critical:
Caddy provides:
- **Automatic HTTPS** - Let's Encrypt certificates with auto-renewal
- **Reverse proxy** - Routes traffic to backend services
- **Simple configuration** - Each service adds its own config file
- **HTTP/2 support** - Modern protocol support
### Prerequisites:
- ✅ Layer 0 complete
- ✅ Layer 1A complete (VPS setup)
- ✅ SSH access as counterweight user
- ✅ Ports 80/443 available on VPSs
### Services That Need Caddy:
All web services depend on Caddy:
- Uptime Kuma (watchtower)
- ntfy (watchtower)
- Headscale (spacey)
- Vaultwarden (vipy)
- Forgejo (vipy)
- LNBits (vipy)
- ntfy-emergency-app (vipy)
### Verification:
The script will verify:
- ✓ Caddy installed on all target hosts
- ✓ Caddy service running
- ✓ Ports 80/443 open in firewall
- ✓ Sites-enabled directory created
- ✓ Can reach Caddy default page
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_3_caddy.sh
```
**Note:** Caddy starts with an empty configuration. Services will add their own config files in later layers.
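As an illustration of what those per-service files end up looking like, a typical drop-in under `/etc/caddy/sites-enabled/` is just a hostname block with a `reverse_proxy` directive. The subdomain and backend port below are placeholders, not the exact values the service playbooks write:
```
# /etc/caddy/sites-enabled/uptime-kuma.conf (illustrative)
uptime.example.com {
    reverse_proxy 127.0.0.1:3001
}
```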
---
## Layer 4: Core Monitoring & Notifications
**Goal:** Deploy ntfy (notifications) and Uptime Kuma (monitoring platform).
**Script:** `./scripts/setup_layer_4_monitoring.sh`
### What This Layer Does:
Deploys core monitoring infrastructure on watchtower:
#### 4A: ntfy (Notification Service)
- Installs ntfy from official repositories
- Configures ntfy with authentication (deny-all by default)
- Creates admin user for sending notifications
- Sets up Caddy reverse proxy
- **Deployed to:** watchtower
#### 4B: Uptime Kuma (Monitoring Platform)
- Deploys Uptime Kuma via Docker
- Configures Caddy reverse proxy
- Sets up data persistence
- Optionally sets up backup to lapy
- **Deployed to:** watchtower
### Prerequisites (Complete BEFORE Running):
**1. Previous layers complete:**
- ✅ Layer 0, 1A, 2, 3 complete (watchtower must be fully set up)
- ✅ Docker installed on watchtower (from Layer 2)
- ✅ Caddy running on watchtower (from Layer 3)
**2. Configure subdomains (in centralized config):**
- ✅ Edit `ansible/services_config.yml` and customize subdomains under `subdomains:` section
- Set `ntfy:` to your preferred subdomain (e.g., `ntfy` or `notify`)
- Set `uptime_kuma:` to your preferred subdomain (e.g., `uptime` or `kuma`)
**3. Create DNS records that match your configured subdomains:**
- ✅ Create A record: `<ntfy_subdomain>.<yourdomain>` → watchtower IP
- ✅ Create A record: `<uptime_kuma_subdomain>.<yourdomain>` → watchtower IP
- ✅ Wait for DNS propagation (can take minutes to hours)
- ✅ Verify with: `dig <subdomain>.<yourdomain>` should return watchtower IP
**4. Prepare ntfy admin credentials:**
- ✅ Decide on username (default: `admin`)
- ✅ Decide on a secure password (script will prompt you)
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_4_monitoring.sh
```
The script will prompt you for ntfy admin credentials during deployment.
### Post-Deployment Steps (Complete AFTER Running):
**The script will guide you through most of these, but here's what happens:**
#### Step 1: Set Up Uptime Kuma Admin Account (Manual)
1. Open browser and visit: `https://<uptime_kuma_subdomain>.<yourdomain>`
2. On first visit, you'll see the setup page
3. Create admin username and password
4. Save these credentials securely
#### Step 2: Update infra_secrets.yml (Manual)
1. Edit `ansible/infra_secrets.yml`
2. Add your Uptime Kuma credentials:
```yaml
uptime_kuma_username: "your-admin-username"
uptime_kuma_password: "your-admin-password"
```
3. Save the file
4. **This is required for automated ntfy setup and Layer 6**
#### Step 3: Configure ntfy Notification (Automated)
**The script will offer to do this automatically!** If you completed Steps 1 & 2, the script will:
- Connect to Uptime Kuma via API
- Create ntfy notification configuration
- Test the connection
- No manual UI configuration needed!
**Alternatively (Manual):**
1. In Uptime Kuma web UI, go to **Settings** → **Notifications**
2. Click **Setup Notification**, choose **ntfy**
3. Configure with your ntfy subdomain and credentials
#### Step 4: Final Verification (Automated)
**The script will automatically verify:**
- ✓ Uptime Kuma credentials in infra_secrets.yml
- ✓ Can connect to Uptime Kuma API
- ✓ ntfy notification is configured
- ✓ All post-deployment steps complete
If anything is missing, the script will tell you exactly what to do!
#### Step 5: Subscribe to Notifications on Your Phone (Optional - Manual)
1. Install ntfy app: https://github.com/binwiederhier/ntfy-android
2. Add subscription:
- Server: `https://<ntfy_subdomain>.<yourdomain>`
- Topic: `alerts` (same as configured in Uptime Kuma)
- Username: Your ntfy admin username
- Password: Your ntfy admin password
3. You'll now receive push notifications for all alerts!
**Pro tip:** Run the script again after completing Steps 1 & 2, and it will automatically configure ntfy and verify everything!
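If you want to confirm ntfy end to end from the command line before relying on Uptime Kuma, publishing to the `alerts` topic with the admin credentials you created is enough to make your phone buzz (hostname and password below are placeholders):
```bash
curl -u admin:'your_ntfy_password' \
  -d "Test alert from the command line" \
  https://<ntfy_subdomain>.<yourdomain>/alerts
```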
### Verification:
The script will automatically verify:
- ✓ DNS records are configured correctly (using `dig`)
- ✓ ntfy service running
- ✓ Uptime Kuma container running
- ✓ Caddy configs created for both services
After post-deployment steps, you can test:
- Visit `https://<ntfy_subdomain>.<yourdomain>` (should load ntfy web UI)
- Visit `https://<uptime_kuma_subdomain>.<yourdomain>` (should load Uptime Kuma)
- Send test notification in Uptime Kuma
**Note:** DNS validation requires `dig` command. If not available, validation will be skipped (you can continue but SSL may fail).
### Why This Layer is Critical:
- **All infrastructure monitoring** (Layer 6) depends on Uptime Kuma
- **All alerts** go through ntfy
- Services availability monitoring needs Uptime Kuma
- Without this layer, you won't know when things break!
---
## Layer 5: VPN Infrastructure (Headscale)
**Goal:** Deploy Headscale for secure mesh networking (like Tailscale, but self-hosted).
**Script:** `./scripts/setup_layer_5_headscale.sh`
**This layer is OPTIONAL** - Skip to Layer 6 if you don't need VPN mesh networking.
### What This Layer Does:
Deploys Headscale coordination server and optionally joins machines to the mesh:
#### 5A: Deploy Headscale Server
- Installs Headscale on spacey
- Configures with deny-all ACL policy (you customize later)
- Creates namespace/user for your network
- Sets up Caddy reverse proxy
- Configures embedded DERP server for NAT traversal
- **Deployed to:** spacey
#### 5B: Join Machines to Mesh (Optional)
- Installs Tailscale client on target machines
- Generates ephemeral pre-auth keys
- Automatically joins machines to your mesh
- Enables Magic DNS
- **Can join:** vipy, watchtower, nodito, lapy, etc.
### Prerequisites (Complete BEFORE Running):
**1. Previous layers complete:**
- ✅ Layer 0, 1A, 3 complete (spacey must be set up)
- ✅ Caddy running on spacey (from Layer 3)
**2. Configure subdomain (in centralized config):**
- ✅ Edit `ansible/services_config.yml` and customize `headscale:` under `subdomains:` section (e.g., `headscale` or `vpn`)
**3. Create DNS record that matches your configured subdomain:**
- ✅ Create A record: `<headscale_subdomain>.<yourdomain>` → spacey IP
- ✅ Wait for DNS propagation
- ✅ Verify with: `dig <subdomain>.<yourdomain>` should return spacey IP
**4. Decide on namespace name:**
- ✅ Choose a namespace for your network (default: `counter-net`)
- ✅ This is set in `headscale_vars.yml` as `headscale_namespace`
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_5_headscale.sh
```
The script will:
1. Validate DNS configuration
2. Deploy Headscale server
3. Offer to join machines to the mesh
### Post-Deployment Steps:
#### Configure ACL Policies (Required for machines to communicate)
1. SSH into spacey: `ssh counterweight@<spacey-ip>`
2. Edit ACL file: `sudo nano /etc/headscale/acl.json`
3. Configure rules (example - allow all):
```json
{
"ACLs": [
{"action": "accept", "src": ["*"], "dst": ["*:*"]}
]
}
```
4. Restart Headscale: `sudo systemctl restart headscale`
**Default is deny-all for security** - you must configure ACLs for machines to talk!
#### Join Additional Machines Manually
For machines not in inventory (mobile, desktop):
1. Install Tailscale client on device
2. Generate pre-auth key on spacey:
```bash
ssh counterweight@<spacey-ip>
sudo headscale preauthkeys create --user <namespace> --reusable
```
3. Connect using your Headscale server:
```bash
tailscale up --login-server https://<headscale_subdomain>.<yourdomain> --authkey <key>
```
### Automatic Uptime Kuma Monitor:
**The playbook will automatically create a monitor in Uptime Kuma:**
- ✅ **Headscale** - monitors `https://<subdomain>/health`
- Added to "services" monitor group
- Uses ntfy notification (if configured)
- Check every 60 seconds
**Prerequisites:** Uptime Kuma credentials must be in `infra_secrets.yml` (from Layer 4)
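You can hit the same endpoint the monitor uses to confirm Headscale is serving before the monitor exists:
```bash
curl -fsS https://<headscale_subdomain>.<yourdomain>/health
```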
### Verification:
The script will automatically verify:
- ✓ DNS records configured correctly
- ✓ Headscale installed and running
- ✓ Namespace created
- ✓ Caddy config created
- ✓ Machines joined (if selected)
- ✓ Monitor created in Uptime Kuma "services" group
List connected devices:
```bash
ssh counterweight@<spacey-ip>
sudo headscale nodes list
```
### Why Use Headscale:
- **Secure communication** between all your machines
- **Magic DNS** - access machines by hostname
- **NAT traversal** - works even behind firewalls
- **Self-hosted** - full control of your VPN
- **Mobile support** - use official Tailscale apps
### Backup:
Optional backup to lapy:
```bash
ansible-playbook -i inventory.ini services/headscale/setup_backup_headscale_to_lapy.yml
```
---
## Layer 6: Infrastructure Monitoring
**Goal:** Deploy automated monitoring for disk usage, system health, and CPU temperature.
**Script:** `./scripts/setup_layer_6_infra_monitoring.sh`
### What This Layer Does:
Deploys monitoring scripts that report to Uptime Kuma:
#### 6A: Disk Usage Monitoring
- Monitors disk usage on specified mount points
- Sends alerts when usage exceeds threshold (default: 80%)
- Creates Uptime Kuma push monitors automatically
- Organizes monitors in host-specific groups
- **Deploys to:** All hosts (selectable)
#### 6B: System Healthcheck
- Sends regular heartbeat pings to Uptime Kuma
- Alerts if system stops responding
- "No news is good news" monitoring
- **Deploys to:** All hosts (selectable)
#### 6C: CPU Temperature Monitoring (Nodito only)
- Monitors CPU temperature on Proxmox server
- Alerts when temperature exceeds threshold (default: 80°C)
- **Deploys to:** nodito (if configured)
### Prerequisites (Complete BEFORE Running):
**1. Previous layers complete:**
- ✅ Layer 0, 1A/1B, 4 complete
- ✅ Uptime Kuma deployed and configured (Layer 4)
- ✅ **CRITICAL:** `infra_secrets.yml` has Uptime Kuma credentials
**2. Uptime Kuma API credentials ready:**
- ✅ Must have completed Layer 4 post-deployment steps
- ✅ `ansible/infra_secrets.yml` must contain:
```yaml
uptime_kuma_username: "your-username"
uptime_kuma_password: "your-password"
```
**3. Python dependencies installed:**
- ✅ `uptime-kuma-api` must be in requirements.txt
- ✅ Should already be installed from Layer 0
- ✅ Verify: `pip list | grep uptime-kuma-api`
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_6_infra_monitoring.sh
```
The script will:
1. Verify Uptime Kuma credentials
2. Offer to deploy disk usage monitoring
3. Offer to deploy system healthchecks
4. Offer to deploy CPU temp monitoring (nodito only)
5. Test monitor creation and alerts
### What Gets Deployed:
**For each monitored host:**
- Push monitor in Uptime Kuma (upside-down mode)
- Monitor group named `{hostname} - infra`
- Systemd service for monitoring script
- Systemd timer for periodic execution
- Log file for monitoring history
**Default settings (customizable):**
- Disk usage threshold: 80%
- Disk check interval: 15 minutes
- Healthcheck interval: 60 seconds
- CPU temp threshold: 80°C
- Monitored mount point: `/` (root)
### Customization Options:
Change thresholds and intervals:
```bash
# Disk monitoring with custom settings
ansible-playbook -i inventory.ini infra/410_disk_usage_alerts.yml \
-e "disk_usage_threshold_percent=85" \
-e "disk_check_interval_minutes=10" \
-e "monitored_mount_point=/home"
# Healthcheck with custom interval
ansible-playbook -i inventory.ini infra/420_system_healthcheck.yml \
-e "healthcheck_interval_seconds=30"
# CPU temp with custom threshold
ansible-playbook -i inventory.ini infra/430_cpu_temp_alerts.yml \
-e "temp_threshold_celsius=75"
```
### Verification:
The script will automatically verify:
- ✓ Uptime Kuma API accessible
- ✓ Monitors created in Uptime Kuma
- ✓ Monitor groups created
- ✓ Systemd services running
- ✓ Can send test alerts
Check Uptime Kuma web UI:
- Monitors should appear organized by host
- Should receive test pings
- Alerts will show when thresholds exceeded
### Post-Deployment:
**Monitor your infrastructure:**
1. Open Uptime Kuma web UI
2. See all monitors organized by host groups
3. Configure notification rules per monitor
4. Set up status pages (optional)
**Test alerts:**
```bash
# Trigger disk usage alert (fill disk temporarily)
# Trigger healthcheck alert (stop the service)
# Check ntfy for notifications
```
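For a manual end-to-end test of the push path, you can call a monitor's push URL yourself. The token is specific to the push monitor Uptime Kuma created for the host, and the query parameters follow the push format used by the healthcheck scripts in this repo; since these are upside-down push monitors, treat this purely as a way to confirm the push endpoint is reachable:
```bash
# <push-token> is shown in the monitor's details in Uptime Kuma
curl -s "https://<uptime_kuma_subdomain>.<yourdomain>/api/push/<push-token>?status=up&msg=manual-test&ping="
```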
### Why This Layer is Important:
- **Proactive monitoring** - Know about issues before users do
- **Disk space alerts** - Prevent services from failing
- **System health** - Detect crashed/frozen machines
- **Temperature monitoring** - Prevent hardware damage
- **Organized** - All monitors grouped by host
---
## Layer 7: Core Services
**Goal:** Deploy core applications: Vaultwarden, Forgejo, and LNBits.
**Script:** `./scripts/setup_layer_7_services.sh`
### What This Layer Does:
Deploys main services on vipy:
#### 7A: Vaultwarden (Password Manager)
- Deploys via Docker
- Configures Caddy reverse proxy
- Sets up fail2ban protection
- Enables sign-ups initially (disable after creating first user)
- **Deployed to:** vipy
#### 7B: Forgejo (Git Server)
- Installs Forgejo binary
- Creates git user and directories
- Configures Caddy reverse proxy
- Enables SSH cloning
- **Deployed to:** vipy
#### 7C: LNBits (Lightning Wallet)
- Installs system dependencies and uv (Python 3.12 tooling)
- Clones LNBits version v1.3.1
- Syncs dependencies with uv targeting Python 3.12
- Configures with FakeWallet backend (for testing)
- Creates systemd service
- Configures Caddy reverse proxy
- **Deployed to:** vipy
### Prerequisites (Complete BEFORE Running):
**1. Previous layers complete:**
- ✅ Layer 0, 1A, 2, 3 complete
- ✅ Docker installed on vipy (Layer 2)
- ✅ Caddy running on vipy (Layer 3)
**2. Configure subdomains (in centralized config):**
- ✅ Edit `ansible/services_config.yml` and customize subdomains under `subdomains:` section:
- Set `vaultwarden:` to your preferred subdomain (e.g., `vault` or `passwords`)
- Set `forgejo:` to your preferred subdomain (e.g., `git` or `code`)
- Set `lnbits:` to your preferred subdomain (e.g., `lnbits` or `wallet`)
**3. Create DNS records matching your subdomains:**
- ✅ Create A record: `<vaultwarden_subdomain>.<yourdomain>` → vipy IP
- ✅ Create A record: `<forgejo_subdomain>.<yourdomain>` → vipy IP
- ✅ Create A record: `<lnbits_subdomain>.<yourdomain>` → vipy IP
- ✅ Wait for DNS propagation
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_7_services.sh
```
The script will:
1. Validate DNS configuration
2. Offer to deploy each service
3. Configure backups (optional)
### Post-Deployment Steps:
#### Vaultwarden:
1. Visit `https://<vaultwarden_subdomain>.<yourdomain>`
2. Create your first user account
3. **Important:** Disable sign-ups after first user:
```bash
ansible-playbook -i inventory.ini services/vaultwarden/disable_vaultwarden_sign_ups_playbook.yml
```
4. Optional: Set up backup to lapy
#### Forgejo:
1. Visit `https://<forgejo_subdomain>.<yourdomain>`
2. Create admin account on first visit
3. Default: registrations disabled for security
4. SSH cloning works automatically after adding SSH key
#### LNBits:
1. Visit `https://<lnbits_subdomain>.<yourdomain>`
2. Create superuser on first visit
3. **Important:** Default uses FakeWallet (testing only)
4. Configure real Lightning backend:
- Edit `/opt/lnbits/lnbits/.env` on vipy
- Or use the superuser UI to configure backend
5. Disable new user registration for security
6. Optional: Set up encrypted backup to lapy
### Backup Configuration:
After services are stable, set up backups:
**Vaultwarden backup:**
```bash
ansible-playbook -i inventory.ini services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml
```
**LNBits backup (GPG encrypted):**
```bash
ansible-playbook -i inventory.ini services/lnbits/setup_backup_lnbits_to_lapy.yml
```
**Note:** Forgejo backups are not automated - backup manually or set up your own solution.
### Automatic Uptime Kuma Monitors:
**The playbooks will automatically create monitors in Uptime Kuma for each service:**
- ✅ **Vaultwarden** - monitors `https://<subdomain>/alive`
- ✅ **Forgejo** - monitors `https://<subdomain>/api/healthz`
- ✅ **LNBits** - monitors `https://<subdomain>/api/v1/health`
All monitors:
- Added to "services" monitor group
- Use ntfy notification (if configured)
- Check every 60 seconds
- 3 retries before alerting
**Prerequisites:** Uptime Kuma credentials must be in `infra_secrets.yml` (from Layer 4)
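To spot-check the same endpoints the monitors use (substituting your configured subdomains):
```bash
curl -fsS https://<vaultwarden_subdomain>.<yourdomain>/alive
curl -fsS https://<forgejo_subdomain>.<yourdomain>/api/healthz
curl -fsS https://<lnbits_subdomain>.<yourdomain>/api/v1/health
```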
### Verification:
The script will automatically verify:
- ✓ DNS records configured
- ✓ Services deployed
- ✓ Docker containers running (Vaultwarden)
- ✓ Systemd services running (Forgejo, LNBits)
- ✓ Caddy configs created
Manual verification:
- Visit each service's subdomain
- Create admin/first user accounts
- Test functionality
- Check Uptime Kuma for new monitors in "services" group
### Why These Services:
- **Vaultwarden** - Self-hosted password manager (Bitwarden compatible)
- **Forgejo** - Self-hosted Git server (GitHub/GitLab alternative)
- **LNBits** - Lightning Network wallet and accounts system
---
## Layer 8: Secondary Services
**Goal:** Deploy auxiliary services that depend on the core stack: ntfy-emergency-app and memos.
**Script:** `./scripts/setup_layer_8_secondary_services.sh`
### What This Layer Does:
- Deploys the ntfy-emergency-app container on vipy and proxies it through Caddy
- Optionally deploys Memos on `memos-box` (skips automatically if the host is not yet in `inventory.ini`)
### Prerequisites (Complete BEFORE Running):
- ✅ Layers 0-7 complete (Caddy, ntfy, and Uptime Kuma already online)
- ✅ `ansible/services_config.yml` reviewed so the `ntfy_emergency_app` and `memos` subdomains match your plan
- ✅ `ansible/infra_secrets.yml` contains valid `ntfy_username` and `ntfy_password`
- ✅ DNS A records created for the subdomains (see below)
- ✅ If deploying Memos, ensure `memos-box` exists in `inventory.ini` and is reachable as the `counterweight` user
### DNS Requirements:
- `<ntfy_emergency_app>.<domain>` → vipy IP
- `<memos>.<domain>` → memos-box IP (skip if memos not yet provisioned)
The script runs `dig` to validate DNS before deploying and will warn if records are missing or pointing elsewhere.
### Run the Script:
```bash
source venv/bin/activate
cd /home/counterweight/personal_infra
./scripts/setup_layer_8_secondary_services.sh
```
You can deploy each service independently; the script asks for confirmation before running each playbook.
### Post-Deployment Steps:
- **ntfy-emergency-app:** Visit the emergency subdomain, trigger a test notification, and verify ntfy receives it
- **Memos (if deployed):** Visit the memos subdomain, create the first admin user, and adjust settings from the UI
### Verification:
- The script checks for the presence of Caddy configs, running containers, and Memos systemd service status
- Review Uptime Kuma or add monitors for these services if you want automatic alerting
### Optional Follow-Ups:
- Configure backups for any new data stores (e.g., snapshot memos data)
- Add Uptime Kuma monitors for the new services if you want automated alerting
---
## Troubleshooting
### Common Issues
#### SSH Connection Fails
- Verify VPS is running and accessible
- Check SSH key is in the correct location
- Ensure SSH key has correct permissions (600)
- Try manual SSH: `ssh -i ~/.ssh/counterganzua root@<ip>`
#### Ansible Not Found
- Make sure you've activated the venv: `source venv/bin/activate`
- Run Layer 0 script again
#### DNS Not Resolving
- DNS changes can take up to 24-48 hours to propagate
- Use `dig <subdomain>.<domain>` to check DNS status
- You can proceed with setup; services will work once DNS propagates
---
## Progress Tracking
Use this checklist to track your progress:
- [ ] Layer 0: Foundation Setup
- [ ] Layer 1A: VPS Basic Setup
- [ ] Layer 1B: Nodito (Proxmox) Setup
- [ ] Layer 2: General Infrastructure Tools
- [ ] Layer 3: Reverse Proxy (Caddy)
- [ ] Layer 4: Core Monitoring & Notifications
- [ ] Layer 5: VPN Infrastructure (Headscale)
- [ ] Layer 6: Infrastructure Monitoring
- [ ] Layer 7: Core Services
- [ ] Layer 8: Secondary Services
- [ ] Backups Configured

View file

@@ -45,13 +45,6 @@ vms = {
memory_mb = 2048
disk_size_gb = 20
ipconfig0 = "ip=dhcp" # or "ip=192.168.1.50/24,gw=192.168.1.1"
data_disks = [
{
size_gb = 50
# storage defaults to var.zfs_storage_name (proxmox-tank-1)
# optional: slot = "scsi2"
}
]
}
}
```

View file

@@ -73,16 +73,6 @@ resource "proxmox_vm_qemu" "vm" {
# optional flags like iothread/ssd/discard differ by provider versions; keep minimal
}
dynamic "disk" {
for_each = try(each.value.data_disks, [])
content {
slot = try(disk.value.slot, format("scsi%s", tonumber(disk.key) + 1))
type = "disk"
storage = try(disk.value.storage, var.zfs_storage_name)
size = "${disk.value.size_gb}G"
}
}
# Cloud-init CD-ROM so ipconfig0/sshkeys apply
disk {
slot = "ide2"

View file

@@ -20,11 +20,6 @@ vms = {
memory_mb = 2048
disk_size_gb = 20
ipconfig0 = "ip=dhcp"
data_disks = [
{
size_gb = 50
}
]
}
db1 = {

View file

@@ -55,11 +55,6 @@ variable "vms" {
disk_size_gb = number
vlan_tag = optional(number)
ipconfig0 = optional(string) # e.g. "ip=dhcp" or "ip=192.168.1.50/24,gw=192.168.1.1"
data_disks = optional(list(object({
size_gb = number
storage = optional(string)
slot = optional(string)
})), [])
}))
default = {}
}