# personal_infra/ansible/services/fulcrum/deploy_fulcrum_playbook.yml
---

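#
# Two plays:
#   1. Deploy Fulcrum on fulcrum_box_local (binary, config, systemd unit,
#      health-check push script + timer, Uptime Kuma push monitor).
#   2. Expose the SSL port publicly on vipy via systemd-socket-proxyd and
#      register an Uptime Kuma TCP monitor for the public endpoint.
#
# Example invocation (inventory path is illustrative):
#   ansible-playbook -i inventory.yml deploy_fulcrum_playbook.yml
#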
- name: Deploy Fulcrum Electrum Server
hosts: fulcrum_box_local
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./fulcrum_vars.yml
vars:
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
tasks:
    - name: Calculate db_mem from configured percentage of system RAM
      set_fact:
        fulcrum_db_mem_mb: "{{ (ansible_memtotal_mb | float * fulcrum_db_mem_percent) | int }}"
- name: Display calculated db_mem value
debug:
msg: "Setting db_mem to {{ fulcrum_db_mem_mb }} MB ({{ (fulcrum_db_mem_percent * 100) | int }}% of {{ ansible_memtotal_mb }} MB total RAM)"
- name: Display Fulcrum version to install
debug:
msg: "Installing Fulcrum version {{ fulcrum_version }}"
- name: Install required packages
apt:
name:
- curl
- wget
- openssl
state: present
update_cache: yes
- name: Create fulcrum group
group:
name: "{{ fulcrum_group }}"
system: yes
state: present
- name: Create fulcrum user
user:
name: "{{ fulcrum_user }}"
group: "{{ fulcrum_group }}"
system: yes
shell: /usr/sbin/nologin
home: /home/{{ fulcrum_user }}
create_home: yes
state: present
- name: Create Fulcrum database directory (heavy data on special mount)
file:
path: "{{ fulcrum_db_dir }}"
state: directory
owner: "{{ fulcrum_user }}"
group: "{{ fulcrum_group }}"
mode: '0755'
- name: Create Fulcrum config directory
file:
path: "{{ fulcrum_config_dir }}"
state: directory
owner: root
group: "{{ fulcrum_group }}"
mode: '0755'
- name: Create Fulcrum lib directory (for banner and other data files)
file:
path: "{{ fulcrum_lib_dir }}"
state: directory
owner: "{{ fulcrum_user }}"
group: "{{ fulcrum_group }}"
mode: '0755'
# ===========================================
# SSL Certificate Generation
# ===========================================
- name: Check if SSL certificate already exists
stat:
path: "{{ fulcrum_ssl_cert_path }}"
register: fulcrum_ssl_cert_exists
when: fulcrum_ssl_enabled | default(false)
- name: Generate self-signed SSL certificate for Fulcrum
command: >
openssl req -x509 -newkey rsa:4096
-keyout {{ fulcrum_ssl_key_path }}
-out {{ fulcrum_ssl_cert_path }}
-sha256 -days {{ fulcrum_ssl_cert_days }}
-nodes
-subj "/C=XX/ST=Decentralized/L=Bitcoin/O=Fulcrum/OU=Electrum/CN=fulcrum.local"
args:
creates: "{{ fulcrum_ssl_cert_path }}"
when: fulcrum_ssl_enabled | default(false)
notify: Restart fulcrum
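    # To inspect the generated certificate on the target host:
    #   openssl x509 -in <fulcrum_ssl_cert_path> -noout -subject -dates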
- name: Set SSL certificate permissions
file:
path: "{{ fulcrum_ssl_cert_path }}"
owner: "{{ fulcrum_user }}"
group: "{{ fulcrum_group }}"
mode: '0644'
      when: fulcrum_ssl_enabled | default(false)
- name: Set SSL key permissions
file:
path: "{{ fulcrum_ssl_key_path }}"
owner: "{{ fulcrum_user }}"
group: "{{ fulcrum_group }}"
mode: '0600'
when: fulcrum_ssl_enabled | default(false)
- name: Check if Fulcrum binary already exists
stat:
path: "{{ fulcrum_binary_path }}"
register: fulcrum_binary_exists
changed_when: false
- name: Download Fulcrum binary tarball
get_url:
url: "https://github.com/cculianu/Fulcrum/releases/download/v{{ fulcrum_version }}/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
dest: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
mode: '0644'
when: not fulcrum_binary_exists.stat.exists
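    # Note: get_url also accepts a checksum parameter (e.g. checksum: "sha256:<digest>")
    # should you want to pin the release artifact; it is left unpinned here.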
- name: Extract Fulcrum binary
unarchive:
src: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
dest: "/tmp"
remote_src: yes
when: not fulcrum_binary_exists.stat.exists
- name: Install Fulcrum binary
copy:
src: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux/Fulcrum"
dest: "{{ fulcrum_binary_path }}"
owner: root
group: root
mode: '0755'
remote_src: yes
when: not fulcrum_binary_exists.stat.exists
- name: Verify Fulcrum binary installation
command: "{{ fulcrum_binary_path }} --version"
register: fulcrum_version_check
changed_when: false
- name: Display Fulcrum version
debug:
msg: "{{ fulcrum_version_check.stdout_lines }}"
- name: Create Fulcrum banner file
copy:
dest: "{{ fulcrum_lib_dir }}/fulcrum-banner.txt"
content: |
counterinfra
PER ASPERA AD ASTRA
owner: "{{ fulcrum_user }}"
group: "{{ fulcrum_group }}"
mode: '0644'
- name: Create Fulcrum configuration file
copy:
dest: "{{ fulcrum_config_dir }}/fulcrum.conf"
content: |
# Fulcrum Configuration
# Generated by Ansible
# Bitcoin Core/Knots RPC settings
bitcoind = {{ bitcoin_rpc_host }}:{{ bitcoin_rpc_port }}
rpcuser = {{ bitcoin_rpc_user }}
rpcpassword = {{ bitcoin_rpc_password }}
# Fulcrum server general settings
datadir = {{ fulcrum_db_dir }}
tcp = {{ fulcrum_tcp_bind }}:{{ fulcrum_tcp_port }}
peering = {{ 'true' if fulcrum_peering else 'false' }}
zmq_allow_hashtx = {{ 'true' if fulcrum_zmq_allow_hashtx else 'false' }}
# SSL/TLS Configuration
{% if fulcrum_ssl_enabled | default(false) %}
ssl = {{ fulcrum_ssl_bind }}:{{ fulcrum_ssl_port }}
cert = {{ fulcrum_ssl_cert_path }}
key = {{ fulcrum_ssl_key_path }}
{% endif %}
# Anonymize client IP addresses and TxIDs in logs
anon_logs = {{ 'true' if fulcrum_anon_logs else 'false' }}
# Max RocksDB Memory in MiB
db_mem = {{ fulcrum_db_mem_mb }}.0
# Banner
banner = {{ fulcrum_lib_dir }}/fulcrum-banner.txt
owner: "{{ fulcrum_user }}"
group: "{{ fulcrum_group }}"
mode: '0640'
notify: Restart fulcrum
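    # Rendered fulcrum.conf sketch (values are illustrative, SSL enabled):
    #   bitcoind = 127.0.0.1:8332
    #   rpcuser = fulcrum
    #   rpcpassword = <secret>
    #   datadir = /mnt/fulcrum-db
    #   tcp = 0.0.0.0:50001
    #   ssl = 0.0.0.0:50002
    #   db_mem = 12288.0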
- name: Create systemd service file for Fulcrum
copy:
dest: /etc/systemd/system/fulcrum.service
content: |
# MiniBolt: systemd unit for Fulcrum
# /etc/systemd/system/fulcrum.service
[Unit]
Description=Fulcrum
After=network.target
StartLimitBurst=2
StartLimitIntervalSec=20
[Service]
ExecStart={{ fulcrum_binary_path }} {{ fulcrum_config_dir }}/fulcrum.conf
User={{ fulcrum_user }}
Group={{ fulcrum_group }}
# Process management
####################
Type=simple
KillSignal=SIGINT
TimeoutStopSec=300
[Install]
WantedBy=multi-user.target
owner: root
group: root
mode: '0644'
notify: Restart fulcrum
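    # Quick checks after deployment:
    #   systemctl status fulcrum
    #   journalctl -fu fulcrum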
- name: Reload systemd daemon
systemd:
daemon_reload: yes
- name: Enable and start Fulcrum service
systemd:
name: fulcrum
enabled: yes
state: started
- name: Wait for Fulcrum to start
wait_for:
port: "{{ fulcrum_tcp_port }}"
host: "{{ fulcrum_tcp_bind }}"
delay: 5
timeout: 30
ignore_errors: yes
    - name: Check Fulcrum service status
      systemd:
        name: fulcrum
        # the systemd module requires a desired state; started is a no-op here
        state: started
      register: fulcrum_service_status
      changed_when: false
- name: Display Fulcrum service status
debug:
msg: "Fulcrum service is {{ 'running' if fulcrum_service_status.status.ActiveState == 'active' else 'not running' }}"
- name: Create Fulcrum health check and push script
copy:
dest: /usr/local/bin/fulcrum-healthcheck-push.sh
content: |
#!/bin/bash
#
# Fulcrum Health Check and Push to Uptime Kuma
# Checks if Fulcrum TCP port is responding and pushes status to Uptime Kuma
#
FULCRUM_HOST="{{ fulcrum_tcp_bind }}"
FULCRUM_PORT={{ fulcrum_tcp_port }}
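          # UPTIME_KUMA_PUSH_URL is injected by the systemd unit via Environment=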
UPTIME_KUMA_PUSH_URL="${UPTIME_KUMA_PUSH_URL}"
# Check if Fulcrum TCP port is responding
check_fulcrum() {
# Try to connect to TCP port
timeout 5 bash -c "echo > /dev/tcp/${FULCRUM_HOST}/${FULCRUM_PORT}" 2>/dev/null
return $?
}
# Push status to Uptime Kuma
push_to_uptime_kuma() {
local status=$1
local msg=$2
if [ -z "$UPTIME_KUMA_PUSH_URL" ]; then
echo "ERROR: UPTIME_KUMA_PUSH_URL not set"
return 1
fi
# URL encode spaces in message
local encoded_msg="${msg// /%20}"
if ! curl -s --max-time 10 --retry 2 -o /dev/null \
"${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${encoded_msg}&ping="; then
echo "ERROR: Failed to push to Uptime Kuma"
return 1
fi
}
# Main health check
if check_fulcrum; then
push_to_uptime_kuma "up" "OK"
exit 0
else
push_to_uptime_kuma "down" "Fulcrum TCP port not responding"
exit 1
fi
owner: root
group: root
mode: '0755'
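    # Manual test (push URL is illustrative):
    #   UPTIME_KUMA_PUSH_URL="https://kuma.example.com/api/push/<token>" \
    #     /usr/local/bin/fulcrum-healthcheck-push.sh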
- name: Create systemd timer for Fulcrum health check
copy:
dest: /etc/systemd/system/fulcrum-healthcheck.timer
content: |
[Unit]
Description=Fulcrum Health Check Timer
Requires=fulcrum.service
[Timer]
OnBootSec=1min
OnUnitActiveSec=1min
Persistent=true
[Install]
WantedBy=timers.target
owner: root
group: root
mode: '0644'
- name: Create systemd service for Fulcrum health check
copy:
dest: /etc/systemd/system/fulcrum-healthcheck.service
content: |
[Unit]
Description=Fulcrum Health Check and Push to Uptime Kuma
After=network.target fulcrum.service
[Service]
Type=oneshot
User=root
ExecStart=/usr/local/bin/fulcrum-healthcheck-push.sh
Environment=UPTIME_KUMA_PUSH_URL=
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
owner: root
group: root
mode: '0644'
- name: Reload systemd daemon for health check
systemd:
daemon_reload: yes
- name: Enable and start Fulcrum health check timer
systemd:
name: fulcrum-healthcheck.timer
enabled: yes
state: started
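    # Verify the schedule with: systemctl list-timers fulcrum-healthcheck.timer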
- name: Create Uptime Kuma push monitor setup script for Fulcrum
delegate_to: localhost
become: no
copy:
dest: /tmp/setup_fulcrum_monitor.py
content: |
#!/usr/bin/env python3
import sys
import traceback
import yaml
from uptime_kuma_api import UptimeKumaApi, MonitorType
try:
# Load configs
with open('/tmp/ansible_config.yml', 'r') as f:
config = yaml.safe_load(f)
url = config['uptime_kuma_url']
username = config['username']
password = config['password']
monitor_name = config['monitor_name']
# Connect to Uptime Kuma
api = UptimeKumaApi(url, timeout=30)
api.login(username, password)
# Get all monitors
monitors = api.get_monitors()
# Find or create "services" group
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if not group:
group_result = api.add_monitor(type='group', name='services')
# Refresh to get the group with id
monitors = api.get_monitors()
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
# Check if monitor already exists
existing_monitor = None
for monitor in monitors:
if monitor.get('name') == monitor_name:
existing_monitor = monitor
break
# Get ntfy notification ID
notifications = api.get_notifications()
ntfy_notification_id = None
for notif in notifications:
if notif.get('type') == 'ntfy':
ntfy_notification_id = notif.get('id')
break
if existing_monitor:
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
push_token = existing_monitor.get('pushToken') or existing_monitor.get('push_token')
if not push_token:
raise ValueError("Could not find push token for monitor")
push_url = f"{url}/api/push/{push_token}"
print(f"Push URL: {push_url}")
else:
print(f"Creating push monitor '{monitor_name}'...")
api.add_monitor(
type=MonitorType.PUSH,
name=monitor_name,
parent=group['id'],
interval=60,
maxretries=3,
retryInterval=60,
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
)
monitors = api.get_monitors()
new_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
if new_monitor:
push_token = new_monitor.get('pushToken') or new_monitor.get('push_token')
if not push_token:
raise ValueError("Could not find push token for new monitor")
push_url = f"{url}/api/push/{push_token}"
print(f"Push URL: {push_url}")
api.disconnect()
print("SUCCESS")
except Exception as e:
error_msg = str(e) if str(e) else repr(e)
print(f"ERROR: {error_msg}", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
sys.exit(1)
mode: '0755'
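    # The script above assumes the uptime-kuma-api package is installed on the
    # Ansible controller (pip install uptime-kuma-api).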
- name: Create temporary config for monitor setup
delegate_to: localhost
become: no
copy:
dest: /tmp/ansible_config.yml
content: |
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
username: "{{ uptime_kuma_username }}"
password: "{{ uptime_kuma_password }}"
monitor_name: "Fulcrum"
mode: '0644'
- name: Run Uptime Kuma push monitor setup
command: python3 /tmp/setup_fulcrum_monitor.py
delegate_to: localhost
become: no
register: monitor_setup
changed_when: "'SUCCESS' in monitor_setup.stdout"
ignore_errors: yes
- name: Extract push URL from monitor setup output
set_fact:
        uptime_kuma_push_url: "{{ monitor_setup.stdout | default('') | regex_search('Push URL: (https?://[^\\s]+)', '\\1') | default([''], true) | first }}"
delegate_to: localhost
become: no
when: monitor_setup.stdout is defined
- name: Display extracted push URL
debug:
msg: "Uptime Kuma Push URL: {{ uptime_kuma_push_url }}"
when: uptime_kuma_push_url | default('') != ''
- name: Set push URL in systemd service environment
lineinfile:
path: /etc/systemd/system/fulcrum-healthcheck.service
regexp: '^Environment=UPTIME_KUMA_PUSH_URL='
line: "Environment=UPTIME_KUMA_PUSH_URL={{ uptime_kuma_push_url }}"
state: present
insertafter: '^\[Service\]'
when: uptime_kuma_push_url | default('') != ''
- name: Reload systemd daemon after push URL update
systemd:
daemon_reload: yes
when: uptime_kuma_push_url | default('') != ''
- name: Restart health check timer to pick up new environment
systemd:
name: fulcrum-healthcheck.timer
state: restarted
when: uptime_kuma_push_url | default('') != ''
    - name: Clean up temporary files on the controller
      delegate_to: localhost
      become: no
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/setup_fulcrum_monitor.py
        - /tmp/ansible_config.yml
    - name: Clean up Fulcrum download artifacts on the target host
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz
        - /tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux
handlers:
- name: Restart fulcrum
systemd:
name: fulcrum
state: restarted
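# ===========================================
# Public SSL forwarding on vipy
# ===========================================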
- name: Setup public Fulcrum SSL forwarding on vipy via systemd-socket-proxyd
hosts: vipy
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./fulcrum_vars.yml
vars:
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
tasks:
- name: Create Fulcrum SSL proxy socket unit
copy:
dest: /etc/systemd/system/fulcrum-ssl-proxy.socket
content: |
[Unit]
Description=Fulcrum SSL Proxy Socket
[Socket]
ListenStream={{ fulcrum_ssl_port }}
[Install]
WantedBy=sockets.target
owner: root
group: root
mode: '0644'
notify: Restart fulcrum-ssl-proxy socket
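    # systemd-socket-proxyd is socket-activated: the .socket unit listens on the
    # public port and pulls in the matching .service on the first connection.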
    - name: Create Fulcrum SSL proxy service unit
      copy:
        dest: /etc/systemd/system/fulcrum-ssl-proxy.service
        content: |
          [Unit]
          Description=Fulcrum SSL Proxy to {{ fulcrum_tailscale_hostname }}
          Requires=fulcrum-ssl-proxy.socket
          After=network.target
          [Service]
          Type=notify
          ExecStart=/lib/systemd/systemd-socket-proxyd {{ fulcrum_tailscale_hostname }}:{{ fulcrum_ssl_port }}
        owner: root
        group: root
        mode: '0644'
      notify: Restart fulcrum-ssl-proxy socket
- name: Reload systemd daemon
systemd:
daemon_reload: yes
- name: Enable and start Fulcrum SSL proxy socket
systemd:
name: fulcrum-ssl-proxy.socket
enabled: yes
state: started
- name: Allow Fulcrum SSL port through UFW
ufw:
rule: allow
port: "{{ fulcrum_ssl_port | string }}"
proto: tcp
comment: "Fulcrum SSL public access"
- name: Verify connectivity to fulcrum-box via Tailscale
wait_for:
host: "{{ fulcrum_tailscale_hostname }}"
port: "{{ fulcrum_ssl_port }}"
timeout: 10
ignore_errors: yes
- name: Display public endpoint
debug:
msg: "Fulcrum SSL public endpoint: {{ ansible_host }}:{{ fulcrum_ssl_port }}"
# ===========================================
# Uptime Kuma TCP Monitor for Public SSL Port
# ===========================================
- name: Create Uptime Kuma TCP monitor setup script for Fulcrum SSL
delegate_to: localhost
become: no
copy:
dest: /tmp/setup_fulcrum_ssl_tcp_monitor.py
content: |
#!/usr/bin/env python3
import sys
import traceback
import yaml
from uptime_kuma_api import UptimeKumaApi, MonitorType
try:
with open('/tmp/ansible_fulcrum_ssl_config.yml', 'r') as f:
config = yaml.safe_load(f)
url = config['uptime_kuma_url']
username = config['username']
password = config['password']
monitor_host = config['monitor_host']
monitor_port = config['monitor_port']
monitor_name = config['monitor_name']
api = UptimeKumaApi(url, timeout=30)
api.login(username, password)
monitors = api.get_monitors()
# Find or create "services" group
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if not group:
api.add_monitor(type='group', name='services')
monitors = api.get_monitors()
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
# Check if monitor already exists
existing = next((m for m in monitors if m.get('name') == monitor_name), None)
# Get ntfy notification ID
notifications = api.get_notifications()
ntfy_notification_id = None
for notif in notifications:
if notif.get('type') == 'ntfy':
ntfy_notification_id = notif.get('id')
break
if existing:
print(f"Monitor '{monitor_name}' already exists (ID: {existing['id']})")
print("Skipping - monitor already configured")
else:
print(f"Creating TCP monitor '{monitor_name}'...")
api.add_monitor(
type=MonitorType.PORT,
name=monitor_name,
hostname=monitor_host,
port=monitor_port,
parent=group['id'],
interval=60,
maxretries=3,
retryInterval=60,
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
)
api.disconnect()
print("SUCCESS")
          except Exception as e:
              print(f"ERROR: {str(e) or repr(e)}", file=sys.stderr)
              traceback.print_exc(file=sys.stderr)
              sys.exit(1)
mode: '0755'
- name: Create temporary config for TCP monitor setup
delegate_to: localhost
become: no
copy:
dest: /tmp/ansible_fulcrum_ssl_config.yml
content: |
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
username: "{{ uptime_kuma_username }}"
password: "{{ uptime_kuma_password }}"
monitor_host: "{{ ansible_host }}"
monitor_port: {{ fulcrum_ssl_port }}
monitor_name: "Fulcrum SSL Public"
mode: '0644'
- name: Run Uptime Kuma TCP monitor setup
command: python3 /tmp/setup_fulcrum_ssl_tcp_monitor.py
delegate_to: localhost
become: no
register: tcp_monitor_setup
changed_when: "'SUCCESS' in tcp_monitor_setup.stdout"
ignore_errors: yes
- name: Display TCP monitor setup output
debug:
msg: "{{ tcp_monitor_setup.stdout_lines }}"
when: tcp_monitor_setup.stdout is defined
- name: Clean up TCP monitor temporary files
delegate_to: localhost
become: no
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/setup_fulcrum_ssl_tcp_monitor.py
- /tmp/ansible_fulcrum_ssl_config.yml
handlers:
- name: Restart fulcrum-ssl-proxy socket
systemd:
name: fulcrum-ssl-proxy.socket
state: restarted