bitcoin node stuff
parent 2893bb77cd
commit 8863f800bf
8 changed files with 573 additions and 206 deletions
@@ -44,7 +44,7 @@
       shell: >
         ssh {{ ssh_args }}
         {{ headscale_user }}@{{ headscale_host }}
-        "sudo headscale preauthkeys create --user {{ headscale_user_id }} --expiration 1m --output json"
+        "sudo headscale preauthkeys create --user {{ headscale_user_id }} --expiration 10m --output json"
       register: preauth_key_result
       changed_when: true
       failed_when: preauth_key_result.rc != 0
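A key minted this way is consumed by the joining client; roughly (login-server URL and flag names are assumptions, not part of this change):

    tailscale up --login-server https://headscale.example.com --authkey <key>

The longer 10m expiration leaves more room between minting the key and the client actually using it.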
@@ -22,3 +22,7 @@ headscale_ui_password: "your_secure_password_here"

 bitcoin_rpc_user: "bitcoinrpc"
 bitcoin_rpc_password: "CHANGE_ME_TO_SECURE_PASSWORD"
+
+# Mempool MariaDB credentials
+# Used by: services/mempool/deploy_mempool_playbook.yml
+mariadb_mempool_password: "CHANGE_ME_TO_SECURE_PASSWORD"
@@ -14,8 +14,7 @@ bitcoin_conf_dir: /etc/bitcoin
 # Network
 bitcoin_rpc_port: 8332
 bitcoin_p2p_port: 8333
-bitcoin_rpc_bind: "127.0.0.1"  # Security: localhost only
-bitcoin_tailscale_interface: tailscale0  # Tailscale interface for UFW rules
+bitcoin_rpc_bind: "0.0.0.0"

 # Build options
 bitcoin_build_jobs: 4  # Parallel build jobs (-j flag), adjust based on CPU cores
@@ -23,10 +22,17 @@ bitcoin_build_prefix: /usr/local

 # Configuration options
 bitcoin_enable_txindex: true  # Set to true if transaction index needed (REQUIRED for Electrum servers like Electrs/ElectrumX)
-bitcoin_enable_prune: false  # Set to prune amount (e.g., 550) to enable pruning, false for full node (MUST be false for Electrum servers)
 bitcoin_max_connections: 125
 # dbcache will be calculated as 90% of host RAM automatically in playbook

+# ZMQ Configuration
+bitcoin_zmq_enabled: true
+bitcoin_zmq_bind: "tcp://0.0.0.0"
+bitcoin_zmq_port_rawblock: 28332
+bitcoin_zmq_port_rawtx: 28333
+bitcoin_zmq_port_hashblock: 28334
+bitcoin_zmq_port_hashtx: 28335
+
 # Service user
 bitcoin_user: bitcoin
 bitcoin_group: bitcoin
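The dbcache sizing noted in the comment happens in the playbook itself; as a sketch of the same arithmetic done on the host (not the playbook's actual task):

    # 90% of MemTotal, converted from kB to MB
    awk '/MemTotal/ {printf "dbcache=%d\n", $2/1024*0.9}' /proc/meminfo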
@@ -137,7 +137,6 @@
       changed_when: false
       when: not bitcoind_binary_exists.stat.exists

-
     - name: Download SHA256SUMS file
       get_url:
         url: "https://bitcoinknots.org/files/{{ bitcoin_version_major }}.x/{{ bitcoin_knots_version_short }}/SHA256SUMS"
@@ -156,7 +155,7 @@
       command: gpg --verify /tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS.asc /tmp/bitcoin-knots-{{ bitcoin_knots_version_short }}-SHA256SUMS
       register: sha256sums_verification
       changed_when: false
-      failed_when: sha256sums_verification.rc != 0
+      failed_when: false  # Don't fail here - check for 'Good signature' in next task
       when: not bitcoind_binary_exists.stat.exists

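The "next task" that comment refers to is outside this hunk; a minimal sketch of such a check (paths shortened, illustrative only):

    gpg --verify SHA256SUMS.asc SHA256SUMS 2>&1 | grep -q 'Good signature' \
      && echo "signature OK" || exit 1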
@@ -260,6 +259,7 @@
           -DCMAKE_INSTALL_PREFIX={{ bitcoin_build_prefix }}
           -DBUILD_BITCOIN_WALLET=OFF
           -DCMAKE_BUILD_TYPE=Release
+          -DWITH_ZMQ=ON
           ..
       args:
         chdir: "{{ bitcoin_knots_source_dir }}/build"
@@ -267,6 +267,15 @@
       register: configure_result
       changed_when: true

+    - name: Verify CMake enabled ZMQ
+      shell: |
+        set -e
+        cd "{{ bitcoin_knots_source_dir }}/build"
+        cmake -LAH .. | grep -iE 'ZMQ|WITH_ZMQ|ENABLE_ZMQ|USE_ZMQ'
+      when: not bitcoind_binary_exists.stat.exists and cmake_exists.stat.exists | default(false)
+      register: zmq_check
+      changed_when: false
+
     - name: Fail if CMakeLists.txt not found
       fail:
         msg: "CMakeLists.txt not found in {{ bitcoin_knots_source_dir }}. Cannot build Bitcoin Knots."
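When the new -DWITH_ZMQ=ON flag takes effect, the verification task's grep should surface the cached option, along the lines of (illustrative output; the exact cache entry may differ by Knots version):

    $ cmake -LAH .. | grep -iE 'ZMQ|WITH_ZMQ|ENABLE_ZMQ|USE_ZMQ'
    WITH_ZMQ:BOOL=ON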
@@ -336,7 +345,7 @@
           rpcpassword={{ bitcoin_rpc_password }}
           rpcbind={{ bitcoin_rpc_bind }}
           rpcport={{ bitcoin_rpc_port }}
-          rpcallowip=127.0.0.1
+          rpcallowip=0.0.0.0/0

           # Network Configuration
           listen=1
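With the bind address and allow-list opened up, a remote client such as Fulcrum can reach the RPC port; a quick manual probe from another box (host and credentials are placeholders matching the vars in this change):

    curl -s --user bitcoinrpc:CHANGE_ME --data-binary \
      '{"jsonrpc":"1.0","id":"probe","method":"getblockchaininfo","params":[]}' \
      -H 'content-type: text/plain;' http://192.168.1.140:8332/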
@@ -351,14 +360,17 @@
           txindex=1
           {% endif %}

-          # Pruning (optional)
-          {% if bitcoin_enable_prune %}
-          prune={{ bitcoin_enable_prune }}
-          {% endif %}
-
-          # Logging
+          # Logging (to journald via systemd)
           logtimestamps=1
-          logfile={{ bitcoin_data_dir }}/debug.log
+          printtoconsole=1
+
+          # ZMQ Configuration
+          {% if bitcoin_zmq_enabled | default(false) %}
+          zmqpubrawblock={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_rawblock }}
+          zmqpubrawtx={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_rawtx }}
+          zmqpubhashblock={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_hashblock }}
+          zmqpubhashtx={{ bitcoin_zmq_bind }}:{{ bitcoin_zmq_port_hashtx }}
+          {% endif %}

           # Security
           disablewallet=1
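Once bitcoind restarts with this template, the ZMQ publishers can be confirmed over RPC:

    $ bitcoin-cli getzmqnotifications
    [{"type":"pubrawblock","address":"tcp://0.0.0.0:28332","hwm":1000}, ...]

(output abbreviated; the addresses follow the ports configured above).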
@@ -427,33 +439,6 @@
       debug:
         msg: "Bitcoin Knots RPC is {{ 'available' if rpc_check.status == 200 else 'not yet available' }}"

-    - name: Allow Bitcoin P2P port on Tailscale interface only
-      ufw:
-        rule: allow
-        direction: in
-        port: "{{ bitcoin_p2p_port }}"
-        proto: tcp
-        interface: "{{ bitcoin_tailscale_interface }}"
-        comment: "Bitcoin Knots P2P (Tailscale only)"
-
-    - name: Allow Bitcoin P2P port (UDP) on Tailscale interface only
-      ufw:
-        rule: allow
-        direction: in
-        port: "{{ bitcoin_p2p_port }}"
-        proto: udp
-        interface: "{{ bitcoin_tailscale_interface }}"
-        comment: "Bitcoin Knots P2P UDP (Tailscale only)"
-
-    - name: Verify UFW rules for Bitcoin Knots
-      command: ufw status numbered
-      register: ufw_status
-      changed_when: false
-
-    - name: Display UFW status
-      debug:
-        msg: "{{ ufw_status.stdout_lines }}"
-
     - name: Create Bitcoin Knots health check and push script
       copy:
         dest: /usr/local/bin/bitcoin-knots-healthcheck-push.sh
@@ -480,11 +465,12 @@
               "http://${RPC_HOST}:${RPC_PORT}" 2>&1)

           if [ $? -eq 0 ]; then
-              # Check if response contains error
-              if echo "$response" | grep -q '"error"'; then
-                  return 1
-              else
+              # Check if response contains a non-null error
+              # Successful responses have "error": null, failures have "error": {...}
+              if echo "$response" | grep -q '"error":null\|"error": null'; then
                   return 0
+              else
+                  return 1
               fi
           else
               return 1
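For reference, the two response shapes the new grep distinguishes (sample values, illustrative only):

    ok='{"result":{"blocks":850000},"error":null,"id":"healthcheck"}'
    bad='{"result":null,"error":{"code":-28,"message":"Loading block index..."},"id":"healthcheck"}'
    echo "$ok"  | grep -q '"error":null\|"error": null' && echo "matches -> healthy"
    echo "$bad" | grep -q '"error":null\|"error": null' || echo "no match -> unhealthy"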
@@ -501,11 +487,14 @@
               return 1
           fi

-          # URL encode the message
-          local encoded_msg=$(echo -n "$msg" | curl -Gso /dev/null -w %{url_effective} --data-urlencode "msg=$msg" "" | cut -c 3-)
+          # URL encode spaces in message
+          local encoded_msg="${msg// /%20}"

-          curl -s --max-time 10 --retry 2 -o /dev/null \
-              "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${encoded_msg}&ping=" || true
+          if ! curl -s --max-time 10 --retry 2 -o /dev/null \
+              "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${encoded_msg}&ping="; then
+              echo "ERROR: Failed to push to Uptime Kuma"
+              return 1
+          fi
       }

       # Main health check
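The replacement only percent-encodes spaces, which is enough for the fixed messages this script sends; for arbitrary text, something like jq's @uri filter would be a fuller option (a sketch, assuming jq is installed):

    encoded_msg=$(printf %s "$msg" | jq -sRr @uri)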
@@ -630,14 +619,14 @@

           if existing_monitor:
               print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
-              # Get push URL from existing monitor
-              push_id = existing_monitor.get('push_token', existing_monitor.get('id'))
-              push_url = f"{url}/api/push/{push_id}"
+              push_token = existing_monitor.get('pushToken') or existing_monitor.get('push_token')
+              if not push_token:
+                  raise ValueError("Could not find push token for monitor")
+              push_url = f"{url}/api/push/{push_token}"
               print(f"Push URL: {push_url}")
-              print("Skipping - monitor already configured")
           else:
               print(f"Creating push monitor '{monitor_name}'...")
-              result = api.add_monitor(
+              api.add_monitor(
                   type=MonitorType.PUSH,
                   name=monitor_name,
                   parent=group['id'],
@@ -646,12 +635,13 @@
                   retryInterval=60,
                   notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
               )
-              # Get push URL from created monitor
               monitors = api.get_monitors()
               new_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
               if new_monitor:
-                  push_id = new_monitor.get('push_token', new_monitor.get('id'))
-                  push_url = f"{url}/api/push/{push_id}"
+                  push_token = new_monitor.get('pushToken') or new_monitor.get('push_token')
+                  if not push_token:
+                      raise ValueError("Could not find push token for new monitor")
+                  push_url = f"{url}/api/push/{push_token}"
                   print(f"Push URL: {push_url}")

           api.disconnect()
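A push monitor created this way can be exercised by hand to confirm the token resolves (URL and token are placeholders):

    curl -s "https://uptime.example.com/api/push/<pushToken>?status=up&msg=OK&ping="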
||||||
475
ansible/services/fulcrum/deploy_fulcrum_playbook.yml
Normal file
475
ansible/services/fulcrum/deploy_fulcrum_playbook.yml
Normal file
|
|
@ -0,0 +1,475 @@
|
||||||
|
- name: Deploy Fulcrum Electrum Server
|
||||||
|
hosts: fulcrum_box_local
|
||||||
|
become: yes
|
||||||
|
vars_files:
|
||||||
|
- ../../infra_vars.yml
|
||||||
|
- ../../services_config.yml
|
||||||
|
- ../../infra_secrets.yml
|
||||||
|
- ./fulcrum_vars.yml
|
||||||
|
vars:
|
||||||
|
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Calculate 75% of system RAM for db_mem
|
||||||
|
set_fact:
|
||||||
|
fulcrum_db_mem_mb: "{{ (ansible_memtotal_mb | float * fulcrum_db_mem_percent) | int }}"
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Display calculated db_mem value
|
||||||
|
debug:
|
||||||
|
msg: "Setting db_mem to {{ fulcrum_db_mem_mb }} MB ({{ (fulcrum_db_mem_percent * 100) | int }}% of {{ ansible_memtotal_mb }} MB total RAM)"
|
||||||
|
|
||||||
|
- name: Display Fulcrum version to install
|
||||||
|
debug:
|
||||||
|
msg: "Installing Fulcrum version {{ fulcrum_version }}"
|
||||||
|
|
||||||
|
- name: Install required packages
|
||||||
|
apt:
|
||||||
|
name:
|
||||||
|
- curl
|
||||||
|
- wget
|
||||||
|
- openssl
|
||||||
|
state: present
|
||||||
|
update_cache: yes
|
||||||
|
|
||||||
|
- name: Create fulcrum group
|
||||||
|
group:
|
||||||
|
name: "{{ fulcrum_group }}"
|
||||||
|
system: yes
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Create fulcrum user
|
||||||
|
user:
|
||||||
|
name: "{{ fulcrum_user }}"
|
||||||
|
group: "{{ fulcrum_group }}"
|
||||||
|
system: yes
|
||||||
|
shell: /usr/sbin/nologin
|
||||||
|
home: /home/{{ fulcrum_user }}
|
||||||
|
create_home: yes
|
||||||
|
state: present
|
||||||
|
|
||||||
|
- name: Create Fulcrum database directory (heavy data on special mount)
|
||||||
|
file:
|
||||||
|
path: "{{ fulcrum_db_dir }}"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ fulcrum_user }}"
|
||||||
|
group: "{{ fulcrum_group }}"
|
||||||
|
mode: '0755'
|
||||||
|
|
||||||
|
- name: Create Fulcrum config directory
|
||||||
|
file:
|
||||||
|
path: "{{ fulcrum_config_dir }}"
|
||||||
|
state: directory
|
||||||
|
owner: root
|
||||||
|
group: "{{ fulcrum_group }}"
|
||||||
|
mode: '0755'
|
||||||
|
|
||||||
|
- name: Create Fulcrum lib directory (for banner and other data files)
|
||||||
|
file:
|
||||||
|
path: "{{ fulcrum_lib_dir }}"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ fulcrum_user }}"
|
||||||
|
group: "{{ fulcrum_group }}"
|
||||||
|
mode: '0755'
|
||||||
|
|
||||||
|
- name: Check if Fulcrum binary already exists
|
||||||
|
stat:
|
||||||
|
path: "{{ fulcrum_binary_path }}"
|
||||||
|
register: fulcrum_binary_exists
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Download Fulcrum binary tarball
|
||||||
|
get_url:
|
||||||
|
url: "https://github.com/cculianu/Fulcrum/releases/download/v{{ fulcrum_version }}/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
|
||||||
|
dest: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
|
||||||
|
mode: '0644'
|
||||||
|
when: not fulcrum_binary_exists.stat.exists
|
||||||
|
|
||||||
|
- name: Extract Fulcrum binary
|
||||||
|
unarchive:
|
||||||
|
src: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz"
|
||||||
|
dest: "/tmp"
|
||||||
|
remote_src: yes
|
||||||
|
when: not fulcrum_binary_exists.stat.exists
|
||||||
|
|
||||||
|
- name: Install Fulcrum binary
|
||||||
|
copy:
|
||||||
|
src: "/tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux/Fulcrum"
|
||||||
|
dest: "{{ fulcrum_binary_path }}"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0755'
|
||||||
|
remote_src: yes
|
||||||
|
when: not fulcrum_binary_exists.stat.exists
|
||||||
|
|
||||||
|
- name: Verify Fulcrum binary installation
|
||||||
|
command: "{{ fulcrum_binary_path }} --version"
|
||||||
|
register: fulcrum_version_check
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Display Fulcrum version
|
||||||
|
debug:
|
||||||
|
msg: "{{ fulcrum_version_check.stdout_lines }}"
|
||||||
|
|
||||||
|
- name: Create Fulcrum banner file
|
||||||
|
copy:
|
||||||
|
dest: "{{ fulcrum_lib_dir }}/fulcrum-banner.txt"
|
||||||
|
content: |
|
||||||
|
counterinfra
|
||||||
|
|
||||||
|
PER ASPERA AD ASTRA
|
||||||
|
owner: "{{ fulcrum_user }}"
|
||||||
|
group: "{{ fulcrum_group }}"
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Create Fulcrum configuration file
|
||||||
|
copy:
|
||||||
|
dest: "{{ fulcrum_config_dir }}/fulcrum.conf"
|
||||||
|
content: |
|
||||||
|
# Fulcrum Configuration
|
||||||
|
# Generated by Ansible
|
||||||
|
|
||||||
|
# Bitcoin Core/Knots RPC settings
|
||||||
|
bitcoind = {{ bitcoin_rpc_host }}:{{ bitcoin_rpc_port }}
|
||||||
|
rpcuser = {{ bitcoin_rpc_user }}
|
||||||
|
rpcpassword = {{ bitcoin_rpc_password }}
|
||||||
|
|
||||||
|
# Fulcrum server general settings
|
||||||
|
datadir = {{ fulcrum_db_dir }}
|
||||||
|
tcp = {{ fulcrum_tcp_bind }}:{{ fulcrum_tcp_port }}
|
||||||
|
peering = {{ 'true' if fulcrum_peering else 'false' }}
|
||||||
|
zmq_allow_hashtx = {{ 'true' if fulcrum_zmq_allow_hashtx else 'false' }}
|
||||||
|
|
||||||
|
# Anonymize client IP addresses and TxIDs in logs
|
||||||
|
anon_logs = {{ 'true' if fulcrum_anon_logs else 'false' }}
|
||||||
|
|
||||||
|
# Max RocksDB Memory in MiB
|
||||||
|
db_mem = {{ fulcrum_db_mem_mb }}.0
|
||||||
|
|
||||||
|
# Banner
|
||||||
|
banner = {{ fulcrum_lib_dir }}/fulcrum-banner.txt
|
||||||
|
owner: "{{ fulcrum_user }}"
|
||||||
|
group: "{{ fulcrum_group }}"
|
||||||
|
mode: '0640'
|
||||||
|
notify: Restart fulcrum
|
||||||
|
|
||||||
|
- name: Create systemd service file for Fulcrum
|
||||||
|
copy:
|
||||||
|
dest: /etc/systemd/system/fulcrum.service
|
||||||
|
content: |
|
||||||
|
# MiniBolt: systemd unit for Fulcrum
|
||||||
|
# /etc/systemd/system/fulcrum.service
|
||||||
|
|
||||||
|
[Unit]
|
||||||
|
Description=Fulcrum
|
||||||
|
After=network.target
|
||||||
|
|
||||||
|
StartLimitBurst=2
|
||||||
|
StartLimitIntervalSec=20
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
ExecStart={{ fulcrum_binary_path }} {{ fulcrum_config_dir }}/fulcrum.conf
|
||||||
|
|
||||||
|
User={{ fulcrum_user }}
|
||||||
|
Group={{ fulcrum_group }}
|
||||||
|
|
||||||
|
# Process management
|
||||||
|
####################
|
||||||
|
Type=simple
|
||||||
|
KillSignal=SIGINT
|
||||||
|
TimeoutStopSec=300
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
notify: Restart fulcrum
|
||||||
|
|
||||||
|
- name: Reload systemd daemon
|
||||||
|
systemd:
|
||||||
|
daemon_reload: yes
|
||||||
|
|
||||||
|
- name: Enable and start Fulcrum service
|
||||||
|
systemd:
|
||||||
|
name: fulcrum
|
||||||
|
enabled: yes
|
||||||
|
state: started
|
||||||
|
|
||||||
|
- name: Wait for Fulcrum to start
|
||||||
|
wait_for:
|
||||||
|
port: "{{ fulcrum_tcp_port }}"
|
||||||
|
host: "{{ fulcrum_tcp_bind }}"
|
||||||
|
delay: 5
|
||||||
|
timeout: 30
|
||||||
|
ignore_errors: yes
|
||||||
|
|
||||||
|
- name: Check Fulcrum service status
|
||||||
|
systemd:
|
||||||
|
name: fulcrum
|
||||||
|
register: fulcrum_service_status
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Display Fulcrum service status
|
||||||
|
debug:
|
||||||
|
msg: "Fulcrum service is {{ 'running' if fulcrum_service_status.status.ActiveState == 'active' else 'not running' }}"
|
||||||
|
|
||||||
|
- name: Create Fulcrum health check and push script
|
||||||
|
copy:
|
||||||
|
dest: /usr/local/bin/fulcrum-healthcheck-push.sh
|
||||||
|
content: |
|
||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Fulcrum Health Check and Push to Uptime Kuma
|
||||||
|
# Checks if Fulcrum TCP port is responding and pushes status to Uptime Kuma
|
||||||
|
#
|
||||||
|
|
||||||
|
FULCRUM_HOST="{{ fulcrum_tcp_bind }}"
|
||||||
|
FULCRUM_PORT={{ fulcrum_tcp_port }}
|
||||||
|
UPTIME_KUMA_PUSH_URL="${UPTIME_KUMA_PUSH_URL}"
|
||||||
|
|
||||||
|
# Check if Fulcrum TCP port is responding
|
||||||
|
check_fulcrum() {
|
||||||
|
# Try to connect to TCP port
|
||||||
|
timeout 5 bash -c "echo > /dev/tcp/${FULCRUM_HOST}/${FULCRUM_PORT}" 2>/dev/null
|
||||||
|
return $?
|
||||||
|
}
|
||||||
|
|
||||||
|
# Push status to Uptime Kuma
|
||||||
|
push_to_uptime_kuma() {
|
||||||
|
local status=$1
|
||||||
|
local msg=$2
|
||||||
|
|
||||||
|
if [ -z "$UPTIME_KUMA_PUSH_URL" ]; then
|
||||||
|
echo "ERROR: UPTIME_KUMA_PUSH_URL not set"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# URL encode the message
|
||||||
|
local encoded_msg=$(echo -n "$msg" | curl -Gso /dev/null -w %{url_effective} --data-urlencode "msg=$msg" "" | cut -c 3-)
|
||||||
|
|
||||||
|
curl -s --max-time 10 --retry 2 -o /dev/null \
|
||||||
|
"${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${encoded_msg}&ping=" || true
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main health check
|
||||||
|
if check_fulcrum; then
|
||||||
|
push_to_uptime_kuma "up" "OK"
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
push_to_uptime_kuma "down" "Fulcrum TCP port not responding"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0755'
|
||||||
|
|
||||||
|
- name: Create systemd timer for Fulcrum health check
|
||||||
|
copy:
|
||||||
|
dest: /etc/systemd/system/fulcrum-healthcheck.timer
|
||||||
|
content: |
|
||||||
|
[Unit]
|
||||||
|
Description=Fulcrum Health Check Timer
|
||||||
|
Requires=fulcrum.service
|
||||||
|
|
||||||
|
[Timer]
|
||||||
|
OnBootSec=1min
|
||||||
|
OnUnitActiveSec=1min
|
||||||
|
Persistent=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=timers.target
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Create systemd service for Fulcrum health check
|
||||||
|
copy:
|
||||||
|
dest: /etc/systemd/system/fulcrum-healthcheck.service
|
||||||
|
content: |
|
||||||
|
[Unit]
|
||||||
|
Description=Fulcrum Health Check and Push to Uptime Kuma
|
||||||
|
After=network.target fulcrum.service
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=oneshot
|
||||||
|
User=root
|
||||||
|
ExecStart=/usr/local/bin/fulcrum-healthcheck-push.sh
|
||||||
|
Environment=UPTIME_KUMA_PUSH_URL=
|
||||||
|
StandardOutput=journal
|
||||||
|
StandardError=journal
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Reload systemd daemon for health check
|
||||||
|
systemd:
|
||||||
|
daemon_reload: yes
|
||||||
|
|
||||||
|
- name: Enable and start Fulcrum health check timer
|
||||||
|
systemd:
|
||||||
|
name: fulcrum-healthcheck.timer
|
||||||
|
enabled: yes
|
||||||
|
state: started
|
||||||
|
|
||||||
|
- name: Create Uptime Kuma push monitor setup script for Fulcrum
|
||||||
|
delegate_to: localhost
|
||||||
|
become: no
|
||||||
|
copy:
|
||||||
|
dest: /tmp/setup_fulcrum_monitor.py
|
||||||
|
content: |
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
import yaml
|
||||||
|
from uptime_kuma_api import UptimeKumaApi, MonitorType
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Load configs
|
||||||
|
with open('/tmp/ansible_config.yml', 'r') as f:
|
||||||
|
config = yaml.safe_load(f)
|
||||||
|
|
||||||
|
url = config['uptime_kuma_url']
|
||||||
|
username = config['username']
|
||||||
|
password = config['password']
|
||||||
|
monitor_name = config['monitor_name']
|
||||||
|
|
||||||
|
# Connect to Uptime Kuma
|
||||||
|
api = UptimeKumaApi(url, timeout=30)
|
||||||
|
api.login(username, password)
|
||||||
|
|
||||||
|
# Get all monitors
|
||||||
|
monitors = api.get_monitors()
|
||||||
|
|
||||||
|
# Find or create "services" group
|
||||||
|
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||||
|
if not group:
|
||||||
|
group_result = api.add_monitor(type='group', name='services')
|
||||||
|
# Refresh to get the group with id
|
||||||
|
monitors = api.get_monitors()
|
||||||
|
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||||
|
|
||||||
|
# Check if monitor already exists
|
||||||
|
existing_monitor = None
|
||||||
|
for monitor in monitors:
|
||||||
|
if monitor.get('name') == monitor_name:
|
||||||
|
existing_monitor = monitor
|
||||||
|
break
|
||||||
|
|
||||||
|
# Get ntfy notification ID
|
||||||
|
notifications = api.get_notifications()
|
||||||
|
ntfy_notification_id = None
|
||||||
|
for notif in notifications:
|
||||||
|
if notif.get('type') == 'ntfy':
|
||||||
|
ntfy_notification_id = notif.get('id')
|
||||||
|
break
|
||||||
|
|
||||||
|
if existing_monitor:
|
||||||
|
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
|
||||||
|
# Get push URL from existing monitor
|
||||||
|
push_id = existing_monitor.get('push_token', existing_monitor.get('id'))
|
||||||
|
push_url = f"{url}/api/push/{push_id}"
|
||||||
|
print(f"Push URL: {push_url}")
|
||||||
|
print("Skipping - monitor already configured")
|
||||||
|
else:
|
||||||
|
print(f"Creating push monitor '{monitor_name}'...")
|
||||||
|
result = api.add_monitor(
|
||||||
|
type=MonitorType.PUSH,
|
||||||
|
name=monitor_name,
|
||||||
|
parent=group['id'],
|
||||||
|
interval=60,
|
||||||
|
maxretries=3,
|
||||||
|
retryInterval=60,
|
||||||
|
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
|
||||||
|
)
|
||||||
|
# Get push URL from created monitor
|
||||||
|
monitors = api.get_monitors()
|
||||||
|
new_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
|
||||||
|
if new_monitor:
|
||||||
|
push_id = new_monitor.get('push_token', new_monitor.get('id'))
|
||||||
|
push_url = f"{url}/api/push/{push_id}"
|
||||||
|
print(f"Push URL: {push_url}")
|
||||||
|
|
||||||
|
api.disconnect()
|
||||||
|
print("SUCCESS")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
error_msg = str(e) if str(e) else repr(e)
|
||||||
|
print(f"ERROR: {error_msg}", file=sys.stderr)
|
||||||
|
traceback.print_exc(file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
mode: '0755'
|
||||||
|
|
||||||
|
- name: Create temporary config for monitor setup
|
||||||
|
delegate_to: localhost
|
||||||
|
become: no
|
||||||
|
copy:
|
||||||
|
dest: /tmp/ansible_config.yml
|
||||||
|
content: |
|
||||||
|
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
|
||||||
|
username: "{{ uptime_kuma_username }}"
|
||||||
|
password: "{{ uptime_kuma_password }}"
|
||||||
|
monitor_name: "Fulcrum"
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Run Uptime Kuma push monitor setup
|
||||||
|
command: python3 /tmp/setup_fulcrum_monitor.py
|
||||||
|
delegate_to: localhost
|
||||||
|
become: no
|
||||||
|
register: monitor_setup
|
||||||
|
changed_when: "'SUCCESS' in monitor_setup.stdout"
|
||||||
|
ignore_errors: yes
|
||||||
|
|
||||||
|
- name: Extract push URL from monitor setup output
|
||||||
|
set_fact:
|
||||||
|
uptime_kuma_push_url: "{{ monitor_setup.stdout | regex_search('Push URL: (https?://[^\\s]+)', '\\1') | first | default('') }}"
|
||||||
|
delegate_to: localhost
|
||||||
|
become: no
|
||||||
|
when: monitor_setup.stdout is defined
|
||||||
|
|
||||||
|
- name: Display extracted push URL
|
||||||
|
debug:
|
||||||
|
msg: "Uptime Kuma Push URL: {{ uptime_kuma_push_url }}"
|
||||||
|
when: uptime_kuma_push_url | default('') != ''
|
||||||
|
|
||||||
|
- name: Set push URL in systemd service environment
|
||||||
|
lineinfile:
|
||||||
|
path: /etc/systemd/system/fulcrum-healthcheck.service
|
||||||
|
regexp: '^Environment=UPTIME_KUMA_PUSH_URL='
|
||||||
|
line: "Environment=UPTIME_KUMA_PUSH_URL={{ uptime_kuma_push_url }}"
|
||||||
|
state: present
|
||||||
|
insertafter: '^\[Service\]'
|
||||||
|
when: uptime_kuma_push_url | default('') != ''
|
||||||
|
|
||||||
|
- name: Reload systemd daemon after push URL update
|
||||||
|
systemd:
|
||||||
|
daemon_reload: yes
|
||||||
|
when: uptime_kuma_push_url | default('') != ''
|
||||||
|
|
||||||
|
- name: Restart health check timer to pick up new environment
|
||||||
|
systemd:
|
||||||
|
name: fulcrum-healthcheck.timer
|
||||||
|
state: restarted
|
||||||
|
when: uptime_kuma_push_url | default('') != ''
|
||||||
|
|
||||||
|
- name: Clean up temporary files
|
||||||
|
delegate_to: localhost
|
||||||
|
become: no
|
||||||
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: absent
|
||||||
|
loop:
|
||||||
|
- /tmp/setup_fulcrum_monitor.py
|
||||||
|
- /tmp/ansible_config.yml
|
||||||
|
- /tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux.tar.gz
|
||||||
|
- /tmp/Fulcrum-{{ fulcrum_version }}-x86_64-linux
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
- name: Restart fulcrum
|
||||||
|
systemd:
|
||||||
|
name: fulcrum
|
||||||
|
state: restarted
|
||||||
|
|
||||||
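Beyond the TCP-connect health check above, the endpoint can be probed with an actual Electrum protocol request (a sketch; port matches the fulcrum_vars.yml default below):

    printf '{"jsonrpc":"2.0","id":0,"method":"server.version","params":["probe","1.4"]}\n' \
      | timeout 5 nc 127.0.0.1 50001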
ansible/services/fulcrum/fulcrum_vars.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
# Fulcrum Configuration Variables

# Version - Pinned to specific release
fulcrum_version: "2.1.0"  # Fulcrum version to install

# Directories
fulcrum_db_dir: /mnt/fulcrum_data/fulcrum_db  # Database directory (heavy data on special mount)
fulcrum_config_dir: /etc/fulcrum  # Config file location (standard OS path)
fulcrum_lib_dir: /var/lib/fulcrum  # Other data files (banner, etc.) on OS disk
fulcrum_binary_path: /usr/local/bin/Fulcrum

# Network - Bitcoin RPC connection
# Bitcoin Knots is on a different host (knots_box_local)
# Using RPC user/password authentication (credentials from infra_secrets.yml)
bitcoin_rpc_host: "192.168.1.140"  # Bitcoin Knots RPC host (IP of knots_box_local)
bitcoin_rpc_port: 8332  # Bitcoin Knots RPC port
# Note: bitcoin_rpc_user and bitcoin_rpc_password are loaded from infra_secrets.yml

# Network - Fulcrum server
fulcrum_tcp_port: 50001
# Binding address for Fulcrum TCP server:
# - "127.0.0.1" = localhost only (use when Caddy is on the same box)
# - "0.0.0.0" = all interfaces (use when Caddy is on a different box)
# - Specific IP = bind to a specific network interface
fulcrum_tcp_bind: "0.0.0.0"  # All interfaces (Caddy is on a different box)
# If Caddy is on a different box, set this to the IP address that Caddy will use to connect

# Performance
# db_mem will be calculated as 75% of available RAM automatically in playbook
fulcrum_db_mem_percent: 0.75  # 75% of RAM for database cache

# Configuration options
fulcrum_anon_logs: true  # Anonymize client IPs and TxIDs in logs
fulcrum_peering: false  # Disable peering with other Fulcrum servers
fulcrum_zmq_allow_hashtx: true  # Allow ZMQ hashtx notifications

# Service user
fulcrum_user: fulcrum
fulcrum_group: fulcrum
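The db_mem percentage maps to the playbook's set_fact on ansible_memtotal_mb; the same arithmetic done directly on the host (a sketch):

    # e.g. 16384 MB total RAM -> db_mem = 12288.0
    free -m | awk '/^Mem:/ {printf "db_mem = %d.0\n", $2*0.75}'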
@@ -90,13 +90,7 @@
       copy:
         dest: /etc/headscale/acl.json
         content: |
-          {
-            "ACLs": [],
-            "Groups": {},
-            "Hosts": {},
-            "TagOwners": {},
-            "Tests": []
-          }
+          {}
         owner: headscale
         group: headscale
         mode: '0640'
(file deleted, 142 lines)
@@ -1,142 +0,0 @@
- name: Deploy headscale-ui with Docker and configure Caddy reverse proxy
  hosts: spacey
  become: yes
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ../../infra_secrets.yml
    - ./headscale_vars.yml
  vars:
    headscale_subdomain: "{{ subdomains.headscale }}"
    caddy_sites_dir: "{{ caddy_sites_dir }}"
    headscale_domain: "{{ headscale_subdomain }}.{{ root_domain }}"
    headscale_ui_version: "2025.08.23"
    headscale_ui_dir: /opt/headscale-ui
    headscale_ui_http_port: 18080
    headscale_ui_https_port: 18443

  tasks:
    - name: Check if Docker is installed
      command: docker --version
      register: docker_check
      changed_when: false
      failed_when: false

    - name: Fail if Docker is not installed
      fail:
        msg: "Docker is not installed. Please run the docker_playbook.yml first."
      when: docker_check.rc != 0

    - name: Ensure Docker service is running
      systemd:
        name: docker
        state: started
        enabled: yes

    - name: Create headscale-ui directory
      file:
        path: "{{ headscale_ui_dir }}"
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Create docker-compose.yml for headscale-ui
      copy:
        dest: "{{ headscale_ui_dir }}/docker-compose.yml"
        content: |
          version: "3"
          services:
            headscale-ui:
              image: ghcr.io/gurucomputing/headscale-ui:{{ headscale_ui_version }}
              container_name: headscale-ui
              restart: unless-stopped
              ports:
                - "{{ headscale_ui_http_port }}:8080"
                - "{{ headscale_ui_https_port }}:8443"
        owner: root
        group: root
        mode: '0644'

    - name: Deploy headscale-ui container with docker compose
      command: docker compose up -d
      args:
        chdir: "{{ headscale_ui_dir }}"
      register: docker_compose_result
      changed_when: "'Creating' in docker_compose_result.stdout or 'Starting' in docker_compose_result.stdout or docker_compose_result.rc != 0"

    - name: Wait for headscale-ui to be ready
      uri:
        url: "http://localhost:{{ headscale_ui_http_port }}"
        status_code: [200, 404]
      register: headscale_ui_ready
      until: headscale_ui_ready.status in [200, 404]
      retries: 30
      delay: 2
      ignore_errors: yes

    - name: Ensure Caddy sites-enabled directory exists
      file:
        path: "{{ caddy_sites_dir }}"
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Ensure Caddyfile includes import directive for sites-enabled
      lineinfile:
        path: /etc/caddy/Caddyfile
        line: 'import sites-enabled/*'
        insertafter: EOF
        state: present
        backup: yes

    - name: Fail if username is not provided
      fail:
        msg: "headscale_ui_username must be set in infra_secrets.yml"
      when: headscale_ui_username is not defined

    - name: Fail if neither password nor password hash is provided
      fail:
        msg: "Either headscale_ui_password or headscale_ui_password_hash must be set in infra_secrets.yml"
      when: headscale_ui_password is not defined and headscale_ui_password_hash is not defined

    - name: Generate bcrypt hash for headscale-ui password
      become: yes
      command: caddy hash-password --plaintext "{{ headscale_ui_password }}"
      register: headscale_ui_password_hash_result
      changed_when: false
      no_log: true
      when: headscale_ui_password is defined and headscale_ui_password_hash is not defined

    - name: Set headscale-ui password hash from generated value
      set_fact:
        headscale_ui_password_hash: "{{ headscale_ui_password_hash_result.stdout.strip() }}"
      when: headscale_ui_password is defined and headscale_ui_password_hash is not defined

    - name: Update headscale Caddy config to include headscale-ui /web route with authentication
      become: yes
      copy:
        dest: "{{ caddy_sites_dir }}/headscale.conf"
        content: |
          {{ headscale_domain }} {
            @headscale_ui {
              path /web*
            }
            handle @headscale_ui {
              basicauth {
                {{ headscale_ui_username }} {{ headscale_ui_password_hash }}
              }
              reverse_proxy http://localhost:{{ headscale_ui_http_port }}
            }
            # Headscale API is protected by its own API key authentication
            # All API operations require a valid Bearer token in the Authorization header
            reverse_proxy * http://localhost:{{ headscale_port }}
          }
        owner: root
        group: root
        mode: '0644'

    - name: Reload Caddy to apply new config
      command: systemctl reload caddy