- name: Deploy Mempool Block Explorer with Docker
  hosts: mempool_box_local
  become: yes
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ../../infra_secrets.yml
    - ./mempool_vars.yml
  vars:
    mempool_subdomain: "{{ subdomains.mempool }}"
    mempool_domain: "{{ mempool_subdomain }}.{{ root_domain }}"
    uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
  tasks:
    # ===========================================
    # Docker Installation (from 910_docker_playbook.yml)
    # ===========================================
    - name: Remove old Docker-related packages
      apt:
        name:
          - docker.io
          - docker-doc
          - docker-compose
          - podman-docker
          - containerd
          - runc
        state: absent
        purge: yes
        autoremove: yes

    - name: Update apt cache
      apt:
        update_cache: yes
    - name: Install prerequisites
      apt:
        name:
          - ca-certificates
          - curl
        state: present

    - name: Create directory for Docker GPG key
      file:
        path: /etc/apt/keyrings
        state: directory
        mode: '0755'

    - name: Download Docker GPG key
      get_url:
        url: https://download.docker.com/linux/debian/gpg
        dest: /etc/apt/keyrings/docker.asc
        mode: '0644'
    - name: Get Debian architecture
      command: dpkg --print-architecture
      register: deb_arch
      changed_when: false

    - name: Add Docker repository
      apt_repository:
        repo: "deb [arch={{ deb_arch.stdout }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
        filename: docker
        state: present
        update_cache: yes

    - name: Install Docker packages
      apt:
        name:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-buildx-plugin
          - docker-compose-plugin
        state: present
        update_cache: yes
    - name: Ensure Docker is started and enabled
      systemd:
        name: docker
        enabled: yes
        state: started

    - name: Add user to docker group
      user:
        name: "{{ ansible_user }}"
        groups: docker
        append: yes
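
    # Note: the docker group membership added above only takes effect on the
    # user's next login session. A quick manual sanity check on the host
    # (a sketch, assuming the installation above succeeded):
    #
    #   docker --version             # Docker Engine version string
    #   docker compose version       # Compose v2 plugin version
    #   systemctl is-active docker   # should print "active"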
    # ===========================================
    # Mempool Deployment
    # ===========================================
    - name: Create mempool directories
      file:
        path: "{{ item }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0755'
      loop:
        - "{{ mempool_dir }}"
        - "{{ mempool_data_dir }}"
        - "{{ mempool_mysql_dir }}"
    - name: Create docker-compose.yml for Mempool
      copy:
        dest: "{{ mempool_dir }}/docker-compose.yml"
        content: |
          # All containers use host network for Tailscale MagicDNS resolution
          services:
            mariadb:
              image: mariadb:10.11
              container_name: mempool-db
              restart: unless-stopped
              network_mode: host
              environment:
                MYSQL_DATABASE: "{{ mariadb_database }}"
                MYSQL_USER: "{{ mariadb_user }}"
                MYSQL_PASSWORD: "{{ mariadb_mempool_password }}"
                MYSQL_ROOT_PASSWORD: "{{ mariadb_mempool_password }}"
              volumes:
                - {{ mempool_mysql_dir }}:/var/lib/mysql
              healthcheck:
                test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
                interval: 10s
                timeout: 5s
                retries: 5
                start_period: 30s

            mempool-backend:
              image: mempool/backend:{{ mempool_version }}
              container_name: mempool-backend
              restart: unless-stopped
              network_mode: host
              environment:
                # Database (localhost since all containers share host network)
                DATABASE_ENABLED: "true"
                DATABASE_HOST: "127.0.0.1"
                DATABASE_DATABASE: "{{ mariadb_database }}"
                DATABASE_USERNAME: "{{ mariadb_user }}"
                DATABASE_PASSWORD: "{{ mariadb_mempool_password }}"
                # Bitcoin Core/Knots (via Tailnet MagicDNS)
                CORE_RPC_HOST: "{{ bitcoin_host }}"
                CORE_RPC_PORT: "{{ bitcoin_rpc_port }}"
                CORE_RPC_USERNAME: "{{ bitcoin_rpc_user }}"
                CORE_RPC_PASSWORD: "{{ bitcoin_rpc_password }}"
                # Electrum (Fulcrum via Tailnet MagicDNS)
                ELECTRUM_HOST: "{{ fulcrum_host }}"
                ELECTRUM_PORT: "{{ fulcrum_port }}"
                ELECTRUM_TLS_ENABLED: "{{ fulcrum_tls }}"
                # Mempool settings
                MEMPOOL_NETWORK: "{{ mempool_network }}"
                MEMPOOL_BACKEND: "electrum"
                MEMPOOL_CLEAR_PROTECTION_MINUTES: "20"
                MEMPOOL_INDEXING_BLOCKS_AMOUNT: "52560"
              volumes:
                - {{ mempool_data_dir }}:/backend/cache
              depends_on:
                mariadb:
                  condition: service_healthy
              healthcheck:
                test: ["CMD", "curl", "-f", "http://localhost:8999/api/v1/backend-info"]
                interval: 30s
                timeout: 10s
                retries: 3
                start_period: 60s

            mempool-frontend:
              image: mempool/frontend:{{ mempool_version }}
              container_name: mempool-frontend
              restart: unless-stopped
              network_mode: host
              environment:
                FRONTEND_HTTP_PORT: "{{ mempool_frontend_port }}"
                BACKEND_MAINNET_HTTP_HOST: "127.0.0.1"
              depends_on:
                - mempool-backend
              healthcheck:
                test: ["CMD", "curl", "-f", "http://localhost:{{ mempool_frontend_port }}"]
                interval: 30s
                timeout: 10s
                retries: 3
                start_period: 30s
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0644'
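
    # Optional manual check: the rendered compose file can be validated on the
    # target before anything starts (a sketch; <mempool_dir> stands for the
    # directory created above):
    #
    #   cd <mempool_dir> && docker compose config --quiet && echo "compose OK"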
    - name: Pull Mempool images
      command: docker compose pull
      args:
        chdir: "{{ mempool_dir }}"

    - name: Deploy Mempool containers with docker compose
      command: docker compose up -d
      args:
        chdir: "{{ mempool_dir }}"

    - name: Wait for MariaDB to be healthy
      command: docker inspect --format='{{ '{{' }}.State.Health.Status{{ '}}' }}' mempool-db
      register: mariadb_health
      until: mariadb_health.stdout == 'healthy'
      retries: 30
      delay: 10
      changed_when: false
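
    # The {{ '{{' }} / {{ '}}' }} expressions above escape the braces from
    # Jinja2 so they reach docker as Go-template syntax; the command that
    # actually runs on the host is:
    #
    #   docker inspect --format='{{.State.Health.Status}}' mempool-db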
    - name: Wait for Mempool backend to start
      uri:
        url: "http://localhost:{{ mempool_backend_port }}/api/v1/backend-info"
        method: GET
        status_code: 200
        timeout: 10
      register: backend_check
      until: backend_check.status == 200
      retries: 30
      delay: 10
      ignore_errors: yes

    - name: Wait for Mempool frontend to be available
      uri:
        url: "http://localhost:{{ mempool_frontend_port }}"
        method: GET
        status_code: 200
        timeout: 10
      register: frontend_check
      until: frontend_check.status == 200
      retries: 20
      delay: 5
      ignore_errors: yes
    - name: Display deployment status
      debug:
        msg:
          - "Mempool deployment complete!"
          - "Frontend: http://localhost:{{ mempool_frontend_port }}"
          - "Backend API: http://localhost:{{ mempool_backend_port }}/api/v1/backend-info"
          - "Backend check: {{ 'OK' if backend_check.status == 200 else 'Still initializing...' }}"
          - "Frontend check: {{ 'OK' if frontend_check.status == 200 else 'Still initializing...' }}"

    # ===========================================
    # Health Check Scripts for Uptime Kuma Push Monitors
    # ===========================================
    - name: Create Mempool MariaDB health check script
      copy:
        dest: /usr/local/bin/mempool-mariadb-healthcheck-push.sh
        content: |
          #!/bin/bash
          UPTIME_KUMA_PUSH_URL="${UPTIME_KUMA_PUSH_URL}"

          check_container() {
            local status=$(docker inspect --format='{{ '{{' }}.State.Health.Status{{ '}}' }}' mempool-db 2>/dev/null)
            [ "$status" = "healthy" ]
          }

          push_to_uptime_kuma() {
            local status=$1
            local msg=$2
            if [ -z "$UPTIME_KUMA_PUSH_URL" ]; then
              echo "ERROR: UPTIME_KUMA_PUSH_URL not set"
              return 1
            fi
            curl -s --max-time 10 --retry 2 -o /dev/null \
              "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${msg// /%20}&ping=" || true
          }

          if check_container; then
            push_to_uptime_kuma "up" "OK"
            exit 0
          else
            push_to_uptime_kuma "down" "MariaDB container unhealthy"
            exit 1
          fi
        owner: root
        group: root
        mode: '0755'
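
    # The push script can be exercised by hand once a monitor exists; the URL
    # below is a placeholder token, not a real one:
    #
    #   UPTIME_KUMA_PUSH_URL="https://kuma.example.com/api/push/XXXX" \
    #     /usr/local/bin/mempool-mariadb-healthcheck-push.sh; echo "exit=$?"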
    - name: Create Mempool backend health check script
      copy:
        dest: /usr/local/bin/mempool-backend-healthcheck-push.sh
        content: |
          #!/bin/bash
          UPTIME_KUMA_PUSH_URL="${UPTIME_KUMA_PUSH_URL}"
          BACKEND_PORT={{ mempool_backend_port }}

          check_backend() {
            curl -sf --max-time 5 "http://localhost:${BACKEND_PORT}/api/v1/backend-info" > /dev/null 2>&1
          }

          push_to_uptime_kuma() {
            local status=$1
            local msg=$2
            if [ -z "$UPTIME_KUMA_PUSH_URL" ]; then
              echo "ERROR: UPTIME_KUMA_PUSH_URL not set"
              return 1
            fi
            curl -s --max-time 10 --retry 2 -o /dev/null \
              "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${msg// /%20}&ping=" || true
          }

          if check_backend; then
            push_to_uptime_kuma "up" "OK"
            exit 0
          else
            push_to_uptime_kuma "down" "Backend API not responding"
            exit 1
          fi
        owner: root
        group: root
        mode: '0755'
    - name: Create Mempool frontend health check script
      copy:
        dest: /usr/local/bin/mempool-frontend-healthcheck-push.sh
        content: |
          #!/bin/bash
          UPTIME_KUMA_PUSH_URL="${UPTIME_KUMA_PUSH_URL}"
          FRONTEND_PORT={{ mempool_frontend_port }}

          check_frontend() {
            curl -sf --max-time 5 "http://localhost:${FRONTEND_PORT}" > /dev/null 2>&1
          }

          push_to_uptime_kuma() {
            local status=$1
            local msg=$2
            if [ -z "$UPTIME_KUMA_PUSH_URL" ]; then
              echo "ERROR: UPTIME_KUMA_PUSH_URL not set"
              return 1
            fi
            curl -s --max-time 10 --retry 2 -o /dev/null \
              "${UPTIME_KUMA_PUSH_URL}?status=${status}&msg=${msg// /%20}&ping=" || true
          }

          if check_frontend; then
            push_to_uptime_kuma "up" "OK"
            exit 0
          else
            push_to_uptime_kuma "down" "Frontend not responding"
            exit 1
          fi
        owner: root
        group: root
        mode: '0755'

    # ===========================================
    # Systemd Timers for Health Checks
    # ===========================================
    - name: Create systemd services for health checks
      copy:
        dest: "/etc/systemd/system/mempool-{{ item.name }}-healthcheck.service"
        content: |
          [Unit]
          Description=Mempool {{ item.label }} Health Check
          After=network.target docker.service

          [Service]
          Type=oneshot
          User=root
          ExecStart=/usr/local/bin/mempool-{{ item.name }}-healthcheck-push.sh
          Environment=UPTIME_KUMA_PUSH_URL=
          StandardOutput=journal
          StandardError=journal

          [Install]
          WantedBy=multi-user.target
        owner: root
        group: root
        mode: '0644'
      loop:
        - { name: "mariadb", label: "MariaDB" }
        - { name: "backend", label: "Backend" }
        - { name: "frontend", label: "Frontend" }
    - name: Create systemd timers for health checks
      copy:
        dest: "/etc/systemd/system/mempool-{{ item }}-healthcheck.timer"
        content: |
          [Unit]
          Description=Mempool {{ item }} Health Check Timer

          [Timer]
          OnBootSec=2min
          OnUnitActiveSec=1min
          Persistent=true

          [Install]
          WantedBy=timers.target
        owner: root
        group: root
        mode: '0644'
      loop:
        - mariadb
        - backend
        - frontend
    - name: Reload systemd daemon
      systemd:
        daemon_reload: yes

    - name: Enable and start health check timers
      systemd:
        name: "mempool-{{ item }}-healthcheck.timer"
        enabled: yes
        state: started
      loop:
        - mariadb
        - backend
        - frontend
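
    # The service units above deliberately ship with an empty
    # Environment=UPTIME_KUMA_PUSH_URL= line; it is filled in by lineinfile
    # later in this play. To verify the timers on the host (manual check):
    #
    #   systemctl list-timers 'mempool-*'
    #   journalctl -u mempool-mariadb-healthcheck.service -n 20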
    # ===========================================
    # Uptime Kuma Push Monitor Setup
    # ===========================================
    - name: Create Uptime Kuma push monitor setup script for Mempool
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/setup_mempool_monitors.py
        content: |
          #!/usr/bin/env python3
          import sys
          import traceback
          import yaml
          from uptime_kuma_api import UptimeKumaApi, MonitorType

          try:
              with open('/tmp/ansible_mempool_config.yml', 'r') as f:
                  config = yaml.safe_load(f)

              url = config['uptime_kuma_url']
              username = config['username']
              password = config['password']
              monitors_to_create = config['monitors']

              api = UptimeKumaApi(url, timeout=30)
              api.login(username, password)

              monitors = api.get_monitors()

              # Find or create "services" group
              group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
              if not group:
                  api.add_monitor(type=MonitorType.GROUP, name='services')
                  monitors = api.get_monitors()
                  group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)

              # Get ntfy notification ID
              notifications = api.get_notifications()
              ntfy_notification_id = None
              for notif in notifications:
                  if notif.get('type') == 'ntfy':
                      ntfy_notification_id = notif.get('id')
                      break

              results = {}
              for monitor_name in monitors_to_create:
                  existing = next((m for m in monitors if m.get('name') == monitor_name), None)

                  if existing:
                      print(f"Monitor '{monitor_name}' already exists (ID: {existing['id']})")
                      push_token = existing.get('pushToken') or existing.get('push_token')
                      if push_token:
                          results[monitor_name] = f"{url}/api/push/{push_token}"
                          print(f"Push URL ({monitor_name}): {results[monitor_name]}")
                  else:
                      print(f"Creating push monitor '{monitor_name}'...")
                      api.add_monitor(
                          type=MonitorType.PUSH,
                          name=monitor_name,
                          parent=group['id'],
                          interval=90,
                          maxretries=3,
                          retryInterval=60,
                          notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
                      )
                      monitors = api.get_monitors()
                      new_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
                      if new_monitor:
                          push_token = new_monitor.get('pushToken') or new_monitor.get('push_token')
                          if push_token:
                              results[monitor_name] = f"{url}/api/push/{push_token}"
                              print(f"Push URL ({monitor_name}): {results[monitor_name]}")

              api.disconnect()

              # Write results to file for Ansible to read, then report success
              with open('/tmp/mempool_push_urls.yml', 'w') as f:
                  yaml.dump(results, f)

              print("SUCCESS")

          except Exception as e:
              print(f"ERROR: {str(e)}", file=sys.stderr)
              traceback.print_exc(file=sys.stderr)
              sys.exit(1)
        mode: '0755'
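
    # The script above depends on the third-party uptime-kuma-api package
    # being importable by python3 on the control node; if it is missing,
    # something like this installs it (an assumption about your local env):
    #
    #   pip install uptime-kuma-api pyyaml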
    - name: Create temporary config for monitor setup
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/ansible_mempool_config.yml
        content: |
          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
          username: "{{ uptime_kuma_username }}"
          password: "{{ uptime_kuma_password }}"
          monitors:
            - "Mempool MariaDB"
            - "Mempool Backend"
            - "Mempool Frontend"
        mode: '0644'
    - name: Run Uptime Kuma push monitor setup
      command: python3 /tmp/setup_mempool_monitors.py
      delegate_to: localhost
      become: no
      register: monitor_setup
      changed_when: "'SUCCESS' in monitor_setup.stdout"
      ignore_errors: yes

    - name: Display monitor setup output
      debug:
        msg: "{{ monitor_setup.stdout_lines }}"
      when: monitor_setup.stdout is defined
    - name: Read push URLs from file
      slurp:
        src: /tmp/mempool_push_urls.yml
      delegate_to: localhost
      become: no
      register: push_urls_file
      ignore_errors: yes

    - name: Parse push URLs
      set_fact:
        push_urls: "{{ push_urls_file.content | b64decode | from_yaml }}"
      when: push_urls_file.content is defined
      ignore_errors: yes
    - name: Update MariaDB health check service with push URL
      lineinfile:
        path: /etc/systemd/system/mempool-mariadb-healthcheck.service
        regexp: '^Environment=UPTIME_KUMA_PUSH_URL='
        line: "Environment=UPTIME_KUMA_PUSH_URL={{ push_urls['Mempool MariaDB'] }}"
        insertafter: '^\[Service\]'
      when: push_urls is defined and push_urls['Mempool MariaDB'] is defined

    - name: Update Backend health check service with push URL
      lineinfile:
        path: /etc/systemd/system/mempool-backend-healthcheck.service
        regexp: '^Environment=UPTIME_KUMA_PUSH_URL='
        line: "Environment=UPTIME_KUMA_PUSH_URL={{ push_urls['Mempool Backend'] }}"
        insertafter: '^\[Service\]'
      when: push_urls is defined and push_urls['Mempool Backend'] is defined

    - name: Update Frontend health check service with push URL
      lineinfile:
        path: /etc/systemd/system/mempool-frontend-healthcheck.service
        regexp: '^Environment=UPTIME_KUMA_PUSH_URL='
        line: "Environment=UPTIME_KUMA_PUSH_URL={{ push_urls['Mempool Frontend'] }}"
        insertafter: '^\[Service\]'
      when: push_urls is defined and push_urls['Mempool Frontend'] is defined

    - name: Reload systemd after push URL updates
      systemd:
        daemon_reload: yes
      when: push_urls is defined

    - name: Restart health check timers
      systemd:
        name: "mempool-{{ item }}-healthcheck.timer"
        state: restarted
      loop:
        - mariadb
        - backend
        - frontend
      when: push_urls is defined
    - name: Clean up temporary files
      delegate_to: localhost
      become: no
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/setup_mempool_monitors.py
        - /tmp/ansible_mempool_config.yml
        - /tmp/mempool_push_urls.yml
- name: Configure Caddy reverse proxy for Mempool on vipy
  hosts: vipy
  become: yes
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ../../infra_secrets.yml
    - ./mempool_vars.yml
  vars:
    mempool_subdomain: "{{ subdomains.mempool }}"
    mempool_domain: "{{ mempool_subdomain }}.{{ root_domain }}"
    # caddy_sites_dir comes from the shared vars files; redefining it here as
    # "{{ caddy_sites_dir }}" would create a recursive template loop.
  tasks:
    - name: Ensure Caddy sites-enabled directory exists
      file:
        path: "{{ caddy_sites_dir }}"
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Ensure Caddyfile includes import directive for sites-enabled
      lineinfile:
        path: /etc/caddy/Caddyfile
        line: 'import sites-enabled/*'
        insertafter: EOF
        state: present
        backup: yes
        create: yes
        mode: '0644'
    - name: Create Caddy reverse proxy configuration for Mempool
      copy:
        dest: "{{ caddy_sites_dir }}/mempool.conf"
        content: |
          {{ mempool_domain }} {
              reverse_proxy mempool-box:{{ mempool_frontend_port }} {
                  # Use Tailscale MagicDNS to resolve the upstream hostname
                  transport http {
                      resolvers 100.100.100.100
                  }
              }
          }
        owner: root
        group: root
        mode: '0644'
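
    # Optional manual check before the reload below: ask Caddy to validate
    # the full config, including the imported site (a sketch, run on vipy):
    #
    #   caddy validate --config /etc/caddy/Caddyfile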
    - name: Reload Caddy to apply new config
      systemd:
        name: caddy
        state: reloaded

    - name: Display Mempool URL
      debug:
        msg: "Mempool is now available at https://{{ mempool_domain }}"

    # ===========================================
    # Uptime Kuma HTTP Monitor for Public Endpoint
    # ===========================================
    - name: Create Uptime Kuma HTTP monitor setup script for Mempool
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/setup_mempool_http_monitor.py
        content: |
          #!/usr/bin/env python3
          import sys
          import traceback
          import yaml
          from uptime_kuma_api import UptimeKumaApi, MonitorType

          try:
              with open('/tmp/ansible_mempool_http_config.yml', 'r') as f:
                  config = yaml.safe_load(f)

              url = config['uptime_kuma_url']
              username = config['username']
              password = config['password']
              monitor_url = config['monitor_url']
              monitor_name = config['monitor_name']

              api = UptimeKumaApi(url, timeout=30)
              api.login(username, password)

              monitors = api.get_monitors()

              # Find or create "services" group
              group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
              if not group:
                  api.add_monitor(type=MonitorType.GROUP, name='services')
                  monitors = api.get_monitors()
                  group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)

              # Check if monitor already exists
              existing = next((m for m in monitors if m.get('name') == monitor_name), None)

              # Get ntfy notification ID
              notifications = api.get_notifications()
              ntfy_notification_id = None
              for notif in notifications:
                  if notif.get('type') == 'ntfy':
                      ntfy_notification_id = notif.get('id')
                      break

              if existing:
                  print(f"Monitor '{monitor_name}' already exists (ID: {existing['id']})")
                  print("Skipping - monitor already configured")
              else:
                  print(f"Creating HTTP monitor '{monitor_name}'...")
                  api.add_monitor(
                      type=MonitorType.HTTP,
                      name=monitor_name,
                      url=monitor_url,
                      parent=group['id'],
                      interval=60,
                      maxretries=3,
                      retryInterval=60,
                      notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
                  )

              api.disconnect()
              print("SUCCESS")

          except Exception as e:
              print(f"ERROR: {str(e)}", file=sys.stderr)
              traceback.print_exc(file=sys.stderr)
              sys.exit(1)
        mode: '0755'
    - name: Create temporary config for HTTP monitor setup
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/ansible_mempool_http_config.yml
        content: |
          uptime_kuma_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
          username: "{{ uptime_kuma_username }}"
          password: "{{ uptime_kuma_password }}"
          monitor_url: "https://{{ mempool_domain }}"
          monitor_name: "Mempool Public Access"
        mode: '0644'
    - name: Run Uptime Kuma HTTP monitor setup
      command: python3 /tmp/setup_mempool_http_monitor.py
      delegate_to: localhost
      become: no
      register: http_monitor_setup
      changed_when: "'SUCCESS' in http_monitor_setup.stdout"
      ignore_errors: yes

    - name: Display HTTP monitor setup output
      debug:
        msg: "{{ http_monitor_setup.stdout_lines }}"
      when: http_monitor_setup.stdout is defined
    - name: Clean up HTTP monitor temporary files
      delegate_to: localhost
      become: no
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/setup_mempool_http_monitor.py
        - /tmp/ansible_mempool_http_config.yml
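
# Example invocation (assumed inventory and playbook file names; adjust to
# your repository layout):
#
#   ansible-playbook -i inventory.ini mempool_playbook.yml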