lots of stuff man
Parent: 3b88e6c5e8, commit: c8754e1bdc
43 changed files with 7310 additions and 121 deletions
ansible/backup.infra_vars.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
new_user: counterweight
ssh_port: 22
allow_ssh_from: "any"
root_domain: contrapeso.xyz
ansible/infra/410_disk_usage_alerts.yml (new file, 331 lines)
@@ -0,0 +1,331 @@
- name: Deploy Disk Usage Monitoring
|
||||
hosts: all
|
||||
become: yes
|
||||
vars_files:
|
||||
- ../infra_vars.yml
|
||||
- ../services_config.yml
|
||||
- ../infra_secrets.yml
|
||||
- ../services/uptime_kuma/uptime_kuma_vars.yml
|
||||
- ../services/ntfy/ntfy_vars.yml
|
||||
|
||||
vars:
|
||||
disk_usage_threshold_percent: 80
|
||||
disk_check_interval_minutes: 15
|
||||
monitored_mount_point: "/"
|
||||
monitoring_script_dir: /opt/disk-monitoring
|
||||
monitoring_script_path: "{{ monitoring_script_dir }}/disk_usage_monitor.sh"
|
||||
log_file: "{{ monitoring_script_dir }}/disk_usage_monitor.log"
|
||||
systemd_service_name: disk-usage-monitor
|
||||
# Uptime Kuma configuration (auto-configured from services_config.yml and infra_secrets.yml)
|
||||
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
- name: Validate Uptime Kuma configuration
|
||||
assert:
|
||||
that:
|
||||
- uptime_kuma_api_url is defined
|
||||
- uptime_kuma_api_url != ""
|
||||
- uptime_kuma_username is defined
|
||||
- uptime_kuma_username != ""
|
||||
- uptime_kuma_password is defined
|
||||
- uptime_kuma_password != ""
|
||||
fail_msg: "uptime_kuma_api_url, uptime_kuma_username and uptime_kuma_password must be set"
|
||||
|
||||
- name: Get hostname for monitor identification
|
||||
command: hostname
|
||||
register: host_name
|
||||
changed_when: false
|
||||
|
||||
- name: Set monitor name and group based on hostname and mount point
|
||||
set_fact:
|
||||
monitor_name: "disk-usage-{{ host_name.stdout }}-{{ monitored_mount_point | replace('/', 'root') }}"
|
||||
monitor_friendly_name: "Disk Usage: {{ host_name.stdout }} ({{ monitored_mount_point }})"
|
||||
uptime_kuma_monitor_group: "{{ host_name.stdout }} - infra"
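For reference, Jinja2's replace filter behaves like Python's str.replace, so the monitor name for the default mount point comes out as follows; the hostname "vipy" is a made-up example value:

# Sketch of the name mangling in the set_fact above; "vipy" is a hypothetical hostname.
host = "vipy"
mount = "/"
monitor_name = f"disk-usage-{host}-{mount.replace('/', 'root')}"
print(monitor_name)  # -> disk-usage-vipy-root
# A nested mount such as "/var" would come out as "disk-usage-vipy-rootvar".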
|
||||
|
||||
- name: Create Uptime Kuma monitor setup script
|
||||
copy:
|
||||
dest: /tmp/setup_uptime_kuma_monitor.py
|
||||
content: |
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import json
|
||||
from uptime_kuma_api import UptimeKumaApi
|
||||
|
||||
def main():
|
||||
api_url = sys.argv[1]
|
||||
username = sys.argv[2]
|
||||
password = sys.argv[3]
|
||||
group_name = sys.argv[4]
|
||||
monitor_name = sys.argv[5]
|
||||
monitor_description = sys.argv[6]
|
||||
interval = int(sys.argv[7])
|
||||
ntfy_topic = sys.argv[8] if len(sys.argv) > 8 else "alerts"
|
||||
|
||||
api = UptimeKumaApi(api_url, timeout=60, wait_events=2.0)
|
||||
api.login(username, password)
|
||||
|
||||
# Get all monitors
|
||||
monitors = api.get_monitors()
|
||||
|
||||
# Get all notifications and find ntfy notification
|
||||
notifications = api.get_notifications()
|
||||
ntfy_notification = next((n for n in notifications if n.get('name') == f'ntfy ({ntfy_topic})'), None)
|
||||
notification_id_list = {}
|
||||
if ntfy_notification:
|
||||
notification_id_list[ntfy_notification['id']] = True
|
||||
|
||||
# Find or create group
|
||||
group = next((m for m in monitors if m.get('name') == group_name and m.get('type') == 'group'), None)
|
||||
if not group:
|
||||
group_result = api.add_monitor(type='group', name=group_name)
|
||||
# Refresh to get the full group object with id
|
||||
monitors = api.get_monitors()
|
||||
group = next((m for m in monitors if m.get('name') == group_name and m.get('type') == 'group'), None)
|
||||
|
||||
# Find or create/update push monitor
|
||||
existing_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
|
||||
|
||||
monitor_data = {
|
||||
'type': 'push',
|
||||
'name': monitor_name,
|
||||
'parent': group['id'],
|
||||
'interval': interval,
|
||||
'upsideDown': True,
|
||||
'description': monitor_description,
|
||||
'notificationIDList': notification_id_list
|
||||
}
|
||||
|
||||
if existing_monitor:
|
||||
monitor = api.edit_monitor(existing_monitor['id'], **monitor_data)
|
||||
# Refresh to get the full monitor object with pushToken
|
||||
monitors = api.get_monitors()
|
||||
monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
|
||||
else:
|
||||
monitor_result = api.add_monitor(**monitor_data)
|
||||
# Refresh to get the full monitor object with pushToken
|
||||
monitors = api.get_monitors()
|
||||
monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
|
||||
|
||||
# Output result as JSON
|
||||
result = {
|
||||
'monitor_id': monitor['id'],
|
||||
'push_token': monitor['pushToken'],
|
||||
'group_name': group_name,
|
||||
'group_id': group['id'],
|
||||
'monitor_name': monitor_name
|
||||
}
|
||||
print(json.dumps(result))
|
||||
|
||||
api.disconnect()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
mode: '0755'
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
|
||||
- name: Run Uptime Kuma monitor setup script
|
||||
command: >
|
||||
{{ ansible_playbook_python }}
|
||||
/tmp/setup_uptime_kuma_monitor.py
|
||||
"{{ uptime_kuma_api_url }}"
|
||||
"{{ uptime_kuma_username }}"
|
||||
"{{ uptime_kuma_password }}"
|
||||
"{{ uptime_kuma_monitor_group }}"
|
||||
"{{ monitor_name }}"
|
||||
"{{ monitor_friendly_name }} - Alerts when usage exceeds {{ disk_usage_threshold_percent }}%"
|
||||
"{{ (disk_check_interval_minutes * 60) + 60 }}"
|
||||
"{{ ntfy_topic }}"
|
||||
register: monitor_setup_result
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
changed_when: false
|
||||
|
||||
- name: Parse monitor setup result
|
||||
set_fact:
|
||||
monitor_info_parsed: "{{ monitor_setup_result.stdout | from_json }}"
|
||||
|
||||
- name: Set push URL and monitor ID as facts
|
||||
set_fact:
|
||||
uptime_kuma_disk_usage_push_url: "{{ uptime_kuma_api_url }}/api/push/{{ monitor_info_parsed.push_token }}"
|
||||
uptime_kuma_monitor_id: "{{ monitor_info_parsed.monitor_id }}"
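As a standalone sketch of what the two tasks above do with the setup script's stdout (the token, URL and values below are placeholders, not real output):

# Parse the setup script's JSON output and build the push URL, mirroring the
# from_json filter and the set_fact above. All values here are placeholders.
import json

stdout = '{"monitor_id": 42, "push_token": "AbCdEf1234", "group_name": "vipy - infra", "group_id": 7, "monitor_name": "disk-usage-vipy-root"}'
info = json.loads(stdout)
push_url = "https://uptime.contrapeso.xyz" + "/api/push/" + info["push_token"]
print(info["monitor_id"], push_url)

# Note on the interval passed to the setup script earlier in this play:
# (disk_check_interval_minutes * 60) + 60 is the check period plus a
# 60-second grace window, i.e. 960 seconds for the default 15-minute check.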
|
||||
|
||||
- name: Install required packages for disk monitoring
|
||||
package:
|
||||
name:
|
||||
- curl
|
||||
state: present
|
||||
|
||||
- name: Create monitoring script directory
|
||||
file:
|
||||
path: "{{ monitoring_script_dir }}"
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0755'
|
||||
|
||||
- name: Create disk usage monitoring script
|
||||
copy:
|
||||
dest: "{{ monitoring_script_path }}"
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
# Disk Usage Monitoring Script
|
||||
# Monitors disk usage and sends alerts to Uptime Kuma
|
||||
# Mode: "No news is good news" - only sends alerts when disk usage is HIGH
|
||||
|
||||
LOG_FILE="{{ log_file }}"
|
||||
USAGE_THRESHOLD="{{ disk_usage_threshold_percent }}"
|
||||
UPTIME_KUMA_URL="{{ uptime_kuma_disk_usage_push_url }}"
|
||||
MOUNT_POINT="{{ monitored_mount_point }}"
|
||||
|
||||
# Function to log messages
|
||||
log_message() {
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$LOG_FILE"
|
||||
}
|
||||
|
||||
# Function to get disk usage percentage
|
||||
get_disk_usage() {
|
||||
local mount_point="$1"
|
||||
local usage=""
|
||||
|
||||
# Get disk usage percentage (without % sign)
|
||||
usage=$(df -h "$mount_point" 2>/dev/null | awk 'NR==2 {gsub(/%/, "", $5); print $5}')
|
||||
|
||||
if [ -z "$usage" ]; then
|
||||
log_message "ERROR: Could not read disk usage for $mount_point"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "$usage"
|
||||
}
|
||||
|
||||
# Function to get disk usage details
|
||||
get_disk_details() {
|
||||
local mount_point="$1"
|
||||
df -h "$mount_point" 2>/dev/null | awk 'NR==2 {print "Used: "$3" / Total: "$2" ("$5" full)"}'
|
||||
}
|
||||
|
||||
# Function to send alert to Uptime Kuma when disk usage exceeds threshold
|
||||
# With upside-down mode enabled, sending status=up will trigger an alert
|
||||
send_uptime_kuma_alert() {
|
||||
local usage="$1"
|
||||
local details="$2"
|
||||
local message="DISK FULL WARNING: ${MOUNT_POINT} is ${usage}% full (Threshold: ${USAGE_THRESHOLD}%) - ${details}"
|
||||
|
||||
log_message "ALERT: $message"
|
||||
|
||||
# Send push notification to Uptime Kuma with status=up
|
||||
# In upside-down mode, status=up is treated as down/alert
|
||||
response=$(curl -s -w "\n%{http_code}" -G \
|
||||
--data-urlencode "status=up" \
|
||||
--data-urlencode "msg=$message" \
|
||||
"$UPTIME_KUMA_URL" 2>&1)
|
||||
http_code=$(echo "$response" | tail -n1)
|
||||
|
||||
if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
|
||||
log_message "Alert sent successfully to Uptime Kuma (HTTP $http_code)"
|
||||
else
|
||||
log_message "ERROR: Failed to send alert to Uptime Kuma (HTTP $http_code)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Main monitoring logic
|
||||
main() {
|
||||
log_message "Starting disk usage check for $MOUNT_POINT"
|
||||
|
||||
# Get current disk usage
|
||||
current_usage=$(get_disk_usage "$MOUNT_POINT")
|
||||
|
||||
if [ $? -ne 0 ] || [ -z "$current_usage" ]; then
|
||||
log_message "ERROR: Could not read disk usage"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get disk details
|
||||
disk_details=$(get_disk_details "$MOUNT_POINT")
|
||||
|
||||
log_message "Current disk usage: ${current_usage}% - $disk_details"
|
||||
|
||||
# Check if usage exceeds threshold
|
||||
if [ "$current_usage" -gt "$USAGE_THRESHOLD" ]; then
|
||||
log_message "WARNING: Disk usage ${current_usage}% exceeds threshold ${USAGE_THRESHOLD}%"
|
||||
send_uptime_kuma_alert "$current_usage" "$disk_details"
|
||||
else
|
||||
log_message "Disk usage is within normal range - no alert needed (no news is good news)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0755'
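A rough Python equivalent of the curl call inside send_uptime_kuma_alert above, to make the upside-down semantics explicit; the push URL is a placeholder:

# Sketch of the alert push done with curl in the script above. Because the monitor
# was created with upsideDown=True, status=up is what raises the alert.
import requests

push_url = "https://uptime.contrapeso.xyz/api/push/AbCdEf1234"  # placeholder token
message = "DISK FULL WARNING: / is 85% full (Threshold: 80%)"
resp = requests.get(push_url, params={"status": "up", "msg": message}, timeout=10)
resp.raise_for_status()  # the script checks for HTTP 200/201 in the same spirit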
|
||||
|
||||
- name: Create systemd service for disk usage monitoring
|
||||
copy:
|
||||
dest: "/etc/systemd/system/{{ systemd_service_name }}.service"
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Disk Usage Monitor
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart={{ monitoring_script_path }}
|
||||
User=root
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Create systemd timer for disk usage monitoring
|
||||
copy:
|
||||
dest: "/etc/systemd/system/{{ systemd_service_name }}.timer"
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Run Disk Usage Monitor every {{ disk_check_interval_minutes }} minute(s)
|
||||
Requires={{ systemd_service_name }}.service
|
||||
|
||||
[Timer]
|
||||
OnBootSec={{ disk_check_interval_minutes }}min
|
||||
OnUnitActiveSec={{ disk_check_interval_minutes }}min
|
||||
Persistent=true
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Reload systemd daemon
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
|
||||
- name: Enable and start disk usage monitoring timer
|
||||
systemd:
|
||||
name: "{{ systemd_service_name }}.timer"
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Test disk usage monitoring script
|
||||
command: "{{ monitoring_script_path }}"
|
||||
register: script_test
|
||||
changed_when: false
|
||||
|
||||
- name: Verify script execution
|
||||
assert:
|
||||
that:
|
||||
- script_test.rc == 0
|
||||
fail_msg: "Disk usage monitoring script failed to execute properly"
|
||||
|
||||
- name: Clean up temporary Uptime Kuma setup script
|
||||
file:
|
||||
path: /tmp/setup_uptime_kuma_monitor.py
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
ansible/infra/420_system_healthcheck.yml (new file, 313 lines)
@@ -0,0 +1,313 @@
- name: Deploy System Healthcheck Monitoring
|
||||
hosts: all
|
||||
become: yes
|
||||
vars_files:
|
||||
- ../infra_vars.yml
|
||||
- ../services_config.yml
|
||||
- ../infra_secrets.yml
|
||||
- ../services/uptime_kuma/uptime_kuma_vars.yml
|
||||
- ../services/ntfy/ntfy_vars.yml
|
||||
|
||||
vars:
|
||||
healthcheck_interval_seconds: 60 # Send healthcheck every 60 seconds (1 minute)
|
||||
healthcheck_timeout_seconds: 90 # Uptime Kuma should alert if no ping received within 90s
|
||||
healthcheck_retries: 1 # Number of retries before alerting
|
||||
monitoring_script_dir: /opt/system-healthcheck
|
||||
monitoring_script_path: "{{ monitoring_script_dir }}/system_healthcheck.sh"
|
||||
log_file: "{{ monitoring_script_dir }}/system_healthcheck.log"
|
||||
systemd_service_name: system-healthcheck
|
||||
# Uptime Kuma configuration (auto-configured from services_config.yml and infra_secrets.yml)
|
||||
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
- name: Validate Uptime Kuma configuration
|
||||
assert:
|
||||
that:
|
||||
- uptime_kuma_api_url is defined
|
||||
- uptime_kuma_api_url != ""
|
||||
- uptime_kuma_username is defined
|
||||
- uptime_kuma_username != ""
|
||||
- uptime_kuma_password is defined
|
||||
- uptime_kuma_password != ""
|
||||
fail_msg: "uptime_kuma_api_url, uptime_kuma_username and uptime_kuma_password must be set"
|
||||
|
||||
- name: Get hostname for monitor identification
|
||||
command: hostname
|
||||
register: host_name
|
||||
changed_when: false
|
||||
|
||||
- name: Set monitor name and group based on hostname
|
||||
set_fact:
|
||||
monitor_name: "system-healthcheck-{{ host_name.stdout }}"
|
||||
monitor_friendly_name: "System Healthcheck: {{ host_name.stdout }}"
|
||||
uptime_kuma_monitor_group: "{{ host_name.stdout }} - infra"
|
||||
|
||||
- name: Create Uptime Kuma monitor setup script
|
||||
copy:
|
||||
dest: /tmp/setup_uptime_kuma_healthcheck_monitor.py
|
||||
content: |
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import json
|
||||
from uptime_kuma_api import UptimeKumaApi
|
||||
|
||||
def main():
|
||||
api_url = sys.argv[1]
|
||||
username = sys.argv[2]
|
||||
password = sys.argv[3]
|
||||
group_name = sys.argv[4]
|
||||
monitor_name = sys.argv[5]
|
||||
monitor_description = sys.argv[6]
|
||||
interval = int(sys.argv[7])
|
||||
retries = int(sys.argv[8])
|
||||
ntfy_topic = sys.argv[9] if len(sys.argv) > 9 else "alerts"
|
||||
|
||||
api = UptimeKumaApi(api_url, timeout=60, wait_events=2.0)
|
||||
api.login(username, password)
|
||||
|
||||
# Get all monitors
|
||||
monitors = api.get_monitors()
|
||||
|
||||
# Get all notifications and find ntfy notification
|
||||
notifications = api.get_notifications()
|
||||
ntfy_notification = next((n for n in notifications if n.get('name') == f'ntfy ({ntfy_topic})'), None)
|
||||
notification_id_list = {}
|
||||
if ntfy_notification:
|
||||
notification_id_list[ntfy_notification['id']] = True
|
||||
|
||||
# Find or create group
|
||||
group = next((m for m in monitors if m.get('name') == group_name and m.get('type') == 'group'), None)
|
||||
if not group:
|
||||
group_result = api.add_monitor(type='group', name=group_name)
|
||||
# Refresh to get the full group object with id
|
||||
monitors = api.get_monitors()
|
||||
group = next((m for m in monitors if m.get('name') == group_name and m.get('type') == 'group'), None)
|
||||
|
||||
# Find or create/update push monitor
|
||||
existing_monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
|
||||
|
||||
monitor_data = {
|
||||
'type': 'push',
|
||||
'name': monitor_name,
|
||||
'parent': group['id'],
|
||||
'interval': interval,
|
||||
'upsideDown': False, # Normal mode: receiving pings = healthy
|
||||
'maxretries': retries,
|
||||
'description': monitor_description,
|
||||
'notificationIDList': notification_id_list
|
||||
}
|
||||
|
||||
if existing_monitor:
|
||||
monitor = api.edit_monitor(existing_monitor['id'], **monitor_data)
|
||||
# Refresh to get the full monitor object with pushToken
|
||||
monitors = api.get_monitors()
|
||||
monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
|
||||
else:
|
||||
monitor_result = api.add_monitor(**monitor_data)
|
||||
# Refresh to get the full monitor object with pushToken
|
||||
monitors = api.get_monitors()
|
||||
monitor = next((m for m in monitors if m.get('name') == monitor_name), None)
|
||||
|
||||
# Output result as JSON
|
||||
result = {
|
||||
'monitor_id': monitor['id'],
|
||||
'push_token': monitor['pushToken'],
|
||||
'group_name': group_name,
|
||||
'group_id': group['id'],
|
||||
'monitor_name': monitor_name
|
||||
}
|
||||
print(json.dumps(result))
|
||||
|
||||
api.disconnect()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
mode: '0755'
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
|
||||
- name: Run Uptime Kuma monitor setup script
|
||||
command: >
|
||||
{{ ansible_playbook_python }}
|
||||
/tmp/setup_uptime_kuma_healthcheck_monitor.py
|
||||
"{{ uptime_kuma_api_url }}"
|
||||
"{{ uptime_kuma_username }}"
|
||||
"{{ uptime_kuma_password }}"
|
||||
"{{ uptime_kuma_monitor_group }}"
|
||||
"{{ monitor_name }}"
|
||||
"{{ monitor_friendly_name }} - Regular healthcheck ping every {{ healthcheck_interval_seconds }}s"
|
||||
"{{ healthcheck_timeout_seconds }}"
|
||||
"{{ healthcheck_retries }}"
|
||||
"{{ ntfy_topic }}"
|
||||
register: monitor_setup_result
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
changed_when: false
|
||||
|
||||
- name: Parse monitor setup result
|
||||
set_fact:
|
||||
monitor_info_parsed: "{{ monitor_setup_result.stdout | from_json }}"
|
||||
|
||||
- name: Set push URL and monitor ID as facts
|
||||
set_fact:
|
||||
uptime_kuma_healthcheck_push_url: "{{ uptime_kuma_api_url }}/api/push/{{ monitor_info_parsed.push_token }}"
|
||||
uptime_kuma_monitor_id: "{{ monitor_info_parsed.monitor_id }}"
|
||||
|
||||
- name: Install required packages for healthcheck monitoring
|
||||
package:
|
||||
name:
|
||||
- curl
|
||||
state: present
|
||||
|
||||
- name: Create monitoring script directory
|
||||
file:
|
||||
path: "{{ monitoring_script_dir }}"
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0755'
|
||||
|
||||
- name: Create system healthcheck script
|
||||
copy:
|
||||
dest: "{{ monitoring_script_path }}"
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
# System Healthcheck Script
|
||||
# Sends regular heartbeat pings to Uptime Kuma
|
||||
# This ensures the system is running and able to communicate
|
||||
|
||||
LOG_FILE="{{ log_file }}"
|
||||
UPTIME_KUMA_URL="{{ uptime_kuma_healthcheck_push_url }}"
|
||||
HOSTNAME=$(hostname)
|
||||
|
||||
# Function to log messages
|
||||
log_message() {
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$LOG_FILE"
|
||||
}
|
||||
|
||||
# Function to send healthcheck ping to Uptime Kuma
|
||||
send_healthcheck() {
|
||||
local uptime_seconds=$(awk '{print int($1)}' /proc/uptime)
|
||||
local uptime_days=$((uptime_seconds / 86400))
|
||||
local uptime_hours=$(((uptime_seconds % 86400) / 3600))
|
||||
local uptime_minutes=$(((uptime_seconds % 3600) / 60))
|
||||
|
||||
local message="System healthy - Uptime: ${uptime_days}d ${uptime_hours}h ${uptime_minutes}m"
|
||||
|
||||
log_message "Sending healthcheck ping: $message"
|
||||
|
||||
# Send push notification to Uptime Kuma with status=up
|
||||
encoded_message=$(printf '%s\n' "$message" | sed 's/ /%20/g; s/(/%28/g; s/)/%29/g; s/:/%3A/g; s/\//%2F/g')
|
||||
response=$(curl -s -w "\n%{http_code}" "$UPTIME_KUMA_URL?status=up&msg=$encoded_message" 2>&1)
|
||||
http_code=$(echo "$response" | tail -n1)
|
||||
|
||||
if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
|
||||
log_message "Healthcheck ping sent successfully (HTTP $http_code)"
|
||||
else
|
||||
log_message "ERROR: Failed to send healthcheck ping (HTTP $http_code)"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main healthcheck logic
|
||||
main() {
|
||||
log_message "Starting system healthcheck for $HOSTNAME"
|
||||
|
||||
# Send healthcheck ping
|
||||
if send_healthcheck; then
|
||||
log_message "Healthcheck completed successfully"
|
||||
else
|
||||
log_message "ERROR: Healthcheck failed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0755'
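The sed pipeline above percent-encodes only a handful of characters; for comparison, a small sketch of the same encoding done with urllib, which handles anything in the message:

# Sketch of the query-string encoding that the sed substitutions above approximate.
from urllib.parse import urlencode

message = "System healthy - Uptime: 3d 4h 12m"
query = urlencode({"status": "up", "msg": message})
print(query)  # -> status=up&msg=System+healthy+-+Uptime%3A+3d+4h+12m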
|
||||
|
||||
- name: Create systemd service for system healthcheck
|
||||
copy:
|
||||
dest: "/etc/systemd/system/{{ systemd_service_name }}.service"
|
||||
content: |
|
||||
[Unit]
|
||||
Description=System Healthcheck Monitor
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart={{ monitoring_script_path }}
|
||||
User=root
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Create systemd timer for system healthcheck
|
||||
copy:
|
||||
dest: "/etc/systemd/system/{{ systemd_service_name }}.timer"
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Run System Healthcheck every minute
|
||||
Requires={{ systemd_service_name }}.service
|
||||
|
||||
[Timer]
|
||||
OnBootSec=30sec
|
||||
OnUnitActiveSec={{ healthcheck_interval_seconds }}sec
|
||||
Persistent=true
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
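A worked example of how the default timings above fit together; the exact alert delay depends on Uptime Kuma's retry handling, so treat the last comment as approximate:

# Default timing from the vars at the top of this playbook.
heartbeat_every = 60   # healthcheck_interval_seconds -> OnUnitActiveSec
kuma_window = 90       # healthcheck_timeout_seconds, passed as the monitor interval
retries = 1            # healthcheck_retries -> maxretries

print(f"slack per heartbeat: {kuma_window - heartbeat_every}s")
# With maxretries=1, a missed window is retried once before the monitor goes down,
# so roughly two consecutive missed heartbeats trigger the ntfy alert.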
|
||||
|
||||
- name: Reload systemd daemon
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
|
||||
- name: Enable and start system healthcheck timer
|
||||
systemd:
|
||||
name: "{{ systemd_service_name }}.timer"
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Test system healthcheck script
|
||||
command: "{{ monitoring_script_path }}"
|
||||
register: script_test
|
||||
changed_when: false
|
||||
|
||||
- name: Verify script execution
|
||||
assert:
|
||||
that:
|
||||
- script_test.rc == 0
|
||||
fail_msg: "System healthcheck script failed to execute properly"
|
||||
|
||||
- name: Display monitor information
|
||||
debug:
|
||||
msg: |
|
||||
✓ System healthcheck monitoring deployed successfully!
|
||||
|
||||
Monitor Name: {{ monitor_friendly_name }}
|
||||
Monitor Group: {{ uptime_kuma_monitor_group }}
|
||||
Healthcheck Interval: Every {{ healthcheck_interval_seconds }} seconds
Timeout: {{ healthcheck_timeout_seconds }} seconds
Retries: {{ healthcheck_retries }}

The system will send a heartbeat ping every {{ healthcheck_interval_seconds }} seconds.
Uptime Kuma will alert if no ping is received within {{ healthcheck_timeout_seconds }} seconds (with {{ healthcheck_retries }} retry attempt(s)).
|
||||
|
||||
- name: Clean up temporary Uptime Kuma setup script
|
||||
file:
|
||||
path: /tmp/setup_uptime_kuma_healthcheck_monitor.py
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
|
||||
ansible/infra_secrets.yml.example (new file, 11 lines)
@@ -0,0 +1,11 @@
# Uptime Kuma login credentials
# Used by the monitoring playbooks to create monitors automatically

uptime_kuma_username: "admin"
uptime_kuma_password: "your_password_here"

# ntfy credentials
# Used for notification channel setup in Uptime Kuma

ntfy_username: "your_ntfy_username"
ntfy_password: "your_ntfy_password"
@@ -1,3 +1,6 @@
# Infrastructure Variables
# Generated by setup_layer_0.sh

new_user: counterweight
ssh_port: 22
allow_ssh_from: "any"
|
|
|
|||
ansible/requirements.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
# Ansible Galaxy Collections Requirements
# Install with: ansible-galaxy collection install -r requirements.yml

collections:
  # Uptime Kuma Ansible Collection
  # Used by: infra/410_disk_usage_alerts.yml
  # Provides modules to manage Uptime Kuma monitors programmatically
  - name: lucasheld.uptime_kuma
    version: ">=1.0.0"
|
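Note that the monitor setup scripts in this commit run on the controller via ansible_playbook_python and import uptime_kuma_api directly, so the controller presumably also needs the uptime-kuma-api pip package alongside this collection (an assumption, not stated in the commit). A minimal login smoke test:

# Minimal controller-side check that the uptime_kuma_api package and credentials work.
# URL and credentials are placeholders for uptime_kuma_api_url / infra_secrets values.
from uptime_kuma_api import UptimeKumaApi

api = UptimeKumaApi("https://uptime.contrapeso.xyz", timeout=30)
api.login("admin", "your_password_here")
print(f"{len(api.get_monitors())} monitors visible")
api.disconnect()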
||||
|
||||
|
|
@ -3,9 +3,14 @@
|
|||
become: yes
|
||||
vars_files:
|
||||
- ../../infra_vars.yml
|
||||
- ../../services_config.yml
|
||||
- ../../infra_secrets.yml
|
||||
- ./forgejo_vars.yml
|
||||
vars:
|
||||
forgejo_subdomain: "{{ subdomains.forgejo }}"
|
||||
caddy_sites_dir: "{{ caddy_sites_dir }}"
|
||||
forgejo_domain: "{{ forgejo_subdomain }}.{{ root_domain }}"
|
||||
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
- name: Ensure required packages are installed
|
||||
|
|
@ -98,3 +103,109 @@
|
|||
service:
|
||||
name: caddy
|
||||
state: reloaded
|
||||
|
||||
- name: Create Uptime Kuma monitor setup script for Forgejo
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
copy:
|
||||
dest: /tmp/setup_forgejo_monitor.py
|
||||
content: |
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import yaml
|
||||
from uptime_kuma_api import UptimeKumaApi, MonitorType
|
||||
|
||||
try:
|
||||
with open('/tmp/ansible_config.yml', 'r') as f:
|
||||
config = yaml.safe_load(f)
|
||||
|
||||
url = config['uptime_kuma_url']
|
||||
username = config['username']
|
||||
password = config['password']
|
||||
monitor_url = config['monitor_url']
|
||||
monitor_name = config['monitor_name']
|
||||
|
||||
api = UptimeKumaApi(url, timeout=30)
|
||||
api.login(username, password)
|
||||
|
||||
# Get all monitors
|
||||
monitors = api.get_monitors()
|
||||
|
||||
# Find or create "services" group
|
||||
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||
if not group:
|
||||
group_result = api.add_monitor(type='group', name='services')
|
||||
# Refresh to get the group with id
|
||||
monitors = api.get_monitors()
|
||||
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||
|
||||
# Check if monitor already exists
|
||||
existing_monitor = None
|
||||
for monitor in monitors:
|
||||
if monitor.get('name') == monitor_name:
|
||||
existing_monitor = monitor
|
||||
break
|
||||
|
||||
# Get ntfy notification ID
|
||||
notifications = api.get_notifications()
|
||||
ntfy_notification_id = None
|
||||
for notif in notifications:
|
||||
if notif.get('type') == 'ntfy':
|
||||
ntfy_notification_id = notif.get('id')
|
||||
break
|
||||
|
||||
if existing_monitor:
|
||||
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
|
||||
print("Skipping - monitor already configured")
|
||||
else:
|
||||
print(f"Creating monitor '{monitor_name}'...")
|
||||
api.add_monitor(
|
||||
type=MonitorType.HTTP,
|
||||
name=monitor_name,
|
||||
url=monitor_url,
|
||||
parent=group['id'],
|
||||
interval=60,
|
||||
maxretries=3,
|
||||
retryInterval=60,
|
||||
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
|
||||
)
|
||||
|
||||
api.disconnect()
|
||||
print("SUCCESS")
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
mode: '0755'
|
||||
|
||||
- name: Create temporary config for monitor setup
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
copy:
|
||||
dest: /tmp/ansible_config.yml
|
||||
content: |
|
||||
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
|
||||
username: "{{ uptime_kuma_username }}"
|
||||
password: "{{ uptime_kuma_password }}"
|
||||
monitor_url: "https://{{ forgejo_domain }}/api/healthz"
|
||||
monitor_name: "Forgejo"
|
||||
mode: '0644'
|
||||
|
||||
- name: Run Uptime Kuma monitor setup
|
||||
command: python3 /tmp/setup_forgejo_monitor.py
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
register: monitor_setup
|
||||
changed_when: "'SUCCESS' in monitor_setup.stdout"
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Clean up temporary files
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /tmp/setup_forgejo_monitor.py
|
||||
- /tmp/ansible_config.yml
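The find-or-create block above is repeated almost verbatim in the Headscale, LNBits and Vaultwarden playbooks below; a sketch of how it could be factored into one helper (the function below is illustrative and not part of this commit):

# Hypothetical shared helper for the repeated group/monitor find-or-create pattern.
from uptime_kuma_api import UptimeKumaApi, MonitorType

def ensure_http_monitor(api: UptimeKumaApi, group_name: str, name: str, url: str, notification_id=None):
    monitors = api.get_monitors()
    group = next((m for m in monitors if m.get('name') == group_name and m.get('type') == 'group'), None)
    if not group:
        api.add_monitor(type='group', name=group_name)
        monitors = api.get_monitors()  # refresh so the group carries its id
        group = next((m for m in monitors if m.get('name') == group_name and m.get('type') == 'group'), None)
    if any(m.get('name') == name for m in monitors):
        return  # already configured; mirrors the scripts' skip behaviour
    api.add_monitor(
        type=MonitorType.HTTP, name=name, url=url, parent=group['id'],
        interval=60, maxretries=3, retryInterval=60,
        notificationIDList={notification_id: True} if notification_id else {},
    )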
|
||||
|
||||
|
|
|
|||
|
|
@ -9,9 +9,7 @@ forgejo_url: "https://codeberg.org/forgejo/forgejo/releases/download/v{{ forgejo
|
|||
forgejo_bin_path: "/usr/local/bin/forgejo"
|
||||
forgejo_user: "git"
|
||||
|
||||
# Caddy
|
||||
caddy_sites_dir: /etc/caddy/sites-enabled
|
||||
forgejo_subdomain: forgejo
|
||||
# (caddy_sites_dir and subdomain now in services_config.yml)
|
||||
|
||||
# Remote access
|
||||
remote_host: "{{ groups['vipy'][0] }}"
|
||||
|
|
|
|||
|
|
@ -3,10 +3,15 @@
|
|||
become: no
|
||||
vars_files:
|
||||
- ../../infra_vars.yml
|
||||
- ../../services_config.yml
|
||||
- ../../infra_secrets.yml
|
||||
- ./headscale_vars.yml
|
||||
vars:
|
||||
headscale_subdomain: "{{ subdomains.headscale }}"
|
||||
caddy_sites_dir: "{{ caddy_sites_dir }}"
|
||||
headscale_domain: "{{ headscale_subdomain }}.{{ root_domain }}"
|
||||
headscale_base_domain: "tailnet.{{ root_domain }}"
|
||||
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
- name: Install required packages
|
||||
|
|
@ -167,12 +172,32 @@
|
|||
debug:
|
||||
msg: "{{ headscale_config_test.stdout }}"
|
||||
|
||||
- name: Ensure headscale data directory has correct ownership before starting service
|
||||
become: yes
|
||||
file:
|
||||
path: /var/lib/headscale
|
||||
state: directory
|
||||
owner: headscale
|
||||
group: headscale
|
||||
mode: '0750'
|
||||
recurse: yes
|
||||
|
||||
- name: Ensure headscale run directory has correct ownership
|
||||
become: yes
|
||||
file:
|
||||
path: /var/run/headscale
|
||||
state: directory
|
||||
owner: headscale
|
||||
group: headscale
|
||||
mode: '0770'
|
||||
|
||||
- name: Enable and start headscale service
|
||||
become: yes
|
||||
systemd:
|
||||
name: headscale
|
||||
enabled: yes
|
||||
state: started
|
||||
daemon_reload: yes
|
||||
|
||||
- name: Wait for headscale unix socket to be ready
|
||||
become: yes
|
||||
|
|
@ -244,6 +269,111 @@
|
|||
become: yes
|
||||
command: systemctl reload caddy
|
||||
|
||||
- name: Create Uptime Kuma monitor setup script for Headscale
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
copy:
|
||||
dest: /tmp/setup_headscale_monitor.py
|
||||
content: |
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import yaml
|
||||
from uptime_kuma_api import UptimeKumaApi, MonitorType
|
||||
|
||||
try:
|
||||
with open('/tmp/ansible_config.yml', 'r') as f:
|
||||
config = yaml.safe_load(f)
|
||||
|
||||
url = config['uptime_kuma_url']
|
||||
username = config['username']
|
||||
password = config['password']
|
||||
monitor_url = config['monitor_url']
|
||||
monitor_name = config['monitor_name']
|
||||
|
||||
api = UptimeKumaApi(url, timeout=30)
|
||||
api.login(username, password)
|
||||
|
||||
# Get all monitors
|
||||
monitors = api.get_monitors()
|
||||
|
||||
# Find or create "services" group
|
||||
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||
if not group:
|
||||
group_result = api.add_monitor(type='group', name='services')
|
||||
# Refresh to get the group with id
|
||||
monitors = api.get_monitors()
|
||||
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||
|
||||
# Check if monitor already exists
|
||||
existing_monitor = None
|
||||
for monitor in monitors:
|
||||
if monitor.get('name') == monitor_name:
|
||||
existing_monitor = monitor
|
||||
break
|
||||
|
||||
# Get ntfy notification ID
|
||||
notifications = api.get_notifications()
|
||||
ntfy_notification_id = None
|
||||
for notif in notifications:
|
||||
if notif.get('type') == 'ntfy':
|
||||
ntfy_notification_id = notif.get('id')
|
||||
break
|
||||
|
||||
if existing_monitor:
|
||||
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
|
||||
print("Skipping - monitor already configured")
|
||||
else:
|
||||
print(f"Creating monitor '{monitor_name}'...")
|
||||
api.add_monitor(
|
||||
type=MonitorType.HTTP,
|
||||
name=monitor_name,
|
||||
url=monitor_url,
|
||||
parent=group['id'],
|
||||
interval=60,
|
||||
maxretries=3,
|
||||
retryInterval=60,
|
||||
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
|
||||
)
|
||||
|
||||
api.disconnect()
|
||||
print("SUCCESS")
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
mode: '0755'
|
||||
|
||||
- name: Create temporary config for monitor setup
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
copy:
|
||||
dest: /tmp/ansible_config.yml
|
||||
content: |
|
||||
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
|
||||
username: "{{ uptime_kuma_username }}"
|
||||
password: "{{ uptime_kuma_password }}"
|
||||
monitor_url: "https://{{ headscale_domain }}/health"
|
||||
monitor_name: "Headscale"
|
||||
mode: '0644'
|
||||
|
||||
- name: Run Uptime Kuma monitor setup
|
||||
command: python3 /tmp/setup_headscale_monitor.py
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
register: monitor_setup
|
||||
changed_when: "'SUCCESS' in monitor_setup.stdout"
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Clean up temporary files
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /tmp/setup_headscale_monitor.py
|
||||
- /tmp/ansible_config.yml
|
||||
|
||||
handlers:
|
||||
- name: Restart headscale
|
||||
become: yes
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
# Headscale service configuration
|
||||
headscale_subdomain: headscale
|
||||
# (subdomain and caddy_sites_dir now in services_config.yml)
|
||||
|
||||
headscale_port: 8080
|
||||
headscale_grpc_port: 50443
|
||||
|
||||
|
|
@ -9,9 +10,6 @@ headscale_version: "0.26.1"
|
|||
# Namespace for devices (users in headscale terminology)
|
||||
headscale_namespace: counter-net
|
||||
|
||||
# Caddy
|
||||
caddy_sites_dir: /etc/caddy/sites-enabled
|
||||
|
||||
# Data directory
|
||||
headscale_data_dir: /var/lib/headscale
|
||||
|
||||
|
|
|
|||
|
|
@ -3,9 +3,14 @@
|
|||
become: yes
|
||||
vars_files:
|
||||
- ../../infra_vars.yml
|
||||
- ../../services_config.yml
|
||||
- ../../infra_secrets.yml
|
||||
- ./lnbits_vars.yml
|
||||
vars:
|
||||
lnbits_subdomain: "{{ subdomains.lnbits }}"
|
||||
caddy_sites_dir: "{{ caddy_sites_dir }}"
|
||||
lnbits_domain: "{{ lnbits_subdomain }}.{{ root_domain }}"
|
||||
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
- name: Create lnbits directory
|
||||
|
|
@ -21,99 +26,39 @@
|
|||
name:
|
||||
- python3
|
||||
- python3-pip
|
||||
- python3-venv
|
||||
- python3-dev
|
||||
- git
|
||||
- curl
|
||||
- build-essential
|
||||
- pkg-config
|
||||
- libsecp256k1-dev
|
||||
- libffi-dev
|
||||
- libssl-dev
|
||||
- zlib1g-dev
|
||||
- libbz2-dev
|
||||
- libreadline-dev
|
||||
- libsqlite3-dev
|
||||
- libncursesw5-dev
|
||||
- xz-utils
|
||||
- tk-dev
|
||||
- libxml2-dev
|
||||
- libxmlsec1-dev
|
||||
- liblzma-dev
|
||||
- libgmp-dev
|
||||
- libpq-dev
|
||||
- automake
|
||||
- autoconf
|
||||
- libtool
|
||||
- m4
|
||||
- gawk
|
||||
state: present
|
||||
update_cache: yes
|
||||
|
||||
- name: Install pyenv
|
||||
- name: Install uv packaging tool
|
||||
shell: |
|
||||
curl https://pyenv.run | bash
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
args:
|
||||
creates: "/home/{{ ansible_user }}/.pyenv"
|
||||
creates: "/home/{{ ansible_user }}/.local/bin/uv"
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
environment:
|
||||
HOME: "/home/{{ ansible_user }}"
|
||||
|
||||
- name: Add pyenv to PATH
|
||||
lineinfile:
|
||||
path: "/home/{{ ansible_user }}/.bashrc"
|
||||
line: 'export PYENV_ROOT="$HOME/.pyenv"'
|
||||
state: present
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
|
||||
- name: Add pyenv init to bashrc
|
||||
lineinfile:
|
||||
path: "/home/{{ ansible_user }}/.bashrc"
|
||||
line: 'command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"'
|
||||
state: present
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
|
||||
- name: Add pyenv init to bashrc (second line)
|
||||
lineinfile:
|
||||
path: "/home/{{ ansible_user }}/.bashrc"
|
||||
line: 'eval "$(pyenv init -)"'
|
||||
state: present
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
|
||||
- name: Install Python 3.12 via pyenv
|
||||
shell: |
|
||||
export PYENV_ROOT="$HOME/.pyenv"
|
||||
export PATH="$PYENV_ROOT/bin:$PATH"
|
||||
eval "$(pyenv init -)"
|
||||
pyenv install -s 3.12.7
|
||||
pyenv global 3.12.7
|
||||
args:
|
||||
creates: "/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin/python3.12"
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
environment:
|
||||
HOME: "/home/{{ ansible_user }}"
|
||||
|
||||
- name: Install Poetry
|
||||
shell: |
|
||||
export PYENV_ROOT="$HOME/.pyenv"
|
||||
export PATH="$PYENV_ROOT/bin:$PYENV_ROOT/versions/3.12.7/bin:$PATH"
|
||||
eval "$(pyenv init -)"
|
||||
curl -sSL https://install.python-poetry.org | python3 -
|
||||
args:
|
||||
creates: "/home/{{ ansible_user }}/.local/bin/poetry"
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
environment:
|
||||
HOME: "/home/{{ ansible_user }}"
|
||||
|
||||
- name: Add Poetry to PATH
|
||||
lineinfile:
|
||||
path: "/home/{{ ansible_user }}/.bashrc"
|
||||
line: 'export PATH="$HOME/.local/bin:$PATH"'
|
||||
state: present
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
|
||||
- name: Clone LNBits repository
|
||||
git:
|
||||
repo: https://github.com/lnbits/lnbits.git
|
||||
dest: "{{ lnbits_dir }}/lnbits"
|
||||
version: main
|
||||
version: "v1.3.1"
|
||||
accept_hostkey: yes
|
||||
|
||||
- name: Change ownership of LNBits directory to user
|
||||
|
|
@ -123,27 +68,19 @@
|
|||
group: "{{ ansible_user }}"
|
||||
recurse: yes
|
||||
|
||||
- name: Configure Poetry to use Python 3.12
|
||||
command: /home/{{ ansible_user }}/.local/bin/poetry env use /home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin/python3.12
|
||||
- name: Install LNBits dependencies with uv (Python 3.12)
|
||||
command: /home/{{ ansible_user }}/.local/bin/uv sync --python 3.12 --all-extras --no-dev
|
||||
args:
|
||||
chdir: "{{ lnbits_dir }}/lnbits"
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
environment:
|
||||
HOME: "/home/{{ ansible_user }}"
|
||||
PATH: "/home/{{ ansible_user }}/.local/bin:/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin:/home/{{ ansible_user }}/.pyenv/bin:{{ ansible_env.PATH }}"
|
||||
PYENV_ROOT: "/home/{{ ansible_user }}/.pyenv"
|
||||
|
||||
- name: Install LNBits dependencies
|
||||
command: /home/{{ ansible_user }}/.local/bin/poetry install --only main
|
||||
args:
|
||||
chdir: "{{ lnbits_dir }}/lnbits"
|
||||
become: yes
|
||||
become_user: "{{ ansible_user }}"
|
||||
environment:
|
||||
HOME: "/home/{{ ansible_user }}"
|
||||
PATH: "/home/{{ ansible_user }}/.local/bin:/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin:/home/{{ ansible_user }}/.pyenv/bin:{{ ansible_env.PATH }}"
|
||||
PYENV_ROOT: "/home/{{ ansible_user }}/.pyenv"
|
||||
PATH: "/home/{{ ansible_user }}/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||
SECP_BUNDLED: "0"
|
||||
PKG_CONFIG_PATH: "/usr/lib/x86_64-linux-gnu/pkgconfig"
|
||||
ACLOCAL: "aclocal"
|
||||
AUTOMAKE: "automake"
|
||||
|
||||
- name: Copy .env.example to .env
|
||||
copy:
|
||||
|
|
@ -187,12 +124,12 @@
|
|||
Type=simple
|
||||
User={{ ansible_user }}
|
||||
WorkingDirectory={{ lnbits_dir }}/lnbits
|
||||
ExecStart=/home/{{ ansible_user }}/.local/bin/poetry run lnbits
|
||||
ExecStart=/home/{{ ansible_user }}/.local/bin/uv run --python 3.12 lnbits
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
Environment=PYTHONUNBUFFERED=1
|
||||
Environment="PATH=/home/{{ ansible_user }}/.local/bin:/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin:/home/{{ ansible_user }}/.pyenv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
Environment="PYENV_ROOT=/home/{{ ansible_user }}/.pyenv"
|
||||
Environment="PATH=/home/{{ ansible_user }}/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||
Environment=SECP_BUNDLED=0
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
|
@ -243,3 +180,109 @@
|
|||
|
||||
- name: Reload Caddy to apply new config
|
||||
command: systemctl reload caddy
|
||||
|
||||
- name: Create Uptime Kuma monitor setup script for LNBits
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
copy:
|
||||
dest: /tmp/setup_lnbits_monitor.py
|
||||
content: |
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import yaml
|
||||
from uptime_kuma_api import UptimeKumaApi, MonitorType
|
||||
|
||||
try:
|
||||
with open('/tmp/ansible_config.yml', 'r') as f:
|
||||
config = yaml.safe_load(f)
|
||||
|
||||
url = config['uptime_kuma_url']
|
||||
username = config['username']
|
||||
password = config['password']
|
||||
monitor_url = config['monitor_url']
|
||||
monitor_name = config['monitor_name']
|
||||
|
||||
api = UptimeKumaApi(url, timeout=30)
|
||||
api.login(username, password)
|
||||
|
||||
# Get all monitors
|
||||
monitors = api.get_monitors()
|
||||
|
||||
# Find or create "services" group
|
||||
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||
if not group:
|
||||
group_result = api.add_monitor(type='group', name='services')
|
||||
# Refresh to get the group with id
|
||||
monitors = api.get_monitors()
|
||||
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
|
||||
|
||||
# Check if monitor already exists
|
||||
existing_monitor = None
|
||||
for monitor in monitors:
|
||||
if monitor.get('name') == monitor_name:
|
||||
existing_monitor = monitor
|
||||
break
|
||||
|
||||
# Get ntfy notification ID
|
||||
notifications = api.get_notifications()
|
||||
ntfy_notification_id = None
|
||||
for notif in notifications:
|
||||
if notif.get('type') == 'ntfy':
|
||||
ntfy_notification_id = notif.get('id')
|
||||
break
|
||||
|
||||
if existing_monitor:
|
||||
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
|
||||
print("Skipping - monitor already configured")
|
||||
else:
|
||||
print(f"Creating monitor '{monitor_name}'...")
|
||||
api.add_monitor(
|
||||
type=MonitorType.HTTP,
|
||||
name=monitor_name,
|
||||
url=monitor_url,
|
||||
parent=group['id'],
|
||||
interval=60,
|
||||
maxretries=3,
|
||||
retryInterval=60,
|
||||
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
|
||||
)
|
||||
|
||||
api.disconnect()
|
||||
print("SUCCESS")
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
mode: '0755'
|
||||
|
||||
- name: Create temporary config for monitor setup
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
copy:
|
||||
dest: /tmp/ansible_config.yml
|
||||
content: |
|
||||
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
|
||||
username: "{{ uptime_kuma_username }}"
|
||||
password: "{{ uptime_kuma_password }}"
|
||||
monitor_url: "https://{{ lnbits_domain }}/api/v1/health"
|
||||
monitor_name: "LNBits"
|
||||
mode: '0644'
|
||||
|
||||
- name: Run Uptime Kuma monitor setup
|
||||
command: python3 /tmp/setup_lnbits_monitor.py
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
register: monitor_setup
|
||||
changed_when: "'SUCCESS' in monitor_setup.stdout"
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Clean up temporary files
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /tmp/setup_lnbits_monitor.py
|
||||
- /tmp/ansible_config.yml
|
||||
|
||||
|
|
|
|||
|
|
@ -3,9 +3,7 @@ lnbits_dir: /opt/lnbits
|
|||
lnbits_data_dir: "{{ lnbits_dir }}/data"
|
||||
lnbits_port: 8765
|
||||
|
||||
# Caddy
|
||||
caddy_sites_dir: /etc/caddy/sites-enabled
|
||||
lnbits_subdomain: wallet
|
||||
# (caddy_sites_dir and subdomain now in services_config.yml)
|
||||
|
||||
# Remote access
|
||||
remote_host: "{{ groups['vipy'][0] }}"
|
||||
|
|
|
|||
ansible/services/memos/deploy_memos_playbook.yml (new file, 175 lines)
@@ -0,0 +1,175 @@
- name: Deploy memos and configure Caddy reverse proxy
|
||||
hosts: memos-box
|
||||
become: yes
|
||||
vars_files:
|
||||
- ../../infra_vars.yml
|
||||
- ../../services_config.yml
|
||||
- ./memos_vars.yml
|
||||
vars:
|
||||
memos_subdomain: "{{ subdomains.memos }}"
|
||||
caddy_sites_dir: "{{ caddy_sites_dir }}"
|
||||
memos_domain: "{{ memos_subdomain }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
- name: Install required packages
|
||||
apt:
|
||||
name:
|
||||
- wget
|
||||
- curl
|
||||
- unzip
|
||||
state: present
|
||||
update_cache: yes
|
||||
|
||||
- name: Get latest memos release version
|
||||
uri:
|
||||
url: https://api.github.com/repos/usememos/memos/releases/latest
|
||||
return_content: yes
|
||||
register: memos_latest_release
|
||||
|
||||
- name: Set memos version and find download URL
|
||||
set_fact:
|
||||
memos_version: "{{ memos_latest_release.json.tag_name | regex_replace('^v', '') }}"
|
||||
|
||||
- name: Find linux-amd64 download URL
|
||||
set_fact:
|
||||
memos_download_url: "{{ memos_latest_release.json.assets | json_query('[?contains(name, `linux-amd64`) && (contains(name, `.tar.gz`) || contains(name, `.zip`))].browser_download_url') | first }}"
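The json_query expression above relies on the jmespath Python library on the controller; as a plain-Python reference, the asset selection amounts to the following (the assets list is made-up example data):

# Plain-Python equivalent of the json_query asset selection above; example data only.
assets = [
    {"name": "memos-v0.22.0-darwin-arm64.tar.gz", "browser_download_url": "https://example.invalid/darwin"},
    {"name": "memos-v0.22.0-linux-amd64.tar.gz", "browser_download_url": "https://example.invalid/linux"},
]
download_url = next(
    a["browser_download_url"]
    for a in assets
    if "linux-amd64" in a["name"] and (".tar.gz" in a["name"] or ".zip" in a["name"])
)
print(download_url)  # -> https://example.invalid/linux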
|
||||
|
||||
- name: Display memos version to install
|
||||
debug:
|
||||
msg: "Installing memos version {{ memos_version }} from {{ memos_download_url }}"
|
||||
|
||||
- name: Download memos binary
|
||||
get_url:
|
||||
url: "{{ memos_download_url }}"
|
||||
dest: /tmp/memos_archive
|
||||
mode: '0644'
|
||||
register: memos_download
|
||||
|
||||
- name: Extract memos binary
|
||||
unarchive:
|
||||
src: /tmp/memos_archive
|
||||
dest: /tmp/memos_extract
|
||||
remote_src: yes
|
||||
creates: /tmp/memos_extract/memos
|
||||
|
||||
- name: Install memos binary
|
||||
copy:
|
||||
src: /tmp/memos_extract/memos
|
||||
dest: /usr/local/bin/memos
|
||||
mode: '0755'
|
||||
remote_src: yes
|
||||
notify: Restart memos
|
||||
|
||||
- name: Remove temporary files
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /tmp/memos_archive
|
||||
- /tmp/memos_extract
|
||||
|
||||
- name: Ensure memos user exists
|
||||
user:
|
||||
name: memos
|
||||
system: yes
|
||||
shell: /usr/sbin/nologin
|
||||
home: /var/lib/memos
|
||||
create_home: yes
|
||||
state: present
|
||||
|
||||
- name: Create memos data directory
|
||||
file:
|
||||
path: "{{ memos_data_dir }}"
|
||||
state: directory
|
||||
owner: memos
|
||||
group: memos
|
||||
mode: '0750'
|
||||
|
||||
- name: Create memos systemd service file
|
||||
copy:
|
||||
dest: /etc/systemd/system/memos.service
|
||||
content: |
|
||||
[Unit]
|
||||
Description=memos service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=memos
|
||||
Group=memos
|
||||
ExecStart=/usr/local/bin/memos --port {{ memos_port }} --data {{ memos_data_dir }}
|
||||
Restart=on-failure
|
||||
RestartSec=5s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
notify: Restart memos
|
||||
|
||||
- name: Enable and start memos service
|
||||
systemd:
|
||||
name: memos
|
||||
enabled: yes
|
||||
state: started
|
||||
daemon_reload: yes
|
||||
|
||||
- name: Wait for memos to be ready
|
||||
uri:
|
||||
url: "http://localhost:{{ memos_port }}/api/v1/status"
|
||||
status_code: 200
|
||||
register: memos_ready
|
||||
until: memos_ready.status == 200
|
||||
retries: 30
|
||||
delay: 2
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Allow HTTPS through UFW
|
||||
ufw:
|
||||
rule: allow
|
||||
port: '443'
|
||||
proto: tcp
|
||||
|
||||
- name: Allow HTTP through UFW (for Let's Encrypt)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: '80'
|
||||
proto: tcp
|
||||
|
||||
- name: Ensure Caddy sites-enabled directory exists
|
||||
file:
|
||||
path: "{{ caddy_sites_dir }}"
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0755'
|
||||
|
||||
- name: Ensure Caddyfile includes import directive for sites-enabled
|
||||
lineinfile:
|
||||
path: /etc/caddy/Caddyfile
|
||||
line: 'import sites-enabled/*'
|
||||
insertafter: EOF
|
||||
state: present
|
||||
backup: yes
|
||||
|
||||
- name: Create Caddy reverse proxy configuration for memos
|
||||
copy:
|
||||
dest: "{{ caddy_sites_dir }}/memos.conf"
|
||||
content: |
|
||||
{{ memos_domain }} {
|
||||
reverse_proxy localhost:{{ memos_port }}
|
||||
}
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Reload Caddy to apply new config
|
||||
command: systemctl reload caddy
|
||||
|
||||
handlers:
|
||||
- name: Restart memos
|
||||
systemd:
|
||||
name: memos
|
||||
state: restarted
|
||||
|
||||
ansible/services/memos/memos_vars.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
# General
|
||||
memos_data_dir: /var/lib/memos
|
||||
memos_port: 5230
|
||||
|
||||
# (caddy_sites_dir and subdomain now in services_config.yml)
|
||||
|
||||
# Remote access
|
||||
remote_host: "{{ groups['memos_box'][0] }}"
|
||||
remote_user: "{{ hostvars[remote_host]['ansible_user'] }}"
|
||||
remote_key_file: "{{ hostvars[remote_host]['ansible_ssh_private_key_file'] | default('') }}"
|
||||
|
||||
# Local backup
|
||||
local_backup_dir: "{{ lookup('env', 'HOME') }}/memos-backups"
|
||||
backup_script_path: "{{ lookup('env', 'HOME') }}/.local/bin/memos_backup.sh"
|
||||
|
||||
|
||||
|
|
@ -3,8 +3,11 @@
|
|||
become: yes
|
||||
vars_files:
|
||||
- ../../infra_vars.yml
|
||||
- ../../services_config.yml
|
||||
- ./ntfy_emergency_app_vars.yml
|
||||
vars:
|
||||
ntfy_emergency_app_subdomain: "{{ subdomains.ntfy_emergency_app }}"
|
||||
caddy_sites_dir: "{{ caddy_sites_dir }}"
|
||||
ntfy_emergency_app_domain: "{{ ntfy_emergency_app_subdomain }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
|
|
|
|||
|
|
@ -2,9 +2,7 @@
|
|||
ntfy_emergency_app_dir: /opt/ntfy-emergency-app
|
||||
ntfy_emergency_app_port: 3000
|
||||
|
||||
# Caddy
|
||||
caddy_sites_dir: /etc/caddy/sites-enabled
|
||||
ntfy_emergency_app_subdomain: avisame
|
||||
# (caddy_sites_dir and subdomain now in services_config.yml)
|
||||
|
||||
# ntfy configuration
|
||||
ntfy_emergency_app_topic: "emergencia"
|
||||
|
|
|
|||
|
|
@ -3,8 +3,11 @@
|
|||
become: yes
|
||||
vars_files:
|
||||
- ../../infra_vars.yml
|
||||
- ../../services_config.yml
|
||||
- ./ntfy_vars.yml
|
||||
vars:
|
||||
ntfy_subdomain: "{{ subdomains.ntfy }}"
|
||||
caddy_sites_dir: "{{ caddy_sites_dir }}"
|
||||
ntfy_domain: "{{ ntfy_subdomain }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
|
|
|
|||
|
|
@ -1,3 +1,2 @@
|
|||
caddy_sites_dir: /etc/caddy/sites-enabled
|
||||
ntfy_subdomain: ntfy
|
||||
ntfy_port: 6674
|
||||
ntfy_port: 6674
|
||||
ntfy_topic: alerts # Topic for Uptime Kuma notifications
|
||||
ansible/services/ntfy/setup_ntfy_uptime_kuma_notification.yml (new file, 155 lines)
@@ -0,0 +1,155 @@
- name: Setup ntfy as Uptime Kuma Notification Channel
|
||||
hosts: watchtower
|
||||
become: no
|
||||
vars_files:
|
||||
- ../../infra_vars.yml
|
||||
- ../../services_config.yml
|
||||
- ../../infra_secrets.yml
|
||||
- ./ntfy_vars.yml
|
||||
- ../uptime_kuma/uptime_kuma_vars.yml
|
||||
|
||||
vars:
|
||||
ntfy_subdomain: "{{ subdomains.ntfy }}"
|
||||
uptime_kuma_subdomain: "{{ subdomains.uptime_kuma }}"
|
||||
ntfy_domain: "{{ ntfy_subdomain }}.{{ root_domain }}"
|
||||
ntfy_server_url: "https://{{ ntfy_domain }}"
|
||||
ntfy_priority: 4 # 1=min, 2=low, 3=default, 4=high, 5=max
|
||||
uptime_kuma_api_url: "https://{{ uptime_kuma_subdomain }}.{{ root_domain }}"
|
||||
|
||||
tasks:
|
||||
- name: Validate Uptime Kuma configuration
|
||||
assert:
|
||||
that:
|
||||
- uptime_kuma_api_url is defined
|
||||
- uptime_kuma_api_url != ""
|
||||
- uptime_kuma_username is defined
|
||||
- uptime_kuma_username != ""
|
||||
- uptime_kuma_password is defined
|
||||
- uptime_kuma_password != ""
|
||||
fail_msg: "uptime_kuma_api_url, uptime_kuma_username and uptime_kuma_password must be set"
|
||||
|
||||
- name: Validate ntfy configuration
|
||||
assert:
|
||||
that:
|
||||
- ntfy_domain is defined
|
||||
- ntfy_domain != ""
|
||||
- ntfy_topic is defined
|
||||
- ntfy_topic != ""
|
||||
- ntfy_username is defined
|
||||
- ntfy_username != ""
|
||||
- ntfy_password is defined
|
||||
- ntfy_password != ""
|
||||
fail_msg: "ntfy_domain, ntfy_topic, ntfy_username and ntfy_password must be set"
|
||||
|
||||
- name: Create Uptime Kuma ntfy notification setup script
|
||||
copy:
|
||||
dest: /tmp/setup_uptime_kuma_ntfy_notification.py
|
||||
content: |
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import json
|
||||
from uptime_kuma_api import UptimeKumaApi
|
||||
|
||||
def main():
|
||||
api_url = sys.argv[1]
|
||||
username = sys.argv[2]
|
||||
password = sys.argv[3]
|
||||
notification_name = sys.argv[4]
|
||||
ntfy_server_url = sys.argv[5]
|
||||
ntfy_topic = sys.argv[6]
|
||||
ntfy_username = sys.argv[7]
|
||||
ntfy_password = sys.argv[8]
|
||||
ntfy_priority = int(sys.argv[9])
|
||||
|
||||
api = UptimeKumaApi(api_url, timeout=60, wait_events=2.0)
|
||||
api.login(username, password)
|
||||
|
||||
# Get all notifications
|
||||
notifications = api.get_notifications()
|
||||
|
||||
# Find existing ntfy notification by name
|
||||
existing_notification = next((n for n in notifications if n.get('name') == notification_name), None)
|
||||
|
||||
notification_data = {
|
||||
'name': notification_name,
|
||||
'type': 'ntfy',
|
||||
'isDefault': True, # Apply to all monitors by default
|
||||
'applyExisting': True, # Apply to existing monitors
|
||||
'ntfyserverurl': ntfy_server_url,
|
||||
'ntfytopic': ntfy_topic,
|
||||
'ntfyusername': ntfy_username,
|
||||
'ntfypassword': ntfy_password,
|
||||
'ntfyPriority': ntfy_priority
|
||||
}
|
||||
|
||||
if existing_notification:
|
||||
notification = api.edit_notification(existing_notification['id'], **notification_data)
|
||||
action = "updated"
|
||||
else:
|
||||
notification = api.add_notification(**notification_data)
|
||||
action = "created"
|
||||
|
||||
# Output result as JSON
|
||||
result = {
|
||||
'notification_id': notification['id'],
|
||||
'notification_name': notification_name,
|
||||
'ntfy_server': ntfy_server_url,
|
||||
'ntfy_topic': ntfy_topic,
|
||||
'action': action
|
||||
}
|
||||
print(json.dumps(result))
|
||||
|
||||
api.disconnect()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
mode: '0755'
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
|
||||
- name: Run Uptime Kuma ntfy notification setup script
|
||||
command: >
|
||||
{{ ansible_playbook_python }}
|
||||
/tmp/setup_uptime_kuma_ntfy_notification.py
|
||||
"{{ uptime_kuma_api_url }}"
|
||||
"{{ uptime_kuma_username }}"
|
||||
"{{ uptime_kuma_password }}"
|
||||
"ntfy ({{ ntfy_topic }})"
|
||||
"{{ ntfy_server_url }}"
|
||||
"{{ ntfy_topic }}"
|
||||
"{{ ntfy_username }}"
|
||||
"{{ ntfy_password }}"
|
||||
"{{ ntfy_priority }}"
|
||||
register: notification_setup_result
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
changed_when: false
|
||||
|
||||
- name: Parse notification setup result
|
||||
set_fact:
|
||||
notification_info_parsed: "{{ notification_setup_result.stdout | from_json }}"
|
||||
|
||||
- name: Display notification information
|
||||
debug:
|
||||
msg: |
|
||||
✓ ntfy notification channel {{ notification_info_parsed.action }} successfully!
|
||||
|
||||
Notification Name: ntfy ({{ ntfy_topic }})
|
||||
ntfy Server: {{ ntfy_server_url }}
|
||||
ntfy Topic: {{ ntfy_topic }}
|
||||
Priority: {{ ntfy_priority }} (4=high)
|
||||
Default for all monitors: Yes
|
||||
Applied to existing monitors: Yes
|
||||
|
||||
All Uptime Kuma monitors will now send alerts to your ntfy server
|
||||
on the "{{ ntfy_topic }}" topic.
|
||||
|
||||
You can subscribe to alerts at: {{ ntfy_server_url }}/{{ ntfy_topic }}
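One way to confirm the channel end-to-end is to publish a test message to the same topic; a sketch with requests, where the URL and credentials stand in for the ntfy_* variables above:

# Sketch: publish a test message to the ntfy topic Uptime Kuma will notify on.
# Server, topic and credentials below are placeholders for the playbook variables.
import requests

resp = requests.post(
    "https://ntfy.contrapeso.xyz/alerts",           # {{ ntfy_server_url }}/{{ ntfy_topic }}
    data="Test alert from the Uptime Kuma setup",
    headers={"Title": "uptime-kuma test", "Priority": "high"},
    auth=("your_ntfy_username", "your_ntfy_password"),
    timeout=10,
)
resp.raise_for_status()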
|
||||
|
||||
- name: Clean up temporary Uptime Kuma setup script
|
||||
file:
|
||||
path: /tmp/setup_uptime_kuma_ntfy_notification.py
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
|
||||
|
|
@@ -3,7 +3,12 @@
  become: yes
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ./personal_blog_vars.yml
  vars:
    personal_blog_subdomain: "{{ subdomains.personal_blog }}"
    caddy_sites_dir: "{{ caddy_sites_dir }}"
    personal_blog_domain: "{{ personal_blog_subdomain }}.{{ root_domain }}"

  tasks:
    - name: Install git

@@ -1,6 +1,4 @@
caddy_sites_dir: /etc/caddy/sites-enabled
personal_blog_subdomain: pablohere
personal_blog_domain: pablohere.contrapeso.xyz
# (caddy_sites_dir and subdomain now in services_config.yml)
personal_blog_git_repo: https://forgejo.contrapeso.xyz/counterweight/pablohere.git
personal_blog_git_username: counterweight
personal_blog_source_dir: /opt/personal-blog

@@ -3,8 +3,11 @@
  become: yes
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ./uptime_kuma_vars.yml
  vars:
    uptime_kuma_subdomain: "{{ subdomains.uptime_kuma }}"
    caddy_sites_dir: "{{ caddy_sites_dir }}"
    uptime_kuma_domain: "{{ uptime_kuma_subdomain }}.{{ root_domain }}"

  tasks:

@@ -3,9 +3,7 @@ uptime_kuma_dir: /opt/uptime-kuma
uptime_kuma_data_dir: "{{ uptime_kuma_dir }}/data"
uptime_kuma_port: 3001

# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
uptime_kuma_subdomain: uptime
# (caddy_sites_dir and subdomain now in services_config.yml)

# Remote access
remote_host: "{{ groups['watchtower'][0] }}"

@@ -3,9 +3,14 @@
  become: yes
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ../../infra_secrets.yml
    - ./vaultwarden_vars.yml
  vars:
    vaultwarden_subdomain: "{{ subdomains.vaultwarden }}"
    caddy_sites_dir: "{{ caddy_sites_dir }}"
    vaultwarden_domain: "{{ vaultwarden_subdomain }}.{{ root_domain }}"
    uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"

  tasks:
    - name: Create vaultwarden directory

@@ -106,3 +111,110 @@
    - name: Reload Caddy to apply new config
      command: systemctl reload caddy

    - name: Create Uptime Kuma monitor setup script for Vaultwarden
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/setup_vaultwarden_monitor.py
        content: |
          #!/usr/bin/env python3
          import sys
          import yaml
          from uptime_kuma_api import UptimeKumaApi, MonitorType

          try:
              # Load configs
              with open('/tmp/ansible_config.yml', 'r') as f:
                  config = yaml.safe_load(f)

              url = config['uptime_kuma_url']
              username = config['username']
              password = config['password']
              monitor_url = config['monitor_url']
              monitor_name = config['monitor_name']

              # Connect to Uptime Kuma
              api = UptimeKumaApi(url, timeout=30)
              api.login(username, password)

              # Get all monitors
              monitors = api.get_monitors()

              # Find or create "services" group
              group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
              if not group:
                  group_result = api.add_monitor(type='group', name='services')
                  # Refresh to get the group with id
                  monitors = api.get_monitors()
                  group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)

              # Check if monitor already exists
              existing_monitor = None
              for monitor in monitors:
                  if monitor.get('name') == monitor_name:
                      existing_monitor = monitor
                      break

              # Get ntfy notification ID
              notifications = api.get_notifications()
              ntfy_notification_id = None
              for notif in notifications:
                  if notif.get('type') == 'ntfy':
                      ntfy_notification_id = notif.get('id')
                      break

              if existing_monitor:
                  print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
                  print("Skipping - monitor already configured")
              else:
                  print(f"Creating monitor '{monitor_name}'...")
                  api.add_monitor(
                      type=MonitorType.HTTP,
                      name=monitor_name,
                      url=monitor_url,
                      parent=group['id'],
                      interval=60,
                      maxretries=3,
                      retryInterval=60,
                      notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
                  )

              api.disconnect()
              print("SUCCESS")

          except Exception as e:
              print(f"ERROR: {str(e)}", file=sys.stderr)
              sys.exit(1)
        mode: '0755'

    - name: Create temporary config for monitor setup
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/ansible_config.yml
        content: |
          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
          username: "{{ uptime_kuma_username }}"
          password: "{{ uptime_kuma_password }}"
          monitor_url: "https://{{ vaultwarden_domain }}/alive"
          monitor_name: "Vaultwarden"
        mode: '0644'

    - name: Run Uptime Kuma monitor setup
      command: python3 /tmp/setup_vaultwarden_monitor.py
      delegate_to: localhost
      become: no
      register: monitor_setup
      changed_when: "'SUCCESS' in monitor_setup.stdout"
      ignore_errors: yes

    - name: Clean up temporary files
      delegate_to: localhost
      become: no
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/setup_vaultwarden_monitor.py
        - /tmp/ansible_config.yml

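For a manual check of what the new monitor will poll, the sketch below (a hypothetical helper, not part of this commit) requests the same /alive endpoint that monitor_url points at; the domain is a placeholder standing in for vaultwarden_domain.

    # check_vaultwarden_alive.py - minimal sketch
    import requests

    VAULTWARDEN_DOMAIN = "test-vault.contrapeso.xyz"   # placeholder for vaultwarden_domain

    resp = requests.get(f"https://{VAULTWARDEN_DOMAIN}/alive", timeout=10)
    print(resp.status_code, resp.text)   # Vaultwarden is expected to answer 200 here when healthy
    resp.raise_for_status()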
@@ -3,9 +3,7 @@ vaultwarden_dir: /opt/vaultwarden
vaultwarden_data_dir: "{{ vaultwarden_dir }}/data"
vaultwarden_port: 8222

# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
vaultwarden_subdomain: vault
# (caddy_sites_dir and subdomain now in services_config.yml)

# Remote access
remote_host: "{{ groups['vipy'][0] }}"

26
ansible/services_config.yml
Normal file
@@ -0,0 +1,26 @@
# Centralized Services Configuration
# Subdomains and Caddy settings for all services

# Edit these subdomains to match your preferences
subdomains:
  # Monitoring Services (on watchtower)
  ntfy: test-ntfy
  uptime_kuma: test-uptime

  # VPN Infrastructure (on spacey)
  headscale: test-headscale

  # Core Services (on vipy)
  vaultwarden: test-vault
  forgejo: test-git
  lnbits: test-lnbits

  # Secondary Services (on vipy)
  personal_blog: test-blog
  ntfy_emergency_app: test-emergency

  # Memos (on memos-box)
  memos: test-memos

# Caddy configuration
caddy_sites_dir: /etc/caddy/sites-enabled

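Since every playbook builds its public hostname as "https://{{ subdomains.<service> }}.{{ root_domain }}", a small script can preview the URLs a given services_config.yml will produce. This is a hypothetical helper, not part of the commit; the root domain is hard-coded here instead of being read from infra_vars.yml.

    # show_service_urls.py - minimal sketch
    import yaml

    ROOT_DOMAIN = "contrapeso.xyz"   # root_domain from infra_vars.yml, hard-coded for brevity

    with open("ansible/services_config.yml") as f:
        config = yaml.safe_load(f)

    for service, subdomain in config["subdomains"].items():
        print(f"{service:20s} https://{subdomain}.{ROOT_DOMAIN}")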
26
ansible/services_config.yml.example
Normal file
@@ -0,0 +1,26 @@
# Centralized Services Configuration
# Copy this to services_config.yml and customize

# Edit these subdomains to match your preferences
subdomains:
  # Monitoring Services (on watchtower)
  ntfy: ntfy
  uptime_kuma: uptime

  # VPN Infrastructure (on spacey)
  headscale: headscale

  # Core Services (on vipy)
  vaultwarden: vault
  forgejo: git
  lnbits: lnbits

  # Secondary Services (on vipy)
  personal_blog: blog
  ntfy_emergency_app: emergency

  # Memos (on memos-box)
  memos: memos

# Caddy configuration
caddy_sites_dir: /etc/caddy/sites-enabled

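Since the example file is meant to be copied to services_config.yml and customized, a quick sanity check before running the playbooks is to confirm the live config still defines every subdomain key the example does. The sketch below is a hypothetical helper, not part of this commit.

    # check_services_config.py - minimal sketch
    import yaml

    with open("ansible/services_config.yml.example") as f:
        example = yaml.safe_load(f)
    with open("ansible/services_config.yml") as f:
        actual = yaml.safe_load(f)

    missing = set(example["subdomains"]) - set(actual.get("subdomains", {}))
    if missing:
        raise SystemExit(f"services_config.yml is missing subdomains: {sorted(missing)}")
    print("services_config.yml defines all subdomains from the example")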