Add Uptime Kuma monitor provisioning and headscale directory-ownership fixes
commit c8754e1bdc
parent 3b88e6c5e8
43 changed files with 7310 additions and 121 deletions
@@ -3,10 +3,15 @@
  become: no
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ../../infra_secrets.yml
    - ./headscale_vars.yml
  vars:
    headscale_subdomain: "{{ subdomains.headscale }}"
    # NOTE: self-referential and therefore redundant -- if caddy_sites_dir is
    # already set by the vars_files above, that definition wins anyway; if it
    # is set nowhere else, this line triggers a recursive templating error.
    caddy_sites_dir: "{{ caddy_sites_dir }}"
    headscale_domain: "{{ headscale_subdomain }}.{{ root_domain }}"
    headscale_base_domain: "tailnet.{{ root_domain }}"
    uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"

  tasks:
    - name: Install required packages
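For readers tracing the templating: none of the input values appear in this hunk (root_domain and the subdomains map presumably come from the vars_files above), but with hypothetical inputs the derived variables resolve as follows:

  # Hypothetical inputs (assumed for illustration; not part of this commit):
  #   root_domain: example.com
  #   subdomains:
  #     headscale: hs
  #     uptime_kuma: status
  # Derived values:
  #   headscale_domain:      hs.example.com
  #   headscale_base_domain: tailnet.example.com
  #   uptime_kuma_api_url:   https://status.example.com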
@@ -167,12 +172,32 @@
      debug:
        msg: "{{ headscale_config_test.stdout }}"

    - name: Ensure headscale data directory has correct ownership before starting service
      become: yes
      file:
        path: /var/lib/headscale
        state: directory
        owner: headscale
        group: headscale
        mode: '0750'
        recurse: yes

    - name: Ensure headscale run directory has correct ownership
      become: yes
      file:
        path: /var/run/headscale
        state: directory
        owner: headscale
        group: headscale
        mode: '0770'

    - name: Enable and start headscale service
      become: yes
      systemd:
        name: headscale
        enabled: yes
        state: started
        daemon_reload: yes

    - name: Wait for headscale unix socket to be ready
      become: yes
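The socket-wait task is cut off at the hunk boundary, so its body is not shown here. A minimal sketch of what such a task might look like, assuming headscale's default unix socket path (the path and timeout are assumptions, not taken from this commit):

    - name: Wait for headscale unix socket to be ready   # sketch of the truncated task, not the commit's body
      become: yes
      wait_for:
        path: /var/run/headscale/headscale.sock   # assumed default headscale socket location
        timeout: 30

One caveat on the run-directory task above: /var/run is normally a tmpfs symlink to /run, so ownership set there with the file module disappears on reboot; a RuntimeDirectory= setting in the headscale systemd unit is the durable alternative.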
@@ -244,6 +269,111 @@
      become: yes
      command: systemctl reload caddy
    - name: Create Uptime Kuma monitor setup script for Headscale
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/setup_headscale_monitor.py
        content: |
          #!/usr/bin/env python3
          import sys
          import yaml
          from uptime_kuma_api import UptimeKumaApi, MonitorType

          try:
              with open('/tmp/ansible_config.yml', 'r') as f:
                  config = yaml.safe_load(f)

              url = config['uptime_kuma_url']
              username = config['username']
              password = config['password']
              monitor_url = config['monitor_url']
              monitor_name = config['monitor_name']

              api = UptimeKumaApi(url, timeout=30)
              api.login(username, password)

              # Get all monitors
              monitors = api.get_monitors()

              # Find or create the "services" group
              group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
              if not group:
                  api.add_monitor(type=MonitorType.GROUP, name='services')
                  # Refresh so the new group comes back with its id
                  monitors = api.get_monitors()
                  group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)

              # Check if the monitor already exists
              existing_monitor = None
              for monitor in monitors:
                  if monitor.get('name') == monitor_name:
                      existing_monitor = monitor
                      break

              # Get the ntfy notification ID
              notifications = api.get_notifications()
              ntfy_notification_id = None
              for notif in notifications:
                  if notif.get('type') == 'ntfy':
                      ntfy_notification_id = notif.get('id')
                      break

              if existing_monitor:
                  print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
                  print("Skipping - monitor already configured")
              else:
                  print(f"Creating monitor '{monitor_name}'...")
                  api.add_monitor(
                      type=MonitorType.HTTP,
                      name=monitor_name,
                      url=monitor_url,
                      parent=group['id'],
                      interval=60,
                      maxretries=3,
                      retryInterval=60,
                      # the library documents notificationIDList as a list of ids
                      notificationIDList=[ntfy_notification_id] if ntfy_notification_id else []
                  )

              api.disconnect()
              print("SUCCESS")

          except Exception as e:
              print(f"ERROR: {e}", file=sys.stderr)
              sys.exit(1)
        mode: '0755'
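One dependency note: the script runs on the control node (delegate_to: localhost) and imports yaml and uptime_kuma_api, yet nothing shown in this hunk installs the latter there. A minimal sketch of a task that could precede this one (the PyPI package name uptime-kuma-api is the library imported above; adding such a task is a suggestion, not part of this commit):

    - name: Ensure the uptime-kuma-api library is present on the control node   # sketch, not in this commit
      delegate_to: localhost
      become: no
      pip:
        name: uptime-kuma-api   # provides the uptime_kuma_api module the script imports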
    - name: Create temporary config for monitor setup
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/ansible_config.yml
        content: |
          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
          username: "{{ uptime_kuma_username }}"
          password: "{{ uptime_kuma_password }}"
          monitor_url: "https://{{ headscale_domain }}/health"
          monitor_name: "Headscale"
        mode: '0644'
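This file lands world-readable in /tmp with the Uptime Kuma password rendered into it. A hardened sketch of the same task (the 0600 mode and no_log are suggestions layered on top of what the commit does):

    - name: Create temporary config for monitor setup   # hardened sketch, not the commit's version
      delegate_to: localhost
      become: no
      no_log: true          # keep the rendered password out of Ansible's task output
      copy:
        dest: /tmp/ansible_config.yml
        content: |
          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
          username: "{{ uptime_kuma_username }}"
          password: "{{ uptime_kuma_password }}"
          monitor_url: "https://{{ headscale_domain }}/health"
          monitor_name: "Headscale"
        mode: '0600'        # readable only by the invoking user, unlike 0644 above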
    - name: Run Uptime Kuma monitor setup
      command: python3 /tmp/setup_headscale_monitor.py
      delegate_to: localhost
      become: no
      register: monitor_setup
      changed_when: "'SUCCESS' in monitor_setup.stdout"
      ignore_errors: yes
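Two quirks worth flagging: the script prints SUCCESS on both the create path and the already-exists path, so changed_when reports a change on every run, and ignore_errors: yes lets real API failures pass silently. A stricter sketch (keying on the "Creating monitor" message the script emits is a suggestion, not the commit's behaviour):

    - name: Run Uptime Kuma monitor setup   # stricter sketch, not the commit's version
      command: python3 /tmp/setup_headscale_monitor.py
      delegate_to: localhost
      become: no
      register: monitor_setup
      changed_when: "'Creating monitor' in monitor_setup.stdout"   # only the create path counts as a change
      # ignore_errors dropped so a failed login or API call fails the play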
    - name: Clean up temporary files
      delegate_to: localhost
      become: no
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/setup_headscale_monitor.py
        - /tmp/ansible_config.yml
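Fixed paths under /tmp can also collide if two plays run on the same control node at once. The tempfile module sidesteps that; a minimal sketch (the registered variable name is hypothetical, and the copy, command, and cleanup tasks above would need to reference its .path for this to be fully wired up):

    - name: Allocate a private scratch file for the monitor config   # sketch, not part of this commit
      delegate_to: localhost
      become: no
      tempfile:
        state: file
        suffix: .yml
      register: monitor_config_file   # hypothetical name; monitor_config_file.path is the unique location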
  handlers:
    - name: Restart headscale
      become: yes