# personal_infra/ansible/services/headscale/deploy_headscale_playbook.yml

- name: Deploy headscale and configure Caddy reverse proxy
  hosts: spacey
  become: no
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ../../infra_secrets.yml
    - ./headscale_vars.yml
  vars:
    headscale_subdomain: "{{ subdomains.headscale }}"
    headscale_domain: "{{ headscale_subdomain }}.{{ root_domain }}"
    headscale_base_domain: "tailnet.{{ root_domain }}"
    uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
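
  # The vars files above are expected to supply root_domain, subdomains,
  # uptime_kuma_username/password, caddy_sites_dir, and the headscale_*
  # settings used below. A plausible headscale_vars.yml shape (illustrative
  # values, not the actual file):
  #
  #   headscale_version: "0.23.0"
  #   headscale_port: 8080
  #   headscale_grpc_port: 50443
  #   headscale_namespace: "default"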

  tasks:
    - name: Install required packages
      become: yes
      apt:
        name:
          - wget
          - gnupg
        state: present
        update_cache: yes

    - name: Download headscale DEB package
      get_url:
        url: "https://github.com/juanfont/headscale/releases/download/v{{ headscale_version }}/headscale_{{ headscale_version }}_linux_amd64.deb"
        dest: /tmp/headscale.deb
        mode: '0644'

    - name: Install headscale package
      become: yes
      apt:
        deb: /tmp/headscale.deb
        state: present

    - name: Remove temporary DEB file
      file:
        path: /tmp/headscale.deb
        state: absent
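
    # A quick manual check that the install succeeded (run on the host):
    #
    #   dpkg -s headscale | grep -i version
    #   headscale version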

    - name: Ensure headscale user exists
      become: yes
      user:
        name: headscale
        system: yes
        shell: /usr/sbin/nologin
        home: /var/lib/headscale
        create_home: yes
        state: present

    - name: Create headscale data directory with correct ownership
      become: yes
      file:
        path: /var/lib/headscale
        state: directory
        owner: headscale
        group: headscale
        mode: '0750'
        recurse: yes

    - name: Create headscale run directory
      become: yes
      file:
        path: /var/run/headscale
        state: directory
        owner: headscale
        group: headscale
        mode: '0770'

    - name: Add counterweight user to headscale group
      become: yes
      user:
        name: counterweight
        groups: headscale
        append: yes
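
    # Note: group membership only takes effect for new login sessions. It is
    # what lets this user reach the unix socket below (0770, headscale:headscale)
    # and run the headscale CLI without sudo. Verify with:
    #
    #   id counterweight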

    - name: Create ACL policies file
      become: yes
      copy:
        dest: /etc/headscale/acl.json
        content: |
          {
            "ACLs": [],
            "Groups": {},
            "Hosts": {},
            "TagOwners": {},
            "Tests": []
          }
        owner: headscale
        group: headscale
        mode: '0640'
      notify: Restart headscale
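
    # Headscale ACLs are allow-rules with an implicit default deny, so this
    # empty policy blocks inter-node traffic until rules are added. A minimal
    # allow-all rule would look like this (illustrative, matching the key
    # style of the file above):
    #
    #   "ACLs": [
    #     { "action": "accept", "src": ["*"], "dst": ["*:*"] }
    #   ]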

    - name: Deploy headscale configuration file
      become: yes
      copy:
        dest: /etc/headscale/config.yaml
        content: |
          server_url: https://{{ headscale_domain }}
          listen_addr: 0.0.0.0:{{ headscale_port }}
          grpc_listen_addr: 0.0.0.0:{{ headscale_grpc_port }}
          grpc_allow_insecure: false
          private_key_path: /var/lib/headscale/private.key
          noise:
            private_key_path: /var/lib/headscale/noise_private.key
          prefixes:
            v4: 100.64.0.0/10
            v6: fd7a:115c:a1e0::/48
          derp:
            server:
              enabled: true
              region_id: 999
              region_code: "headscale"
              region_name: "Headscale Embedded DERP"
              verify_clients: true
              stun_listen_addr: "0.0.0.0:3478"
              private_key_path: /var/lib/headscale/derp_server_private.key
              automatically_add_embedded_derp_region: true
            urls:
              - https://controlplane.tailscale.com/derpmap/default
          database:
            type: sqlite3
            sqlite:
              path: /var/lib/headscale/db.sqlite
          unix_socket: /var/run/headscale/headscale.sock
          unix_socket_permission: "0770"
          log:
            level: info
            format: text
          policy:
            path: /etc/headscale/acl.json
          dns:
            base_domain: {{ headscale_base_domain | quote }}
            magic_dns: true
            search_domains:
              - {{ headscale_base_domain | quote }}
            nameservers:
              global:
                - 1.1.1.1
                - 1.0.0.1
        owner: root
        group: headscale
        mode: '0640'
      notify: Restart headscale
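
    # listen_addr binds 0.0.0.0, but assuming UFW's default-deny inbound
    # policy, only 80/443/3478 are opened below, so the headscale HTTP port
    # is reachable only through the Caddy proxy (and locally). The unix
    # socket is what the headscale CLI uses for admin commands.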

    - name: Test headscale configuration
      become: yes
      command: headscale configtest
      register: headscale_config_test
      changed_when: false

    - name: Display headscale config test results
      debug:
        msg: "{{ headscale_config_test.stdout }}"

    - name: Enable and start headscale service
      become: yes
      systemd:
        name: headscale
        enabled: yes
        state: started
        daemon_reload: yes
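
    # If the service fails to come up, the journal usually shows why:
    #
    #   journalctl -u headscale --no-pager -n 50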

    - name: Wait for headscale unix socket to be ready
      become: yes
      wait_for:
        path: /var/run/headscale/headscale.sock
        state: present
        timeout: 60
        delay: 2

    - name: Create headscale namespace if it doesn't exist
      become: yes
      command: headscale users create {{ headscale_namespace }}
      register: create_namespace_result
      failed_when: create_namespace_result.rc != 0 and 'already exists' not in create_namespace_result.stderr and 'UNIQUE constraint' not in create_namespace_result.stderr
      changed_when: create_namespace_result.rc == 0
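
    # With the user in place, a client can be enrolled, e.g. (illustrative
    # commands; flag names may vary by headscale version):
    #
    #   server$ headscale preauthkeys create --user <namespace> --expiration 1h
    #   client$ tailscale up --login-server https://<headscale_domain> --authkey <key>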

    - name: Allow HTTPS through UFW
      become: yes
      ufw:
        rule: allow
        port: '443'
        proto: tcp

    - name: Allow HTTP through UFW (for Let's Encrypt)
      become: yes
      ufw:
        rule: allow
        port: '80'
        proto: tcp

    - name: Allow STUN through UFW (for DERP server)
      become: yes
      ufw:
        rule: allow
        port: '3478'
        proto: udp
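
    # Resulting rules can be inspected with `sudo ufw status numbered`. Only
    # STUN needs its own UDP port; the embedded DERP relay itself is served
    # over the same HTTPS endpoint that Caddy proxies.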

    - name: Ensure Caddy sites-enabled directory exists
      become: yes
      file:
        path: "{{ caddy_sites_dir }}"
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Ensure Caddyfile includes import directive for sites-enabled
      become: yes
      lineinfile:
        path: /etc/caddy/Caddyfile
        line: 'import sites-enabled/*'
        insertafter: EOF
        state: present
        backup: yes

    - name: Create Caddy reverse proxy configuration for headscale
      become: yes
      copy:
        dest: "{{ caddy_sites_dir }}/headscale.conf"
        content: |
          {{ headscale_domain }} {
              reverse_proxy localhost:{{ headscale_port }}
          }
        owner: root
        group: root
        mode: '0644'
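
    # The rendered Caddyfile can be validated by hand before reloading:
    #
    #   caddy validate --config /etc/caddy/Caddyfile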

    - name: Reload Caddy to apply new config
      become: yes
      systemd:
        name: caddy
        state: reloaded

    - name: Create Uptime Kuma monitor setup script for Headscale
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/setup_headscale_monitor.py
        content: |
          #!/usr/bin/env python3
          import sys
          import yaml
          from uptime_kuma_api import UptimeKumaApi, MonitorType

          try:
              with open('/tmp/ansible_config.yml', 'r') as f:
                  config = yaml.safe_load(f)
              url = config['uptime_kuma_url']
              username = config['username']
              password = config['password']
              monitor_url = config['monitor_url']
              monitor_name = config['monitor_name']

              api = UptimeKumaApi(url, timeout=30)
              api.login(username, password)

              # Get all monitors
              monitors = api.get_monitors()

              # Find or create the "services" group
              group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
              if not group:
                  api.add_monitor(type=MonitorType.GROUP, name='services')
                  # Refresh to get the group with its id
                  monitors = api.get_monitors()
                  group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
              if not group:
                  raise RuntimeError("Could not find or create the 'services' group")

              # Check if the monitor already exists
              existing_monitor = None
              for monitor in monitors:
                  if monitor.get('name') == monitor_name:
                      existing_monitor = monitor
                      break

              # Get the ntfy notification ID, if one is configured
              notifications = api.get_notifications()
              ntfy_notification_id = None
              for notif in notifications:
                  if notif.get('type') == 'ntfy':
                      ntfy_notification_id = notif.get('id')
                      break

              if existing_monitor:
                  print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
                  print("Skipping - monitor already configured")
              else:
                  print(f"Creating monitor '{monitor_name}'...")
                  api.add_monitor(
                      type=MonitorType.HTTP,
                      name=monitor_name,
                      url=monitor_url,
                      parent=group['id'],
                      interval=60,
                      maxretries=3,
                      retryInterval=60,
                      notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
                  )

              api.disconnect()
              print("SUCCESS")
          except Exception as e:
              print(f"ERROR: {str(e)}", file=sys.stderr)
              sys.exit(1)
        mode: '0755'
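
    # The script depends on the uptime-kuma-api package being available to
    # the control node's python3:
    #
    #   pip install uptime-kuma-api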

    - name: Create temporary config for monitor setup
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/ansible_config.yml
        content: |
          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
          username: "{{ uptime_kuma_username }}"
          password: "{{ uptime_kuma_password }}"
          monitor_url: "https://{{ headscale_domain }}/health"
          monitor_name: "Headscale"
        mode: '0600'

    - name: Run Uptime Kuma monitor setup
      command: python3 /tmp/setup_headscale_monitor.py
      delegate_to: localhost
      become: no
      register: monitor_setup
      changed_when: "'Creating monitor' in monitor_setup.stdout"
      ignore_errors: yes

    - name: Clean up temporary files
      delegate_to: localhost
      become: no
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/setup_headscale_monitor.py
        - /tmp/ansible_config.yml

  handlers:
    - name: Restart headscale
      become: yes
      systemd:
        name: headscale
        state: restarted