# personal_infra/ansible/services/vaultwarden/deploy_vaultwarden_playbook.yml
---
- name: Deploy Vaultwarden with Docker Compose and configure Caddy reverse proxy
  hosts: vipy
  become: yes
  vars_files:
    - ../../infra_vars.yml
    - ../../services_config.yml
    - ../../infra_secrets.yml
    - ./vaultwarden_vars.yml
  vars:
    vaultwarden_subdomain: "{{ subdomains.vaultwarden }}"
    # caddy_sites_dir, vaultwarden_dir and vaultwarden_port are expected to be defined
    # in the vars files included above.
    vaultwarden_domain: "{{ vaultwarden_subdomain }}.{{ root_domain }}"
    uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"

  tasks:
    - name: Create vaultwarden directory
      file:
        path: "{{ vaultwarden_dir }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0755'

    - name: Create docker-compose.yml for vaultwarden
      copy:
        dest: "{{ vaultwarden_dir }}/docker-compose.yml"
        content: |
          version: "3"
          services:
            vaultwarden:
              image: vaultwarden/server:latest
              container_name: vaultwarden
              restart: unless-stopped
              ports:
                - "{{ vaultwarden_port }}:80"
              volumes:
                - ./data:/data
              environment:
                WEBSOCKET_ENABLED: 'true'
                DOMAIN: "https://{{ vaultwarden_domain }}"
                SIGNUPS_ALLOWED: 'true'
                LOG_FILE: /data/vaultwarden.log
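
    # LOG_FILE lands inside the mounted ./data volume, so the same file shows up on the
    # host at {{ vaultwarden_dir }}/data/vaultwarden.log — the path the Fail2Ban jail
    # below reads (assuming vaultwarden_data_dir points there).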

    - name: Deploy vaultwarden container with docker compose
      command: docker compose up -d
      args:
        chdir: "{{ vaultwarden_dir }}"
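
    # Sketch of an idempotent alternative (assumes the community.docker collection is
    # installed on the control node):
    # - name: Deploy vaultwarden container with docker compose
    #   community.docker.docker_compose_v2:
    #     project_src: "{{ vaultwarden_dir }}"
    #     state: present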

    - name: Create Fail2Ban filter for Vaultwarden
      copy:
        dest: /etc/fail2ban/filter.d/vaultwarden.local
        owner: root
        group: root
        mode: '0644'
        content: |
          [INCLUDES]
          before = common.conf

          [Definition]
          failregex = ^.*?Username or password is incorrect\. Try again\. IP: <ADDR>\. Username:.*$
          ignoreregex =
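
    # <ADDR> is Fail2Ban's built-in tag for the source address (IPv4 or IPv6); the regex
    # matches Vaultwarden's failed-login log line.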

    - name: Create Fail2Ban jail for Vaultwarden
      copy:
        dest: /etc/fail2ban/jail.d/vaultwarden.local
        owner: root
        group: root
        mode: '0644'
        content: |
          [vaultwarden]
          enabled = true
          port = http,https
          filter = vaultwarden
          logpath = {{ vaultwarden_data_dir }}/vaultwarden.log
          maxretry = 10
          findtime = 10m
          bantime = 1h
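
    # 10 failed logins within 10 minutes ban the source IP for 1 hour. The filter can be
    # dry-run against the live log with, for example:
    #   fail2ban-regex /path/to/vaultwarden.log /etc/fail2ban/filter.d/vaultwarden.local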

    - name: Restart fail2ban to apply changes
      systemd:
        name: fail2ban
        state: restarted

    - name: Ensure Caddy sites-enabled directory exists
      file:
        path: "{{ caddy_sites_dir }}"
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Ensure Caddyfile includes import directive for sites-enabled
      lineinfile:
        path: /etc/caddy/Caddyfile
        line: 'import sites-enabled/*'
        insertafter: EOF
        state: present
        backup: yes
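
    # Relative import paths are resolved against the importing Caddyfile's directory
    # (/etc/caddy here), so this assumes caddy_sites_dir points at /etc/caddy/sites-enabled.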

    - name: Create Caddy reverse proxy configuration for vaultwarden
      copy:
        dest: "{{ caddy_sites_dir }}/vaultwarden.conf"
        content: |
          {{ vaultwarden_domain }} {
              reverse_proxy localhost:{{ vaultwarden_port }}
          }
        owner: root
        group: root
        mode: '0644'
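
    # Caddy obtains and renews the TLS certificate for this site automatically (ACME),
    # provided DNS for the domain already points at this host.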

    - name: Reload Caddy to apply new config
      systemd:
        name: caddy
        state: reloaded

    - name: Create Uptime Kuma monitor setup script for Vaultwarden
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/setup_vaultwarden_monitor.py
        content: |
          #!/usr/bin/env python3
          import sys
          import yaml
          from uptime_kuma_api import UptimeKumaApi, MonitorType

          try:
              # Load the config written by the playbook
              with open('/tmp/ansible_config.yml', 'r') as f:
                  config = yaml.safe_load(f)
              url = config['uptime_kuma_url']
              username = config['username']
              password = config['password']
              monitor_url = config['monitor_url']
              monitor_name = config['monitor_name']

              # Connect to Uptime Kuma
              api = UptimeKumaApi(url, timeout=30)
              api.login(username, password)

              # Get all monitors
              monitors = api.get_monitors()

              # Find or create the "services" group
              group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
              if not group:
                  # Create the group, then refresh to pick up its id
                  api.add_monitor(type=MonitorType.GROUP, name='services')
                  monitors = api.get_monitors()
                  group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)

              # Check if the monitor already exists
              existing_monitor = None
              for monitor in monitors:
                  if monitor.get('name') == monitor_name:
                      existing_monitor = monitor
                      break

              # Get the ntfy notification ID, if one is configured
              notifications = api.get_notifications()
              ntfy_notification_id = None
              for notif in notifications:
                  if notif.get('type') == 'ntfy':
                      ntfy_notification_id = notif.get('id')
                      break

              if existing_monitor:
                  print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
                  print("Skipping - monitor already configured")
              else:
                  print(f"Creating monitor '{monitor_name}'...")
                  api.add_monitor(
                      type=MonitorType.HTTP,
                      name=monitor_name,
                      url=monitor_url,
                      parent=group['id'],
                      interval=60,
                      maxretries=3,
                      retryInterval=60,
                      notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
                  )

              api.disconnect()
              print("SUCCESS")
          except Exception as e:
              print(f"ERROR: {str(e)}", file=sys.stderr)
              sys.exit(1)
        mode: '0755'
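
    # The script runs on the control node and assumes the uptime-kuma-api Python package
    # (pip install uptime-kuma-api) is available there.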

    - name: Create temporary config for monitor setup
      delegate_to: localhost
      become: no
      copy:
        dest: /tmp/ansible_config.yml
        content: |
          uptime_kuma_url: "{{ uptime_kuma_api_url }}"
          username: "{{ uptime_kuma_username }}"
          password: "{{ uptime_kuma_password }}"
          monitor_url: "https://{{ vaultwarden_domain }}/alive"
          monitor_name: "Vaultwarden"
        mode: '0600'
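
    # /alive is Vaultwarden's unauthenticated health endpoint, so it is a safe target for
    # an HTTP monitor.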

    - name: Run Uptime Kuma monitor setup
      command: python3 /tmp/setup_vaultwarden_monitor.py
      delegate_to: localhost
      become: no
      register: monitor_setup
      changed_when: "'Creating monitor' in monitor_setup.stdout"
      ignore_errors: yes
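
    # ignore_errors keeps a Kuma outage or bad credentials from failing the whole deploy;
    # inspect monitor_setup.stderr if the monitor never shows up.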

    - name: Clean up temporary files
      delegate_to: localhost
      become: no
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/setup_vaultwarden_monitor.py
        - /tmp/ansible_config.yml