Centralize service subdomains and caddy_sites_dir into services_config.yml; add Uptime Kuma monitors with ntfy alerts for Forgejo, Headscale, LNBits and Vaultwarden; migrate LNBits from pyenv/Poetry to uv; add memos deployment playbook

counterweight 2025-11-06 23:09:44 +01:00
parent 3b88e6c5e8
commit c8754e1bdc
Signed by: counterweight
GPG key ID: 883EDBAA726BD96C
43 changed files with 7310 additions and 121 deletions

View file

@ -3,9 +3,14 @@
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./forgejo_vars.yml
vars:
forgejo_subdomain: "{{ subdomains.forgejo }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
forgejo_domain: "{{ forgejo_subdomain }}.{{ root_domain }}"
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
tasks:
- name: Ensure required packages are installed
@ -98,3 +103,109 @@
service:
name: caddy
state: reloaded
- name: Create Uptime Kuma monitor setup script for Forgejo
delegate_to: localhost
become: no
copy:
dest: /tmp/setup_forgejo_monitor.py
content: |
#!/usr/bin/env python3
import sys
import yaml
from uptime_kuma_api import UptimeKumaApi, MonitorType
try:
with open('/tmp/ansible_config.yml', 'r') as f:
config = yaml.safe_load(f)
url = config['uptime_kuma_url']
username = config['username']
password = config['password']
monitor_url = config['monitor_url']
monitor_name = config['monitor_name']
api = UptimeKumaApi(url, timeout=30)
api.login(username, password)
# Get all monitors
monitors = api.get_monitors()
# Find or create "services" group
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if not group:
    api.add_monitor(type=MonitorType.GROUP, name='services')
    # Refresh so the new group is returned with its id
    monitors = api.get_monitors()
    group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if group is None:
    raise RuntimeError("could not find or create the 'services' monitor group")
# Check if monitor already exists
existing_monitor = None
for monitor in monitors:
if monitor.get('name') == monitor_name:
existing_monitor = monitor
break
# Get ntfy notification ID
notifications = api.get_notifications()
ntfy_notification_id = None
for notif in notifications:
if notif.get('type') == 'ntfy':
ntfy_notification_id = notif.get('id')
break
if existing_monitor:
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
print("Skipping - monitor already configured")
else:
print(f"Creating monitor '{monitor_name}'...")
api.add_monitor(
type=MonitorType.HTTP,
name=monitor_name,
url=monitor_url,
parent=group['id'],
interval=60,
maxretries=3,
retryInterval=60,
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
)
api.disconnect()
print("SUCCESS")
except Exception as e:
print(f"ERROR: {str(e)}", file=sys.stderr)
sys.exit(1)
mode: '0755'
- name: Create temporary config for monitor setup
delegate_to: localhost
become: no
copy:
dest: /tmp/ansible_config.yml
content: |
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
username: "{{ uptime_kuma_username }}"
password: "{{ uptime_kuma_password }}"
monitor_url: "https://{{ forgejo_domain }}/api/healthz"
monitor_name: "Forgejo"
mode: '0644'
- name: Run Uptime Kuma monitor setup
command: python3 /tmp/setup_forgejo_monitor.py
delegate_to: localhost
become: no
register: monitor_setup
changed_when: "'SUCCESS' in monitor_setup.stdout"
ignore_errors: yes
- name: Clean up temporary files
delegate_to: localhost
become: no
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/setup_forgejo_monitor.py
- /tmp/ansible_config.yml

View file

@ -9,9 +9,7 @@ forgejo_url: "https://codeberg.org/forgejo/forgejo/releases/download/v{{ forgejo
forgejo_bin_path: "/usr/local/bin/forgejo"
forgejo_user: "git"
# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
forgejo_subdomain: forgejo
# (caddy_sites_dir and subdomain now in services_config.yml)
# Remote access
remote_host: "{{ groups['vipy'][0] }}"
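For reference, a minimal sketch of what the shared services_config.yml presumably contains after this change (key names inferred from the subdomains.* lookups in the playbooks; the memos value is an assumption, the rest are the values removed from the per-service vars files):

# services_config.yml (hypothetical sketch)
caddy_sites_dir: /etc/caddy/sites-enabled
subdomains:
  forgejo: forgejo
  headscale: headscale
  lnbits: wallet
  memos: memos        # assumed; not visible in this diff
  ntfy: ntfy
  ntfy_emergency_app: avisame
  personal_blog: pablohere
  uptime_kuma: uptime
  vaultwarden: vault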

View file

@ -3,10 +3,15 @@
become: no
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./headscale_vars.yml
vars:
headscale_subdomain: "{{ subdomains.headscale }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
headscale_domain: "{{ headscale_subdomain }}.{{ root_domain }}"
headscale_base_domain: "tailnet.{{ root_domain }}"
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
tasks:
- name: Install required packages
@ -167,12 +172,32 @@
debug:
msg: "{{ headscale_config_test.stdout }}"
- name: Ensure headscale data directory has correct ownership before starting service
become: yes
file:
path: /var/lib/headscale
state: directory
owner: headscale
group: headscale
mode: '0750'
recurse: yes
- name: Ensure headscale run directory has correct ownership
become: yes
file:
path: /var/run/headscale
state: directory
owner: headscale
group: headscale
mode: '0770'
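Note: /var/run is a tmpfs that is recreated empty at boot, so the ownership set above will not survive a reboot. A hedged alternative is to let systemd create the directory itself via a drop-in (a sketch, assuming the unit is named headscale.service and reusing the existing Restart headscale handler):

- name: Create drop-in directory for the headscale unit
  become: yes
  file:
    path: /etc/systemd/system/headscale.service.d
    state: directory
    mode: '0755'
- name: Let systemd manage /var/run/headscale across reboots
  become: yes
  copy:
    dest: /etc/systemd/system/headscale.service.d/runtime-dir.conf
    content: |
      [Service]
      RuntimeDirectory=headscale
      RuntimeDirectoryMode=0770
  notify: Restart headscale

The "Enable and start headscale service" task below already passes daemon_reload: yes, which would pick up the drop-in.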
- name: Enable and start headscale service
become: yes
systemd:
name: headscale
enabled: yes
state: started
daemon_reload: yes
- name: Wait for headscale unix socket to be ready
become: yes
@ -244,6 +269,111 @@
become: yes
command: systemctl reload caddy
- name: Create Uptime Kuma monitor setup script for Headscale
delegate_to: localhost
become: no
copy:
dest: /tmp/setup_headscale_monitor.py
content: |
#!/usr/bin/env python3
import sys
import yaml
from uptime_kuma_api import UptimeKumaApi, MonitorType
try:
with open('/tmp/ansible_config.yml', 'r') as f:
config = yaml.safe_load(f)
url = config['uptime_kuma_url']
username = config['username']
password = config['password']
monitor_url = config['monitor_url']
monitor_name = config['monitor_name']
api = UptimeKumaApi(url, timeout=30)
api.login(username, password)
# Get all monitors
monitors = api.get_monitors()
# Find or create "services" group
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if not group:
    api.add_monitor(type=MonitorType.GROUP, name='services')
    # Refresh so the new group is returned with its id
    monitors = api.get_monitors()
    group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if group is None:
    raise RuntimeError("could not find or create the 'services' monitor group")
# Check if monitor already exists
existing_monitor = None
for monitor in monitors:
if monitor.get('name') == monitor_name:
existing_monitor = monitor
break
# Get ntfy notification ID
notifications = api.get_notifications()
ntfy_notification_id = None
for notif in notifications:
if notif.get('type') == 'ntfy':
ntfy_notification_id = notif.get('id')
break
if existing_monitor:
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
print("Skipping - monitor already configured")
else:
print(f"Creating monitor '{monitor_name}'...")
api.add_monitor(
type=MonitorType.HTTP,
name=monitor_name,
url=monitor_url,
parent=group['id'],
interval=60,
maxretries=3,
retryInterval=60,
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
)
api.disconnect()
print("SUCCESS")
except Exception as e:
print(f"ERROR: {str(e)}", file=sys.stderr)
sys.exit(1)
mode: '0755'
- name: Create temporary config for monitor setup
delegate_to: localhost
become: no
copy:
dest: /tmp/ansible_config.yml
content: |
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
username: "{{ uptime_kuma_username }}"
password: "{{ uptime_kuma_password }}"
monitor_url: "https://{{ headscale_domain }}/health"
monitor_name: "Headscale"
mode: '0644'
- name: Run Uptime Kuma monitor setup
command: python3 /tmp/setup_headscale_monitor.py
delegate_to: localhost
become: no
register: monitor_setup
changed_when: "'SUCCESS' in monitor_setup.stdout"
ignore_errors: yes
- name: Clean up temporary files
delegate_to: localhost
become: no
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/setup_headscale_monitor.py
- /tmp/ansible_config.yml
handlers:
- name: Restart headscale
become: yes

View file

@ -1,5 +1,6 @@
# Headscale service configuration
headscale_subdomain: headscale
# (subdomain and caddy_sites_dir now in services_config.yml)
headscale_port: 8080
headscale_grpc_port: 50443
@ -9,9 +10,6 @@ headscale_version: "0.26.1"
# Namespace for devices (users in headscale terminology)
headscale_namespace: counter-net
# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
# Data directory
headscale_data_dir: /var/lib/headscale

View file

@ -3,9 +3,14 @@
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./lnbits_vars.yml
vars:
lnbits_subdomain: "{{ subdomains.lnbits }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
lnbits_domain: "{{ lnbits_subdomain }}.{{ root_domain }}"
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
tasks:
- name: Create lnbits directory
@ -21,99 +26,39 @@
name:
- python3
- python3-pip
- python3-venv
- python3-dev
- git
- curl
- build-essential
- pkg-config
- libsecp256k1-dev
- libffi-dev
- libssl-dev
- zlib1g-dev
- libbz2-dev
- libreadline-dev
- libsqlite3-dev
- libncursesw5-dev
- xz-utils
- tk-dev
- libxml2-dev
- libxmlsec1-dev
- liblzma-dev
- libgmp-dev
- libpq-dev
- automake
- autoconf
- libtool
- m4
- gawk
state: present
update_cache: yes
- name: Install pyenv
- name: Install uv packaging tool
shell: |
curl https://pyenv.run | bash
curl -LsSf https://astral.sh/uv/install.sh | sh
args:
creates: "/home/{{ ansible_user }}/.pyenv"
creates: "/home/{{ ansible_user }}/.local/bin/uv"
become: yes
become_user: "{{ ansible_user }}"
environment:
HOME: "/home/{{ ansible_user }}"
- name: Add pyenv to PATH
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: 'export PYENV_ROOT="$HOME/.pyenv"'
state: present
become: yes
become_user: "{{ ansible_user }}"
- name: Add pyenv init to bashrc
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: 'command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"'
state: present
become: yes
become_user: "{{ ansible_user }}"
- name: Add pyenv init to bashrc (second line)
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: 'eval "$(pyenv init -)"'
state: present
become: yes
become_user: "{{ ansible_user }}"
- name: Install Python 3.12 via pyenv
shell: |
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
pyenv install -s 3.12.7
pyenv global 3.12.7
args:
creates: "/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin/python3.12"
become: yes
become_user: "{{ ansible_user }}"
environment:
HOME: "/home/{{ ansible_user }}"
- name: Install Poetry
shell: |
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PYENV_ROOT/versions/3.12.7/bin:$PATH"
eval "$(pyenv init -)"
curl -sSL https://install.python-poetry.org | python3 -
args:
creates: "/home/{{ ansible_user }}/.local/bin/poetry"
become: yes
become_user: "{{ ansible_user }}"
environment:
HOME: "/home/{{ ansible_user }}"
- name: Add Poetry to PATH
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: 'export PATH="$HOME/.local/bin:$PATH"'
state: present
become: yes
become_user: "{{ ansible_user }}"
- name: Clone LNBits repository
git:
repo: https://github.com/lnbits/lnbits.git
dest: "{{ lnbits_dir }}/lnbits"
version: main
version: "v1.3.1"
accept_hostkey: yes
- name: Change ownership of LNBits directory to user
@ -123,27 +68,19 @@
group: "{{ ansible_user }}"
recurse: yes
- name: Configure Poetry to use Python 3.12
command: /home/{{ ansible_user }}/.local/bin/poetry env use /home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin/python3.12
- name: Install LNBits dependencies with uv (Python 3.12)
command: /home/{{ ansible_user }}/.local/bin/uv sync --python 3.12 --all-extras --no-dev
args:
chdir: "{{ lnbits_dir }}/lnbits"
become: yes
become_user: "{{ ansible_user }}"
environment:
HOME: "/home/{{ ansible_user }}"
PATH: "/home/{{ ansible_user }}/.local/bin:/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin:/home/{{ ansible_user }}/.pyenv/bin:{{ ansible_env.PATH }}"
PYENV_ROOT: "/home/{{ ansible_user }}/.pyenv"
- name: Install LNBits dependencies
command: /home/{{ ansible_user }}/.local/bin/poetry install --only main
args:
chdir: "{{ lnbits_dir }}/lnbits"
become: yes
become_user: "{{ ansible_user }}"
environment:
HOME: "/home/{{ ansible_user }}"
PATH: "/home/{{ ansible_user }}/.local/bin:/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin:/home/{{ ansible_user }}/.pyenv/bin:{{ ansible_env.PATH }}"
PYENV_ROOT: "/home/{{ ansible_user }}/.pyenv"
PATH: "/home/{{ ansible_user }}/.local/bin:/usr/local/bin:/usr/bin:/bin"
SECP_BUNDLED: "0"
PKG_CONFIG_PATH: "/usr/lib/x86_64-linux-gnu/pkgconfig"
ACLOCAL: "aclocal"
AUTOMAKE: "automake"
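A quick sanity check can confirm that the uv-managed environment resolves before the systemd unit is wired up; a sketch under the same path assumptions as the tasks above:

- name: Verify the LNBits environment resolves under uv
  command: /home/{{ ansible_user }}/.local/bin/uv run --python 3.12 python -c "import lnbits"
  args:
    chdir: "{{ lnbits_dir }}/lnbits"
  become: yes
  become_user: "{{ ansible_user }}"
  environment:
    HOME: "/home/{{ ansible_user }}"
  changed_when: false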
- name: Copy .env.example to .env
copy:
@ -187,12 +124,12 @@
Type=simple
User={{ ansible_user }}
WorkingDirectory={{ lnbits_dir }}/lnbits
ExecStart=/home/{{ ansible_user }}/.local/bin/poetry run lnbits
ExecStart=/home/{{ ansible_user }}/.local/bin/uv run --python 3.12 lnbits
Restart=always
RestartSec=30
Environment=PYTHONUNBUFFERED=1
Environment="PATH=/home/{{ ansible_user }}/.local/bin:/home/{{ ansible_user }}/.pyenv/versions/3.12.7/bin:/home/{{ ansible_user }}/.pyenv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Environment="PYENV_ROOT=/home/{{ ansible_user }}/.pyenv"
Environment="PATH=/home/{{ ansible_user }}/.local/bin:/usr/local/bin:/usr/bin:/bin"
Environment=SECP_BUNDLED=0
[Install]
WantedBy=multi-user.target
@ -243,3 +180,109 @@
- name: Reload Caddy to apply new config
command: systemctl reload caddy
- name: Create Uptime Kuma monitor setup script for LNBits
delegate_to: localhost
become: no
copy:
dest: /tmp/setup_lnbits_monitor.py
content: |
#!/usr/bin/env python3
import sys
import yaml
from uptime_kuma_api import UptimeKumaApi, MonitorType
try:
with open('/tmp/ansible_config.yml', 'r') as f:
config = yaml.safe_load(f)
url = config['uptime_kuma_url']
username = config['username']
password = config['password']
monitor_url = config['monitor_url']
monitor_name = config['monitor_name']
api = UptimeKumaApi(url, timeout=30)
api.login(username, password)
# Get all monitors
monitors = api.get_monitors()
# Find or create "services" group
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if not group:
    api.add_monitor(type=MonitorType.GROUP, name='services')
    # Refresh so the new group is returned with its id
    monitors = api.get_monitors()
    group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if group is None:
    raise RuntimeError("could not find or create the 'services' monitor group")
# Check if monitor already exists
existing_monitor = None
for monitor in monitors:
if monitor.get('name') == monitor_name:
existing_monitor = monitor
break
# Get ntfy notification ID
notifications = api.get_notifications()
ntfy_notification_id = None
for notif in notifications:
if notif.get('type') == 'ntfy':
ntfy_notification_id = notif.get('id')
break
if existing_monitor:
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
print("Skipping - monitor already configured")
else:
print(f"Creating monitor '{monitor_name}'...")
api.add_monitor(
type=MonitorType.HTTP,
name=monitor_name,
url=monitor_url,
parent=group['id'],
interval=60,
maxretries=3,
retryInterval=60,
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
)
api.disconnect()
print("SUCCESS")
except Exception as e:
print(f"ERROR: {str(e)}", file=sys.stderr)
sys.exit(1)
mode: '0755'
- name: Create temporary config for monitor setup
delegate_to: localhost
become: no
copy:
dest: /tmp/ansible_config.yml
content: |
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
username: "{{ uptime_kuma_username }}"
password: "{{ uptime_kuma_password }}"
monitor_url: "https://{{ lnbits_domain }}/api/v1/health"
monitor_name: "LNBits"
mode: '0644'
- name: Run Uptime Kuma monitor setup
command: python3 /tmp/setup_lnbits_monitor.py
delegate_to: localhost
become: no
register: monitor_setup
changed_when: "'SUCCESS' in monitor_setup.stdout"
ignore_errors: yes
- name: Clean up temporary files
delegate_to: localhost
become: no
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/setup_lnbits_monitor.py
- /tmp/ansible_config.yml

View file

@ -3,9 +3,7 @@ lnbits_dir: /opt/lnbits
lnbits_data_dir: "{{ lnbits_dir }}/data"
lnbits_port: 8765
# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
lnbits_subdomain: wallet
# (caddy_sites_dir and subdomain now in services_config.yml)
# Remote access
remote_host: "{{ groups['vipy'][0] }}"

View file

@ -0,0 +1,175 @@
- name: Deploy memos and configure Caddy reverse proxy
hosts: memos_box
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ./memos_vars.yml
vars:
memos_subdomain: "{{ subdomains.memos }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
memos_domain: "{{ memos_subdomain }}.{{ root_domain }}"
tasks:
- name: Install required packages
apt:
name:
- wget
- curl
- unzip
state: present
update_cache: yes
- name: Get latest memos release version
uri:
url: https://api.github.com/repos/usememos/memos/releases/latest
return_content: yes
register: memos_latest_release
- name: Set memos version
set_fact:
memos_version: "{{ memos_latest_release.json.tag_name | regex_replace('^v', '') }}"
- name: Find linux-amd64 download URL
set_fact:
memos_download_url: "{{ memos_latest_release.json.assets | json_query('[?contains(name, `linux-amd64`) && (contains(name, `.tar.gz`) || contains(name, `.zip`))].browser_download_url') | first }}"
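The JMESPath expression above picks the first release asset whose name contains linux-amd64 and ends in .tar.gz or .zip. If the jmespath dependency on the control node is unwanted, an equivalent sketch using only core Jinja2 filters:

- name: Find linux-amd64 download URL (jmespath-free variant)
  set_fact:
    memos_download_url: >-
      {{ memos_latest_release.json.assets
         | selectattr('name', 'search', 'linux-amd64')
         | selectattr('name', 'search', '(tar\.gz|zip)$')
         | map(attribute='browser_download_url')
         | first }}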
- name: Display memos version to install
debug:
msg: "Installing memos version {{ memos_version }} from {{ memos_download_url }}"
- name: Download memos binary
get_url:
url: "{{ memos_download_url }}"
dest: /tmp/memos_archive
mode: '0644'
register: memos_download
- name: Create extraction directory for memos
  file:
    path: /tmp/memos_extract
    state: directory
    mode: '0755'
- name: Extract memos binary
  unarchive:
    src: /tmp/memos_archive
    dest: /tmp/memos_extract
    remote_src: yes
    creates: /tmp/memos_extract/memos
- name: Install memos binary
copy:
src: /tmp/memos_extract/memos
dest: /usr/local/bin/memos
mode: '0755'
remote_src: yes
notify: Restart memos
- name: Remove temporary files
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/memos_archive
- /tmp/memos_extract
- name: Ensure memos user exists
user:
name: memos
system: yes
shell: /usr/sbin/nologin
home: /var/lib/memos
create_home: yes
state: present
- name: Create memos data directory
file:
path: "{{ memos_data_dir }}"
state: directory
owner: memos
group: memos
mode: '0750'
- name: Create memos systemd service file
copy:
dest: /etc/systemd/system/memos.service
content: |
[Unit]
Description=memos service
After=network.target
[Service]
Type=simple
User=memos
Group=memos
ExecStart=/usr/local/bin/memos --port {{ memos_port }} --data {{ memos_data_dir }}
Restart=on-failure
RestartSec=5s
[Install]
WantedBy=multi-user.target
owner: root
group: root
mode: '0644'
notify: Restart memos
- name: Enable and start memos service
systemd:
name: memos
enabled: yes
state: started
daemon_reload: yes
- name: Wait for memos to be ready
uri:
url: "http://localhost:{{ memos_port }}/api/v1/status"
status_code: 200
register: memos_ready
until: memos_ready.status == 200
retries: 30
delay: 2
ignore_errors: yes
- name: Allow HTTPS through UFW
ufw:
rule: allow
port: '443'
proto: tcp
- name: Allow HTTP through UFW (for Let's Encrypt)
ufw:
rule: allow
port: '80'
proto: tcp
- name: Ensure Caddy sites-enabled directory exists
file:
path: "{{ caddy_sites_dir }}"
state: directory
owner: root
group: root
mode: '0755'
- name: Ensure Caddyfile includes import directive for sites-enabled
lineinfile:
path: /etc/caddy/Caddyfile
line: 'import sites-enabled/*'
insertafter: EOF
state: present
backup: yes
- name: Create Caddy reverse proxy configuration for memos
copy:
dest: "{{ caddy_sites_dir }}/memos.conf"
content: |
{{ memos_domain }} {
reverse_proxy localhost:{{ memos_port }}
}
owner: root
group: root
mode: '0644'
- name: Reload Caddy to apply new config
command: systemctl reload caddy
handlers:
- name: Restart memos
systemd:
name: memos
state: restarted

View file

@ -0,0 +1,16 @@
# General
memos_data_dir: /var/lib/memos
memos_port: 5230
# (caddy_sites_dir and subdomain now in services_config.yml)
# Remote access
remote_host: "{{ groups['memos_box'][0] }}"
remote_user: "{{ hostvars[remote_host]['ansible_user'] }}"
remote_key_file: "{{ hostvars[remote_host]['ansible_ssh_private_key_file'] | default('') }}"
# Local backup
local_backup_dir: "{{ lookup('env', 'HOME') }}/memos-backups"
backup_script_path: "{{ lookup('env', 'HOME') }}/.local/bin/memos_backup.sh"

View file

@ -3,8 +3,11 @@
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ./ntfy_emergency_app_vars.yml
vars:
ntfy_emergency_app_subdomain: "{{ subdomains.ntfy_emergency_app }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
ntfy_emergency_app_domain: "{{ ntfy_emergency_app_subdomain }}.{{ root_domain }}"
tasks:

View file

@ -2,9 +2,7 @@
ntfy_emergency_app_dir: /opt/ntfy-emergency-app
ntfy_emergency_app_port: 3000
# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
ntfy_emergency_app_subdomain: avisame
# (caddy_sites_dir and subdomain now in services_config.yml)
# ntfy configuration
ntfy_emergency_app_topic: "emergencia"

View file

@ -3,8 +3,11 @@
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ./ntfy_vars.yml
vars:
ntfy_subdomain: "{{ subdomains.ntfy }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
ntfy_domain: "{{ ntfy_subdomain }}.{{ root_domain }}"
tasks:

View file

@ -1,3 +1,2 @@
caddy_sites_dir: /etc/caddy/sites-enabled
ntfy_subdomain: ntfy
ntfy_port: 6674
ntfy_topic: alerts # Topic for Uptime Kuma notifications

View file

@ -0,0 +1,155 @@
- name: Setup ntfy as Uptime Kuma Notification Channel
hosts: watchtower
become: no
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./ntfy_vars.yml
- ../uptime_kuma/uptime_kuma_vars.yml
vars:
ntfy_subdomain: "{{ subdomains.ntfy }}"
uptime_kuma_subdomain: "{{ subdomains.uptime_kuma }}"
ntfy_domain: "{{ ntfy_subdomain }}.{{ root_domain }}"
ntfy_server_url: "https://{{ ntfy_domain }}"
ntfy_priority: 4 # 1=min, 2=low, 3=default, 4=high, 5=max
uptime_kuma_api_url: "https://{{ uptime_kuma_subdomain }}.{{ root_domain }}"
tasks:
- name: Validate Uptime Kuma configuration
assert:
that:
- uptime_kuma_api_url is defined
- uptime_kuma_api_url != ""
- uptime_kuma_username is defined
- uptime_kuma_username != ""
- uptime_kuma_password is defined
- uptime_kuma_password != ""
fail_msg: "uptime_kuma_api_url, uptime_kuma_username and uptime_kuma_password must be set"
- name: Validate ntfy configuration
assert:
that:
- ntfy_domain is defined
- ntfy_domain != ""
- ntfy_topic is defined
- ntfy_topic != ""
- ntfy_username is defined
- ntfy_username != ""
- ntfy_password is defined
- ntfy_password != ""
fail_msg: "ntfy_domain, ntfy_topic, ntfy_username and ntfy_password must be set"
- name: Create Uptime Kuma ntfy notification setup script
copy:
dest: /tmp/setup_uptime_kuma_ntfy_notification.py
content: |
#!/usr/bin/env python3
import sys
import json
from uptime_kuma_api import UptimeKumaApi
def main():
api_url = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
notification_name = sys.argv[4]
ntfy_server_url = sys.argv[5]
ntfy_topic = sys.argv[6]
ntfy_username = sys.argv[7]
ntfy_password = sys.argv[8]
ntfy_priority = int(sys.argv[9])
api = UptimeKumaApi(api_url, timeout=60, wait_events=2.0)
api.login(username, password)
# Get all notifications
notifications = api.get_notifications()
# Find existing ntfy notification by name
existing_notification = next((n for n in notifications if n.get('name') == notification_name), None)
notification_data = {
'name': notification_name,
'type': 'ntfy',
'isDefault': True, # Apply to all monitors by default
'applyExisting': True, # Apply to existing monitors
'ntfyserverurl': ntfy_server_url,
'ntfytopic': ntfy_topic,
'ntfyusername': ntfy_username,
'ntfypassword': ntfy_password,
'ntfyPriority': ntfy_priority
}
if existing_notification:
notification = api.edit_notification(existing_notification['id'], **notification_data)
action = "updated"
else:
notification = api.add_notification(**notification_data)
action = "created"
# Output result as JSON
result = {
'notification_id': notification['id'],
'notification_name': notification_name,
'ntfy_server': ntfy_server_url,
'ntfy_topic': ntfy_topic,
'action': action
}
print(json.dumps(result))
api.disconnect()
if __name__ == '__main__':
main()
mode: '0755'
delegate_to: localhost
become: no
- name: Run Uptime Kuma ntfy notification setup script
command: >
{{ ansible_playbook_python }}
/tmp/setup_uptime_kuma_ntfy_notification.py
"{{ uptime_kuma_api_url }}"
"{{ uptime_kuma_username }}"
"{{ uptime_kuma_password }}"
"ntfy ({{ ntfy_topic }})"
"{{ ntfy_server_url }}"
"{{ ntfy_topic }}"
"{{ ntfy_username }}"
"{{ ntfy_password }}"
"{{ ntfy_priority }}"
register: notification_setup_result
delegate_to: localhost
become: no
changed_when: false
- name: Parse notification setup result
set_fact:
notification_info_parsed: "{{ notification_setup_result.stdout | from_json }}"
- name: Display notification information
debug:
msg: |
✓ ntfy notification channel {{ notification_info_parsed.action }} successfully!
Notification Name: ntfy ({{ ntfy_topic }})
ntfy Server: {{ ntfy_server_url }}
ntfy Topic: {{ ntfy_topic }}
Priority: {{ ntfy_priority }} (4=high)
Default for all monitors: Yes
Applied to existing monitors: Yes
All Uptime Kuma monitors will now send alerts to your ntfy server
on the "{{ ntfy_topic }}" topic.
You can subscribe to alerts at: {{ ntfy_server_url }}/{{ ntfy_topic }}
- name: Clean up temporary Uptime Kuma setup script
file:
path: /tmp/setup_uptime_kuma_ntfy_notification.py
state: absent
delegate_to: localhost
become: no
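To verify the channel end to end, a test message can be published straight to the topic. A hedged sketch using the same variables (ntfy accepts a plain POST body as the message text):

- name: Publish a test message to the ntfy topic
  uri:
    url: "{{ ntfy_server_url }}/{{ ntfy_topic }}"
    method: POST
    body: "Test alert from the Uptime Kuma notification setup"
    user: "{{ ntfy_username }}"
    password: "{{ ntfy_password }}"
    force_basic_auth: yes
  delegate_to: localhost
  become: no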

View file

@ -3,7 +3,12 @@
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ./personal_blog_vars.yml
vars:
personal_blog_subdomain: "{{ subdomains.personal_blog }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
personal_blog_domain: "{{ personal_blog_subdomain }}.{{ root_domain }}"
tasks:
- name: Install git

View file

@ -1,6 +1,4 @@
caddy_sites_dir: /etc/caddy/sites-enabled
personal_blog_subdomain: pablohere
personal_blog_domain: pablohere.contrapeso.xyz
# (caddy_sites_dir and subdomain now in services_config.yml)
personal_blog_git_repo: https://forgejo.contrapeso.xyz/counterweight/pablohere.git
personal_blog_git_username: counterweight
personal_blog_source_dir: /opt/personal-blog

View file

@ -3,8 +3,11 @@
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ./uptime_kuma_vars.yml
vars:
uptime_kuma_subdomain: "{{ subdomains.uptime_kuma }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
uptime_kuma_domain: "{{ uptime_kuma_subdomain }}.{{ root_domain }}"
tasks:

View file

@ -3,9 +3,7 @@ uptime_kuma_dir: /opt/uptime-kuma
uptime_kuma_data_dir: "{{ uptime_kuma_dir }}/data"
uptime_kuma_port: 3001
# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
uptime_kuma_subdomain: uptime
# (caddy_sites_dir and subdomain now in services_config.yml)
# Remote access
remote_host: "{{ groups['watchtower'][0] }}"

View file

@ -3,9 +3,14 @@
become: yes
vars_files:
- ../../infra_vars.yml
- ../../services_config.yml
- ../../infra_secrets.yml
- ./vaultwarden_vars.yml
vars:
vaultwarden_subdomain: "{{ subdomains.vaultwarden }}"
caddy_sites_dir: "{{ caddy_sites_dir }}"
vaultwarden_domain: "{{ vaultwarden_subdomain }}.{{ root_domain }}"
uptime_kuma_api_url: "https://{{ subdomains.uptime_kuma }}.{{ root_domain }}"
tasks:
- name: Create vaultwarden directory
@ -106,3 +111,110 @@
- name: Reload Caddy to apply new config
command: systemctl reload caddy
- name: Create Uptime Kuma monitor setup script for Vaultwarden
delegate_to: localhost
become: no
copy:
dest: /tmp/setup_vaultwarden_monitor.py
content: |
#!/usr/bin/env python3
import sys
import yaml
from uptime_kuma_api import UptimeKumaApi, MonitorType
try:
# Load configs
with open('/tmp/ansible_config.yml', 'r') as f:
config = yaml.safe_load(f)
url = config['uptime_kuma_url']
username = config['username']
password = config['password']
monitor_url = config['monitor_url']
monitor_name = config['monitor_name']
# Connect to Uptime Kuma
api = UptimeKumaApi(url, timeout=30)
api.login(username, password)
# Get all monitors
monitors = api.get_monitors()
# Find or create "services" group
group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if not group:
    api.add_monitor(type=MonitorType.GROUP, name='services')
    # Refresh so the new group is returned with its id
    monitors = api.get_monitors()
    group = next((m for m in monitors if m.get('name') == 'services' and m.get('type') == 'group'), None)
if group is None:
    raise RuntimeError("could not find or create the 'services' monitor group")
# Check if monitor already exists
existing_monitor = None
for monitor in monitors:
if monitor.get('name') == monitor_name:
existing_monitor = monitor
break
# Get ntfy notification ID
notifications = api.get_notifications()
ntfy_notification_id = None
for notif in notifications:
if notif.get('type') == 'ntfy':
ntfy_notification_id = notif.get('id')
break
if existing_monitor:
print(f"Monitor '{monitor_name}' already exists (ID: {existing_monitor['id']})")
print("Skipping - monitor already configured")
else:
print(f"Creating monitor '{monitor_name}'...")
api.add_monitor(
type=MonitorType.HTTP,
name=monitor_name,
url=monitor_url,
parent=group['id'],
interval=60,
maxretries=3,
retryInterval=60,
notificationIDList={ntfy_notification_id: True} if ntfy_notification_id else {}
)
api.disconnect()
print("SUCCESS")
except Exception as e:
print(f"ERROR: {str(e)}", file=sys.stderr)
sys.exit(1)
mode: '0755'
- name: Create temporary config for monitor setup
delegate_to: localhost
become: no
copy:
dest: /tmp/ansible_config.yml
content: |
uptime_kuma_url: "{{ uptime_kuma_api_url }}"
username: "{{ uptime_kuma_username }}"
password: "{{ uptime_kuma_password }}"
monitor_url: "https://{{ vaultwarden_domain }}/alive"
monitor_name: "Vaultwarden"
mode: '0644'
- name: Run Uptime Kuma monitor setup
command: python3 /tmp/setup_vaultwarden_monitor.py
delegate_to: localhost
become: no
register: monitor_setup
changed_when: "'SUCCESS' in monitor_setup.stdout"
ignore_errors: yes
- name: Clean up temporary files
delegate_to: localhost
become: no
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/setup_vaultwarden_monitor.py
- /tmp/ansible_config.yml
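The monitor-registration block above is now repeated verbatim in the Forgejo, Headscale, LNBits, and Vaultwarden playbooks, with only the monitor name and health URL changing. A hedged sketch of factoring it into a shared task file (path and variable names hypothetical):

# common/uptime_kuma_monitor.yml would hold the script/config/run/cleanup
# tasks above, parameterized by monitor_name and monitor_url.
- name: Register this service's monitor in Uptime Kuma
  include_tasks: ../../common/uptime_kuma_monitor.yml
  vars:
    monitor_name: "Vaultwarden"
    monitor_url: "https://{{ vaultwarden_domain }}/alive"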

View file

@ -3,9 +3,7 @@ vaultwarden_dir: /opt/vaultwarden
vaultwarden_data_dir: "{{ vaultwarden_dir }}/data"
vaultwarden_port: 8222
# Caddy
caddy_sites_dir: /etc/caddy/sites-enabled
vaultwarden_subdomain: vault
# (caddy_sites_dir and subdomain now in services_config.yml)
# Remote access
remote_host: "{{ groups['vipy'][0] }}"