From 3343de2dc07dcdcb0162c5c52f04cdbc31fe0540 Mon Sep 17 00:00:00 2001
From: Pablo Martin
Date: Tue, 1 Jul 2025 16:14:44 +0200
Subject: [PATCH 01/10] thingies

---
 .gitignore                                    |  3 +-
 01_infra_setup.md                             | 18 +++--
 02_vps_core_services_setup.md                 | 18 +++++
 ansible/example.inventory.ini                 |  2 +
 .../01_user_and_access_setup_playbook.yml     | 65 +++++++++++++++++++
 .../infra/02_firewall_playbook.yml            | 51 +--------------
 ansible/services/caddy_playbook.yml           | 61 +++++++++++++++++
 ansible/services/docker_playbook.yml          | 59 +++++++++++++++++
 ansible/services/uptime_kuma_playbook.yml     | 51 +++++++++++++++
 {infra => ansible}/vars.yml                   |  3 +-
 infra/example.inventory.ini                   |  2 -
 requirements.txt                              | 10 +++
 12 files changed, 286 insertions(+), 57 deletions(-)
 create mode 100644 02_vps_core_services_setup.md
 create mode 100644 ansible/example.inventory.ini
 create mode 100644 ansible/infra/01_user_and_access_setup_playbook.yml
 rename infra/playbook.yml => ansible/infra/02_firewall_playbook.yml (52%)
 create mode 100644 ansible/services/caddy_playbook.yml
 create mode 100644 ansible/services/docker_playbook.yml
 create mode 100644 ansible/services/uptime_kuma_playbook.yml
 rename {infra => ansible}/vars.yml (75%)
 delete mode 100644 infra/example.inventory.ini
 create mode 100644 requirements.txt

diff --git a/.gitignore b/.gitignore
index c13bb9b..48494c3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
-inventory.ini
\ No newline at end of file
+inventory.ini
+venv/*
\ No newline at end of file

diff --git a/01_infra_setup.md b/01_infra_setup.md
index 7599746..6fb7442 100644
--- a/01_infra_setup.md
+++ b/01_infra_setup.md
@@ -5,7 +5,11 @@ This describes how to prepare each machine before deploying services on them.
 ## 01.01 First steps
 
 * Create an ssh key or pick an existing one. We'll refer to it as the `personal_ssh_key`.
-* The guide assumes the laptop (Lapy) has `ansible` installed. If not, do `sudo apt install -y ansible` and `ansible --version` to check.
+* Deploy ansible on the laptop (Lapy), which will act as the ansible control node. To do so:
+  * Create a `venv`: `python3 -m venv venv`
+  * Activate it: `source venv/bin/activate`
+  * Install the listed ansible requirements with `pip install -r requirements.txt`
+* Keep in mind you should activate this `venv` from now on when running `ansible` commands.
 
 ## 01.02 Prepare the VPS (Vipy)
 
 ### 01.02.02 Prepare Ansible vars
 
-* You have an example `infra/example.inventory.ini`. Copy it with `cp example.inventory.ini inventory.ini` and fill in with the vars for your VPS.
+* You have an example `ansible/example.inventory.ini`. Copy it with `cp ansible/example.inventory.ini ansible/inventory.ini` and fill in with the values for your VPS.
 
-### 01.02.03 First steps with Ansible
+### 01.02.03 Create user and secure VPS access
 
-* cd into `infra`
-* Run `ansible-playbook playbook.yml`
+* Ansible will create a user on the first playbook `01_user_and_access_setup_playbook.yml`. This is the user that will get used regularly. But, since this user doesn't exist, you obviously need to first run this playbook from some other user. We assume your VPS provider has given you a root user, which is what you need to define as the running user in the next command.
+* cd into `ansible` +* Run `ansible-playbook -i inventory.ini infra/01_user_and_access_setup_playbook.yml -e 'ansible_user="your root user here"' +* Then, configure firewall access, fail2ban and auditd with `ansible-playbook -i inventory.ini infra/02_firewall_playbook.yml` + +Note that both the root user and the `counterweight` user will use the same SSH pubkey for auth. \ No newline at end of file diff --git a/02_vps_core_services_setup.md b/02_vps_core_services_setup.md new file mode 100644 index 0000000..f81ff63 --- /dev/null +++ b/02_vps_core_services_setup.md @@ -0,0 +1,18 @@ +# 02. VPS Core Services Setup + +Now that Vipy is ready, we need to deploy some basic services which are foundational for the apps we're actually interested in. + +This assumes you've completed the markdown `01`. + +## 02.01 Deploy Caddy + +* Use Ansible to run the caddy playbook: + + ``` + cd ansible + ansible-playbook -i inventory.ini services/caddy_playbook.yml + ``` + +* Starting config will be empty. Modifying the caddy config file to add endpoints as we add services is covered by the instructions of each service. + +## 02.02 Deploy Uptime Kuma \ No newline at end of file diff --git a/ansible/example.inventory.ini b/ansible/example.inventory.ini new file mode 100644 index 0000000..5958a54 --- /dev/null +++ b/ansible/example.inventory.ini @@ -0,0 +1,2 @@ +[vipy] +your.vps.ip.here ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/your-key \ No newline at end of file diff --git a/ansible/infra/01_user_and_access_setup_playbook.yml b/ansible/infra/01_user_and_access_setup_playbook.yml new file mode 100644 index 0000000..0b25b9a --- /dev/null +++ b/ansible/infra/01_user_and_access_setup_playbook.yml @@ -0,0 +1,65 @@ +- name: Secure Debian VPS + hosts: vipy + vars_files: + - ../vars.yml + become: true + + tasks: + - name: Update and upgrade apt packages + apt: + update_cache: yes + upgrade: full + autoremove: yes + + - name: Create new user + user: + name: "{{ new_user }}" + groups: sudo + shell: /bin/bash + state: present + create_home: yes + + - name: Set up SSH directory for new user + file: + path: "/home/{{ new_user }}/.ssh" + state: directory + mode: "0700" + owner: "{{ new_user }}" + group: "{{ new_user }}" + + - name: Copy current user's authorized_keys to new user + copy: + src: "/home/{{ ansible_user }}/.ssh/authorized_keys" + dest: "/home/{{ new_user }}/.ssh/authorized_keys" + owner: "{{ new_user }}" + group: "{{ new_user }}" + mode: "0600" + remote_src: true + + - name: Allow new user to run sudo without password + copy: + dest: "/etc/sudoers.d/{{ new_user }}" + content: "{{ new_user }} ALL=(ALL) NOPASSWD:ALL" + owner: root + group: root + mode: "0440" + + - name: Disable root login + lineinfile: + path: /etc/ssh/sshd_config + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + backrefs: yes + loop: + - { regexp: "^#?PermitRootLogin .*", line: "PermitRootLogin no" } + - { + regexp: "^#?PasswordAuthentication .*", + line: "PasswordAuthentication no", + } + + - name: Restart SSH + service: + name: ssh + state: restarted + diff --git a/infra/playbook.yml b/ansible/infra/02_firewall_playbook.yml similarity index 52% rename from infra/playbook.yml rename to ansible/infra/02_firewall_playbook.yml index 991fe81..31e6848 100644 --- a/infra/playbook.yml +++ b/ansible/infra/02_firewall_playbook.yml @@ -1,56 +1,10 @@ - name: Secure Debian VPS hosts: vipy vars_files: - - vars.yml + - ../vars.yml become: true tasks: - - name: Update and upgrade apt packages - apt: 
- update_cache: yes - upgrade: full - autoremove: yes - - - name: Create new user - user: - name: "{{ new_user }}" - groups: sudo - shell: /bin/bash - state: present - create_home: yes - - - name: Set up SSH directory for new user - file: - path: "/home/{{ new_user }}/.ssh" - state: directory - mode: "0700" - owner: "{{ new_user }}" - group: "{{ new_user }}" - - - name: Change SSH port and disable root login - lineinfile: - path: /etc/ssh/sshd_config - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - state: present - backrefs: yes - loop: - - { regexp: "^#?Port .*", line: "Port {{ ssh_port }}" } - - { regexp: "^#?PermitRootLogin .*", line: "PermitRootLogin no" } - - { - regexp: "^#?PasswordAuthentication .*", - line: "PasswordAuthentication no", - } - - - name: Restart SSH - service: - name: ssh - state: restarted - - - name: Set SSH port to new port - set_fact: - ansible_port: "{{ ssh_port }}" - - name: Install UFW apt: name: ufw @@ -68,11 +22,12 @@ - name: Allow outgoing traffic ufw: rule: allow - direction: outgoing + direction: out - name: Allow SSH port through UFW ufw: rule: allow + direction: in port: "{{ ssh_port }}" proto: tcp from_ip: "{{ allow_ssh_from if allow_ssh_from != 'any' else omit }}" diff --git a/ansible/services/caddy_playbook.yml b/ansible/services/caddy_playbook.yml new file mode 100644 index 0000000..13ff64b --- /dev/null +++ b/ansible/services/caddy_playbook.yml @@ -0,0 +1,61 @@ +- name: Install and configure Caddy on Debian 12 + hosts: vipy + become: yes + + tasks: + - name: Install required packages + apt: + name: + - debian-keyring + - debian-archive-keyring + - apt-transport-https + - curl + state: present + update_cache: yes + + - name: Download Caddy GPG armored key + ansible.builtin.get_url: + url: https://dl.cloudsmith.io/public/caddy/stable/gpg.key + dest: /tmp/caddy-stable-archive-keyring.asc + mode: '0644' + + - name: Convert ASCII armored key to binary keyring + ansible.builtin.command: + cmd: gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg /tmp/caddy-stable-archive-keyring.asc + args: + creates: /usr/share/keyrings/caddy-stable-archive-keyring.gpg + + - name: Ensure permissions on keyring file + ansible.builtin.file: + path: /usr/share/keyrings/caddy-stable-archive-keyring.gpg + owner: root + group: root + mode: '0644' + + - name: Add Caddy repository list file + ansible.builtin.get_url: + url: https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt + dest: /etc/apt/sources.list.d/caddy-stable.list + mode: '0644' + validate_certs: yes + + - name: Update apt cache after adding repo + apt: + update_cache: yes + + - name: Install Caddy + apt: + name: caddy + state: present + + - name: Ensure Caddy service is enabled and started + systemd: + name: caddy + enabled: yes + state: started + + - name: Allow HTTPS through UFW + ufw: + rule: allow + port: '443' + proto: tcp \ No newline at end of file diff --git a/ansible/services/docker_playbook.yml b/ansible/services/docker_playbook.yml new file mode 100644 index 0000000..3474a65 --- /dev/null +++ b/ansible/services/docker_playbook.yml @@ -0,0 +1,59 @@ +- name: Install Docker and Docker Compose on Debian 12 + hosts: all + become: yes + + tasks: + - name: Ensure required packages are installed + apt: + name: + - ca-certificates + - curl + - gnupg + - lsb-release + state: present + update_cache: yes + + - name: Add Docker GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/debian/gpg + state: present + + - name: Add Docker repository + 
ansible.builtin.apt_repository: + repo: "deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable" + state: present + filename: docker + + - name: Update apt cache after adding Docker repo + apt: + update_cache: yes + + - name: Install Docker Engine and CLI + apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: latest + + - name: Ensure Docker is started and enabled + systemd: + name: docker + enabled: yes + state: started + + - name: Add user to docker group + user: + name: "{{ ansible_user }}" + groups: docker + append: yes + + - name: Create symlink for docker-compose (optional CLI alias) + file: + src: /usr/libexec/docker/cli-plugins/docker-compose + dest: /usr/local/bin/docker-compose + state: link + when: ansible_facts['os_family'] == "Debian" + ignore_errors: true # In case the plugin path differs slightly diff --git a/ansible/services/uptime_kuma_playbook.yml b/ansible/services/uptime_kuma_playbook.yml new file mode 100644 index 0000000..9f52798 --- /dev/null +++ b/ansible/services/uptime_kuma_playbook.yml @@ -0,0 +1,51 @@ +- name: Deploy Uptime Kuma with Docker Compose and configure Caddy reverse proxy + hosts: vipy + become: yes + vars: + uptime_kuma_dir: /opt/uptime-kuma + uptime_kuma_port: 3001 + caddy_sites_dir: /etc/caddy/sites-enabled + uptime_kuma_domain: uptime.example.com # Change to your domain + + tasks: + - name: Create uptime kuma directory + file: + path: "{{ uptime_kuma_dir }}" + state: directory + owner: {{ ansible_user }} + group: {{ ansible_user }} + mode: '0755' + + - name: Create docker-compose.yml for uptime kuma + copy: + dest: "{{ uptime_kuma_dir }}/docker-compose.yml" + content: | + version: "3" + services: + uptime-kuma: + image: louislam/uptime-kuma:latest + container_name: uptime-kuma + restart: unless-stopped + ports: + - "{{ uptime_kuma_port }}:3001" + volumes: + - ./data:/app/data + + - name: Deploy uptime kuma container with docker compose + command: docker-compose up -d + args: + chdir: "{{ uptime_kuma_dir }}" + + - name: Create Caddy reverse proxy configuration for uptime kuma + copy: + dest: "{{ caddy_sites_dir }}/uptime-kuma.conf" + content: | + {{ uptime_kuma_domain }} { + reverse_proxy localhost:{{ uptime_kuma_port }} + } + owner: root + group: root + mode: '0644' + + - name: Reload Caddy to apply new config + command: systemctl reload caddy \ No newline at end of file diff --git a/infra/vars.yml b/ansible/vars.yml similarity index 75% rename from infra/vars.yml rename to ansible/vars.yml index 193ccf4..44e4e24 100644 --- a/infra/vars.yml +++ b/ansible/vars.yml @@ -1,3 +1,4 @@ new_user: counterweight -ssh_port: 2222 +ssh_port: 22 allow_ssh_from: "any" + diff --git a/infra/example.inventory.ini b/infra/example.inventory.ini deleted file mode 100644 index 30f75b0..0000000 --- a/infra/example.inventory.ini +++ /dev/null @@ -1,2 +0,0 @@ -[vipy] -your.vps.ip.here ansible_user=debian ansible_port=22 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..cfc541b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +ansible==10.7.0 +ansible-core==2.17.12 +cffi==1.17.1 +cryptography==45.0.4 +Jinja2==3.1.6 +MarkupSafe==3.0.2 +packaging==25.0 +pycparser==2.22 +PyYAML==6.0.2 +resolvelib==1.0.1 From 97ff4b40e34d3741ff936435a5425936600a75a1 Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Tue, 1 Jul 2025 16:50:58 +0200 Subject: [PATCH 02/10] docker playbook --- 
ansible/services/docker_playbook.yml | 70 ++++++++++++++++++---------- 1 file changed, 45 insertions(+), 25 deletions(-) diff --git a/ansible/services/docker_playbook.yml b/ansible/services/docker_playbook.yml index 3474a65..8e8e430 100644 --- a/ansible/services/docker_playbook.yml +++ b/ansible/services/docker_playbook.yml @@ -1,34 +1,60 @@ - name: Install Docker and Docker Compose on Debian 12 hosts: all - become: yes + become: yes tasks: - - name: Ensure required packages are installed + - name: Remove old Docker-related packages + apt: + name: + - docker.io + - docker-doc + - docker-compose + - podman-docker + - containerd + - runc + state: absent + purge: yes + autoremove: yes + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install prerequisites apt: name: - ca-certificates - curl - - gnupg - - lsb-release + state: present + + - name: Create directory for Docker GPG key + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: Download Docker GPG key + get_url: + url: https://download.docker.com/linux/debian/gpg + dest: /etc/apt/keyrings/docker.asc + mode: '0644' + + - name: Get Debian architecture + command: dpkg --print-architecture + register: deb_arch + + - name: Add Docker repository + apt_repository: + repo: "deb [arch={{ deb_arch.stdout }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian {{ ansible_lsb.codename }} stable" + filename: docker state: present update_cache: yes - - name: Add Docker GPG key - ansible.builtin.apt_key: - url: https://download.docker.com/linux/debian/gpg - state: present - - - name: Add Docker repository - ansible.builtin.apt_repository: - repo: "deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable" - state: present - filename: docker - - - name: Update apt cache after adding Docker repo + - name: Update apt cache apt: update_cache: yes - - name: Install Docker Engine and CLI + - name: Install Docker packages apt: name: - docker-ce @@ -36,7 +62,8 @@ - containerd.io - docker-buildx-plugin - docker-compose-plugin - state: latest + state: present + update_cache: yes - name: Ensure Docker is started and enabled systemd: @@ -50,10 +77,3 @@ groups: docker append: yes - - name: Create symlink for docker-compose (optional CLI alias) - file: - src: /usr/libexec/docker/cli-plugins/docker-compose - dest: /usr/local/bin/docker-compose - state: link - when: ansible_facts['os_family'] == "Debian" - ignore_errors: true # In case the plugin path differs slightly From eddde5e53ae82a76fc41d8e5399e7531084d92b7 Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Tue, 1 Jul 2025 17:02:28 +0200 Subject: [PATCH 03/10] uptime kuma works --- .../01_user_and_access_setup_playbook.yml | 2 +- ansible/infra/02_firewall_playbook.yml | 2 +- ansible/{vars.yml => infra_vars.yml} | 2 +- ansible/services/uptime_kuma_playbook.yml | 27 ++++++++++++++++--- 4 files changed, 26 insertions(+), 7 deletions(-) rename ansible/{vars.yml => infra_vars.yml} (67%) diff --git a/ansible/infra/01_user_and_access_setup_playbook.yml b/ansible/infra/01_user_and_access_setup_playbook.yml index 0b25b9a..ed8918b 100644 --- a/ansible/infra/01_user_and_access_setup_playbook.yml +++ b/ansible/infra/01_user_and_access_setup_playbook.yml @@ -1,7 +1,7 @@ - name: Secure Debian VPS hosts: vipy vars_files: - - ../vars.yml + - ../infra_vars.yml become: true tasks: diff --git a/ansible/infra/02_firewall_playbook.yml b/ansible/infra/02_firewall_playbook.yml index 31e6848..d6abd95 100644 --- 
a/ansible/infra/02_firewall_playbook.yml +++ b/ansible/infra/02_firewall_playbook.yml @@ -1,7 +1,7 @@ - name: Secure Debian VPS hosts: vipy vars_files: - - ../vars.yml + - ../infra_vars.yml become: true tasks: diff --git a/ansible/vars.yml b/ansible/infra_vars.yml similarity index 67% rename from ansible/vars.yml rename to ansible/infra_vars.yml index 44e4e24..952df93 100644 --- a/ansible/vars.yml +++ b/ansible/infra_vars.yml @@ -1,4 +1,4 @@ new_user: counterweight ssh_port: 22 allow_ssh_from: "any" - +root_domain: contrapeso.xyz diff --git a/ansible/services/uptime_kuma_playbook.yml b/ansible/services/uptime_kuma_playbook.yml index 9f52798..c2eff16 100644 --- a/ansible/services/uptime_kuma_playbook.yml +++ b/ansible/services/uptime_kuma_playbook.yml @@ -1,19 +1,22 @@ - name: Deploy Uptime Kuma with Docker Compose and configure Caddy reverse proxy hosts: vipy become: yes + vars_files: + - ../infra_vars.yml vars: uptime_kuma_dir: /opt/uptime-kuma uptime_kuma_port: 3001 caddy_sites_dir: /etc/caddy/sites-enabled - uptime_kuma_domain: uptime.example.com # Change to your domain + subdomain: uptime + uptime_kuma_domain: "{{ subdomain }}.{{ root_domain }}" tasks: - name: Create uptime kuma directory file: path: "{{ uptime_kuma_dir }}" state: directory - owner: {{ ansible_user }} - group: {{ ansible_user }} + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" mode: '0755' - name: Create docker-compose.yml for uptime kuma @@ -32,10 +35,26 @@ - ./data:/app/data - name: Deploy uptime kuma container with docker compose - command: docker-compose up -d + command: docker compose up -d args: chdir: "{{ uptime_kuma_dir }}" + - name: Ensure Caddy sites-enabled directory exists + file: + path: /etc/caddy/sites-enabled + state: directory + owner: root + group: root + mode: '0755' + + - name: Ensure Caddyfile includes import directive for sites-enabled + lineinfile: + path: /etc/caddy/Caddyfile + line: 'import sites-enabled/*' + insertafter: EOF + state: present + backup: yes + - name: Create Caddy reverse proxy configuration for uptime kuma copy: dest: "{{ caddy_sites_dir }}/uptime-kuma.conf" From dac4a98f79fd49c9279bc5d3194b479c233879ee Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Wed, 2 Jul 2025 17:17:56 +0200 Subject: [PATCH 04/10] uptime kuma backups work --- ansible/example.inventory.ini | 5 +- ansible/infra/900_install_rsync.yml | 11 ++++ .../backup_uptime_kuma_to_lapy.yml | 65 +++++++++++++++++++ .../deploy_uptime_kuma_playbook.yml} | 9 +-- .../services/uptime_kuma/uptime_kuma_vars.yml | 20 ++++++ 5 files changed, 103 insertions(+), 7 deletions(-) create mode 100644 ansible/infra/900_install_rsync.yml create mode 100644 ansible/services/uptime_kuma/backup_uptime_kuma_to_lapy.yml rename ansible/services/{uptime_kuma_playbook.yml => uptime_kuma/deploy_uptime_kuma_playbook.yml} (89%) create mode 100644 ansible/services/uptime_kuma/uptime_kuma_vars.yml diff --git a/ansible/example.inventory.ini b/ansible/example.inventory.ini index 5958a54..f432107 100644 --- a/ansible/example.inventory.ini +++ b/ansible/example.inventory.ini @@ -1,2 +1,5 @@ [vipy] -your.vps.ip.here ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/your-key \ No newline at end of file +your.vps.ip.here ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/your-key + +[lapy] +localhost ansible_connection=local ansible_user=your laptop user \ No newline at end of file diff --git a/ansible/infra/900_install_rsync.yml b/ansible/infra/900_install_rsync.yml new file mode 100644 
index 0000000..c0b7318 --- /dev/null +++ b/ansible/infra/900_install_rsync.yml @@ -0,0 +1,11 @@ +- name: Install rsync + hosts: all + vars_files: + - ../infra_vars.yml + become: true + + tasks: + - name: Install rsync + apt: + name: rsync + state: present \ No newline at end of file diff --git a/ansible/services/uptime_kuma/backup_uptime_kuma_to_lapy.yml b/ansible/services/uptime_kuma/backup_uptime_kuma_to_lapy.yml new file mode 100644 index 0000000..5aa5beb --- /dev/null +++ b/ansible/services/uptime_kuma/backup_uptime_kuma_to_lapy.yml @@ -0,0 +1,65 @@ +- name: Configure local backup for Uptime Kuma from remote + hosts: lapy + gather_facts: no + vars_files: + - ../../infra_vars.yml + - ./uptime_kuma_vars.yml + vars: + remote_data_path: "{{ uptime_kuma_data_dir }}" + local_backup_dir: "{{ lookup('env', 'HOME') }}/uptime-kuma-backups" + backup_script_path: "{{ lookup('env', 'HOME') }}/.local/bin/uptime_kuma_backup.sh" + + tasks: + - name: Debug remote backup vars + debug: + msg: + - "remote_host={{ remote_host }}" + - "remote_user={{ remote_user }}" + - "remote_data_path='{{ remote_data_path }}'" + - "local_backup_dir={{ local_backup_dir }}" + + - name: Ensure local backup directory exists + file: + path: "{{ local_backup_dir }}" + state: directory + mode: '0755' + + - name: Ensure ~/.local/bin exists + file: + path: "{{ lookup('env', 'HOME') }}/.local/bin" + state: directory + mode: '0755' + + - name: Create backup script + copy: + dest: "{{ backup_script_path }}" + mode: '0750' + content: | + #!/bin/bash + set -euo pipefail + + TIMESTAMP=$(date +'%Y-%m-%d') + BACKUP_DIR="{{ local_backup_dir }}/$TIMESTAMP" + mkdir -p "$BACKUP_DIR" + + {% if remote_key_file %} + SSH_CMD="ssh -i {{ remote_key_file }} -p {{ hostvars[remote_host]['ansible_port'] | default(22) }}" + {% else %} + SSH_CMD="ssh -p {{ hostvars[remote_host]['ansible_port'] | default(22) }}" + {% endif %} + + rsync -az -e "$SSH_CMD" --delete {{ remote_user }}@{{ remote_host }}:{{ remote_data_path }}/ "$BACKUP_DIR/" + + # Rotate old backups (keep 14 days) + find "{{ local_backup_dir }}" -maxdepth 1 -type d -name '20*' -mtime +13 -exec rm -rf {} \; + + - name: Ensure cronjob for backup exists + cron: + name: "Uptime Kuma backup" + user: "{{ lookup('env', 'USER') }}" + job: "{{ backup_script_path }}" + minute: 0 + hour: "9,12,15,18" + + - name: Run the backup script to make the first backup + command: "{{ backup_script_path }}" diff --git a/ansible/services/uptime_kuma_playbook.yml b/ansible/services/uptime_kuma/deploy_uptime_kuma_playbook.yml similarity index 89% rename from ansible/services/uptime_kuma_playbook.yml rename to ansible/services/uptime_kuma/deploy_uptime_kuma_playbook.yml index c2eff16..f7fe4b9 100644 --- a/ansible/services/uptime_kuma_playbook.yml +++ b/ansible/services/uptime_kuma/deploy_uptime_kuma_playbook.yml @@ -2,13 +2,10 @@ hosts: vipy become: yes vars_files: - - ../infra_vars.yml + - ../../infra_vars.yml + - ./uptime_kuma_vars.yml vars: - uptime_kuma_dir: /opt/uptime-kuma - uptime_kuma_port: 3001 - caddy_sites_dir: /etc/caddy/sites-enabled - subdomain: uptime - uptime_kuma_domain: "{{ subdomain }}.{{ root_domain }}" + uptime_kuma_domain: "{{ uptime_kuma_subdomain }}.{{ root_domain }}" tasks: - name: Create uptime kuma directory diff --git a/ansible/services/uptime_kuma/uptime_kuma_vars.yml b/ansible/services/uptime_kuma/uptime_kuma_vars.yml new file mode 100644 index 0000000..9b41e82 --- /dev/null +++ b/ansible/services/uptime_kuma/uptime_kuma_vars.yml @@ -0,0 +1,20 @@ +# General +uptime_kuma_dir: 
/opt/uptime-kuma +uptime_kuma_data_dir: "{{ uptime_kuma_dir }}/data" +uptime_kuma_port: 3001 + +# Caddy +caddy_sites_dir: /etc/caddy/sites-enabled +uptime_kuma_subdomain: uptime + +# Remote access +remote_host: "{{ groups['vipy'][0] }}" +remote_user: "{{ hostvars[remote_host]['ansible_user'] }}" +remote_key_file: "{{ hostvars[remote_host]['ansible_ssh_private_key_file'] | default('') }}" + +# Local backup +local_backup_dir: "{{ lookup('env', 'HOME') }}/uptime-kuma-backups" +backup_script_path: "{{ lookup('env', 'HOME') }}/.local/bin/uptime_kuma_backup.sh" + +# Encryption +pgp_recipient: "your-gpg-id@example.com" # Replace this with your actual GPG email or ID From 3d3d65575b09a2796a87ea497bd9ad3060e1eabf Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Thu, 3 Jul 2025 17:21:31 +0200 Subject: [PATCH 05/10] lots of stuff --- 01_infra_setup.md | 31 +++-- 02_vps_core_services_setup.md | 85 +++++++++++++- README.md | 4 + ansible/example.inventory.ini | 2 + ... => 02_firewall_and_fail2ban_playbook.yml} | 0 .../910_docker_playbook.yml} | 0 ...l => setup_backup_uptime_kuma_to_lapy.yml} | 0 .../services/uptime_kuma/uptime_kuma_vars.yml | 3 - .../deploy_vaultwarden_playbook.yml | 108 ++++++++++++++++++ .../setup_backup_vaultwarden_to_lapy.yml.yml | 63 ++++++++++ .../services/vaultwarden/vaultwarden_vars.yml | 17 +++ 11 files changed, 296 insertions(+), 17 deletions(-) rename ansible/infra/{02_firewall_playbook.yml => 02_firewall_and_fail2ban_playbook.yml} (100%) rename ansible/{services/docker_playbook.yml => infra/910_docker_playbook.yml} (100%) rename ansible/services/uptime_kuma/{backup_uptime_kuma_to_lapy.yml => setup_backup_uptime_kuma_to_lapy.yml} (100%) create mode 100644 ansible/services/vaultwarden/deploy_vaultwarden_playbook.yml create mode 100644 ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml.yml create mode 100644 ansible/services/vaultwarden/vaultwarden_vars.yml diff --git a/01_infra_setup.md b/01_infra_setup.md index 6fb7442..1642d40 100644 --- a/01_infra_setup.md +++ b/01_infra_setup.md @@ -1,8 +1,8 @@ -# 01. Infra Setup +# 01 Infra Setup This describes how to prepare each machine before deploying services on them. -## 01.01 First steps +## First steps * Create an ssh key or pick an existing one. We'll refer to it as the `personal_ssh_key`. * Deploy ansible on the laptop (Lapy), which will act as the ansible control node. To do so: @@ -11,26 +11,35 @@ This describes how to prepare each machine before deploying services on them. * Install the listed ansible requirements with `pip install -r requirements.txt` * Keep in mind you should activate this `venv` from now on when running `ansible` commands. -## 01.02 Prepare the VPS (Vipy) +## Domain -### 01.02.01 Source the VPS +* Some services are designed to be accessible through WAN through a friendly URL. +* You'll need to have a domain where you can set DNS records and have the ability to create different subdomains, as the guide assumes each service will get its own subdomain. +* Getting and configuring the domain is outside the scope of this repo. Whenever a service needs you to set up a subdomain, it will be mentioned explictly. +* You should add the domain to the var `root_domain` in `ansible/infra_vars.yml`. + +## Prepare the VPS (Vipy) + +### Source the VPS * The guide is agnostic to which provider you pick, but has been tested with VMs from https://lnvps.net. * The expectations are that the VPS ticks the following boxes: + Runs Debian 12 bookworm. + Has a public IP4 and starts out with SSH listening on port 22. 
-  + Boots with one of your SSH keys already authorized.
-* Move on once your VPS is running.
+  + Boots with one of your SSH keys already authorized. If this is not the case, you'll have to manually drop the pubkey there before using the playbooks.
+* Move on once your VPS is running and satisfies the prerequisites.
 
-### 01.02.02 Prepare Ansible vars
+### Prepare Ansible vars
 
 * You have an example `ansible/example.inventory.ini`. Copy it with `cp ansible/example.inventory.ini ansible/inventory.ini` and fill in with the values for your VPS.
+* A few notes:
+  * The guide assumes you'll only have one VPS in the `[vipy]` group. Stuff will break if you have multiple, so avoid that.
 
-### 01.02.03 Create user and secure VPS access
+### Create user and secure VPS access
 
 * Ansible will create a user on the first playbook `01_user_and_access_setup_playbook.yml`. This is the user that will get used regularly. But, since this user doesn't exist, you obviously need to first run this playbook from some other user. We assume your VPS provider has given you a root user, which is what you need to define as the running user in the next command.
 * cd into `ansible`
-* Run `ansible-playbook -i inventory.ini infra/01_user_and_access_setup_playbook.yml -e 'ansible_user="your root user here"'
+* Run `ansible-playbook -i inventory.ini infra/01_user_and_access_setup_playbook.yml -e 'ansible_user="your root user here"'`
-* Then, configure firewall access, fail2ban and auditd with `ansible-playbook -i inventory.ini infra/02_firewall_playbook.yml`
+* Then, configure firewall access, fail2ban and auditd with `ansible-playbook -i inventory.ini infra/02_firewall_and_fail2ban_playbook.yml`. Since the user we will use is now present, there is no need to specify the user anymore.
 
-Note that both the root user and the `counterweight` user will use the same SSH pubkey for auth.
\ No newline at end of file
+Note that, after applying these playbooks, both the root user and the `counterweight` user will use the same SSH pubkey for auth.
\ No newline at end of file
diff --git a/02_vps_core_services_setup.md b/02_vps_core_services_setup.md
index f81ff63..19cb5d3 100644
--- a/02_vps_core_services_setup.md
+++ b/02_vps_core_services_setup.md
@@ -1,10 +1,35 @@
-# 02. VPS Core Services Setup
+# 02 VPS Core Services Setup
 
 Now that Vipy is ready, we need to deploy some basic services which are foundational for the apps we're actually interested in.
 
 This assumes you've completed the markdown `01`.
 
-## 02.01 Deploy Caddy
+## General tools
+
+This repo contains some rather general tools that you may or may not need depending on what services you want to deploy and what device you're working on. These tools can be installed with the `900` group of playbooks sitting at `ansible/infra`.
+
+By default, these playbooks are configured for `hosts: all`. If you want to limit a run to a specific group, use the `--limit groupname` flag when running the playbook.
+
+Below are notes on adding each specific tool to a device.
+
+### rsync
+
+Simply run the playbook:
+
+```
+ansible-playbook -i inventory.ini infra/900_install_rsync.yml
+```
+
+### docker and compose
+
+Simply run the playbook:
+
+```
+ansible-playbook -i inventory.ini infra/910_docker_playbook.yml
+```
+
+
+## Deploy Caddy
 
 * Use Ansible to run the caddy playbook:
 
   ```
   cd ansible
   ansible-playbook -i inventory.ini services/caddy_playbook.yml
   ```
 
 * Starting config will be empty. Modifying the caddy config file to add endpoints as we add services is covered by the instructions of each service.
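+* For illustration only, the per-service entries that later playbooks drop into `/etc/caddy/sites-enabled/` all follow the shape sketched below (`myservice` and port `8080` are placeholders, not values used by any playbook):
+
+  ```
+  myservice.example.com {
+      reverse_proxy localhost:8080
+  }
+  ```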
-## 02.02 Deploy Uptime Kuma \ No newline at end of file + +## Uptime Kuma + +Uptime Kuma gets used to monitor the availability of services, keep track of their uptime and notify issues. + +### Deploy + +* Decide what subdomain you want to serve Uptime Kuma on and add it to `services/uptime_kuma/uptime_kuma_vars.yml` on the `uptime_kuma_subdomain`. +* Make sure docker is available on the host. +* Run the deployment playbook: `ansible-playbook -i inventory.ini services/uptime_kuma/deploy_uptime_kuma_playbook.yml`. + +### Set up backups to Lapy + +* Make sure rsync is available on the host and on Lapy. +* Run the backup playbook: `ansible-playbook -i inventory.ini services/uptime_kuma/setup_backup_uptime_kuma_to_lapy.yml`. +* A first backup process gets executed and then a cronjob is set up to refresh backups periodically. + +### Configure + +* Uptime Kuma will be available for you to create a user on first start. Do that and store the creds safe. +* From that point on, you can configure through the Web UI. + +### Restoring to a previous state + +* Stop Uptime Kuma. +* Overwrite the data folder with one of the backups. +* Start it up again. + + +## Vaultwarden + +Vaultwarden is a credentials manager. + +### Deploy + +* Decide what subdomain you want to serve Vaultwarden on and add it to `services/vaultwarden/vaultwarden_vars.yml` on the `vaultwarden_subdomain`. +* Make sure docker is available on the host. +* Run the deployment playbook: `ansible-playbook -i inventory.ini services/vaultwarden/deploy_vaultwarden_playbook.yml`. + +### Set up backups to Lapy + +* Make sure rsync is available on the host and on Lapy. +* Run the backup playbook: `ansible-playbook -i inventory.ini services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml`. +* A first backup process gets executed and then a cronjob is set up to refresh backups periodically. + +### Configure + +* Vaultwarden will be available for you to create a user on first start. Do that and store the creds safe. +* From that point on, you can configure through the Web UI. + +### Restoring to a previous state + +* Stop Vaultwarden. +* Overwrite the data folder with one of the backups. +* Start it up again. diff --git a/README.md b/README.md index 23be743..f343cbc 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,10 @@ My repo documenting my personal infra, along with artifacts, scripts, etc. +## How to use + +Go through the different numbered markdowns in the repo root to do the different parts. 
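+
+As a rough sketch, a first end-to-end pass looks like this (assuming `inventory.ini` is already filled in and your provider's initial user is `root`):
+
+```
+source venv/bin/activate
+cd ansible
+ansible-playbook -i inventory.ini infra/01_user_and_access_setup_playbook.yml -e 'ansible_user=root'
+ansible-playbook -i inventory.ini infra/02_firewall_and_fail2ban_playbook.yml
+ansible-playbook -i inventory.ini services/caddy_playbook.yml
+```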
+ ## Overview ### Services diff --git a/ansible/example.inventory.ini b/ansible/example.inventory.ini index f432107..605b476 100644 --- a/ansible/example.inventory.ini +++ b/ansible/example.inventory.ini @@ -1,5 +1,7 @@ [vipy] your.vps.ip.here ansible_user=counterweight ansible_port=22 ansible_ssh_private_key_file=~/.ssh/your-key +# Local connection to laptop: this assumes you're running ansible commands from your personal laptop +# Make sure to adjust the username [lapy] localhost ansible_connection=local ansible_user=your laptop user \ No newline at end of file diff --git a/ansible/infra/02_firewall_playbook.yml b/ansible/infra/02_firewall_and_fail2ban_playbook.yml similarity index 100% rename from ansible/infra/02_firewall_playbook.yml rename to ansible/infra/02_firewall_and_fail2ban_playbook.yml diff --git a/ansible/services/docker_playbook.yml b/ansible/infra/910_docker_playbook.yml similarity index 100% rename from ansible/services/docker_playbook.yml rename to ansible/infra/910_docker_playbook.yml diff --git a/ansible/services/uptime_kuma/backup_uptime_kuma_to_lapy.yml b/ansible/services/uptime_kuma/setup_backup_uptime_kuma_to_lapy.yml similarity index 100% rename from ansible/services/uptime_kuma/backup_uptime_kuma_to_lapy.yml rename to ansible/services/uptime_kuma/setup_backup_uptime_kuma_to_lapy.yml diff --git a/ansible/services/uptime_kuma/uptime_kuma_vars.yml b/ansible/services/uptime_kuma/uptime_kuma_vars.yml index 9b41e82..eb235d6 100644 --- a/ansible/services/uptime_kuma/uptime_kuma_vars.yml +++ b/ansible/services/uptime_kuma/uptime_kuma_vars.yml @@ -15,6 +15,3 @@ remote_key_file: "{{ hostvars[remote_host]['ansible_ssh_private_key_file'] | def # Local backup local_backup_dir: "{{ lookup('env', 'HOME') }}/uptime-kuma-backups" backup_script_path: "{{ lookup('env', 'HOME') }}/.local/bin/uptime_kuma_backup.sh" - -# Encryption -pgp_recipient: "your-gpg-id@example.com" # Replace this with your actual GPG email or ID diff --git a/ansible/services/vaultwarden/deploy_vaultwarden_playbook.yml b/ansible/services/vaultwarden/deploy_vaultwarden_playbook.yml new file mode 100644 index 0000000..204a312 --- /dev/null +++ b/ansible/services/vaultwarden/deploy_vaultwarden_playbook.yml @@ -0,0 +1,108 @@ +- name: Deploy Vaultwarden with Docker Compose and configure Caddy reverse proxy + hosts: vipy + become: yes + vars_files: + - ../../infra_vars.yml + - ./vaultwarden_vars.yml + vars: + vaultwarden_domain: "{{ vaultwarden_subdomain }}.{{ root_domain }}" + + tasks: + - name: Create vaultwarden directory + file: + path: "{{ vaultwarden_dir }}" + state: directory + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: '0755' + + - name: Create docker-compose.yml for vaultwarden + copy: + dest: "{{ vaultwarden_dir }}/docker-compose.yml" + content: | + version: "3" + services: + vaultwarden: + image: vaultwarden/server:latest + container_name: vaultwarden + restart: unless-stopped + ports: + - "{{ vaultwarden_port }}:80" + volumes: + - ./data:/data + environment: + WEBSOCKET_ENABLED: 'true' + DOMAIN: "https://{{ vaultwarden_domain }}" + SIGNUPS_ALLOWED: 'true' + LOG_FILE: /data/vaultwarden.log + + - name: Deploy vaultwarden container with docker compose + command: docker compose up -d + args: + chdir: "{{ vaultwarden_dir }}" + + - name: Create Fail2Ban filter for Vaultwarden + copy: + dest: /etc/fail2ban/filter.d/vaultwarden.local + owner: root + group: root + mode: '0644' + content: | + [INCLUDES] + before = common.conf + + [Definition] + failregex = ^.*?Username or password is 
incorrect\. Try again\. IP: <HOST>\. Username:.*$
+          ignoreregex =
+
+    - name: Create Fail2Ban jail for Vaultwarden
+      copy:
+        dest: /etc/fail2ban/jail.d/vaultwarden.local
+        owner: root
+        group: root
+        mode: '0644'
+        content: |
+          [vaultwarden]
+          enabled = true
+          port = http,https
+          filter = vaultwarden
+          logpath = {{ vaultwarden_data_dir }}/vaultwarden.log
+          maxretry = 10
+          findtime = 10m
+          bantime = 1h
+
+    - name: Restart fail2ban to apply changes
+      systemd:
+        name: fail2ban
+        state: restarted
+
+    - name: Ensure Caddy sites-enabled directory exists
+      file:
+        path: "{{ caddy_sites_dir }}"
+        state: directory
+        owner: root
+        group: root
+        mode: '0755'
+
+    - name: Ensure Caddyfile includes import directive for sites-enabled
+      lineinfile:
+        path: /etc/caddy/Caddyfile
+        line: 'import sites-enabled/*'
+        insertafter: EOF
+        state: present
+        backup: yes
+
+    - name: Create Caddy reverse proxy configuration for vaultwarden
+      copy:
+        dest: "{{ caddy_sites_dir }}/vaultwarden.conf"
+        content: |
+          {{ vaultwarden_domain }} {
+              reverse_proxy localhost:{{ vaultwarden_port }}
+          }
+        owner: root
+        group: root
+        mode: '0644'
+
+    - name: Reload Caddy to apply new config
+      command: systemctl reload caddy
+
diff --git a/ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml.yml b/ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml.yml
new file mode 100644
index 0000000..68cd588
--- /dev/null
+++ b/ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml.yml
@@ -0,0 +1,63 @@
+- name: Configure local backup for Vaultwarden from remote
+  hosts: lapy
+  gather_facts: no
+  vars_files:
+    - ../../infra_vars.yml
+    - ./vaultwarden_vars.yml
+  vars:
+    remote_data_path: "{{ vaultwarden_data_dir }}"
+
+  tasks:
+    - name: Debug remote backup vars
+      debug:
+        msg:
+          - "remote_host={{ remote_host }}"
+          - "remote_user={{ remote_user }}"
+          - "remote_data_path='{{ remote_data_path }}'"
+          - "local_backup_dir={{ local_backup_dir }}"
+
+    - name: Ensure local backup directory exists
+      file:
+        path: "{{ local_backup_dir }}"
+        state: directory
+        mode: '0755'
+
+    - name: Ensure ~/.local/bin exists
+      file:
+        path: "{{ lookup('env', 'HOME') }}/.local/bin"
+        state: directory
+        mode: '0755'
+
+    - name: Create backup script
+      copy:
+        dest: "{{ backup_script_path }}"
+        mode: '0750'
+        content: |
+          #!/bin/bash
+          set -euo pipefail
+
+          TIMESTAMP=$(date +'%Y-%m-%d')
+          BACKUP_DIR="{{ local_backup_dir }}/$TIMESTAMP"
+          mkdir -p "$BACKUP_DIR"
+
+          {% if remote_key_file %}
+          SSH_CMD="ssh -i {{ remote_key_file }} -p {{ hostvars[remote_host]['ansible_port'] | default(22) }}"
+          {% else %}
+          SSH_CMD="ssh -p {{ hostvars[remote_host]['ansible_port'] | default(22) }}"
+          {% endif %}
+
+          rsync -az -e "$SSH_CMD" --delete {{ remote_user }}@{{ remote_host }}:{{ remote_data_path }}/ "$BACKUP_DIR/"
+
+          # Rotate old backups (keep 14 days)
+          find "{{ local_backup_dir }}" -maxdepth 1 -type d -name '20*' -mtime +13 -exec rm -rf {} \;
+
+    - name: Ensure cronjob for backup exists
+      cron:
+        name: "Vaultwarden backup"
+        user: "{{ lookup('env', 'USER') }}"
+        job: "{{ backup_script_path }}"
+        minute: 5
+        hour: "9,12,15,18"
+
+    - name: Run the backup script to make the first backup
+      command: "{{ backup_script_path }}"

diff --git a/ansible/services/vaultwarden/vaultwarden_vars.yml b/ansible/services/vaultwarden/vaultwarden_vars.yml
new file mode 100644
index 0000000..fd58e2f
--- /dev/null
+++ b/ansible/services/vaultwarden/vaultwarden_vars.yml
@@ -0,0 +1,17 @@
+# General
+vaultwarden_dir: /opt/vaultwarden
+vaultwarden_data_dir: "{{ vaultwarden_dir }}/data"
+vaultwarden_port: 8222 + +# Caddy +caddy_sites_dir: /etc/caddy/sites-enabled +vaultwarden_subdomain: vault + +# Remote access +remote_host: "{{ groups['vipy'][0] }}" +remote_user: "{{ hostvars[remote_host]['ansible_user'] }}" +remote_key_file: "{{ hostvars[remote_host]['ansible_ssh_private_key_file'] | default('') }}" + +# Local backup +local_backup_dir: "{{ lookup('env', 'HOME') }}/vaultwarden-backups" +backup_script_path: "{{ lookup('env', 'HOME') }}/.local/bin/vaultwarden_backup.sh" From f3030f9d6dc85f782a22a241742f2a1382db6d8d Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Fri, 4 Jul 2025 15:53:27 +0200 Subject: [PATCH 06/10] allow http, so caddy can redirect --- ansible/services/caddy_playbook.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ansible/services/caddy_playbook.yml b/ansible/services/caddy_playbook.yml index 13ff64b..e422c78 100644 --- a/ansible/services/caddy_playbook.yml +++ b/ansible/services/caddy_playbook.yml @@ -54,6 +54,12 @@ enabled: yes state: started + - name: Allow HTTP through UFW + ufw: + rule: allow + port: '80' + proto: tcp + - name: Allow HTTPS through UFW ufw: rule: allow From 14075fe1ccb7674b178fe533affd310a13cee77c Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Fri, 4 Jul 2025 15:53:35 +0200 Subject: [PATCH 07/10] add playbook to disable registration --- .../disable_vaultwarden_sign_ups_playbook.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 ansible/services/vaultwarden/disable_vaultwarden_sign_ups_playbook.yml diff --git a/ansible/services/vaultwarden/disable_vaultwarden_sign_ups_playbook.yml b/ansible/services/vaultwarden/disable_vaultwarden_sign_ups_playbook.yml new file mode 100644 index 0000000..b041e8e --- /dev/null +++ b/ansible/services/vaultwarden/disable_vaultwarden_sign_ups_playbook.yml @@ -0,0 +1,18 @@ +- name: Disable Vaultwarden Signups + hosts: vipy + become: yes + vars_files: + - ../../infra_vars.yml + - ./vaultwarden_vars.yml + + tasks: + - name: Disable signups in docker-compose.yml + replace: + path: "{{ vaultwarden_dir }}/docker-compose.yml" + regexp: 'SIGNUPS_ALLOWED:.*' + replace: "SIGNUPS_ALLOWED: 'false'" + + - name: Re-deploy Vaultwarden with signups disabled + command: docker compose up -d + args: + chdir: "{{ vaultwarden_dir }}" \ No newline at end of file From 2097a39663ea62c200f9267126fe2ffc61e49590 Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Fri, 4 Jul 2025 15:53:44 +0200 Subject: [PATCH 08/10] update vaultwarden docs --- 02_vps_core_services_setup.md | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/02_vps_core_services_setup.md b/02_vps_core_services_setup.md index 19cb5d3..9843cdb 100644 --- a/02_vps_core_services_setup.md +++ b/02_vps_core_services_setup.md @@ -48,6 +48,7 @@ Uptime Kuma gets used to monitor the availability of services, keep track of the ### Deploy * Decide what subdomain you want to serve Uptime Kuma on and add it to `services/uptime_kuma/uptime_kuma_vars.yml` on the `uptime_kuma_subdomain`. + * Note that you will have to add a DNS entry to point to the VPS public IP. * Make sure docker is available on the host. * Run the deployment playbook: `ansible-playbook -i inventory.ini services/uptime_kuma/deploy_uptime_kuma_playbook.yml`. @@ -76,20 +77,26 @@ Vaultwarden is a credentials manager. ### Deploy * Decide what subdomain you want to serve Vaultwarden on and add it to `services/vaultwarden/vaultwarden_vars.yml` on the `vaultwarden_subdomain`. 
+ * Note that you will have to add a DNS entry to point to the VPS public IP. * Make sure docker is available on the host. * Run the deployment playbook: `ansible-playbook -i inventory.ini services/vaultwarden/deploy_vaultwarden_playbook.yml`. +### Configure + +* Vaultwarden will be available for you to create a user on first start. Do that and store the creds safely. +* From that point on, you can configure through the Web UI. + +### Disable registration + +* You probably don't want anyone to just be able to register without permission. +* To prevent that, you can run the playbook `disable_vaultwarden_sign_ups_playbook.yml` after creating the first user. + ### Set up backups to Lapy * Make sure rsync is available on the host and on Lapy. * Run the backup playbook: `ansible-playbook -i inventory.ini services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml`. * A first backup process gets executed and then a cronjob is set up to refresh backups periodically. -### Configure - -* Vaultwarden will be available for you to create a user on first start. Do that and store the creds safe. -* From that point on, you can configure through the Web UI. - ### Restoring to a previous state * Stop Vaultwarden. From 04fce4fcae81f33d4d750c2cac0b3e50252aad40 Mon Sep 17 00:00:00 2001 From: Pablo Martin Date: Fri, 4 Jul 2025 16:52:08 +0200 Subject: [PATCH 09/10] forgejo work in progress --- .../forgejo/deploy_forgejo_playbook.yml | 94 +++++++++++++++++++ ansible/services/forgejo/forgejo_vars.yml | 23 +++++ 2 files changed, 117 insertions(+) create mode 100644 ansible/services/forgejo/deploy_forgejo_playbook.yml create mode 100644 ansible/services/forgejo/forgejo_vars.yml diff --git a/ansible/services/forgejo/deploy_forgejo_playbook.yml b/ansible/services/forgejo/deploy_forgejo_playbook.yml new file mode 100644 index 0000000..e0a2750 --- /dev/null +++ b/ansible/services/forgejo/deploy_forgejo_playbook.yml @@ -0,0 +1,94 @@ +- name: Install Forgejo on Debian 12 with Caddy reverse proxy + hosts: vipy + become: yes + vars: + forgejo_domain: "{{ forgejo_subdomain }}.{{ root_domain }}" + + tasks: + - name: Ensure required packages are installed + apt: + name: + - git + - git-lfs + - wget + state: present + update_cache: true + + - name: Download Forgejo binary + get_url: + url: "{{ forgejo_url }}" + dest: "/tmp/forgejo" + mode: '0755' + + - name: Move Forgejo binary to /usr/local/bin + copy: + src: "/tmp/forgejo" + dest: "{{ forgejo_bin_path }}" + remote_src: yes + mode: '0755' + + - name: Create git system user + user: + name: "{{ forgejo_user }}" + system: yes + shell: /bin/bash + home: "/home/{{ forgejo_user }}" + create_home: yes + comment: 'Git Version Control' + + - name: Create Forgejo data directory + file: + path: "{{ forgejo_data_dir }}" + state: directory + owner: "{{ forgejo_user }}" + group: "{{ forgejo_user }}" + mode: '0750' + + - name: Create Forgejo config directory + file: + path: "{{ forgejo_config_dir }}" + state: directory + owner: "root" + group: "{{ forgejo_user }}" + mode: '0770' + + - name: Download Forgejo systemd service file + get_url: + url: "{{ forgejo_service_url }}" + dest: "/etc/systemd/system/forgejo.service" + mode: '0644' + + - name: Reload systemd + systemd: + daemon_reload: yes + + - name: Enable and start Forgejo service + systemd: + name: forgejo + enabled: yes + state: started + + - name: Add Caddy reverse proxy config for Forgejo + copy: + dest: "{{ caddy_config_path }}" + mode: '0644' + content: | + {{ caddy_site_domain }} { + reverse_proxy localhost:3000 + } + + - name: 
Create Caddy reverse proxy configuration for Forgejo
+      copy:
+        dest: "{{ caddy_sites_dir }}/forgejo.conf"
+        content: |
+          {{ forgejo_domain }} {
+              reverse_proxy localhost:{{ forgejo_port }}
+          }
+        owner: root
+        group: root
+        mode: '0644'
+
+    - name: Reload Caddy to apply new config
+      service:
+        name: caddy
+        state: reloaded
diff --git a/ansible/services/forgejo/forgejo_vars.yml b/ansible/services/forgejo/forgejo_vars.yml
new file mode 100644
index 0000000..f15a67c
--- /dev/null
+++ b/ansible/services/forgejo/forgejo_vars.yml
@@ -0,0 +1,23 @@
+# General
+forgejo_data_dir: "/var/lib/forgejo"
+forgejo_config_dir: "/etc/forgejo"
+forgejo_port: 7657
+forgejo_service_url: "https://codeberg.org/forgejo/forgejo/raw/branch/forgejo/contrib/systemd/forgejo.service"
+forgejo_version: "11.0.2"
+forgejo_arch: "linux-amd64"
+forgejo_url: "https://codeberg.org/forgejo/forgejo/releases/download/v{{ forgejo_version }}/forgejo-{{ forgejo_version }}-{{ forgejo_arch }}"
+forgejo_bin_path: "/usr/local/bin/forgejo"
+forgejo_user: "git"
+
+# Caddy
+caddy_sites_dir: /etc/caddy/sites-enabled
+forgejo_subdomain: forgejo
+
+# Remote access
+remote_host: "{{ groups['vipy'][0] }}"
+remote_user: "{{ hostvars[remote_host]['ansible_user'] }}"
+remote_key_file: "{{ hostvars[remote_host]['ansible_ssh_private_key_file'] | default('') }}"
+
+# Local backup
+local_backup_dir: "{{ lookup('env', 'HOME') }}/forgejo-backups"
+backup_script_path: "{{ lookup('env', 'HOME') }}/.local/bin/forgejo_backup.sh"

From 8766af831c50a8b2298d23dbe35ca3a78eab3269 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Wed, 9 Jul 2025 00:32:51 +0200
Subject: [PATCH 10/10] a few things

---
 01_infra_setup.md                                     |  2 +-
 ansible/infra/01_user_and_access_setup_playbook.yml   | 11 +++++++++--
 ...y.yml.yml => setup_backup_vaultwarden_to_lapy.yml} |  0
 3 files changed, 10 insertions(+), 3 deletions(-)
 rename ansible/services/vaultwarden/{setup_backup_vaultwarden_to_lapy.yml.yml => setup_backup_vaultwarden_to_lapy.yml} (100%)

diff --git a/01_infra_setup.md b/01_infra_setup.md
index 1642d40..b34549e 100644
--- a/01_infra_setup.md
+++ b/01_infra_setup.md
@@ -22,7 +22,7 @@
 ### Source the VPS
 
-* The guide is agnostic to which provider you pick, but has been tested with VMs from https://lnvps.net.
+* The guide is agnostic to which provider you pick, but has been tested with VMs from https://99stack.com and contains some operations that are specifically relevant to their VPSs.
 * The expectations are that the VPS ticks the following boxes:
   + Runs Debian 12 bookworm.
   + Has a public IP4 and starts out with SSH listening on port 22.
diff --git a/ansible/infra/01_user_and_access_setup_playbook.yml b/ansible/infra/01_user_and_access_setup_playbook.yml index ed8918b..a812242 100644 --- a/ansible/infra/01_user_and_access_setup_playbook.yml +++ b/ansible/infra/01_user_and_access_setup_playbook.yml @@ -29,7 +29,7 @@ - name: Copy current user's authorized_keys to new user copy: - src: "/home/{{ ansible_user }}/.ssh/authorized_keys" + src: "{{ (ansible_user == 'root') | ternary('/root/.ssh/authorized_keys', '/home/' + ansible_user + '/.ssh/authorized_keys') }}" dest: "/home/{{ new_user }}/.ssh/authorized_keys" owner: "{{ new_user }}" group: "{{ new_user }}" @@ -58,8 +58,15 @@ line: "PasswordAuthentication no", } + - name: Ensure PasswordAuthentication is set to no in cloud-init config + lineinfile: + path: /etc/ssh/sshd_config.d/50-cloud-init.conf + regexp: "^PasswordAuthentication" + line: "PasswordAuthentication no" + create: yes + backup: yes + - name: Restart SSH service: name: ssh state: restarted - diff --git a/ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml.yml b/ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml similarity index 100% rename from ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml.yml rename to ansible/services/vaultwarden/setup_backup_vaultwarden_to_lapy.yml
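
A quick way to sanity-check the SSH hardening applied by this last patch on the VPS (a suggested verification, not part of any playbook in the series):

```
sudo sshd -T | grep -Ei '^(permitrootlogin|passwordauthentication)'
```

Both values should come back as `no`; if `passwordauthentication` still reports `yes`, another drop-in under `/etc/ssh/sshd_config.d/` is likely overriding it.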