From 91ae8c89f8354b6d1a4af064063d0abf4ae851c2 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Tue, 22 Jul 2025 14:06:35 +0000
Subject: [PATCH 01/12] Add public/keybase.txt
---
public/keybase.txt | 56 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 56 insertions(+)
create mode 100644 public/keybase.txt
diff --git a/public/keybase.txt b/public/keybase.txt
new file mode 100644
index 0000000..0d3405f
--- /dev/null
+++ b/public/keybase.txt
@@ -0,0 +1,56 @@
+==================================================================
+https://keybase.io/pablomartincalvo
+--------------------------------------------------------------------
+
+I hereby claim:
+
+ * I am an admin of https://pablohere.contrapeso.xyz
+ * I am pablomartincalvo (https://keybase.io/pablomartincalvo) on keybase.
+ * I have a public key ASDgHxztDlU_R4hjxbkO21-rS4Iv1gABa3BPb_Aff7aNAgo
+
+To do so, I am signing this object:
+
+{
+ "body": {
+ "key": {
+ "eldest_kid": "0120d9bde13d9012e681cef2edd668d70426f1f6ef69ce7dfae20b404096eca5b06f0a",
+ "host": "keybase.io",
+ "kid": "0120e01f1ced0e553f478863c5b90edb5fab4b822fd600016b704f6ff01f7fb68d020a",
+ "uid": "8e71277fbc0fb1fea28d60308f495d19",
+ "username": "pablomartincalvo"
+ },
+ "merkle_root": {
+ "ctime": 1753193114,
+ "hash": "30476b9dd587e65241454c447b71ef3f393f88b579350bd89bf5b9e443e6ba4d8ba99216710c622429c4fea48f95001effd4b4d8ec33decf6a7591c98f114460",
+ "hash_meta": "cdcde70dfa48dad33277cf144e2a9dc0c55916d315685b913efc887b7c51247e",
+ "seqno": 26961345
+ },
+ "service": {
+ "entropy": "+5f0nk/+mBs8GItHh7xtyv3J",
+ "hostname": "pablohere.contrapeso.xyz",
+ "protocol": "https:"
+ },
+ "type": "web_service_binding",
+ "version": 2
+ },
+ "client": {
+ "name": "keybase.io go client",
+ "version": "6.5.1"
+ },
+ "ctime": 1753193129,
+ "expire_in": 504576000,
+ "prev": "17f8a85c13ee480621129b8e320fe83d45bfcfea7d5f956bf1adbf5def17d39b",
+ "seqno": 25,
+ "tag": "signature"
+}
+
+which yields the signature:
+
+hKRib2R5hqhkZXRhY2hlZMOpaGFzaF90eXBlCqNrZXnEIwEg4B8c7Q5VP0eIY8W5Dttfq0uCL9YAAWtwT2/wH3+2jQIKp3BheWxvYWTESpcCGcQgF/ioXBPuSAYhEpuOMg/oPUW/z+p9X5Vr8a2/Xe8X05vEIDSxwrsQKywdTQC4/Z9ff1hsg9jf+HPjtzdFmUX6h/8oAgHCo3NpZ8RAwcxkmlMFZ2cJ4T638SWLTUnlWhDrJgDn18SM+CvvYOo60INaUdD/ou5jX62LIN7OOqQfdw5MoYEQTgMt5zGJD6hzaWdfdHlwZSCkaGFzaIKkdHlwZQildmFsdWXEIKg7r57boivDmPxyU3ai8XN+JeIf6Ct2Kz+LjJt7VPvVo3RhZ80CAqd2ZXJzaW9uAQ==
+
+And finally, I am proving ownership of this host by posting or
+appending to this document.
+
+View my publicly-auditable identity here: https://keybase.io/pablomartincalvo
+
+==================================================================
\ No newline at end of file
From c4fab04e88f48f7fe8f487d22ac0e5189ac9b8c2 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Mon, 18 Aug 2025 08:36:35 +0200
Subject: [PATCH 02/12] start taxes
---
public/writings/taxes.md | 10 ++++++++++
1 file changed, 10 insertions(+)
create mode 100644 public/writings/taxes.md
diff --git a/public/writings/taxes.md b/public/writings/taxes.md
new file mode 100644
index 0000000..b8fa2c3
--- /dev/null
+++ b/public/writings/taxes.md
@@ -0,0 +1,10 @@
+I hate taxes deeply. I fell through the rabbit hole of libertarian and anarcho-capitalist ideas some years ago, and taxes have been repulsive to me ever since. I go to great lengths to not pay them, and feel deeply hurt every time they sting my wallet.
+
+I know life goes by fast, and what once was vivid in your memory fades away bit by bit until it's gone. I'm truly hoping that, some day in the future, the world will have changed for the better and young people won't be paying as much tax as we do today in the West. Since in that bright, utopian future I'm dreaming of I might have forgotten how bad things were in 2025, I've decided to make a little entry here with an estimate of how much tax I'm bleeding on a yearly basis right now. So that we can someday look back in time and wonder: "how the fuck did we tolerate that pillaging".
+
+## Inventory
+
+Before going into the number, let's make a list of all the taxes I'm currently facing.
+
+- Income Tax: for the sin of making money, the state takes some.
+- Social Security: in Spain, you're forcefully enrolled in a pension system designed as a ponzi scheme (not ponzi-like: it's actually a ponzi scheme), with no way to opt out.
From 12dc73abe30b04fd3425069e9fc8f290111a08e3 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Mon, 18 Aug 2025 23:20:43 +0200
Subject: [PATCH 03/12] things
---
nodesource_setup.sh | 113 +++++++++++
public/index.html | 4 +
...r-the-future-the-tax-bleeding-in-2025.html | 188 ++++++++++++++++++
public/writings/taxes.md | 10 -
4 files changed, 305 insertions(+), 10 deletions(-)
create mode 100644 nodesource_setup.sh
create mode 100644 public/writings/a-note-for-the-future-the-tax-bleeding-in-2025.html
delete mode 100644 public/writings/taxes.md
diff --git a/nodesource_setup.sh b/nodesource_setup.sh
new file mode 100644
index 0000000..82155c8
--- /dev/null
+++ b/nodesource_setup.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+# Logger Function
+log() {
+ local message="$1"
+ local type="$2"
+ local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+ local color
+ local endcolor="\033[0m"
+
+ case "$type" in
+ "info") color="\033[38;5;79m" ;;
+ "success") color="\033[1;32m" ;;
+ "error") color="\033[1;31m" ;;
+ *) color="\033[1;34m" ;;
+ esac
+
+ echo -e "${color}${timestamp} - ${message}${endcolor}"
+}
+
+# Error handler function
+handle_error() {
+ local exit_code=$1
+ local error_message="$2"
+ log "Error: $error_message (Exit Code: $exit_code)" "error"
+ exit $exit_code
+}
+
+# Function to check for command availability
+command_exists() {
+ command -v "$1" &> /dev/null
+}
+
+check_os() {
+ if ! [ -f "/etc/debian_version" ]; then
+ echo "Error: This script is only supported on Debian-based systems."
+ exit 1
+ fi
+}
+
+# Function to Install the script pre-requisites
+install_pre_reqs() {
+ log "Installing pre-requisites" "info"
+
+ # Run 'apt-get update'
+ if ! apt-get update -y; then
+ handle_error "$?" "Failed to run 'apt-get update'"
+ fi
+
+ # Run 'apt-get install'
+ if ! apt-get install -y apt-transport-https ca-certificates curl gnupg; then
+ handle_error "$?" "Failed to install packages"
+ fi
+
+ if ! mkdir -p /usr/share/keyrings; then
+ handle_error "$?" "Makes sure the path /usr/share/keyrings exist or run ' mkdir -p /usr/share/keyrings' with sudo"
+ fi
+
+ rm -f /usr/share/keyrings/nodesource.gpg || true
+ rm -f /etc/apt/sources.list.d/nodesource.list || true
+
+ # Run 'curl' and 'gpg' to download and import the NodeSource signing key
+ if ! curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /usr/share/keyrings/nodesource.gpg; then
+ handle_error "$?" "Failed to download and import the NodeSource signing key"
+ fi
+
+ # Explicitly set the permissions to ensure the file is readable by all
+ if ! chmod 644 /usr/share/keyrings/nodesource.gpg; then
+ handle_error "$?" "Failed to set correct permissions on /usr/share/keyrings/nodesource.gpg"
+ fi
+}
+
+# Function to configure the Repo
+configure_repo() {
+ local node_version=$1
+
+ arch=$(dpkg --print-architecture)
+ if [ "$arch" != "amd64" ] && [ "$arch" != "arm64" ] && [ "$arch" != "armhf" ]; then
+ handle_error "1" "Unsupported architecture: $arch. Only amd64, arm64, and armhf are supported."
+ fi
+
+ echo "deb [arch=$arch signed-by=/usr/share/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$node_version nodistro main" | tee /etc/apt/sources.list.d/nodesource.list > /dev/null
+
+ # N|solid Config
+ echo "Package: nsolid" | tee /etc/apt/preferences.d/nsolid > /dev/null
+ echo "Pin: origin deb.nodesource.com" | tee -a /etc/apt/preferences.d/nsolid > /dev/null
+ echo "Pin-Priority: 600" | tee -a /etc/apt/preferences.d/nsolid > /dev/null
+
+ # Nodejs Config
+ echo "Package: nodejs" | tee /etc/apt/preferences.d/nodejs > /dev/null
+ echo "Pin: origin deb.nodesource.com" | tee -a /etc/apt/preferences.d/nodejs > /dev/null
+ echo "Pin-Priority: 600" | tee -a /etc/apt/preferences.d/nodejs > /dev/null
+
+ # Run 'apt-get update'
+ if ! apt-get update -y; then
+ handle_error "$?" "Failed to run 'apt-get update'"
+ else
+ log "Repository configured successfully."
+ log "To install Node.js, run: apt-get install nodejs -y" "info"
+ log "You can use N|solid Runtime as a node.js alternative" "info"
+ log "To install N|solid Runtime, run: apt-get install nsolid -y \n" "success"
+ fi
+}
+
+# Define Node.js version
+NODE_VERSION="22.x"
+
+# Check OS
+check_os
+
+# Main execution
+install_pre_reqs || handle_error $? "Failed installing pre-requisites"
+configure_repo "$NODE_VERSION" || handle_error $? "Failed configuring repository"
diff --git a/public/index.html b/public/index.html
index abfdcd4..1097fe2 100644
--- a/public/index.html
+++ b/public/index.html
@@ -142,6 +142,10 @@
Writings
Sometimes I like to jot down ideas and drop them here.
Notes and lessons from my departure from Superhog
diff --git a/public/writings/a-note-for-the-future-the-tax-bleeding-in-2025.html b/public/writings/a-note-for-the-future-the-tax-bleeding-in-2025.html
new file mode 100644
index 0000000..abefd1d
--- /dev/null
+++ b/public/writings/a-note-for-the-future-the-tax-bleeding-in-2025.html
@@ -0,0 +1,188 @@
+
+
+
+ Pablo here
+
+
+
+
+
+
+
+
+ I hate taxes deeply. I fell through the rabbit hole of libertarian and
+ anarcho-capitalist ideas some years ago, and taxes have been repulsive
+ to me ever since. I go to great lengths to not pay them, and feel
+ deeply hurt every time they sting my wallet against my will.
+
+
+ I know life goes by fast, and what is vivid in your memory today fades
+ away bit by bit until it's gone. I'm truly hoping that, some day in
+ the future, the world will have changed for the better and people won't
+ be paying as much tax as we do today in the West. Since in that
+ bright, utopian future I'm dreaming of I might have forgotten how bad
+ things were on this matter in 2025, I've decided to make a little
+ entry here with an estimate of how much tax I'm theoretically bleeding
+ on a yearly basis right now. So that we can someday look back in time
+ and wonder: "how the fuck did we tolerate that pillaging".
+
+
Inventory
+
+ Before going hard into the number crunching, let's list all the tax
+ items I know I'm subject to:
+
+
+
+ Income Tax: for the sin of making money, the state takes a hefty
+ bite of my salary.
+
+
+ Social Security: the state runs a compulsory Social Security
+ programme. If you work, it is illegal not to pay for it. It is
+ especially unnerving since it is quite literally a Ponzi scheme. At
+ least Madoff lured you into it with pretty words, not violence.
+
+
+ VAT: for the sin of buying stuff, the state takes another hefty
+ bite.
+
+
+ Real Estate Tax: for the sin of owning an apartment, the state
+ charges me rent. Do I actually own it?
+
+
+ Vehicle Tax: for the sin of owning a motorcycle, the state charges
+ me a yearly fee.
+
+
+ Wealth Transfer Tax: when you buy real estate, you must pay 10% of
+ its value in taxes. This is a one-off fee if you only buy one house
+ in your lifetime, but it is such a slap in the face that it would be
+ dishonest not to consider it.
+
+
+ Inheritance Tax: you thought you were going to keep daddy's loot all
+ for yourself? When you inherit, you'll go through the register
+ again. Like the wealth transfer tax, it's not a frequent one, but
+ it's big, so let's consider it.
+
+
+
+ There may be some other small, less frequent taxes that I'm not
+ considering. These are the ones that will hit most people in my
+ country.
+
+
The numbers
+
+ Okay, let's go compute the hideous bill. I'll make a hypothetical
+ profile that's roughly close to mine, with a few assumptions along the
+ way.
+
+
+
+ Salary: online sources say the typical salary for my job
+ position in my area is 70K€ yearly. Including the Social Security
+ paid by the company, the sum rises to ~85K€. I consider this way of
+ measuring honest, since I think all the money paid out by the
+ employer reflects the true salary and value of the employee.
+ I read it as: "the company is willing to pay 85K€ for this. What
+ ends up in the employee's pocket, and what in the State's, they
+ don't mind".
+
+
Expenses: I'll assume I spend half of my salary.
+
+ Home Purchase: I'll assume that, during my adult life, I
+ will buy the average home in my town once. From what I could find
+ online, that's somewhere around 500K€.
+
+
+ Vehicles: I own a motorcycle and share the expenses of a
+ car with my partner, so I'll count 1.5 vehicles.
+
+
+ Inheritance tax: I found a figure stating the average
+ windfall in my country is 250K€. We'll go with that.
+
+ So there you go. A peaceful existence as a tech professional living a
+ normal life leads to bleeding at least 55K€ per year, all while
+ getting an 85K€ salary. The tax rate sits at a wonderful 64%. How far
+ away is this from hardcore USSR-grade communism?
+
+
+ And this is generous, since I didn't model (1) what gets stolen
+ through inflation diluting savings and (2) any capital gains tax this
+ profile might end up paying on whatever investments he makes with
+ his savings.
+
+
+ Then you'll see mainstream media puppets discussing why young people
+ don't have children. As if it were some kind of mystery. They're being
+ robbed of their children's bread left and right, while getting
+ hypnotized into believing that protecting themselves against this
+ outrageous robbery is somehow morally despicable.
+
+
+
+
+
diff --git a/public/writings/taxes.md b/public/writings/taxes.md
deleted file mode 100644
index b8fa2c3..0000000
--- a/public/writings/taxes.md
+++ /dev/null
@@ -1,10 +0,0 @@
-I hate taxes deeply. I fell through the rabbit hole of libertarian and anarcho-capitalist ideas some years ago, and taxes have been repulsive to me ever since. I go to great lengths to not pay them, and feel deeply hurt every time they sting my wallet.
-
-I know life goes by fast, and what once was vivid in your memory fades away bit by bit until it's gone. I'm truly hoping that, some day in the future, the world will have changed for the better and young people won't be paying as much tax as we do today in the West. Since in that bright, utopian future I'm dreaming of I might have forgotten how bad things were in 2025, I've decided to make a little entry here with an estimate of how much tax I'm bleeding on a yearly basis right now. So that we can someday look back in time and wonder: "how the fuck did we tolerate that pillaging".
-
-## Inventory
-
-Before going into the number, let's make a list of all the taxes I'm currently facing.
-
-- Income Tax: for the sin of making money, the state takes some.
-- Social Security: in Spain, you're forcefully enrolled in a pension system designed as a ponzi scheme (not ponzi-like: it's actually a ponzi scheme), with no way to opt out.
From f2968ca002a4b4da24e3e05b64cea4e19b99d2b0 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Mon, 18 Aug 2025 23:20:58 +0200
Subject: [PATCH 04/12] remove
---
nodesource_setup.sh | 113 --------------------------------------------
1 file changed, 113 deletions(-)
delete mode 100644 nodesource_setup.sh
diff --git a/nodesource_setup.sh b/nodesource_setup.sh
deleted file mode 100644
index 82155c8..0000000
--- a/nodesource_setup.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/bin/bash
-
-# Logger Function
-log() {
- local message="$1"
- local type="$2"
- local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
- local color
- local endcolor="\033[0m"
-
- case "$type" in
- "info") color="\033[38;5;79m" ;;
- "success") color="\033[1;32m" ;;
- "error") color="\033[1;31m" ;;
- *) color="\033[1;34m" ;;
- esac
-
- echo -e "${color}${timestamp} - ${message}${endcolor}"
-}
-
-# Error handler function
-handle_error() {
- local exit_code=$1
- local error_message="$2"
- log "Error: $error_message (Exit Code: $exit_code)" "error"
- exit $exit_code
-}
-
-# Function to check for command availability
-command_exists() {
- command -v "$1" &> /dev/null
-}
-
-check_os() {
- if ! [ -f "/etc/debian_version" ]; then
- echo "Error: This script is only supported on Debian-based systems."
- exit 1
- fi
-}
-
-# Function to Install the script pre-requisites
-install_pre_reqs() {
- log "Installing pre-requisites" "info"
-
- # Run 'apt-get update'
- if ! apt-get update -y; then
- handle_error "$?" "Failed to run 'apt-get update'"
- fi
-
- # Run 'apt-get install'
- if ! apt-get install -y apt-transport-https ca-certificates curl gnupg; then
- handle_error "$?" "Failed to install packages"
- fi
-
- if ! mkdir -p /usr/share/keyrings; then
- handle_error "$?" "Makes sure the path /usr/share/keyrings exist or run ' mkdir -p /usr/share/keyrings' with sudo"
- fi
-
- rm -f /usr/share/keyrings/nodesource.gpg || true
- rm -f /etc/apt/sources.list.d/nodesource.list || true
-
- # Run 'curl' and 'gpg' to download and import the NodeSource signing key
- if ! curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /usr/share/keyrings/nodesource.gpg; then
- handle_error "$?" "Failed to download and import the NodeSource signing key"
- fi
-
- # Explicitly set the permissions to ensure the file is readable by all
- if ! chmod 644 /usr/share/keyrings/nodesource.gpg; then
- handle_error "$?" "Failed to set correct permissions on /usr/share/keyrings/nodesource.gpg"
- fi
-}
-
-# Function to configure the Repo
-configure_repo() {
- local node_version=$1
-
- arch=$(dpkg --print-architecture)
- if [ "$arch" != "amd64" ] && [ "$arch" != "arm64" ] && [ "$arch" != "armhf" ]; then
- handle_error "1" "Unsupported architecture: $arch. Only amd64, arm64, and armhf are supported."
- fi
-
- echo "deb [arch=$arch signed-by=/usr/share/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$node_version nodistro main" | tee /etc/apt/sources.list.d/nodesource.list > /dev/null
-
- # N|solid Config
- echo "Package: nsolid" | tee /etc/apt/preferences.d/nsolid > /dev/null
- echo "Pin: origin deb.nodesource.com" | tee -a /etc/apt/preferences.d/nsolid > /dev/null
- echo "Pin-Priority: 600" | tee -a /etc/apt/preferences.d/nsolid > /dev/null
-
- # Nodejs Config
- echo "Package: nodejs" | tee /etc/apt/preferences.d/nodejs > /dev/null
- echo "Pin: origin deb.nodesource.com" | tee -a /etc/apt/preferences.d/nodejs > /dev/null
- echo "Pin-Priority: 600" | tee -a /etc/apt/preferences.d/nodejs > /dev/null
-
- # Run 'apt-get update'
- if ! apt-get update -y; then
- handle_error "$?" "Failed to run 'apt-get update'"
- else
- log "Repository configured successfully."
- log "To install Node.js, run: apt-get install nodejs -y" "info"
- log "You can use N|solid Runtime as a node.js alternative" "info"
- log "To install N|solid Runtime, run: apt-get install nsolid -y \n" "success"
- fi
-}
-
-# Define Node.js version
-NODE_VERSION="22.x"
-
-# Check OS
-check_os
-
-# Main execution
-install_pre_reqs || handle_error $? "Failed installing pre-requisites"
-configure_repo "$NODE_VERSION" || handle_error $? "Failed configuring repository"
From 810708960ed7d24193db5638d1ffaee173025e41 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Mon, 18 Aug 2025 23:33:12 +0200
Subject: [PATCH 05/12] add keybase
---
public/index.html | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/public/index.html b/public/index.html
index 1097fe2..3122866 100644
--- a/public/index.html
+++ b/public/index.html
@@ -75,11 +75,11 @@
- On Nostr. My npub is:
- npub1a29gdc6p7c05az2ka3qwwpl9kfcqmws3xlwmjefmtkulfhgd7u6shuqatg
+ On keybase: https://keybase.io/pablomartincalvo.
-
At this stage I'm not open to other contacts.
+ On Nostr. My npub is:
+ npub1a29gdc6p7c05az2ka3qwwpl9kfcqmws3xlwmjefmtkulfhgd7u6shuqatg
From d954c592c0f79381a9e1fe97025c315a94a349e6 Mon Sep 17 00:00:00 2001
From: counterweight
Date: Mon, 18 Aug 2025 23:34:29 +0200
Subject: [PATCH 06/12] bad tag
---
public/index.html | 2 --
1 file changed, 2 deletions(-)
diff --git a/public/index.html b/public/index.html
index 3122866..c7d4697 100644
--- a/public/index.html
+++ b/public/index.html
@@ -69,10 +69,8 @@
There are also some other projects that I generally keep private but
From 9950201dd75a4cab3fc7d54975fa9bce9b1b239e Mon Sep 17 00:00:00 2001
From: counterweight
Date: Wed, 24 Dec 2025 10:52:09 +0100
Subject: [PATCH 09/12] add link to bitcoin infra
---
public/index.html | 3 +++
public/styles.css | 3 ++-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/public/index.html b/public/index.html
index 7021775..e4c5a83 100644
--- a/public/index.html
+++ b/public/index.html
@@ -123,6 +123,9 @@
My Python package to
handle Spanish DNIs better
A note for the future: the tax bleeding in 2025
diff --git a/public/writings/a-degraded-pool-with-a-healthy-disk.html b/public/writings/a-degraded-pool-with-a-healthy-disk.html
new file mode 100644
index 0000000..521b7cc
--- /dev/null
+++ b/public/writings/a-degraded-pool-with-a-healthy-disk.html
@@ -0,0 +1,133 @@
+
+
+
+
+ Pablo here
+
+
+
+
+
+
+
+
+
I wasn't even looking for trouble. I was clicking around the Proxmox web UI, exploring some storage views I hadn't noticed before, when I saw it: my ZFS pool was in DEGRADED state.
+
I opened the details. One of my two mirrored drives was listed as FAULTED.
+
I was very surprised. The box and its disks were brand new, with not even three months of running time on them. I was not expecting hardware issues to come at me that fast. I SSH'd into the server and ran the command that would become my best friend over the next 24 hours:
+
zpool status -v proxmox-tank-1
+
No glitch. The pool was degraded. The drive had racked up over 100 read errors, 600+ write errors, and 129 checksum errors. ZFS had given up on it.
+
NAME STATE READ WRITE CKSUM
+ proxmox-tank-1 DEGRADED 0 0 0
+ mirror-0 DEGRADED 0 0 0
+ ata-ST4000NT001-3M2101_WX11TN0Z FAULTED 108 639 129 too many errors
+ ata-ST4000NT001-3M2101_WX11TN2P ONLINE 0 0 0
+
The good news: errors: No known data errors. ZFS was serving all my data from the healthy drive. Nothing was lost yet.
+
The bad news: I was running on a single point of failure. If AGAPITO2 decided to have a bad day too, I'd be in real trouble.
+
I tried the classic IT move: rebooting. The system came back up and ZFS immediately started trying to resilver (rebuild) the degraded drive. But within minutes, the errors started piling up again and the resilver stalled.
+
Time to actually figure out what was wrong.
+
The Diagnostic Toolbox
+
When a ZFS drive acts up, you have two main sources of truth: what the kernel sees happening at the hardware level, and what the drive itself reports about its health. This can be looked up with dmesg and smartctl.
+
dmesg: The Kernel's Diary
+
The Linux kernel maintains a ring buffer of messages about hardware events, driver activities, and system operations. The dmesg command lets you read it. For disk issues, you want to grep for SATA-related keywords:
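+
Something like the following (the exact pattern here is reconstructed; any mix of ata/SATA keywords does the job):
+
dmesg -T | grep -iE 'ata|error'
+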
The -T flag gives you human-readable timestamps instead of seconds-since-boot.
+
What I saw was... weird. Here's an excerpt:
+
[Fri Jan 2 22:25:13 2026] ata4.00: exception Emask 0x50 SAct 0x70220001 SErr 0xe0802 action 0x6 frozen
+[Fri Jan 2 22:25:13 2026] ata4.00: irq_stat 0x08000000, interface fatal error
+[Fri Jan 2 22:25:13 2026] ata4.00: failed command: READ FPDMA QUEUED
+[Fri Jan 2 22:25:13 2026] ata4: hard resetting link
+[Fri Jan 2 22:25:14 2026] ata4: SATA link down (SStatus 0 SControl 300)
+
Let me translate: the kernel tried to read from the drive on ata4, got a "fatal error," and responded by doing a hard reset of the SATA link. Then the link went down entirely. The drive just... disappeared.
+
But it didn't stay gone. A few seconds later:
+
[Fri Jan 2 22:25:24 2026] ata4: link is slow to respond, please be patient (ready=0)
+[Fri Jan 2 22:25:24 2026] ata4: SATA link up 6.0 Gbps (SStatus 133 SControl 300)
+
The drive came back! At full speed! But then...
+
[Fri Jan 2 22:25:29 2026] ata4.00: qc timeout after 5000 msecs (cmd 0xec)
+[Fri Jan 2 22:25:29 2026] ata4.00: failed to IDENTIFY (I/O error, err_mask=0x4)
+[Fri Jan 2 22:25:29 2026] ata4: limiting SATA link speed to 3.0 Gbps
+
It failed again. The kernel, trying to be helpful, dropped the link speed from 6.0 Gbps to 3.0 Gbps. Maybe a slower speed would be more stable?
+
It wasn't. The pattern repeated: connect, fail, reset, reconnect at a slower speed. 6.0 Gbps, then 3.0 Gbps, then 1.5 Gbps. Eventually:
+
[Fri Jan 2 22:27:06 2026] ata4.00: disable device
+
The kernel gave up entirely.
+
This isn't what a dying drive looks like. A dying drive throws read errors on specific bad sectors. This one was connecting and disconnecting like someone was jiggling the cable. The kernel was calling it an "interface fatal error", emphasis on interface.
+
smartctl: Asking the Drive Directly
+
Every modern hard drive has S.M.A.R.T. (Self-Monitoring, Analysis, and Reporting Technology) — basically a built-in health monitor. The smartctl command lets you get info out of it.
+
First, the overall health check:
+
smartctl -H /dev/sdb
+
SMART overall-health self-assessment test result: PASSED
+
Okay, that looks great. But if the disk is healthy, what the hell is going on, and where were all those errors ZFS was seeing coming from?
+
Let's dig deeper with the extended info:
+
smartctl -x /dev/sdb
+
The key attributes I was looking for:
+
+
+
+
Attribute
+
Value
+
What it means
+
+
+
+
+
Reallocated_Sector_Ct
+
0
+
Bad sectors the drive has swapped out. Zero is good.
+
+
+
Current_Pending_Sector
+
0
+
Sectors waiting to be checked. Zero is good.
+
+
+
UDMA_CRC_Error_Count
+
0
+
Data corruption during transfer. Zero is good.
+
+
+
Number of Hardware Resets
+
39
+
Times the connection has been reset. Uh...
+
+
+
+
All the sector-level health metrics looked perfect. No bad blocks, no pending errors, no CRC errors. The drive's magnetic platters and read/write heads were fine.
+
But 39 hardware resets? That's not normal. That's the drive (or its connection) getting reset nearly 40 times.
+
I ran the short self-test to be sure:
+
smartctl -t short /dev/sdb
+# Wait a minute...
+smartctl -l selftest /dev/sdb
+
# 1 Short offline Completed without error 00%
+
The drive passed its own self-test. The platters spin, the heads move, the firmware works, and it can read its own data just fine.
+
Hypothesis
+
At this point, the evidence was pointing clearly away from "the drive is dying" and toward "something is wrong with the connection."
+
What the kernel logs told me: the drive keeps connecting and disconnecting. Each time it reconnects, the kernel tries slower speeds. Eventually it gives up entirely. This is what you see with an unstable physical connection.
+
What SMART told me: the drive itself is healthy. No bad sectors, no media errors, no signs of wear. But there have been dozens of hardware resets — the connection keeps getting interrupted.
+
The suspects, in order of likelihood:
+
+
SATA data cable: the most common culprit for intermittent connection issues. Cables go bad, or weren't seated properly in the first place.
+
Power connection: if the drive isn't getting stable power, it might brown out intermittently.
+
SATA port on the motherboard: less likely, but possible.
+
PSU: power supply issues could affect the power rail feeding the drive. Unlikely, since both disks were fed from the same cable run, but still an option.
+
+
Given that I had just built this server a few weeks earlier, and that a good part of that build happened after midnight... I was beginning to suspect that I might simply not have plugged in the disk properly.
+
The Verdict
+
I was pretty confident now: the drive was fine, but the connection was bad. Most likely the SATA data cable, and most probably simply not connected properly.
+
The fix would require shutting down the server, opening the case, and reseating (or replacing) cables. Before doing that, I wanted to take the drive offline cleanly and document everything.
+
In Part 3, I'll walk through exactly how I fixed it: the ZFS commands, the physical work, and the validation to make sure everything was actually okay afterward.
By now I was pretty confident about what was wrong: not a dying drive, but a flaky SATA connection. The fix should be straightforward. Just take the drive offline, shut down, reseat the cables, bring it back up, and let ZFS heal itself.
+
But I wanted to do this methodically. ZFS is forgiving, but I didn't want to make things worse by rushing.
+
Here was my plan:
+
+
Take the faulty drive offline in ZFS (tell ZFS "stop trying to use this drive")
+
Power down the server
+
Open the case, inspect and reseat cables
+
Boot up, verify the drive is detected
+
Bring the drive back online in ZFS
+
Let the resilver complete
+
Run a scrub to verify data integrity
+
Check SMART one more time
+
+
Let's walk through each step.
+
Step 1: Taking the Drive Offline
+
Before touching hardware, I wanted ZFS to stop trying to use the problematic drive.
+
First, I set up some variables to avoid typos with that long disk ID:
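+
Something along these lines (reconstructed from my notes; the disk ID is the one from the zpool status output above):
+
DISKID="ata-ST4000NT001-3M2101_WX11TN0Z"
+
Then I told ZFS to stop using that device and checked the status again:
+
zpool offline proxmox-tank-1 "$DISKID"
+zpool status -v proxmox-tank-1
+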
The state changed from FAULTED to OFFLINE. ZFS knows I intentionally took it offline rather than it failing on its own. The error counts are still there as a historical record, but ZFS isn't actively trying to use the drive anymore.
+
Time to shut down and get my hands dirty.
+
Step 2: Opening the Case
+
+I powered down the server and opened up the Fractal Node 804. This case has a lovely design, with drive bays accessible from the side. No reaching into weird corners of the case: just unscrew a couple of screws, slide the drive bay out, and there they are, handy and reachable.
+
I located AGAPITO1 (I had handwritten labels on the drives, lesson learned after many sessions of playing "which drive is which") and inspected the connections.
+
Here's the honest truth: everything looked fine. The SATA data cable was plugged in. The power connector was plugged in. Nothing was obviously loose or damaged. There was a bit of tension in the cable as it moved from one area of the case (where the motherboard is) to the drives area, but I really didn't think that was affecting the connection to either the drive or the motherboard itself.
+
But "looks fine" doesn't mean "is fine". So I did a full reseat:
+
+
Unplugged and firmly replugged the SATA data cable at both ends (drive and motherboard).
+
Unplugged and firmly replugged the power connector.
+
While I was in there, checked the connections on the other disk of the mirror as well.
+
+
+I made sure each connector clicked in solidly. Then I closed up the case and hit the power button.
+
Step 3: Booting Up and Verifying Detection
+
I opened a second terminal and started watching the kernel log in real time:
+
dmesg -Tw
+
This would show me immediately if the connection started acting flaky again. For now, it was quiet, showing just normal boot messages, the drive being detected successfully, etc. Nothing alarming.
+
Step 4: Bringing It Back Online
+
Moment of truth. I told ZFS to start using the drive again:
+
zpool online proxmox-tank-1 "$DISKID"
+
Immediately checked the status:
+
zpool status -v proxmox-tank-1
+
pool: proxmox-tank-1
+ state: DEGRADED
+status: One or more devices is currently being resilvered.
+action: Wait for the resilver to complete.
+ scan: resilver in progress since Fri Jan 2 23:17:35 2026
+ 0B resilvered, 0.00% done, no estimated completion time
+
+ NAME STATE READ WRITE CKSUM
+ proxmox-tank-1 DEGRADED 0 0 0
+ mirror-0 DEGRADED 0 0 0
+ ata-ST4000NT001-3M2101_WX11TN0Z DEGRADED 0 0 0 too many errors
+ ata-ST4000NT001-3M2101_WX11TN2P ONLINE 0 0 0
+
Two things to notice: the drive's error counters are now at zero (we're starting fresh), and ZFS immediately started resilvering. It still shows "too many errors" as the reason for the degraded state, but that's historical: ZFS remembers why the drive was marked bad before.
+
I kept watching both the status and the kernel log. No errors, no link resets.
+
Step 5: The Resilver
+
Resilvering is ZFS's term for rebuilding redundancy. Copying data from the healthy drive to the one that fell behind. In my case, the drive had been desynchronized for who knows how long (the pool had drifted 524GB out of sync before I noticed), so there was a lot to copy.
+
I shut down my VMs to reduce I/O contention and let the resilver have the disk bandwidth. Progress:
+
scan: resilver in progress since Fri Jan 2 23:17:35 2026
+ 495G / 618G scanned, 320G / 618G issued at 100M/s
+ 320G resilvered, 51.78% done, 00:50:12 to go
+
The kernel log stayed quiet the whole time. Everything was indicating the cable reseat had worked.
+
I went to bed and let it run overnight. The next morning:
+
scan: resilvered 495G in 01:07:58 with 0 errors on Sat Jan 3 00:25:33 2026
+
495 gigabytes resilvered in about an hour, zero errors. But the pool still showed DEGRADED with a warning about an "unrecoverable error." I was very confused about this, but some research cleared it up. Apparently, ZFS is cautious and wants a human acknowledgement before declaring everything healthy again.
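+
As far as I can reconstruct from my notes, that acknowledgement was a plain zpool clear, followed by another status check:
+
zpool clear proxmox-tank-1
+zpool status -v proxmox-tank-1
+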
pool: proxmox-tank-1
+ state: ONLINE
+ scan: resilvered 495G in 01:07:58 with 0 errors on Sat Jan 3 00:25:33 2026
+
+ NAME STATE READ WRITE CKSUM
+ proxmox-tank-1 ONLINE 0 0 0
+ mirror-0 ONLINE 0 0 0
+ ata-ST4000NT001-3M2101_WX11TN0Z ONLINE 0 0 0
+ ata-ST4000NT001-3M2101_WX11TN2P ONLINE 0 0 0
+
Damn, seeing this felt nice.
+
Step 6: The Scrub
+
A resilver copies data to bring the drives back in sync, but it doesn't verify that all the existing data is still good. For that, you run a scrub. ZFS reads every block on the pool, verifies checksums, and repairs anything that doesn't match.
+
zpool scrub proxmox-tank-1
+
I let this run while I brought my VMs back up (scrubs can run in the background without blocking normal operations, though performance takes a hit). A few hours later:
+
scan: scrub repaired 13.0M in 02:14:22 with 0 errors on Sat Jan 3 11:03:54 2026
+
+ NAME STATE READ WRITE CKSUM
+ proxmox-tank-1 ONLINE 0 0 0
+ mirror-0 ONLINE 0 0 0
+ ata-ST4000NT001-3M2101_WX11TN0Z ONLINE 0 0 992
+ ata-ST4000NT001-3M2101_WX11TN2P ONLINE 0 0 0
+
Interesting. The scrub repaired 13MB of data and found 992 checksum mismatches on AGAPITO1. From what I read, checksum errors are typically a sign of the disk being in terrible shape and needing a replacement ASAP. That sounds scary, but I took the risk and assumed those were blocks that had been written incorrectly (or not at all) during the period when the connection was flaky, and not an issue with the disk itself. ZFS detected the bad checksums and healed them using the good copies from AGAPITO2.
+
I cleared the errors again and the pool was clean:
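+
As best I can reconstruct, that was another zpool clear, followed by one more SMART health check (the last item on the plan):
+
zpool clear proxmox-tank-1
+smartctl -H /dev/sdb
+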
Still passing. The hardware reset count went from 39 to 41 — just the reboots I did during this process.
+
For completeness, I ran the long self-test. The short test only takes a minute and does basic checks; the long test actually reads every sector on the disk, which for a 4TB drive takes... a while.
+
smartctl -t long /dev/sdb
+
The estimated time was about 6 hours. In practice, it took closer to 12. Running VMs in parallel probably didn't help.
+
But eventually:
+
SMART Self-test log structure revision number 1
+Num Test_Description Status Remaining LifeTime(hours) LBA_of_first_error
+# 1 Extended offline Completed without error 00% 1563 -
+# 2 Short offline Completed without error 00% 1551 -
+# 3 Short offline Completed without error 00% 1462 -
+
The extended test passed. Every sector on the disk is readable. The drive is genuinely healthy — it was just the connection that was bad.
+
Lessons Learned
+
+
ZFS did exactly what it's supposed to do: despite 524+ gigabytes of desync and nearly a thousand checksum errors, I lost zero data and was back in action while keeping my VMs running. The healthy drive kept serving everything while the flaky drive was acting up, and once the connection was fixed, ZFS healed itself automatically. Also, I was operating for an unknown amount of time with only one drive. In this case it seems it was due to stupid me messing up cable management, but I'm very happy knowing that if the disk had been genuinely faulty, services would have kept running just fine.
+
Physical connections matter: It's easy to not pay that much attention when building a new box. Well, it bites back.
+
Monitor your pools. I only found this issue by accident, clicking around in the Proxmox UI. The pool had been degraded for who knows how long before I noticed. I'm already working on setting up a monitor in my Uptime Kuma instance so that the next time the pool status stops being ONLINE I get notified immediately (see the sketch after this list).
+
+
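A minimal sketch of what the check feeding that monitor could look like (the push URL and token are placeholders, and it assumes an Uptime Kuma "push" monitor that alerts when it stops receiving pings):
+
#!/bin/bash
+# Ping Uptime Kuma only while every pool is healthy; run this from cron every few minutes.
+KUMA_PUSH_URL="https://uptime.example.com/api/push/REPLACE_WITH_TOKEN"
+
+# 'zpool status -x' prints "all pools are healthy" when there is nothing to report
+if zpool status -x | grep -q "all pools are healthy"; then
+    curl -fsS "${KUMA_PUSH_URL}?status=up&msg=zfs-ok" > /dev/null
+fi
+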
I'm happy I was able to test out recovering from a faulty disk with such a tiny issue. I learned a lot fixing it, and I'm now even happier than before that I decided to go for this ZFS pool setup.
+
Quick Reference: The Commands
+
For future me (and anyone else who ends up here with a degraded pool):
+
# Check pool status
+zpool status -v
+
+# Watch kernel logs in real time
+dmesg -Tw
+
+# Check SMART health
+smartctl -H /dev/sdX
+smartctl -x /dev/sdX
+
+# Take a drive offline before physical work
+zpool offline <pool> <disk-id>
+
+# Bring a drive back online
+zpool online <pool> <disk-id>
+
+# Clear error flags after recovery
+zpool clear <pool>
+
+# Run a scrub to verify all data
+zpool scrub <pool>
+
+# Run SMART self-tests
+smartctl -t short /dev/sdX # Quick test (~1 min)
+smartctl -t long /dev/sdX # Full surface scan (hours)
+smartctl -l selftest /dev/sdX # Check test results
A few weeks into running my new homelab server, I stumbled upon something I wasn't expecting to see that early: my ZFS pool was in "DEGRADED" state. One of my two mirrored drives had gone FAULTED.
+
This was the first machine I had set up with a ZFS mirror, precisely to be able to deal with disk issues smoothly, without losing data or having downtime. Although spotting the problem felt like a pain in the ass, I was also happy, because it gave me a chance to drill the kind of disk maintenance I was hoping to practice on this new server.
+
But here's the thing: when I was in the middle of it, I couldn't find a single resource that walked through the whole experience in detail. Plenty of docs explain what ZFS is. Plenty of forum posts have people asking "help my pool is degraded." But nothing that said "here's what it actually feels like to go through this, step by step, with all the commands and logs and reasoning behind the decisions."
+
So I wrote it down. I took a lot of notes during the process and crafted a more or less organized story from them. This three-part series is for fellow amateur homelabbers who are curious about ZFS, maybe a little intimidated by it, and want to know what happens when things go sideways. I wish I had found a very detailed log like this when I was researching ZFS initially. Hope it helps you.
+
The server and disks
+
My homelab server is a modest but capable box I built in late 2025. It has decent consumer hardware, but nothing remarkable. I'll only note that I currently have three disks in it:
+
+
OS Drive: Kingston KC3000 512GB NVMe. Proxmox lives here.
+
Data Drives: Two Seagate IronWolf Pro 4TB drives (ST4000NT001). This is where my Proxmox VMs get their disks stored.
+
+
The two IronWolf drives are where this story takes place. I labeled them AGAPITO1 and AGAPITO2 because... well, every pair of drives deserves a silly name. I have issues remembering serial numbers.
+
The server runs Proxmox and hosts most of my self-hosted life: personal services, testing VMs, and my Bitcoin infrastructure (which I share over at bitcoininfra.contrapeso.xyz). If this pool goes down, everything goes down.
+
Why ZFS?
+
I'll be honest: I didn't overthink this decision. ZFS is the default storage recommendation for Proxmox, it has a reputation for being rock-solid, and I'd heard enough horror stories about silent data corruption to want something with checksumming built in.
+
What I was most interested in was the ability to define RAID setups in software and deal easily with disks going in and out of them. I had never gone beyond the naive "one disk for the OS, one disk for data" setup in previous servers. After having disks fail on me in previous boxes, I decided it was time to gear up and do it properly this time. My main concern initially was just saving time: it's messy when a "simple" host has disk issues, and I hoped mirroring would let me spend less time cleaning up disasters.
+
Why a Mirror?
+
When I set up the pool, I had two 4TB drives. That gave me a few options:
+
+
No redundancy (two standalone disks or a stripe): maximum space (8TB usable), zero redundancy. One bad sector and you're crying.
+
Mirror: Half the space (4TB usable from 8TB raw), but everything is written to both drives. One drive can completely die and you lose nothing.
+
RAIDZ: Needs at least 3 drives, gives you parity-based redundancy. More space-efficient than mirrors at scale.
+
+
I went with the mirror for a few reasons.
+
First, I only had two drives to start with, so RAIDZ wasn't even an option yet.
+
Second, mirrors are simple. Data goes to both drives. If one dies, the other has everything. No parity calculations, no write penalties, no complexity.
+
Third (and this is the one that sold me), mirrors let you expand incrementally. With ZFS, you can add more mirror pairs (called "vdevs") to your pool later. You can even mix sizes: start with two 4TB drives, add two 8TB drives later, and ZFS will use all of it. RAIDZ doesn't give you that flexibility; once you set your vdev width, you're stuck with it.
+
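For illustration, this is roughly what adding a second mirror pair later would look like (the disk IDs here are made up):
+
zpool add proxmox-tank-1 mirror \
+  /dev/disk/by-id/ata-EXAMPLE_NEW_DISK_1 \
+  /dev/disk/by-id/ata-EXAMPLE_NEW_DISK_2
+
ZFS then stripes new writes across both mirror vdevs, so the extra space becomes usable immediately.
+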
When Would RAIDZ Make More Sense?
+
If you're starting with 4+ drives and you want to maximize usable space, RAIDZ starts looking attractive:
+
+
+
+
Configuration
+
Drives
+
Usable Space
+
Fault Tolerance
+
+
+
+
+
Mirror
+
2
+
50%
+
1 drive
+
+
+
RAIDZ1
+
3
+
~67%
+
1 drive
+
+
+
RAIDZ1
+
4
+
75%
+
1 drive
+
+
+
RAIDZ2
+
4
+
50%
+
2 drives
+
+
+
RAIDZ2
+
6
+
~67%
+
2 drives
+
+
+
+
RAIDZ2 is popular for larger arrays because it can survive two drive failures, which matters more as you add drives (more drives = higher chance of one failing during a resilver).
+
But for a two-drive homelab that might grow to four drives someday, I felt a mirror was the right call. I can always add another mirror pair later.
+
The Pool: proxmox-tank-1
+
My ZFS pool is called proxmox-tank-1. Here's what it looks like when everything is healthy:
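+
Roughly this (reconstructed; it mirrors the healthy status output that shows up later in this story):
+
  pool: proxmox-tank-1
+ state: ONLINE
+
+ NAME STATE READ WRITE CKSUM
+ proxmox-tank-1 ONLINE 0 0 0
+ mirror-0 ONLINE 0 0 0
+ ata-ST4000NT001-3M2101_WX11TN0Z ONLINE 0 0 0
+ ata-ST4000NT001-3M2101_WX11TN2P ONLINE 0 0 0
+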
That's it. One pool, one mirror vdev, two drives. The drives are identified by their serial numbers (the WX11TN0Z and WX11TN2P parts), which is important — ZFS uses stable identifiers so it doesn't get confused if Linux decides to shuffle around /dev/sda and /dev/sdb.
+
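Those stable names live under /dev/disk/by-id/, where each entry is a symlink to whatever /dev/sdX the kernel assigned at boot. A quick way to see the mapping (the grep is just to narrow it down to these two drives):
+
ls -l /dev/disk/by-id/ | grep ST4000NT001
+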
All my Proxmox VMs store their virtual disks on this pool. When I create a new VM, I point its storage at proxmox-tank-1 and ZFS handles the rest.
+
What Could Possibly Go Wrong?
+
Everything was humming along nicely. VMs were running fine and I was feeling pretty good about my setup.
+
Then, a few weeks in, I was poking around the Proxmox web UI and noticed something that caught my eye.
+
The ZFS pool was DEGRADED. One of my drives — AGAPITO1, serial WX11TN0Z — was FAULTED.
+
In Part 2, I'll walk through how I diagnosed what was actually wrong. Spoiler: the drive itself was fine. The problem was much dumber than that.