root@pve-lab:~# zfs get all NVMe/vm-580-disk-0 | grep used
NVMe/vm-580-disk-0 used 10.3G -
NVMe/vm-580-disk-0 usedbysnapshots 0B -
NVMe/vm-580-disk-0 usedbydataset 7.66G -
NVMe/vm-580-disk-0 usedbychildren 0B -
NVMe/vm-580-disk-0 usedbyrefreservation 2.66G -
NVMe/vm-580-disk-0 logicalused 7.62G -
root@pve-lab:~# zfs get all NVMe/vm-580-disk-0 | grep used
NVMe/vm-580-disk-0 used 9.96G -
NVMe/vm-580-disk-0 usedbysnapshots 0B -
NVMe/vm-580-disk-0 usedbydataset 9.96G -
NVMe/vm-580-disk-0 usedbychildren 0B -
NVMe/vm-580-disk-0 usedbyrefreservation 0B -
NVMe/vm-580-disk-0 logicalused 9.93G -
root@pve-lab:~# zfs get all NVMe/vm-901-disk-0 | grep used
NVMe/vm-901-disk-0 used 82.5G -
NVMe/vm-901-disk-0 usedbysnapshots 0B -
NVMe/vm-901-disk-0 usedbydataset 12.9G -
NVMe/vm-901-disk-0 usedbychildren 0B -
NVMe/vm-901-disk-0 usedbyrefreservation 69.6G -
NVMe/vm-901-disk-0 logicalused 12.8G -
I've set the storage to sparse=1 now, and migrated the disks from that datastore to another and then back, so each one is effectively a new disk, right?
(Running `zfs set reservation=0 NVMe/vm-540-disk-1` doesn't change anything.)