I have a 3x 12TB RAIDZ ZFS pool called zfsdata and am trying to import my Hyper-V VHDXs; however, I am getting an out-of-space error and I cannot work out why:
			root@abe:~# qm importdisk 101 /mnt/ntfs/SelmaData.vhdx zfsdata
importing disk '/mnt/ntfs/SelmaData.vhdx' to VM 101 ...
zfs error: cannot create 'zfsdata/vm-101-disk-0': out of space
root@abe:~# ls -lah /mnt/ntfs/SelmaData.vhdx
-rwxrwxrwx 1 root root 7.0T Sep 23 08:25 /mnt/ntfs/SelmaData.vhdx
root@abe:~# zfs list
NAME                    USED  AVAIL     REFER  MOUNTPOINT
zfsdata                13.6T  8.05T      128K  /zfsdata
zfsdata/vm-100-disk-0  43.7G  8.09T     5.71G  -
zfsdata/vm-101-disk-1  43.7G  8.07T     20.0G  -
zfsdata/vm-101-disk-2  3.33M  8.05T      160K  -
zfsdata/vm-102-disk-0  3.33M  8.05T      181K  -
zfsdata/vm-102-disk-1  43.7G  8.08T     15.2G  -
zfsdata/vm-102-disk-2  13.3T  19.0T     2.33T  -
zfsdata/vm-103-disk-0  3.33M  8.05T      144K  -
zfsdata/vm-103-disk-1  7.33M  8.05T     90.6K  -
zfsdata/vm-103-disk-2   175G  8.21T     16.2G  -
root@abe:~# zfs list -o space -r zfsdata
NAME                   AVAIL   USED  USEDSNAP  USEDDS  USEDREFRESERV  USEDCHILD
zfsdata                8.05T  13.6T        0B    128K             0B      13.6T
zfsdata/vm-100-disk-0  8.09T  43.7G        0B   5.71G          38.0G         0B
zfsdata/vm-101-disk-1  8.07T  43.7G        0B   20.0G          23.6G         0B
zfsdata/vm-101-disk-2  8.05T  3.33M        0B    160K          3.18M         0B
zfsdata/vm-102-disk-0  8.05T  3.33M        0B    181K          3.16M         0B
zfsdata/vm-102-disk-1  8.08T  43.7G        0B   15.2G          28.4G         0B
zfsdata/vm-102-disk-2  19.0T  13.3T        0B   2.33T          11.0T         0B
zfsdata/vm-103-disk-0  8.05T  3.33M        0B    144K          3.19M         0B
zfsdata/vm-103-disk-1  8.05T  7.33M        0B   90.6K          7.24M         0B
zfsdata/vm-103-disk-2  8.21T   175G        0B   16.2G           158G         0B
I have no snapshots:
root@abe:~# zfs list -t snapshot 
no datasets available
The disk image I am trying to import is 10TB with 7TB used (SelmaData.vhdx). If my calculations are correct, I should have about 21.8TB of usable space on zfsdata. My other big VM (which I have successfully imported) is also a 10TB (10,000GB) image, with about 2TB used.
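For reference, my working for that figure: three 12TB disks are about 32.7TiB raw, and RAIDZ1 gives one disk's worth up to parity, leaving roughly two disks' worth, i.e. about 21.8TiB. These are the commands I'd use to compare the pool's raw view against the filesystem's usable view (output omitted):

# zpool reports raw space before parity; zfs reports usable space after parity
zpool list zfsdata
zfs list zfsdata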
Now... I did temporarily copy one of the disk images to /zfsdata and then deleted it on the command line with rm once I had imported it into the VM. I'm wondering if that file is still hanging around somewhere, although I have tried rebooting in case unmounting the ZFS filesystem helped. No change.
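In case it helps, this is how I tried to rule out the deleted copy still consuming space (a plain file under /zfsdata would be charged to the root dataset itself, not to the zvols):

# Space charged to the root dataset's own files, excluding child zvols:
zfs get used,usedbydataset zfsdata
# What is actually left under the mountpoint:
du -sh /zfsdata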
Can anyone shed some light on what I am doing wrong here? Thanks
root@abe:~# zpool history
History for 'zfsdata':
2022-09-22.19:50:33 zpool create -o ashift=12 zfsdata raidz /dev/disk/by-id/ata-WDC_WD120EMFZ-11A6JA0_XJG004GM /dev/disk/by-id/ata-WDC_WD120EMAZ-11BLFA0_5PGW3M9E /dev/disk/by-id/ata-WDC_WD120EDBZ-11B1HA0_5QG4TBGF
2022-09-22.19:50:34 zfs set compression=on zfsdata
2022-09-22.20:13:19 zpool import -c /etc/zfs/zpool.cache -aN
2022-09-22.20:19:04 zfs create -V 33554432k zfsdata/vm-100-disk-0
2022-09-22.20:40:31 zfs create -V 33554432k zfsdata/vm-101-disk-0
2022-09-22.20:55:27 zfs create -V 33554432k zfsdata/vm-101-disk-1
2022-09-22.21:40:59 zfs create -V 1024k zfsdata/vm-101-disk-2
2022-09-22.22:03:51 zfs destroy -r zfsdata/vm-101-disk-0
2022-09-22.22:19:09 zfs create -V 1024k zfsdata/vm-102-disk-0
2022-09-22.22:20:38 zfs create -V 33554432k zfsdata/vm-102-disk-1
2022-09-22.22:29:42 zfs create -V 10485760000k zfsdata/vm-102-disk-2
2022-09-22.22:39:34 zfs destroy -r zfsdata/vm-102-disk-2
2022-09-22.22:54:22 zfs create -V 1024k zfsdata/vm-103-disk-0
2022-09-22.22:54:23 zfs create -V 4096k zfsdata/vm-103-disk-1
2022-09-22.22:54:23 zfs create -V 134217728k zfsdata/vm-103-disk-2
2022-09-23.06:59:22 zfs create -V 10485760000k zfsdata/vm-102-disk-2
2022-09-23.07:13:01 zfs destroy -r zfsdata/vm-102-disk-2
2022-09-23.08:39:29 zfs create -V 10485760000k zfsdata/vm-102-disk-2
2022-09-23.08:44:13 zfs destroy -r zfsdata/vm-102-disk-2
2022-09-23.09:30:44 zpool import -c /etc/zfs/zpool.cache -aN
2022-09-23.09:47:48 zpool import -c /etc/zfs/zpool.cache -aN
2022-09-23.09:50:49 zfs create -V 10485760000k zfsdata/vm-102-disk-2
2022-09-23.09:52:33 zfs destroy -r zfsdata/vm-102-disk-2
2022-09-24.07:42:40 zfs create -V 33554432k zfsdata/vm-102-disk-2
2022-09-24.07:44:18 zfs destroy -r zfsdata/vm-102-disk-2
2022-09-24.07:45:14 zfs create -V 10485760000k zfsdata/vm-102-disk-2
2022-09-24.23:52:08 zpool import -c /etc/zfs/zpool.cache -aN
root@abe:~# zpool status
  pool: zfsdata
 state: ONLINE
config:
        NAME                                    STATE     READ WRITE CKSUM
        zfsdata                                 ONLINE       0     0     0
          raidz1-0                              ONLINE       0     0     0
            ata-WDC_WD120EMFZ-11A6JA0_XJG004GM  ONLINE       0     0     0
            ata-WDC_WD120EMAZ-11BLFA0_5PGW3M9E  ONLINE       0     0     0
            ata-WDC_WD120EDBZ-11B1HA0_5QG4TBGF  ONLINE       0     0     0
errors: No known data errors