I have a cluster of 8 nodes. ZFS storage shows up just fine as an option when creating VMs, but not when creating a container; the only storage offered for containers is "local". I have this same config saved from an older, working Proxmox version (6.4), and the only difference is that the fast_zfs and encrypted_zfs entries there don't have a mountpoint line. Those mountpoint lines were added by the GUI, though, so I've left them in. I can add any additional info on request.
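In case it helps, this is the sanity check I can run from the CLI. As I understand it, pvesm status with the --content filter only lists storages that advertise the given content type (rootdir is what the container wizard looks for, images is what the VM wizard looks for), so fast_zfs and encrypted_zfs ought to show up in the first command if the config is being honored:

root@cpbx-dbgp1n1:~# pvesm status --content rootdir   # storages offered for container root disks
root@cpbx-dbgp1n1:~# pvesm status --content images    # storages offered for VM disks, for comparison

Happy to post the output of either if that would help. Current config and pool state below.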
/etc/pve/storage.cfg:
dir: local
        path /var/lib/vz
        content vztmpl,iso,backup

zfspool: local-zfs
        pool rpool/data
        content images,rootdir
        sparse 1

zfspool: fast_zfs
        pool tank/data
        content rootdir,images
        mountpoint /tank/data
        nodes cpbx-lindon1n04,cpbx-dbgp1n2,cpbx-dbgp1n3,cpbx-lindon1n05,cpbx-dbgp1n1,cpbx-dbgp1n5,cpbx-dbgp1n4,cpbx-lindon1n01,cpbx-lindon1n02,cpbx-lindon1n03
        sparse 1

zfspool: encrypted_zfs
        pool tank/encrypted_data
        content rootdir,images
        mountpoint /tank/encrypted_data
        sparse 1
=========================================
root@cpbx-dbgp1n1:/etc/pve# zpool status
  pool: rpool
 state: ONLINE
  scan: scrub repaired 0B in 00:00:11 with 0 errors on Sun Jul 14 00:24:12 2024
config:

        NAME                                                   STATE     READ WRITE CKSUM
        rpool                                                  ONLINE       0     0     0
          mirror-0                                             ONLINE       0     0     0
            ata-Samsung_SSD_870_EVO_1TB_S75BNS0W424837K-part3  ONLINE       0     0     0
            ata-Samsung_SSD_870_EVO_1TB_S75BNS0W424835W-part3  ONLINE       0     0     0

errors: No known data errors

  pool: tank
 state: ONLINE
  scan: scrub repaired 0B in 00:00:00 with 0 errors on Sun Jul 14 00:24:01 2024
config:

        NAME                        STATE     READ WRITE CKSUM
        tank                        ONLINE       0     0     0
          mirror-0                  ONLINE       0     0     0
            wwn-0x5002538f33345344  ONLINE       0     0     0
            wwn-0x5002538f33345350  ONLINE       0     0     0

errors: No known data errors
=========================================
root@cpbx-dbgp1n1:/etc/pve# zfs list
NAME                  USED  AVAIL  REFER  MOUNTPOINT
rpool                3.71G   896G   104K  /rpool
rpool/ROOT           3.64G   896G    96K  /rpool/ROOT
rpool/ROOT/pve-1     3.64G   896G  3.64G  /
rpool/data             96K   896G    96K  /rpool/data
tank                 25.6M  3.51T    96K  /tank
tank/config          19.7M  3.51T  6.14M  /tank/config
tank/data              96K  3.51T    96K  /tank/data
tank/encrypted_data   192K  3.51T   192K  /tank/encrypted_data
==========================================
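One more check I can run if it would help narrow things down: I believe the zfspool storage expects the underlying dataset to actually be mounted at the configured mountpoint, so the following should confirm that tank/data and tank/encrypted_data really are mounted where storage.cfg says they are (just my own idea for a diagnostic, not something from the docs):

root@cpbx-dbgp1n1:/etc/pve# zfs get -r mounted,mountpoint tank   # check every tank dataset is mounted at its expected path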
Any pointers would be appreciated.
Thanks,
Elliot