NFS 4.1 - How does Proxmox get the status?

kellogs

Active Member
May 14, 2024
233
35
28
Hello @fiona and all

Do you know how Proxmox gets the status of an NFS share?

I have managed to add Tintri 5.6.0 release using NFS 4.1 and able to have live VMs on it but the status is shown as such
root@node16-84:~# pvesm status
Name Type Status Total Used Available %
PBS-16-254 pbs active 14912781196 8874556332 5286589256 59.51%
local dir active 5698374656 256 5698374400 0.00%
pvedatastore-16195 nfs active 16815803648 17826048 16797977600 0.11%
tintri1638 nfs inactive 0 0 0 0.00%


nfsstat -m
/mnt/pve/tintri1638 from 10.88.90.38:/n04
Flags: rw,relatime,vers=4.1,rsize=262264,wsize=262144,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=null,clientaddr=10.88.90.84,local_lock=none,addr=10.88.90.38


1738194499648.png
 
we simply check if it's mounted and if yes parse the output of /proc/mounts

can you post the output of
Code:
cat /proc/mounts
?
 
  • Like
Reactions: Johannes S
we simply check if it's mounted and if yes parse the output of /proc/mounts

can you post the output of
Code:
cat /proc/mounts
?
Hello Dominik,

Here is the requested info

root@node16-84:~# cat /proc/mounts
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,relatime 0 0
udev /dev devtmpfs rw,nosuid,relatime,size=264130948k,nr_inodes=66032737,mode=755,inode64 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=52832916k,mode=755,inode64 0 0
rpool/ROOT/pve-1 / zfs rw,relatime,xattr,posixacl,casesensitive 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev,inode64 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k,inode64 0 0
cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
bpf /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=30,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=28326 0 0
mqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0
tracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
fusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0
configfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0
ramfs /run/credentials/systemd-sysusers.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
ramfs /run/credentials/systemd-sysctl.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
ramfs /run/credentials/systemd-tmpfiles-setup-dev.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
rpool/var-lib-vz /var/lib/vz zfs rw,relatime,xattr,noacl,casesensitive 0 0
rpool /rpool zfs rw,relatime,xattr,noacl,casesensitive 0 0
rpool/ROOT /rpool/ROOT zfs rw,relatime,xattr,noacl,casesensitive 0 0
rpool/data /rpool/data zfs rw,relatime,xattr,noacl,casesensitive 0 0
ramfs /run/credentials/systemd-tmpfiles-setup.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0
sunrpc /run/rpc_pipefs rpc_pipefs rw,relatime 0 0
lxcfs /var/lib/lxcfs fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
/dev/fuse /etc/pve fuse rw,nosuid,nodev,relatime,user_id=0,group_id=0,default_permissions,allow_other 0 0
172.16.16.195:/volume1/pvedatastore /mnt/pve/pvedatastore-16195 nfs4 rw,relatime,vers=4.0,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.16.16.84,local_lock=none,addr=172.16.16.195 0 0
10.88.90.38:/n04 /mnt/pve/tintri1638 nfs4 rw,relatime,vers=4.1,rsize=262264,wsize=262144,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=null,clientaddr=10.88.90.84,local_lock=none,addr=10.88.90.38 0 0
tmpfs /run/user/0 tmpfs rw,nosuid,nodev,relatime,size=52832912k,nr_inodes=13208228,mode=700,inode64 0 0

Thank you for looking into this
 
Last edited:
Last edited:
Maybe you have slow access to this NFS share? You can send the results of:
time /usr/sbin/rpcinfo -T tcp 10.88.90.38 nfs 4
time /sbin/showmount --exports 10.88.90.38
time ls /mnt/pve/tintri1638
 
vers=4.1,rsize=262264,wsize=262144
Still wondering why is your tintri nfs export and/or nfs client mount "limited" to version and r/w size, should be default v4.2 and r|wsize=1048576 ?!
But anyway that is no reason for your "?" on pve webui storage icon view.
 
Maybe you have slow access to this NFS share? You can send the results of:
time /usr/sbin/rpcinfo -T tcp 10.88.90.38 nfs 4
time /sbin/showmount --exports 10.88.90.38
time ls /mnt/pve/tintri1638
root@node16-84:~# time /usr/sbin/rpcinfo -T tcp 10.88.90.38 nfs 4
program 100003 version 4 ready and waiting

real 0m0.006s
user 0m0.000s
sys 0m0.003s
root@node16-84:~# time /sbin/showmount --exports 10.88.90.38
Export list for 10.88.90.38:
/tintri *

real 0m0.018s
user 0m0.002s
sys 0m0.003s


root@node16-84:~# time ls /mnt/pve/tintri1638 nfs4
ls: cannot access 'nfs4': No such file or directory
/mnt/pve/tintri1638:
dump images template

real 0m0.004s
user 0m0.002s
sys 0m0.000s
 
root@node16-84:~# time /usr/sbin/rpcinfo -T tcp 10.88.90.38 nfs 4
program 100003 version 4 ready and waiting

real 0m0.006s
user 0m0.000s
sys 0m0.003s
root@node16-84:~# time /sbin/showmount --exports 10.88.90.38
Export list for 10.88.90.38:
/tintri *

real 0m0.018s
user 0m0.002s
sys 0m0.003s


root@node16-84:~# time ls /mnt/pve/tintri1638 nfs4
ls: cannot access 'nfs4': No such file or directory
/mnt/pve/tintri1638:
dump images template

real 0m0.004s
user 0m0.002s
sys 0m0.000s
Ok, no problem here.
Can you dump here the file /etc/pve/storage.cfg ?
 
root@node16-84:~# cat /etc/pve/storage.cfg
dir: local
path /var/lib/vz
content backup,vztmpl,iso

nfs: pvedatastore-16195
export /volume1/pvedatastore
path /mnt/pve/pvedatastore-16195
server 172.16.16.195
content backup,iso,images
options vers=4
prune-backups keep-all=1

pbs: PBS-16-254
datastore backup
server 172.16.16.254
content backup
fingerprint xxx
namespace N04-LAB
prune-backups keep-all=1
username root@pam

nfs: tintri1638
export /n04/
path /mnt/pve/tintri1638
server 10.88.90.38
content images,iso,vztmpl,backup
options vers=4.1
prune-backups keep-all=1