Could you please copy & paste the output of the following commands?
Code: `cat /etc/pve/storage.cfg`, `pvesm status`, `mount | grep nfs`
root@proxmox:~# cat /etc/pve/storage.cfg
dir: local
path /var/lib/vz
content backup,iso,vztmpl
lvmthin: local-lvm
thinpool data
vgname pve
content rootdir,images
dir: VMs
path /mnt/pve/VMs
content backup,snippets,rootdir,vztmpl,images,iso
is_mountpoint 1
nodes proxmox
lvm: raid
vgname pve
content rootdir,images
shared 0
lvm: NAS
vgname freenas
content rootdir,images
shared 0
nfs: freenas
export /mnt/NAS/NFS
path /mnt/pve/freenas
server 172.16.1.88
content iso,images,vztmpl,snippets,rootdir,backup
options vers=3
prune-backups keep-last=1
nfs: STOR2
export /mnt/NAS/NFS
path /mnt/pve/STOR2
server 172.16.1.115
content images,iso,backup,snippets,rootdir
prune-backups keep-last=5
nfs: wb-storage-nfs
export /mnt/NAS/NFS
path /mnt/pve/wb-storage-nfs
server wb-storage
content vztmpl,backup,snippets,rootdir,iso,images
options vers=3
prune-backups keep-all=1
root@proxmox:~# pvesm status
Name Type Status Total Used Available %
NAS lvm active 36335235072 36335235072 0 100.00%
STOR2 nfs active 18930489728 5728724224 13201765504 30.26%
VMs dir active 10402030284 500182436 9377543520 4.81%
freenas nfs active 32215149184 4406821248 27808327936 13.68%
local dir active 15158232 5036040 9332484 33.22%
local-lvm lvmthin active 29356032 0 29356032 0.00%
raid lvm active 62386176 54652928 7733248 87.60%
wb-storage-nfs nfs inactive 0 0 0 0.00%
root@proxmox:~# mount | grep nfs
172.16.1.115:/mnt/NAS/NFS on /mnt/pve/STOR2 type nfs (rw,relatime,vers=3,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=172.16.1.115,mountvers=3,mountport=630,mountproto=udp,local_lock=none,addr=172.16.1.115)
172.16.1.88:/mnt/NAS/NFS on /mnt/pve/freenas type nfs (rw,relatime,vers=3,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=172.16.1.88,mountvers=3,mountport=626,mountproto=udp,local_lock=none,addr=172.16.1.88)
172.16.1.88:/mnt/NAS/NFS on /mnt/pve/wb-storage-nfs type nfs (rw,relatime,vers=3,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=172.16.1.88,mountvers=3,mountport=626,mountproto=udp,local_lock=none,addr=172.16.1.88)
172.16.1.88:/mnt/NAS/NFS on /mnt/pve/wb-storage type nfs (rw,relatime,vers=3,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=172.16.1.88,mountvers=3,mountport=626,mountproto=udp,local_lock=none,addr=172.16.1.88)
I know, NFS-mounted shares are in /mnt/pve. First of all, you can just unmount the deleted share /mnt/pve/wb-storage manually.
Storage changes status to active. That IP is in /etc/hosts so it should work. What happens if you change the server property of wb-storage-nfs from wb-storage to the respective IP address?
dir: local
path /var/lib/vz
content iso,backup,vztmpl
zfspool: local-zfs
pool rpool/data
content images,rootdir
sparse 1
zfspool: VMs
pool r10-1TB/VMs
content rootdir,images
mountpoint /r10-1TB/VMs
dir: ISOs
path /r0-500GB/ISOs
content iso
prune-backups keep-all=1
shared 0
nfs: verified-ISOs
export /volume1/proxmox/ISOs/verified/
path /mnt/pve/verified-ISOs
server 172.25.20.18
content iso
prune-backups keep-all=1
nfs: unverified-ISOs
export /volume1/proxmox/ISOs/unverified/
path /mnt/pve/unverified-ISOs
server 172.25.20.18
content iso
prune-backups keep-all=1
Name Type Status Total Used Available %
ISOs dir active 471334272 128 471334144 0.00%
VMs zfspool active 1885338868 192 1885338676 0.00%
local dir active 202142336 1496192 200646144 0.74%
local-zfs zfspool active 200646336 96 200646240 0.00%
unverified-ISOs nfs inactive 0 0 0 0.00%
verified-ISOs nfs inactive 0 0 0 0.00%
172.25.20.18:/volume1/proxmox/ISOs/unverified on /mnt/pve/unverified-ISOs type nfs4 (rw,relatime,vers=4.1,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.25.20.201,local_lock=none,addr=172.25.20.18)
172.25.20.18:/volume1/proxmox/ISOs/verified on /mnt/pve/verified-ISOs type nfs4 (rw,relatime,vers=4.1,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.25.20.201,local_lock=none,addr=172.25.20.18)