I have an issue with the server not seeing the LVM volumes on the two drives that now show as inactive. I have made no changes to those drives, but I did add some different HDDs. I have searched the forum but cannot find anything that matches my issue.
Code:
root@prox:~# pvesm status
Command failed with status code 5.
command '/sbin/vgscan --ignorelockingfailure --mknodes' failed: exit code 5
no such logical volume PrimaryRaid0-Thin/PrimaryRaid0-Thin
Name                   Type     Status         Total        Used   Available      %
LargeHDD-6             lvm      active    3907014656           0  3907014656  0.00%
LargeHDD-7             lvm      active    3907014656           0  3907014656  0.00%
PrimaryRaid0           lvm      inactive           0           0           0  0.00%
PrimaryRaid0-Thin      lvmthin  inactive           0           0           0  0.00%
SecondaryRaid0         lvm      inactive           0           0           0  0.00%
local                  dir      active      98497780     7286836    86161396  7.40%
local-lvm              lvmthin  active     832356352   220824140   611532211 26.53%
root@prox:~# cat /etc/pve/storage.cfg
dir: local
    path /var/lib/vz
    content backup,vztmpl,iso

lvmthin: local-lvm
    thinpool data
    vgname pve
    content rootdir,images

lvm: SecondaryRaid0
    vgname SecondaryRaid0
    content images,rootdir
    nodes prox
    shared 0

lvm: PrimaryRaid0
    vgname PrimaryRaid0
    content images,rootdir
    nodes prox
    shared 0

lvmthin: PrimaryRaid0-Thin
    thinpool PrimaryRaid0-Thin
    vgname PrimaryRaid0-Thin
    content rootdir,images
    nodes prox

lvm: LargeHDD-6
    vgname LargeHDD-6
    content rootdir,images
    nodes prox
    shared 0

lvm: LargeHDD-7
    vgname LargeHDD-7
    content rootdir,images
    nodes prox
    shared 0
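
In case it is useful, I have not dug any deeper than pvesm status yet. My plan was to check whether LVM itself still sees those volume groups with something like the commands below (volume group names taken from the output above); I am posting them here mainly so someone can tell me if this is the wrong approach:

Code:
# list physical volumes, volume groups and all logical volumes (including thin pools)
pvs
vgs
lvs -a
# re-scan for volume groups, similar to the command pvesm reported as failing
vgscan --mknodes
# try to activate the volume groups that currently show as inactive
vgchange -ay PrimaryRaid0 SecondaryRaid0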
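
One other thing I noticed while pasting the config: the working local-lvm entry uses the volume group (pve) for vgname and the thin pool LV (data) for thinpool, while my PrimaryRaid0-Thin entry uses PrimaryRaid0-Thin for both, which matches the "no such logical volume PrimaryRaid0-Thin/PrimaryRaid0-Thin" error. I do not know if that is actually the problem, but if the thin pool LV lives inside the PrimaryRaid0 volume group, would the entry need to look more like this instead (just my guess, following the local-lvm pattern)?

Code:
lvmthin: PrimaryRaid0-Thin
    thinpool PrimaryRaid0-Thin
    vgname PrimaryRaid0
    content rootdir,images
    nodes prox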