Very strange: LVM unable to retain its storage configuration on an HPE MSA 1050 SAN

dominix

Across my dozens of remote Proxmox installs scattered over the Pacific islands, I have a very strange case today.
I have a refurbished four-year-old DL380 with an HPE MSA 1050 SAN,
with Proxmox 8.2.4 just installed (latest update).
I can see my two HBAs, connected via Fibre Channel to the SAN enclosure:

Code:
lspci |grep Fibre
12:00.0 Fibre Channel: QLogic Corp. ISP2722-based 16/32Gb Fibre Channel to PCIe Adapter (rev 01)
13:00.0 Fibre Channel: QLogic Corp. ISP2722-based 16/32Gb Fibre Channel to PCIe Adapter (rev 01)

Code:
 lsblk
NAME         MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
sda            8:0    0 279.4G  0 disk
├─sda1         8:1    0  1007K  0 part
├─sda2         8:2    0     1G  0 part /boot/efi
└─sda3         8:3    0 278.4G  0 part
  ├─pve-swap 252:0    0     8G  0 lvm  [SWAP]
  ├─pve-root 252:1    0    50G  0 lvm  /
  └─pve-data 252:2    0   777G  0 lvm  /var/lib/vz
sdb            8:16   0 279.4G  0 disk 
└─sdb3         8:19   0 279.1G  0 part 
  └─pve-data 252:2    0   777G  0 lvm  /var/lib/vz
sdc            8:32   0 279.4G  0 disk 
└─sdc3         8:35   0 278.9G  0 part 
  └─pve-data 252:2    0   777G  0 lvm  /var/lib/vz
sdd            8:48   1     0B  0 disk
sde            8:64   0   2.9T  0 disk
└─sde1         8:65   0   2.9T  0 part
sdf            8:80   0   2.9T  0 disk
└─sdf1         8:81   0   2.9T  0 part
sdg            8:96   0   2.9T  0 disk
└─sdg1         8:97   0   2.9T  0 part
sdh            8:112  0   2.9T  0 disk
└─sdh1         8:113  0   2.9T  0 part
sr0           11:0    1  1024M  0 rom

I can run pvcreate /dev/sde1 (and the same for sdf1); it says "successful",
but pvs does NOT show /dev/sde1 nor /dev/sdf1.
I can run vgcreate baie2 /dev/sde1 /dev/sdf1; it says "successful",
but vgs does NOT show baie2.
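For anyone debugging a similar "created but invisible" situation, a few generic diagnostic commands (a suggested starting point, not taken from the original post) to check whether LVM is silently ignoring the devices, for example because it sees the same PV ID on several paths:

Code:
# any duplicate-PV or multipath-component warnings since boot?
journalctl -b | grep -iE "duplicate|multipath component"

# what LVM is actually willing to scan, including ignored devices
pvs -a -o +devices
lvmconfig devices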

Code:
pvck --dump metadata  /dev/sde1
  metadata text at 4608 crc 0xa9ab2f15 # vgname baie2 seqno 1
  ---
baie2 {
id = "H2vcgx-PruG-3Unj-rqD1-wdMn-RsSt-NXW5qn"
seqno = 1
format = "lvm2"
status = ["RESIZEABLE", "READ", "WRITE"]
flags = []
extent_size = 8192
max_lv = 0
max_pv = 0
metadata_copies = 0

physical_volumes {

pv0 {
id = "xiq7eB-Tm09-uVeX-5asf-LbUY-lAGo-0dHUgj"
device = "/dev/sde1"

status = ["ALLOCATABLE"]
flags = []
dev_size = 6240227328
pe_start = 2048
pe_count = 761746
}

pv1 {
id = "pYYpMh-uUog-C7YF-r4fz-p6rs-9jhO-Pc4df0"
device = "/dev/sdf1"

status = ["ALLOCATABLE"]
flags = []
dev_size = 6240227328
pe_start = 2048
pe_count = 761746
}
}

}
# Generated by LVM2 version 2.03.16(2) (2022-05-18): Fri Sep 13 06:34:18 2024

contents = "Text Format Volume Group"
version = 1

description = "Write from vgcreate -v baie2 /dev/sde1 /dev/sdf1."

creation_host = "vms-infomic-3"    # Linux vms-infomic-3 6.8.12-1-pve #1 SMP PREEMPT_DYNAMIC PMX 6.8.12-1 (2024-08-05T16:17Z) x86_64
creation_time = 1726245258    # Fri Sep 13 06:34:18 2024
  ---

however

Code:
pvdisplay /dev/sde1 /dev/sdf1 
  Failed to find physical volume "/dev/sde1".
  Failed to find physical volume "/dev/sdf1".

pvdisplay /dev/sde1 /dev/sdf1 --devices /dev/sde1  --devices /dev/sdf1
  --- Physical volume ---
  PV Name               /dev/sde1
  VG Name               baie2
  PV Size               <2.91 TiB / not usable 2.00 MiB
  Allocatable           yes 
  PE Size               4.00 MiB
  Total PE              761746
  Free PE               761746
  Allocated PE          0
  PV UUID               xiq7eB-Tm09-uVeX-5asf-LbUY-lAGo-0dHUgj
   
  --- Physical volume ---
  PV Name               /dev/sdf1
  VG Name               baie2
  PV Size               <2.91 TiB / not usable 2.00 MiB
  Allocatable           yes 
  PE Size               4.00 MiB
  Total PE              761746
  Free PE               761746
  Allocated PE          0
  PV UUID               pYYpMh-uUog-C7YF-r4fz-p6rs-9jhO-Pc4df0

And to complete the confusion

Code:
vgdisplay baie2
  Volume group "baie2" not found
  Cannot process volume group baie2

 vgdisplay baie2 --devices /dev/sde1 --devices /dev/sdf1
  --- Volume group ---
  VG Name               baie2
  System ID             
  Format                lvm2
  Metadata Areas        2
  Metadata Sequence No  1
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                2
  Act PV                2
  VG Size               5.81 TiB
  PE Size               4.00 MiB
  Total PE              1523492
  Alloc PE / Size       0 / 0   
  Free  PE / Size       1523492 / 5.81 TiB
  VG UUID               H2vcgx-PruG-3Unj-rqD1-wdMn-RsSt-NXW5qn

Code:
hdparm -t  /dev/sde1
/dev/sde1:
 Timing buffered disk reads: 2214 MB in  3.00 seconds = 737.54 MB/sec

hdparm -t  /dev/sdf1
/dev/sdf1:
 Timing buffered disk reads: 2312 MB in  3.00 seconds = 770.03 MB/sec

There is no problem on the SAN side: the volumes are created, and the computer can access and write to them...
I don't understand why LVM can't retrieve its VG or PV...
Any help, tips or direction will be appreciated. Thanks for your time, guys.
 
There was no data on the SAN yet, so I reconfigured the MSA 1050 to keep it simple, with only 1 pool, 1 RAID and 1 volume mapped to all known WWNs. After that, LVM picks up my setup at creation time BUT NOT after a reboot.
I was able to create a PV, a VG and an LV, and I created a VM on it; all was fine, but after a reboot I return to the state where the config has disappeared: the second VG is no longer present and the VM does not start.

Code:
pvs
  PV         VG  Fmt  Attr PSize    PFree
  /dev/sda3  pve lvm2 a--   278.39g <1.37g
  /dev/sdb3  pve lvm2 a--  <279.11g     0
  /dev/sdc3  pve lvm2 a--   278.86g     0

no /dev/sde1, but

pvs --devices /dev/sde1
  PV         VG    Fmt  Attr PSize  PFree
  /dev/sde1  baie2 lvm2 a--  <7.27t <2.27t

vgs
  VG  #PV #LV #SN Attr   VSize    VFree
  pve   3   3   0 wz--n- <836.37g <1.37g

no baie2 VG
vgs --devices /dev/sde1
  VG    #PV #LV #SN Attr   VSize  VFree
  baie2   1   1   0 wz--n- <7.27t <2.27t

I don't know why the LVM config is not persistent across reboots, but the data is still on the volume.

Code:
vgdisplay baie2
  Volume group "baie2" not found
  Cannot process volume group baie2
...
vgdisplay baie2 --devices /dev/sde1
  --- Volume group ---
  VG Name               baie2
  System ID             
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  2
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                1
  Open LV               0
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               <7.27 TiB
  PE Size               4.00 MiB
  Total PE              1905149
  Alloc PE / Size       1311232 / 5.00 TiB
  Free  PE / Size       593917 / <2.27 TiB
  VG UUID               4cgfxo-JwAx-6XdU-bin6-ripS-6PuU-WIIqy7

Looks to me like it's a multipath thing... but I have no clue.
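(A quick, generic way to confirm that suspicion, not from the original post: compare the SCSI WWIDs of the two disks; identical WWIDs mean sde and sdf are two paths to the same MSA volume and belong under dm-multipath.)

Code:
/lib/udev/scsi_id -g -u -d /dev/sde
/lib/udev/scsi_id -g -u -d /dev/sdf
# identical output => one LUN seen through two paths
multipath -d -v2    # dry run: show the maps multipathd would create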
 
Did you set up multipath in PVE for the volume(s) you created (so you see each volume just once, as a dm-"x" device, with the number depending on your PVE OS install)?
 
Did you set up multipath in PVE for the volume(s) you created (so you see each volume just once, as a dm-"x" device, with the number depending on your PVE OS install)?

Code:
Sep 16 02:09:59 vms-infomic-3 lvm[1337]: PV /dev/sdf1 8:81 is duplicate for PVID xjfJIT8MROgpYDWhl6EemDn78fyvNhWy on 8:65 /dev/sde1.

So I added the WWN with multipath -a 3600xxxxxxxx... Now I get the error: Cannot use /dev/sde1: device is a multipath component.

I'm going nuts. How do I recover from this mess?
 
vgremove -> pvremove -> multipath setup -> pvcreate -> vgcreate.
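(Spelled out as commands, that sequence might look roughly like the sketch below. It reuses the VG name and the WWID that appear later in this thread, is not taken from the original posts, and should be double-checked before wiping anything; the --devices options are only needed while LVM still refuses to list the old PVs.)

Code:
vgremove baie2 --devices /dev/sde1 --devices /dev/sdf1                 # drop the half-visible VG
pvremove /dev/sde1 /dev/sdf1 --devices /dev/sde1 --devices /dev/sdf1   # wipe the PV labels on both paths
multipath -a 3600c0ff0003c10d74b7be76601000000                         # whitelist the volume's WWID
systemctl restart multipathd                                           # let multipathd build the dm device (and its -part1)
pvcreate /dev/mapper/3600c0ff0003c10d74b7be76601000000-part1
vgcreate baie2 /dev/mapper/3600c0ff0003c10d74b7be76601000000-part1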
OK, thanks waltar.
So now I am past the multipath setup.
multipath looks good:
Code:
lsblk
...
sdd                                           8:48   1     0B  0 disk 
sde                                           8:64   0   7.3T  0 disk 
└─3600c0ff0003c10d74b7be76601000000         252:3    0   7.3T  0 mpath
  └─3600c0ff0003c10d74b7be76601000000-part1 252:4    0   7.3T  0 part 
sdf                                           8:80   0   7.3T  0 disk 
└─3600c0ff0003c10d74b7be76601000000         252:3    0   7.3T  0 mpath
  └─3600c0ff0003c10d74b7be76601000000-part1 252:4    0   7.3T  0 part 
sr0                                          11:0    1  1024M  0 rom

but which device to pick is not obvious:
Code:
pvcreate /dev/sde1
  No device found for /dev/sde1

It's not obvious to use a device name such as /dev/mapper/3600c0ff0003c10d74b7be76601000000-part1 as a PV name, but it sounds like that is the way to go.

Code:
pvs
  PV                                                  VG    Fmt  Attr PSize    PFree
  /dev/mapper/3600c0ff0003c10d74b7be76601000000-part1 baie2 lvm2 a--    <7.27t <7.27t
  /dev/sda3                                           pve   lvm2 a--   278.39g <1.37g
 
By default you end up with names like mpatha1 and mpathb1 (aliases can be chosen freely in the config). One step further back is to "multipath" the MSA volumes first and then (if needed for your usage) GPT-label the dm device and create part1 -> pvcreate -> vgcreate, which ends up in pvs as e.g.: /dev/mapper/mpatha1 baie2 ...
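(As an illustration of that "GPT-label the dm device, create part1, pvcreate, vgcreate" route, a minimal sketch assuming an alias mpatha has been configured for the MSA volume; whether the partition appears as mpatha-part1 or mpatha1 depends on the kpartx/udev setup:)

Code:
parted /dev/mapper/mpatha mklabel gpt                        # GPT label on the multipath device
parted -a optimal /dev/mapper/mpatha mkpart primary 0% 100%  # one partition over the whole volume
kpartx -u /dev/mapper/mpatha                                 # refresh the partition mappings
pvcreate /dev/mapper/mpatha-part1
vgcreate baie2 /dev/mapper/mpatha-part1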
 
Code:
lsblk

└─pve-data               252:2    0   777G  0 lvm  /var/lib/vz
sdd                          8:48   1     0B  0 disk
sde                          8:64   0   7.3T  0 disk
└─sde1                       8:65   0   7.3T  0 part
  └─baie2-vm--501--disk--0 252:3    0     5T  0 lvm
sdf                          8:80   0   7.3T  0 disk
└─sdf1                       8:81   0   7.3T  0 part
sr0                         11:0    1  1024M  0 rom

I cannot see the VG baie2, and cannot see the PV...
pvck /dev/sde1
  Cannot use /dev/sde1: device is a multipath component.

The thing is: I can see /dev/mapper/baie2-vm--501--disk--0:
Code:
fdisk -l /dev/mapper/baie2-vm--501--disk--0
Disk /dev/mapper/baie2-vm--501--disk--0: 5 TiB, 5499705622528 bytes, 10741612544 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 1048576 bytes
Disklabel type: gpt
Disk identifier: 498A7893-442B-4E7D-BA57-7DEBB2736C44

Device                                     Start         End     Sectors Size Type
/dev/mapper/baie2-vm--501--disk--0-part1    2048        4095        2048   1M BIOS boot
/dev/mapper/baie2-vm--501--disk--0-part2    4096     4198399     4194304   2G Linux filesystem
/dev/mapper/baie2-vm--501--disk--0-part3 4198400 10741610495 10737412096   5T Linux filesystem
So I can NOT see baie2, but I can see an LV which is in baie2. Sounds silly to me.
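(That stray mapping can be inspected with dmsetup; a generic sketch, not from the original posts. An activated LVM logical volume shows a DM UUID starting with "LVM-", and its dependencies should point at 8:65, i.e. /dev/sde1:)

Code:
dmsetup info baie2-vm--501--disk--0   # DM UUID beginning with "LVM-" => an activated LV
dmsetup deps baie2-vm--501--disk--0   # which underlying device (major:minor) it sits on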
 
pvck /dev/mapper/baie2 ?
See LVM as a ZFS pool where you create your VMs (with (thin) sizes) inside.
 
Code:
pvck /dev/mapper/baie2
  Cannot access device /dev/mapper/baie2

Seems to be a persistent multipath problem:

Code:
multipath -v2
5550.353950 | mpath0: addmap [0 15606988800 multipath 1 queue_if_no_path 1 alua 2 1 service-time 0 1 1 8:64 1 service-time 0 1 1 8:80 1]
5550.354290 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on mpath0 (252:8) failed: Device or resource busy
5550.354577 | dm_addmap: libdm task=0 error: Success
5550.354759 | mpath0: ignoring map
5550.355710 | mpath0: addmap [0 15606988800 multipath 1 queue_if_no_path 1 alua 2 1 service-time 0 1 1 8:64 1 service-time 0 1 1 8:80 1]
5550.355895 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on mpath0 (252:8) failed: Device or resource busy
5550.355988 | dm_addmap: libdm task=0 error: Success
5550.356042 | mpath0: ignoring map
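(For readers hitting the same "Device or resource busy" on the multipath reload: it usually means something is still holding one of the paths, here most likely the LV that was activated directly on /dev/sde1. A rough, untested sketch of how one might check and release it, reusing the names from this thread:)

Code:
dmsetup ls --tree                        # see what is stacked on 8:64 / 8:80
lvchange -an baie2 --devices /dev/sde1   # deactivate the LV(s) holding the path
multipath -r                             # reload the multipath maps
lsblk                                    # the mpath device should now appear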
 
What does your /etc/multipath.conf look like now?
What does "multipath -ll" say?

Functioning example - change it to your MSA volume WWIDs and your desired alias names:
cat /etc/multipath.conf
# device-mapper-multipath configuration file
# For a complete list of the default configuration values, run either:
# # multipath -t
# or
# # multipathd show config
# For a list of configuration options with descriptions, see the
# multipath.conf man page.
defaults {
      user_friendly_names yes
      find_multipaths yes
      enable_foreign "^$"
}
blacklist_exceptions {
property "(SCSI_IDENT_|ID_WWN)"
}
blacklist {
}
# multipaths: settings for individual multipath devices, eg individual MSA volumes
multipaths {
      multipath {
            wwid  3600c0ff000514066b61b256001000000
            #alias      mpatha
            alias AVol01-usr2
      }
      multipath {
            wwid  3600c0ff000514ea4081c256001000000
            #alias      mpathb
            alias BVol01-usr3
      }
}
##################################################################################
# devices: settings for individual storage controllers, eg MSA2050 all volumes
#devices {
#}
# overrides: settings for all devices
#overrides {
#}


multipath -ll
AVol01-usr2 (3600c0ff000514066b61b256001000000) dm-0 HPE,MSA 2050 SAS
size=58T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='service-time 0' prio=50 status=active
| `- 1:3:1:1 sdb 8:16 active ready running
`-+- policy='service-time 0' prio=10 status=enabled
  `- 1:3:2:2 sde 8:64 active ready running
BVol01-usr3 (3600c0ff000514ea4081c256001000000) dm-1 HPE,MSA 2050 SAS
size=58T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='service-time 0' prio=50 status=active
| `- 1:3:2:1 sdd 8:48 active ready running
`-+- policy='service-time 0' prio=10 status=enabled
  `- 1:3:1:2 sdc 8:32 active ready running
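(After adapting the WWIDs and aliases, applying the configuration and verifying the result is roughly, as a generic sketch:)

Code:
multipathd reconfigure   # re-read /etc/multipath.conf
multipath -ll            # each MSA volume listed once, with both paths
lsblk                    # each volume appears once as an mpath/dm device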
 
