Hi all, quick rundown of my setup and issue.
I have a two-node cluster connected to an HP MSA 2040 SAS array; it took me ages to get multipathing set up and working across both nodes.
I hadn't rebooted the PVE nodes since then, but I decided to update them today, which required a reboot.
I evacuated all VMs from node 1 and rebooted it - no issues, and all the disks show up as mpath devices. All good.
I then moved all the VMs over to node 1 so I could reboot node 2, and this is when the issues started.
Node 2 now will not bring the disks up under multipath; every map fails with "Device or resource busy":
root@proxmox02:~# multipath -v2
1534.847304 | Virtual_Machines: addmap [0 3906248704 multipath 1 queue_if_no_path 1 alua 2 1 round-robin 0 1 1 8:48 1 round-robin 0 1 1 8:0 1]
1534.848258 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on Virtual_Machines (252:31) failed: Device or resource busy
1534.848544 | dm_addmap: libdm task=0 error: Success
1534.848586 | Virtual_Machines: ignoring map
1534.848964 | Data1: addmap [0 3906248704 multipath 1 queue_if_no_path 1 alua 2 1 round-robin 0 1 1 8:16 1 round-robin 0 1 1 8:64 1]
1534.849356 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on Data1 (252:31) failed: Device or resource busy
1534.849528 | dm_addmap: libdm task=0 error: Success
1534.849570 | Data1: ignoring map
1534.849898 | Data2: addmap [0 1171873792 multipath 1 queue_if_no_path 1 alua 2 1 round-robin 0 1 1 8:112 1 round-robin 0 1 1 8:32 1]
1534.850123 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on Data2 (252:31) failed: Device or resource busy
1534.850202 | dm_addmap: libdm task=0 error: Success
1534.850237 | Data2: ignoring map
1534.850805 | Virtual_Machines: addmap [0 3906248704 multipath 1 queue_if_no_path 1 alua 2 1 round-robin 0 1 1 8:48 1 round-robin 0 1 1 8:0 1]
1534.851015 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on Virtual_Machines (252:31) failed: Device or resource busy
1534.851102 | dm_addmap: libdm task=0 error: Success
1534.851130 | Virtual_Machines: ignoring map
1534.851667 | Data1: addmap [0 3906248704 multipath 1 queue_if_no_path 1 alua 2 1 round-robin 0 1 1 8:16 1 round-robin 0 1 1 8:64 1]
1534.851876 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on Data1 (252:31) failed: Device or resource busy
1534.851938 | dm_addmap: libdm task=0 error: Success
1534.851962 | Data1: ignoring map
1534.852502 | Data2: addmap [0 1171873792 multipath 1 queue_if_no_path 1 alua 2 1 round-robin 0 1 1 8:112 1 round-robin 0 1 1 8:32 1]
1534.852682 | libdevmapper: ioctl/libdm-iface.c(1980): device-mapper: reload ioctl on Data2 (252:31) failed: Device or resource busy
1534.852746 | dm_addmap: libdm task=0 error: Success
1534.852771 | Data2: ignoring map
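I haven't dug much further yet, but my next step was going to be checking what is actually holding the underlying paths, with something along these lines (just the stock dmsetup tooling and sysfs, nothing custom):

dmsetup ls --tree                  # show every device-mapper device and the block devices it sits on
ls /sys/block/sdd/sdd1/holders/    # list whatever is holding one of the paths multipath is complaining about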
root@proxmox02:~# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 1.8T 0 disk
└─sda1 8:1 0 1.8T 0 part
sdb 8:16 0 1.8T 0 disk
└─sdb1 8:17 0 1.8T 0 part
└─Data1-vm--102--disk--0 252:0 0 1T 0 lvm
sdc 8:32 0 558.8G 0 disk
└─sdc1 8:33 0 558.8G 0 part
sdd 8:48 0 1.8T 0 disk
└─sdd1 8:49 0 1.8T 0 part
├─Virtual_Machines-vm--103--disk--0 252:6 0 4M 0 lvm
├─Virtual_Machines-vm--103--disk--1 252:7 0 100G 0 lvm
├─Virtual_Machines-vm--103--disk--2 252:8 0 4M 0 lvm
├─Virtual_Machines-vm--101--disk--0 252:9 0 4M 0 lvm
├─Virtual_Machines-vm--101--disk--1 252:10 0 4M 0 lvm
├─Virtual_Machines-vm--101--disk--2 252:11 0 100G 0 lvm
├─Virtual_Machines-vm--106--disk--0 252:12 0 32G 0 lvm
├─Virtual_Machines-vm--102--disk--0 252:13 0 4M 0 lvm
├─Virtual_Machines-vm--102--disk--1 252:14 0 4M 0 lvm
├─Virtual_Machines-vm--102--disk--2 252:15 0 100G 0 lvm
├─Virtual_Machines-vm--105--disk--0 252:16 0 4M 0 lvm
├─Virtual_Machines-vm--105--disk--1 252:17 0 4M 0 lvm
├─Virtual_Machines-vm--105--disk--2 252:18 0 100G 0 lvm
├─Virtual_Machines-vm--107--disk--0 252:19 0 4M 0 lvm
├─Virtual_Machines-vm--107--disk--1 252:20 0 4M 0 lvm
├─Virtual_Machines-vm--107--disk--2 252:21 0 80G 0 lvm
├─Virtual_Machines-vm--104--disk--0 252:22 0 4M 0 lvm
├─Virtual_Machines-vm--104--disk--1 252:23 0 4M 0 lvm
├─Virtual_Machines-vm--104--disk--2 252:24 0 80G 0 lvm
├─Virtual_Machines-vm--108--disk--0 252:25 0 32G 0 lvm
├─Virtual_Machines-vm--109--disk--0 252:26 0 32G 0 lvm
└─Virtual_Machines-vm--100--disk--0 252:27 0 32G 0 lvm
sde 8:64 0 1.8T 0 disk
└─sde1 8:65 0 1.8T 0 part
sdf 8:80 0 838.2G 0 disk
└─ISOs 252:30 0 838.2G 0 mpath
sdg 8:96 0 838.2G 0 disk
└─ISOs 252:30 0 838.2G 0 mpath
sdh 8:112 0 558.8G 0 disk
└─sdh1 8:113 0 558.8G 0 part
└─Data2-vm--104--disk--0 252:5 0 300G 0 lvm
sdi 8:128 1 0B 0 disk
sdj 8:144 0 223.5G 0 disk
├─sdj1 8:145 0 1007K 0 part
├─sdj2 8:146 0 1G 0 part /boot/efi
└─sdj3 8:147 0 222.5G 0 part
├─pve-swap 252:1 0 8G 0 lvm [SWAP]
├─pve-root 252:2 0 65.6G 0 lvm /
├─pve-data_tmeta 252:3 0 1.3G 0 lvm
│ └─pve-data-tpool 252:28 0 130.2G 0 lvm
│ └─pve-data 252:29 0 130.2G 1 lvm
└─pve-data_tdata 252:4 0 130.2G 0 lvm
└─pve-data-tpool 252:28 0 130.2G 0 lvm
└─pve-data 252:29 0 130.2G 1 lvm
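From that lsblk output, my guess is that LVM activated the Virtual_Machines, Data1 and Data2 VGs directly on the single-path partitions (sdd1, sdb1 and sdh1) at boot, before multipath could claim those paths, and the active LVs are what is returning "Device or resource busy". What I was thinking of trying next, assuming nothing on this node actually needs those LVs while all the VMs are sitting on node 1, is roughly:

vgchange -an Virtual_Machines Data1 Data2   # deactivate the LVs holding the raw sdX1 paths
multipath -F                                # flush any half-built maps
multipath -r                                # rebuild the multipath devices
multipath -ll                               # confirm the mpath maps come back

and then maybe look at an lvm.conf filter (or multipath_component_detection) so LVM stops grabbing the raw /dev/sd* paths at boot, but I'm not sure that's the right approach on PVE, hence the post.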
Any ideas on where I'm going wrong here would be very much appreciated.
Thanks