Hi,
I upgraded one of the blades in my Intel Modular Server from Proxmox 6.4 to 7.0.
After the reboot, multipath does not show any devices:
Code:
root@proxmox106:~# multipath -ll
root@proxmox106:~#
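The underlying SCSI paths might still be visible to the kernel even though multipath builds no maps. As a first check (just a sketch, not confirmed output; /dev/sda is an example path device), I could verify that both paths and their WWIDs are still there with:
Code:
# list block devices with their WWNs, sizes and vendor/model
lsblk -o NAME,WWN,SIZE,TYPE,VENDOR,MODEL

# query the WWID of one path device directly (device name is an example)
/lib/udev/scsi_id -g -u -d /dev/sda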
On another node still running Proxmox 6.4 I have:
Code:
root@proxmox105:~# multipath -ll
sistema (222be000155bb7f72) dm-0 Intel,Multi-Flex
size=20G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| `- 0:0:0:0 sda 8:0 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 0:0:1:0 sde 8:64 active ready running
vol2 (222640001555bdbf2) dm-6 Intel,Multi-Flex
size=1.3T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| `- 0:0:1:2 sdg 8:96 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 0:0:0:2 sdc 8:32 active ready running
volssd (22298000155c08ddd) dm-8 Intel,Multi-Flex
size=2.1T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| `- 0:0:0:3 sdd 8:48 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 0:0:1:3 sdh 8:112 active ready running
vol1 (222d9000155080015) dm-4 Intel,Multi-Flex
size=1.3T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| `- 0:0:0:1 sdb 8:16 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 0:0:1:1 sdf 8:80 active ready running
multipath -v1 shows:
Code:
root@proxmox106:~# multipath -v1
Jul 20 15:29:16 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on sistema (253:98) failed: Device or resource busy
Jul 20 15:29:16 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on vol1 (253:98) failed: Device or resource busy
Jul 20 15:29:16 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on vol2 (253:98) failed: Device or resource busy
Jul 20 15:29:16 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on volssd (253:98) failed: Device or resource busy
Jul 20 15:29:17 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on sistema (253:98) failed: Device or resource busy
Jul 20 15:29:17 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on vol1 (253:98) failed: Device or resource busy
Jul 20 15:29:18 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on vol2 (253:98) failed: Device or resource busy
Jul 20 15:29:18 | libdevmapper: ioctl/libdm-iface.c(1927): device-mapper: reload ioctl on volssd (253:98) failed: Device or resource busy
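The "Device or resource busy" errors suggest the device-mapper targets (or the underlying path devices) are already claimed by something else, possibly LVM activating directly on the single sdX paths before multipath can assemble them. A sketch of what I can check (commands only, I have not collected the output yet):
Code:
# show the device-mapper stack as a tree, to see which maps exist and what they sit on
dmsetup ls --tree

# list dm devices with their open counts
dmsetup info -c

# check whether LVM logical volumes are sitting on raw sdX paths instead of the multipath maps
lvs -a -o +devices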
pveversion -v:
Code:
proxmox-ve: 7.0-2 (running kernel: 5.11.22-1-pve)
pve-manager: 7.0-9 (running version: 7.0-9/228c9caa)
pve-kernel-helper: 7.0-4
pve-kernel-5.11: 7.0-3
pve-kernel-5.4: 6.4-4
pve-kernel-5.11.22-1-pve: 5.11.22-2
pve-kernel-5.4.124-1-pve: 5.4.124-1
ceph-fuse: 14.2.21-1
corosync: 3.1.2-pve2
criu: 3.15-1+pve-1
glusterfs-client: 9.2-1
ifupdown: 0.8.36
ksm-control-daemon: 1.4-1
libjs-extjs: 7.0.0-1
libknet1: 1.21-pve1
libproxmox-acme-perl: 1.1.1
libproxmox-backup-qemu0: 1.2.0-1
libpve-access-control: 7.0-4
libpve-apiclient-perl: 3.2-1
libpve-common-perl: 7.0-4
libpve-guest-common-perl: 4.0-2
libpve-http-server-perl: 4.0-2
libpve-storage-perl: 7.0-9
libqb0: 1.0.5-1
libspice-server1: 0.14.3-2.1
lvm2: 2.03.11-2.1
lxc-pve: 4.0.9-2
lxcfs: 4.0.8-pve2
novnc-pve: 1.2.0-3
proxmox-backup-client: 2.0.4-1
proxmox-backup-file-restore: 2.0.4-1
proxmox-mini-journalreader: 1.2-1
proxmox-widget-toolkit: 3.3-4
pve-cluster: 7.0-3
pve-container: 4.0-8
pve-docs: 7.0-5
pve-edk2-firmware: 3.20200531-1
pve-firewall: 4.2-2
pve-firmware: 3.2-4
pve-ha-manager: 3.3-1
pve-i18n: 2.4-1
pve-qemu-kvm: 6.0.0-2
pve-xtermjs: 4.12.0-1
qemu-server: 7.0-10
smartmontools: 7.2-pve2
spiceterm: 3.2-2
vncterm: 1.7-1
zfsutils-linux: 2.0.4-pve1
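pveversion does not list multipath-tools itself; since the upgrade to Debian 11 brings a newer multipath-tools (0.8.x instead of 0.7.x, if I am not mistaken), its exact version may matter here:
Code:
# show the installed multipath-tools and kpartx versions
dpkg -l multipath-tools kpartx | grep '^ii'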
/etc/multipath.conf:
Code:
##
## multipath-tools configuration file /etc/multipath.conf
##
blacklist {
    devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
    devnode "^(hd|xvd)[a-z][[0-9]*]"
    devnode "^cciss!c[0-9]d[0-9]*[p[0-9]*]"
}
blacklist_exceptions {
    property "(SCSI_IDENT_.*|ID_WWN|ID_SERIAL)"
    wwid "22250000155dcef22"
    wwid "222d9000155080015"
    wwid "222640001555bdbf2"
    wwid "22298000155c08ddd"
}
defaults {
    polling_interval 2
    path_selector "round-robin 0"
    path_grouping_policy multibus
    getuid_callout "/lib/udev/scsi_id -g -u -d /dev/%n"
    rr_min_io 100
    failback immediate
    no_path_retry queue
    user_friendly_names yes
}
devices {
    device {
        vendor "Intel"
        product "Multi-Flex"
        uid_attribute "ID_SERIAL"
        path_grouping_policy "group_by_prio"
        prio "alua"
        path_checker tur
        hardware_handler "1 alua"
        rr_min_io 100
        failback immediate
        no_path_retry queue
        rr_weight uniform
        product_blacklist "VTrak V-LUN"
    }
}
multipaths {
    multipath {
        wwid 22250000155dcef22
        alias sistema
    }
    multipath {
        wwid 222d9000155080015
        alias vol1
    }
    multipath {
        wwid 222640001555bdbf2
        alias vol2
    }
    multipath {
        wwid 22298000155c08ddd
        alias volssd
    }
}
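Since multipath-tools in Proxmox 7 is newer, some of these options may no longer be accepted (getuid_callout, for instance, was deprecated in favour of uid_attribute and, as far as I know, is rejected by 0.8.x). To see how the daemon actually parses this file, I could run:
Code:
# dump the effective configuration as multipathd parses it
multipathd show config

# run discovery with maximum verbosity and filter for blacklist/wwid decisions
multipath -v3 2>&1 | grep -iE 'wwid|blacklist|invalid'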
How can I resolve this?