I have three nodes [PVENODE01, PVENODE02, PVENODE03]. I have successfully configured multipath, and both the VGs and LVs are visible on all nodes.
Now, is it mandatory to configure lvmlockd and dlm for shared LVM?
I couldn't find anything about this in the administration guide.
I'm stuck on this: my lvmlockd service is not stable.
root@PVENODE01:~# systemctl status lvmlockd.service
○ lvmlockd.service - LVM Lock Daemon
Loaded: loaded (/etc/systemd/system/lvmlockd.service; enabled; preset: enabled)
Active: inactive (dead) since Thu 2025-08-07 15:29:17 PKT; 13min ago
Duration: 2ms
Main PID: 2971 (code=exited, status=0/SUCCESS)
CPU: 3ms
Aug 07 15:29:17 PVENODE01 systemd[1]: Started lvmlockd.service - LVM Lock Daemon.
Aug 07 15:29:17 PVENODE01 lvmlockd[2972]: 1754562557 lvmlockd started
Aug 07 15:29:17 PVENODE01 systemd[1]: lvmlockd.service: Deactivated successfully.
root@PVENODE01:~#
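One thing I notice in that output: systemd reports Main PID 2971, but the daemon logs as lvmlockd[2972], so it looks like the process forked and the parent exited straight away. To confirm, I plan to run the daemon in the foreground by hand (hypothetical session; -D/--daemon-debug is how I read lvmlockd(8), please correct me if that flag is wrong):
root@PVENODE01:~# systemctl stop lvmlockd.service
root@PVENODE01:~# /usr/sbin/lvmlockd --daemon-debug --gl-type dlm
(should stay in the foreground and print its log to stdout; Ctrl+C to stop)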
======================================================================
Multipath
root@PVENODE01:~# multipath -ll
mpatha (3600c0ff000f6113fda26d86501000000) dm-6 HPE,MSA 2060 FC
size=17T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
`-+- policy='round-robin 0' prio=10 status=active
`- 1:0:0:1 sdb 8:16 active ready running
mpathb (3600c0ff000f61468b326d86501000000) dm-10 HPE,MSA 2060 FC
size=17T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
`-+- policy='round-robin 0' prio=50 status=active
`- 1:0:0:2 sdc 8:32 active ready running
LSBLK
root@PVENODE01:~# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 223.1G 0 disk
├─sda1 8:1 0 1007K 0 part
├─sda2 8:2 0 1G 0 part /boot/efi
└─sda3 8:3 0 222.1G 0 part
├─pve-swap 252:0 0 5G 0 lvm [SWAP]
├─pve-root 252:1 0 66.3G 0 lvm /
├─pve-data_tmeta 252:2 0 1.3G 0 lvm
│ └─pve-data-tpool 252:4 0 132.1G 0 lvm
│ └─pve-data 252:5 0 132.1G 1 lvm
└─pve-data_tdata 252:3 0 132.1G 0 lvm
└─pve-data-tpool 252:4 0 132.1G 0 lvm
└─pve-data 252:5 0 132.1G 1 lvm
sdb 8:16 0 17.4T 0 disk
└─mpatha 252:6 0 17.4T 0 mpath
├─MSA2060VG1-thinpool1_tmeta 252:7 0 128M 0 lvm
│ └─MSA2060VG1-thinpool1 252:9 0 16T 0 lvm
└─MSA2060VG1-thinpool1_tdata 252:8 0 16T 0 lvm
└─MSA2060VG1-thinpool1 252:9 0 16T 0 lvm
sdc 8:32 0 17.4T 0 disk
└─mpathb 252:10 0 17.4T 0 mpath
├─MSA2060VG2-thinpool2_tmeta 252:11 0 128M 0 lvm
│ └─MSA2060VG2-thinpool2 252:13 0 16T 0 lvm
└─MSA2060VG2-thinpool2_tdata 252:12 0 16T 0 lvm
└─MSA2060VG2-thinpool2 252:13 0 16T 0 lvm
VGS
root@PVENODE01:~# vgs
Configuration setting "activation/use_lvmlockd" unknown.
WARNING: lvmlockd process is not running.
Reading without shared global lock.
VG #PV #LV #SN Attr VSize VFree
MSA2060VG1 1 1 0 wz--n- 17.43t 1.43t
MSA2060VG2 1 1 0 wz--n- 17.43t 1.43t
===========================================================
root@PVENODE01:~# vgs -o +locktype
Configuration setting "activation/use_lvmlockd" unknown.
VG #PV #LV #SN Attr VSize VFree LockType
MSA2060VG1 1 1 0 wz--n- 17.43t 1.43t
MSA2060VG2 1 1 0 wz--n- 17.43t 1.43t
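Side note: the LockType column is empty, so I assume both VGs were created as local VGs (without --shared). If I ever get lvmlockd stable, my understanding from lvmlockd(8) is that converting them would look roughly like this (untested sketch; all LVs have to be deactivated first):
root@PVENODE01:~# vgchange -an MSA2060VG1 MSA2060VG2
root@PVENODE01:~# vgchange --lock-type dlm MSA2060VG1
root@PVENODE01:~# vgchange --lock-type dlm MSA2060VG2
root@PVENODE01:~# vgchange --lock-start
(then repeat the lock-start on every node)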
====================================================
/etc/lvm/lvm.conf
Is this file OK?
root@PVENODE01:~# cat /etc/lvm/lvm.conf
# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there was no
# /etc/lvm/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# Refer to 'man lvm.conf' for information about how settings configured in
# this file are combined with built-in values and command line options to
# arrive at the final values used by LVM.
#
# Refer to 'man lvmconfig' for information about displaying the built-in
# and configured values used by LVM.
#
# If a default value is set in this file (not commented out), then a
# new version of LVM using this file will continue using that value,
# even if the new version of LVM changes the built-in default value.
#
# To put this file in a different directory and override /etc/lvm set
# the environment variable LVM_SYSTEM_DIR before running the tools.
#
# N.B. Take care that each setting only appears once if uncommenting
# example settings in this file.
# Configuration section config.
# How LVM configuration settings are handled.
config {
# Configuration option config/checks.
# If enabled, any LVM configuration mismatch is reported.
# This implies checking that the configuration key is understood by
# LVM and that the value of the key is the proper type. If disabled,
# any configuration mismatch is ignored and the default value is used
# without any warning (a message about the configuration key not being
# found is issued in verbose mode only).
# This configuration option has an automatic default value.
# checks = 1
# Configuration option config/abort_on_errors.
# Abort the LVM process if a configuration mismatch is found.
# This configuration option has an automatic default value.
# abort_on_errors = 0
# Configuration option config/profile_dir.
# Directory where LVM looks for configuration profiles.
# This configuration option has an automatic default value.
# profile_dir = "/etc/lvm/profile"
}
# Configuration section devices.
# How LVM uses block devices.
devices {
# Prefer FC multipath and WWN-style identifiers
preferred_names = [ "^/dev/mapper/mpath", "^/dev/disk/by-id/" ]
# Allowlisted devices: multipath and MSA LUNs
filter = [
"a|/dev/mapper/mpath.*|",
"a|/dev/disk/by-id/scsi-3600c0ff.*|",
"r|.*|"
]
# Skip scanning ZFS and Ceph devices (Proxmox default)
global_filter = [ "r|/dev/zd.*|", "r|/dev/rbd.*|" ]
# Performance and stability
cache_dir = "/etc/lvm/cache"
write_cache_state = 1
multipath_component_detection = 1
}
# Configuration section allocation.
# How LVM selects space and applies properties to LVs.
allocation {
}
# Configuration section log.
# How LVM log information is reported.
log {
verbose = 0
syslog = 1
overwrite = 0
level = 0
indent = 1
}
# Configuration section backup.
# How LVM metadata is backed up and archived.
# In LVM, a 'backup' is a copy of the metadata for the current system,
# and an 'archive' contains old metadata configurations. They are
# stored in a human readable text format.
backup {
}
# Configuration section shell.
# Settings for running LVM in shell (readline) mode.
shell {
# Configuration option shell/history_size.
# Number of lines of history to store in ~/.lvm_history.
# This configuration option has an automatic default value.
# history_size = 100
}
# Configuration section global.
# Miscellaneous global LVM settings.
global {
locking_type = 1
use_lvmlockd = 1
}
# Configuration section activation.
activation {
volume_list = [ "MSA2060VG1", "MSA2060VG2", "@proxmox" ]
use_lvmlockd = 1
}
# Configuration section metadata.
# This configuration section has an automatic default value.
# metadata {
# }
# Configuration section report.
# LVM report command output formatting.
# This configuration section has an automatic default value.
# report {
# }
# Configuration section dmeventd.
# Settings for the LVM event daemon.
dmeventd {
}
# Configuration section tags.
# Host tag settings.
# This configuration section has an automatic default value.
# tags {
# }
root@PVENODE01:~#
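About the warning at the top of the vgs output ("activation/use_lvmlockd" unknown): as far as I can tell from lvm.conf(5), use_lvmlockd belongs in the global section only, not in activation, so I suspect the relevant part of my file should shrink to something like this (sketch; locking_type is obsolete in LVM 2.03 as I understand it, so I would drop it too):
global {
    use_lvmlockd = 1
}
activation {
    volume_list = [ "MSA2060VG1", "MSA2060VG2", "@proxmox" ]
}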
=========================================================================================
/etc/systemd/system/lvmlockd.service
root@PVENODE01:~# cat /etc/systemd/system/lvmlockd.service
[Unit]
Description=LVM Lock Daemon
Requires=dlm.service
After=dlm.service
[Service]
Type=simple
ExecStart=/usr/sbin/lvmlockd --gl-type dlm
Restart=on-failure
[Install]
WantedBy=multi-user.target
root@PVENODE01:~#
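My guess about the 2ms lifetime: lvmlockd seems to daemonize by default (Main PID 2971 exited while lvmlockd[2972] kept logging), so with Type=simple systemd tracks the exiting parent and immediately marks the unit inactive. The upstream unit shipped by lvm2-lockd runs the daemon in the foreground instead; this is the shape I intend to try (sketch, assuming the Debian build has systemd notify support; Type=forking plus a PIDFile would be my fallback):
[Unit]
Description=LVM Lock Daemon
Documentation=man:lvmlockd(8)
Requires=dlm.service
After=dlm.service
[Service]
Type=notify
# --foreground keeps lvmlockd attached so systemd tracks the right PID
ExecStart=/usr/sbin/lvmlockd --foreground --gl-type dlm
SendSIGKILL=no
[Install]
WantedBy=multi-user.target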
====================================================
root@PVENODE01:~# systemctl status dlm
● dlm.service - dlm control daemon
Loaded: loaded (/lib/systemd/system/dlm.service; enabled; preset: enabled)
Active: active (running) since Thu 2025-08-07 15:21:48 PKT; 26min ago
Docs: man:dlm_controld
man:dlm.conf
man:dlm_stonith
Main PID: 1569 (dlm_controld)
Tasks: 3 (limit: 154238)
Memory: 2.8M
CPU: 41ms
CGroup: /system.slice/dlm.service
├─1569 /usr/sbin/dlm_controld --foreground
└─1570 /usr/sbin/dlm_controld --foreground
Aug 07 15:21:48 PVENODE01 systemd[1]: Starting dlm.service - dlm control daemon...
Aug 07 15:21:48 PVENODE01 dlm_controld[1569]: 18 dlm_controld 4.2.0 started
Aug 07 15:21:48 PVENODE01 systemd[1]: Started dlm.service - dlm control daemon.
Aug 07 15:22:19 PVENODE01 dlm_controld[1569]: 49 receive_fence_result 1 from 2 result 177 no need_fencing
root@PVENODE01:~#
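dlm itself looks healthy. To double-check membership and lockspaces I was going to use dlm_tool from dlm-controld (commands per dlm_tool(8)):
root@PVENODE01:~# dlm_tool status
(cluster membership as dlm sees it)
root@PVENODE01:~# dlm_tool ls
(lockspaces; I expect this to stay empty until a shared VG is lock-started)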
==========================================================