root@proxmox:~# ls /dev/mapper
control pve-swap VMs-vm--103--disk--0 VMs-vm--108--disk--0 VMs-vm--113--disk--0 VMs-VMs-tpool
pve-data VMs-vm--100--disk--0 VMs-vm--104--disk--0 VMs-vm--109--disk--0 VMs-vm--114--disk--0
pve-data_tdata VMs-vm--101--disk--0 VMs-vm--105--disk--0 VMs-vm--110--disk--0 VMs-VMs
pve-data_tmeta VMs-vm--102--disk--0 VMs-vm--106--disk--0 VMs-vm--111--disk--0 VMs-VMs_tdata
pve-root VMs-vm--102--disk--1 VMs-vm--107--disk--0 VMs-vm--112--disk--0 VMs-VMs_tmeta
global_filter = [ "r|/dev/zd.*|", "r|/dev/mapper/pve-.*|" "r|/dev/mapper/.*-(vm|base)--[0-9]+--disk--[0-9]+|", "r|/dev/s$
root@proxmox:~# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 931.5G 0 disk
└─sda1 8:1 0 931.5G 0 part /mnt/pve/Backups
sdb 8:16 0 232.9G 0 disk
├─sdb1 8:17 0 1007K 0 part
├─sdb2 8:18 0 512M 0 part /boot/efi
└─sdb3 8:19 0 229.5G 0 part
├─pve-swap 253:0 0 8G 0 lvm [SWAP]
├─pve-root 253:1 0 57.3G 0 lvm /
├─pve-data_tmeta 253:2 0 1.5G 0 lvm
│ └─pve-data 253:4 0 145.3G 0 lvm
└─pve-data_tdata 253:3 0 145.3G 0 lvm
└─pve-data 253:4 0 145.3G 0 lvm
nvme0n1 259:0 0 465.8G 0 disk
├─VMs-VMs_tmeta 253:5 0 4.7G 0 lvm
│ └─VMs-VMs-tpool 253:7 0 456.3G 0 lvm
│ ├─VMs-VMs 253:8 0 456.3G 0 lvm
│ ├─VMs-vm--101--disk--0 253:9 0 2G 0 lvm
│ ├─VMs-vm--100--disk--0 253:10 0 3G 0 lvm
│ ├─VMs-vm--104--disk--0 253:11 0 2G 0 lvm
│ ├─VMs-vm--105--disk--0 253:12 0 8G 0 lvm
│ ├─VMs-vm--108--disk--0 253:13 0 50G 0 lvm
│ ├─VMs-vm--103--disk--0 253:14 0 160G 0 lvm
│ ├─VMs-vm--106--disk--0 253:15 0 32G 0 lvm
│ ├─VMs-vm--107--disk--0 253:16 0 8G 0 lvm
│ ├─VMs-vm--109--disk--0 253:17 0 3G 0 lvm
│ ├─VMs-vm--102--disk--0 253:18 0 10G 0 lvm
│ ├─VMs-vm--102--disk--1 253:19 0 200G 0 lvm
│ ├─VMs-vm--110--disk--0 253:20 0 50G 0 lvm
│ ├─VMs-vm--111--disk--0 253:21 0 32G 0 lvm
│ ├─VMs-vm--112--disk--0 253:22 0 32G 0 lvm
│ ├─VMs-vm--113--disk--0 253:23 0 32G 0 lvm
│ └─VMs-vm--114--disk--0 253:24 0 32G 0 lvm
└─VMs-VMs_tdata 253:6 0 456.3G 0 lvm
└─VMs-VMs-tpool 253:7 0 456.3G 0 lvm
├─VMs-VMs 253:8 0 456.3G 0 lvm
├─VMs-vm--101--disk--0 253:9 0 2G 0 lvm
├─VMs-vm--100--disk--0 253:10 0 3G 0 lvm
├─VMs-vm--104--disk--0 253:11 0 2G 0 lvm
├─VMs-vm--105--disk--0 253:12 0 8G 0 lvm
├─VMs-vm--108--disk--0 253:13 0 50G 0 lvm
├─VMs-vm--103--disk--0 253:14 0 160G 0 lvm
├─VMs-vm--106--disk--0 253:15 0 32G 0 lvm
├─VMs-vm--107--disk--0 253:16 0 8G 0 lvm
├─VMs-vm--109--disk--0 253:17 0 3G 0 lvm
├─VMs-vm--102--disk--0 253:18 0 10G 0 lvm
├─VMs-vm--102--disk--1 253:19 0 200G 0 lvm
├─VMs-vm--110--disk--0 253:20 0 50G 0 lvm
├─VMs-vm--111--disk--0 253:21 0 32G 0 lvm
├─VMs-vm--112--disk--0 253:22 0 32G 0 lvm
├─VMs-vm--113--disk--0 253:23 0 32G 0 lvm
└─VMs-vm--114--disk--0 253:24 0 32G 0 lvm
root@proxmox:~#
MAYBE I need to edit the /etc/lvm/lvm.conf global_filter?
cryptsetup: WARNING: The initramfs image may not contain cryptsetup binaries
nor crypto modules. If that's on purpose, you may want to uninstall the
'cryptsetup-initramfs' package in order to disable the cryptsetup initramfs
integration and avoid this warning.
update-initramfs -u -k all
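If you want to double-check that a rebuilt initrd actually picked up the edited lvm.conf, the Debian initramfs-tools utilities can list and unpack the image (just a sanity-check sketch; the kernel version in the path is only an example):
Code:
# check that the LVM config is packed into the initrd at all
lsinitramfs /boot/initrd.img-5.4.44-2-pve | grep lvm.conf
# unpack the image and look at the filter that actually ended up inside it
# (on images with early microcode the tree is nested under a main/ subdirectory)
unmkinitramfs /boot/initrd.img-5.4.44-2-pve /tmp/initrd-check
grep -r global_filter /tmp/initrd-check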
After fiddling with PCIe passthrough, I reinstalled Proxmox after that incident. I think I did something wrong there, so I'm not sure if this is related to the problem. I also have to say that the initial post probably contains a typo:
they state that 5.4.44-2 is broken and 5.4.44-1 works, but I think they meant to write 5.4.41-1...
No, the initramfs doesn't look at that, it rather seems like a kernel issue - just trying to find the common factor so that we can try to reproduce it here, or pin it down to a specific change...
Three things you still could check:
Any news? :/
I copied initrd.img-5.4.41-1-pve and initrd.img-5.4.44-1-pve from my server to my workstation, unpacked both files into two directories, and compared both directories with diff. The file etc/lvm/lvm.conf is different (I edited it to exclude two disks from the LVM stat scan so they spin down after some time):

$ diff 41/etc/lvm/lvm.conf 44/etc/lvm/lvm.conf
129c129
< global_filter = [ "r|/dev/zd.*|", "r|/dev/mapper/pve-.*|" "r|/dev/mapper/.*-(vm|base)--[0-9]+--disk--[0-9]+|"]
---
> global_filter = [ "r|/dev/zd.*|", "r|/dev/mapper/pve-.*|" "r|/dev/mapper/.*-(vm|base)--[0-9]+--disk--[0-9]+|", "r|/dev/sde*|", "r|/dev/sdf*|" ]
$ diff 41/usr/lib/modprobe.d/blacklist_pve-kernel-5.4.41-1-pve.conf 44/usr/lib/modprobe.d/blacklist_pve-kernel-5.4.44-1-pve.conf
16a17,18
> blacklist iTCO_vendor_support
> blacklist iTCO_wdt
23,24d24
< blacklist iTCO_vendor_support
< blacklist iTCO_wdt
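For reference, a minimal sketch of that unpack-and-diff workflow using unmkinitramfs from initramfs-tools (the directory names 41/ and 44/ are only examples):
Code:
mkdir 41 44
# unpack both initrd images into separate directories
unmkinitramfs initrd.img-5.4.41-1-pve 41/
unmkinitramfs initrd.img-5.4.44-1-pve 44/
# compare the two trees recursively
diff -r 41/ 44/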
That could very well be the cause in your case! Please keep us posted.
My lvm.conf is:
global_filter = [ "r|/dev/zd.*|", "r|/dev/mapper/pve-.*|" "r|/dev/mapper/.*-(vm|base)--[0-9]+--disk--[0-9]+|", "r|/dev/sda*|" ]
This is probably the issue - the global filter uses regular expressions and not shell globs:
"r|/dev/sda*|"
global_filter = [ "r|/dev/zd.*|", "r|/dev/mapper/pve-.*|" "r|/dev/mapper/.*-(vm|base)--[0-9]+--disk--[0-9]+|"]
update-initramfs -k 5.4.44-2-pve -u
I did something wrong, now both kernels hang with "pve not found". Maybe I have written something wrong :/
This is probably the issue - the global filter uses regular expressions and not shell globs:
`sda*` also matches `sd` and thus `sdb`
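You can see the difference with a quick grep test (just an illustration of the regex semantics, nothing LVM itself runs; the anchored pattern at the end is only a suggested way to reject a single disk):
Code:
# in a regex, "sda*" means "sd" followed by zero or more "a" characters,
# so it also matches every other sdX device
echo /dev/sdb | grep -E 'sda*'        # matches, prints /dev/sdb
# an anchored pattern only matches sda and its partitions
echo /dev/sdb | grep -E 'sda[0-9]*$'  # no match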
Try resetting the filter to the default and regenerate the initrd (only for the current 5.4.44-2 kernel, so that the old ones don't get overwritten with a potentially broken one):
Code:
global_filter = [ "r|/dev/zd.*|", "r|/dev/mapper/pve-.*|" "r|/dev/mapper/.*-(vm|base)--[0-9]+--disk--[0-9]+|"]
and reboot
Code:
update-initramfs -k 5.4.44-2-pve -u
It works!!!
Check out this link for activating the LV:
https://documentation.online.net/en/dedicated-server/rescue/mount-lvm-partition
checkout this for the necessary bindmounts and chrooting:
https://wiki.debian.org/RescueLive
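Roughly, those two pages boil down to something like this when run from the rescue system (a sketch only; the device names follow the lsblk output above and may differ on another machine):
Code:
# activate the LVM volume groups so /dev/mapper/pve-root shows up
vgscan
vgchange -ay
# mount the root LV and the EFI partition of the installed system
mount /dev/mapper/pve-root /mnt
mount /dev/sdb2 /mnt/boot/efi
# bind-mount the pseudo filesystems and chroot in
for d in dev proc sys run; do mount --bind /$d /mnt/$d; done
chroot /mnt /bin/bash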
Fix the global_filter line - then run 'pvs', 'vgs', 'lvs' to see whether the config is OK.
then run the update-initramfs command from above
I hope this helps!
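Put together, the check-and-rebuild inside the chroot could look like this (again only a sketch; the kernel version is the one from this thread):
Code:
# show the global_filter LVM is currently using
lvmconfig devices/global_filter
# if these list the pve and VMs volumes without errors, the config is OK
pvs && vgs && lvs
# rebuild the initrd for the affected kernel only, then reboot
update-initramfs -k 5.4.44-2-pve -u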
Why does the command work on the old kernel and not on the new one?
Could have many reasons - but did you run update-initramfs and reboot after changing /etc/lvm/lvm.conf? Because with this line as it stands, I doubt that the system would boot.