Hi, after a power loss, Proxmox shows the following error when I try to start the VM:
"TASK ERROR: activating LV ‘pve/vm-100-disk-0’ failed: device-mapper: reload ioctl on (253:7) failed: no data available’ "
Below are the command logs:
lvs -o name,metadata_percent,data_percent,chunk_size,size --all
Code:
LV Meta% Data% Chunk LSize
base-105-disk-0 0 40.00g
base-106-disk-0 0 40.00g
base-108-disk-0 0 32.00g
base-902-disk-0 0 40.00g
base-905-disk-0 0 32.00g
data 0.24 0.00 64.00k <5.31t
[data_tdata] 0 <5.31t
[data_tmeta] 0 15.81g
[lvol0_pmspare] 0 15.81g
root 0 96.00g
snap_vm-102-disk-0_first_test 0 32.00g
snap_vm-109-disk-0_BeforeOpenVasClear 0 64.00g
snap_vm-203-disk-0_snapshot-2023-10-12--11-50 0 40.00g
snap_vm-204-disk-0_snapshot-2023-10-12--11-50 0 40.00g
snap_vm-205-disk-0_snapshot-2023-10-12--11-51 0 40.00g
snap_vm-206-disk-0_snapshot-2023-10-12--11-52 0 40.00g
snap_vm-501-disk-0_Install 0 60.00g
snap_vm-502-disk-0_Install 0 4.00m
snap_vm-502-disk-1_Install 0 4.00m
snap_vm-502-disk-2_Install 0 64.00g
snap_vm-802-disk-0_boot_errror 0 4.00m
snap_vm-802-disk-1_boot_errror 0 96.00g
snap_vm-802-disk-2_boot_errror 0 4.00m
snap_vm-802-disk-3_boot_errror 0 512.00g
snap_vm-907-disk-0_ExtraTools 0 4.00m
snap_vm-907-disk-0_TiaPortal17NewInstallation 0 4.00m
snap_vm-907-disk-1_ExtraTools 0 4.00m
snap_vm-907-disk-1_TiaPortal17NewInstallation 0 4.00m
snap_vm-907-disk-2_ExtraTools 0 64.00g
snap_vm-907-disk-2_TiaPortal17NewInstallation 0 64.00g
swap 0 8.00g
vm-100-disk-0 0 32.00g
vm-101-disk-0 0 128.00g
vm-101-disk-1 0 1.50t
vm-102-disk-0 0 32.00g
vm-102-state-first_test 0 <16.49g
vm-104-disk-0 0 4.00m
vm-104-disk-1 0 4.00m
vm-104-disk-2 0 128.00g
vm-105-cloudinit 0 4.00m
vm-106-cloudinit 0 4.00m
vm-109-disk-0 0 64.00g
vm-110-disk-0 0 32.00g
vm-111-disk-0 0 64.00g
vm-112-disk-0 0 32.00g
vm-113-disk-0 0 32.00g
vm-114-disk-0 0 32.00g
vm-115-disk-0 0 32.00g
vm-201-disk-0 0 40.00g
vm-203-cloudinit 0 4.00m
vm-203-disk-0 0 40.00g
vm-203-state-snapshot-2023-10-12--11-50 0 <8.49g
vm-204-cloudinit 0 4.00m
vm-204-disk-0 0 40.00g
vm-204-state-snapshot-2023-10-12--11-50 0 <8.49g
vm-205-cloudinit 0 4.00m
vm-205-disk-0 0 40.00g
vm-205-state-snapshot-2023-10-12--11-51 0 <8.49g
vm-206-cloudinit 0 4.00m
vm-206-disk-0 0 40.00g
vm-206-state-snapshot-2023-10-12--11-52 0 <8.49g
vm-501-disk-0 0 60.00g
vm-501-state-Install 0 <16.49g
vm-502-disk-0 0 4.00m
vm-502-disk-1 0 4.00m
vm-502-disk-2 0 64.00g
vm-502-state-Install 0 <16.49g
vm-510-disk-0 0 50.00g
vm-601-disk-0 0 32.00g
vm-801-disk-0 0 64.00g
vm-802-disk-0 0 4.00m
vm-802-disk-1 0 96.00g
vm-802-disk-2 0 4.00m
vm-802-disk-3 0 536.00g
vm-906-disk-0 0 4.00m
vm-906-disk-1 0 4.00m
vm-906-disk-2 0 64.00g
vm-907-disk-0 0 4.00m
vm-907-disk-1 0 4.00m
vm-907-disk-2 0 64.00g
vm-915-disk-0 0 32.00g
vm-916-disk-0 0 32.00g
vm-999-disk-0 0.00 0 10.00g
lsblk -a
Code:
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
loop0 7:0 0 0B 0 loop
loop1 7:1 0 0B 0 loop
loop2 7:2 0 0B 0 loop
loop3 7:3 0 0B 0 loop
loop4 7:4 0 0B 0 loop
loop5 7:5 0 0B 0 loop
loop6 7:6 0 0B 0 loop
loop7 7:7 0 0B 0 loop
sda 8:0 0 5.5T 0 disk
├─sda1 8:1 0 1007K 0 part
├─sda2 8:2 0 1G 0 part
└─sda3 8:3 0 5.5T 0 part
├─pve-swap 253:0 0 8G 0 lvm [SWAP]
├─pve-root 253:1 0 96G 0 lvm /
├─pve-data_tmeta 253:2 0 15.8G 0 lvm
│ └─pve-data-tpool 253:4 0 5.3T 0 lvm
│ ├─pve-data 253:5 0 5.3T 1 lvm
│ └─pve-vm--999--disk--0 253:6 0 10G 0 lvm
└─pve-data_tdata 253:3 0 5.3T 0 lvm
└─pve-data-tpool 253:4 0 5.3T 0 lvm
├─pve-data 253:5 0 5.3T 1 lvm
└─pve-vm--999--disk--0 253:6 0 10G 0 lvm
sdb 8:16 0 2.7T 0 disk
└─sdb1 8:17 0 2.7T 0 part /mnt/pve/DIR01
Code:
root@host1:/# thin_check /dev/mapper/pve-data
examining superblock
superblock is corrupt
bad checksum in superblock, wanted 487673258
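Before attempting any repair, it seems sensible to take a raw, read-only copy of the (possibly damaged) pool metadata so the current state can still be inspected or restored later. A sketch only, assuming the hidden metadata sub-LV keeps the mapper name shown as 253:2 in the lsblk output above and that the destination filesystem has roughly 16G free:
Code:
# Read-only precaution: copy the thin-pool metadata sub-LV before touching anything.
# /dev/mapper/pve-data_tmeta is the 15.8G hidden metadata LV from lsblk above;
# the destination file name is arbitrary.
dd if=/dev/mapper/pve-data_tmeta of=/root/pve-data_tmeta.bak bs=1M status=progress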
Code:
root@host1:/# lvchange -a n pve/data
root@host1:/# lvconvert --repair pve/data
Active pools cannot be repaired. Use lvchange -an first.
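For reference, the repair path described in lvmthin(7) is to fully deactivate the pool (and any thin LVs still mapped) and then let lvconvert --repair rebuild the metadata into the spare (pmspare) LV. A sketch only, using the volume names from the output above; the pool apparently still counts as active here, which is presumably why the command was refused:
Code:
# Check which device-mapper targets for the pool and its thin LVs are still present
dmsetup ls | grep pve
# Deactivate the thin pool (and any thin LVs that are still active)
lvchange -an pve/data
# With everything inactive, rebuild the metadata into the spare (pmspare) LV
lvconvert --repair pve/data
# Re-activate and check the result
lvchange -ay pve/data
lvs -a pve
If the repair succeeds, the old damaged metadata is kept in a new LV (named something like data_meta0), so it can still be examined afterwards.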
Machine vm-999 was created after the whole incident and works correctly; the problem only affects machines created before it.
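In case more detail is useful, this is how the error can be reproduced on demand for one of the affected disks, together with the kernel-side device-mapper messages (vm-100 is just one example of an affected volume):
Code:
# Try to activate one affected thin LV with verbose output
lvchange -ay -v pve/vm-100-disk-0
# Pull the matching device-mapper messages from the kernel log
journalctl -k | grep -i device-mapper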