Hi,
I've deleted two VMs (VMIDs 199 and 201), but their disk devices are still present.
root@pm-01:~# pvesm list vg-cluster01-storage01|grep vm-201
vg-cluster01-storage01:vm-201-disk-1 raw 106300440576 201
root@pm-01:~# pvesm list vg-cluster01-storage01|grep vm-199
vg-cluster01-storage01:vm-199-disk-1 raw 106300440576 199
root@pm-01:~# ls /dev/mapper/*201*
/dev/mapper/vg--cluster01--storage01-vm--201--disk--1
root@pm-01:~# ls /dev/mapper/*199*
/dev/mapper/vg--cluster01--storage01-vm--199--disk--1
root@pm-01:~# cd /etc/pve/
The VM config is gone:
root@pm-01:/etc/pve# find . -iname 199.conf
root@pm-01:/etc/pve# find . -iname 201.conf
root@pm-01:/etc/pve#
lsblk:
├─vg--cluster01--storage01-vm--199--disk--1 253:107 0 99G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a_tmeta 253:180 0 52M 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a-tpool 253:182 0 10G 0 lvm
│ │ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a 253:183 0 10G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_1159251d5cd1268c47a53f70593b566b 253:184 0 10G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a_tdata 253:181 0 10G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a-tpool 253:182 0 10G 0 lvm
│ │ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a 253:183 0 10G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_1159251d5cd1268c47a53f70593b566b 253:184 0 10G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5_tmeta 253:185 0 256M 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5-tpool 253:187 0 50G 0 lvm
│ │ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5 253:188 0 50G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_b48f654cbb990227fe5a4d77844e3ab5 253:189 0 50G 0 lvm
│ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5_tdata 253:186 0 50G 0 lvm
│ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5-tpool 253:187 0 50G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5 253:188 0 50G 0 lvm
│ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_b48f654cbb990227fe5a4d77844e3ab5 253:189 0 50G 0 lvm
├─vg--cluster01--storage01-vm--201--disk--1 253:108 0 99G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55_tmeta 253:170 0 52M 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55-tpool 253:172 0 10G 0 lvm
│ │ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55 253:173 0 10G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8ca7e9904657bd7da00182f974341d55 253:174 0 10G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55_tdata 253:171 0 10G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55-tpool 253:172 0 10G 0 lvm
│ │ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55 253:173 0 10G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8ca7e9904657bd7da00182f974341d55 253:174 0 10G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20_tmeta 253:175 0 256M 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20-tpool 253:177 0 50G 0 lvm
│ │ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20 253:178 0 50G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8cc33cc7b577c0f7c03960a49801ec20 253:179 0 50G 0 lvm
│ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20_tdata 253:176 0 50G 0 lvm
│ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20-tpool 253:177 0 50G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20 253:178 0 50G 0 lvm
│ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8cc33cc7b577c0f7c03960a49801ec20 253:179 0 50G 0 lvm
In the past it helped to run
dmsetup remove <device>
But for these two devices, dmsetup remove reports they are still in use — and they are not configured as a shared disk on any other VM.
root@pm-01:/etc/pve# find . -iname ???.conf -exec grep vm-199 {} \;
root@pm-01:/etc/pve# find . -iname ???.conf -exec grep vm-201 {} \;
Those were GlusterFS test VMs ...
What's the best way to get rid of those? pvesm remove vg-cluster01-storage01:vm-201-disk-1?
Thanks & Cheers,
Daniel
I've deleted two VMs (VMIDs 199 and 201), but their disk devices are still present.
root@pm-01:~# pvesm list vg-cluster01-storage01|grep vm-201
vg-cluster01-storage01:vm-201-disk-1 raw 106300440576 201
root@pm-01:~# pvesm list vg-cluster01-storage01|grep vm-199
vg-cluster01-storage01:vm-199-disk-1 raw 106300440576 199
root@pm-01:~# ls /dev/mapper/*201*
/dev/mapper/vg--cluster01--storage01-vm--201--disk--1
root@pm-01:~# ls /dev/mapper/*199*
/dev/mapper/vg--cluster01--storage01-vm--199--disk--1
root@pm-01:~# cd /etc/pve/
The VM config is gone:
root@pm-01:/etc/pve# find . -iname 199.conf
root@pm-01:/etc/pve# find . -iname 201.conf
root@pm-01:/etc/pve#
lsblk:
├─vg--cluster01--storage01-vm--199--disk--1 253:107 0 99G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a_tmeta 253:180 0 52M 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a-tpool 253:182 0 10G 0 lvm
│ │ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a 253:183 0 10G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_1159251d5cd1268c47a53f70593b566b 253:184 0 10G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a_tdata 253:181 0 10G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a-tpool 253:182 0 10G 0 lvm
│ │ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_5a94a981e87233b582c437aab1b7e55a 253:183 0 10G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_1159251d5cd1268c47a53f70593b566b 253:184 0 10G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5_tmeta 253:185 0 256M 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5-tpool 253:187 0 50G 0 lvm
│ │ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5 253:188 0 50G 0 lvm
│ │ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_b48f654cbb990227fe5a4d77844e3ab5 253:189 0 50G 0 lvm
│ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5_tdata 253:186 0 50G 0 lvm
│ └─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5-tpool 253:187 0 50G 0 lvm
│ ├─vg_4a9d834009d319ddb8e76c38f1ad6800-tp_b48f654cbb990227fe5a4d77844e3ab5 253:188 0 50G 0 lvm
│ └─vg_4a9d834009d319ddb8e76c38f1ad6800-brick_b48f654cbb990227fe5a4d77844e3ab5 253:189 0 50G 0 lvm
├─vg--cluster01--storage01-vm--201--disk--1 253:108 0 99G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55_tmeta 253:170 0 52M 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55-tpool 253:172 0 10G 0 lvm
│ │ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55 253:173 0 10G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8ca7e9904657bd7da00182f974341d55 253:174 0 10G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55_tdata 253:171 0 10G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55-tpool 253:172 0 10G 0 lvm
│ │ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8ca7e9904657bd7da00182f974341d55 253:173 0 10G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8ca7e9904657bd7da00182f974341d55 253:174 0 10G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20_tmeta 253:175 0 256M 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20-tpool 253:177 0 50G 0 lvm
│ │ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20 253:178 0 50G 0 lvm
│ │ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8cc33cc7b577c0f7c03960a49801ec20 253:179 0 50G 0 lvm
│ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20_tdata 253:176 0 50G 0 lvm
│ └─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20-tpool 253:177 0 50G 0 lvm
│ ├─vg_7fecd21df3064ad10ae45bab13776110-tp_8cc33cc7b577c0f7c03960a49801ec20 253:178 0 50G 0 lvm
│ └─vg_7fecd21df3064ad10ae45bab13776110-brick_8cc33cc7b577c0f7c03960a49801ec20 253:179 0 50G 0 lvm
In the past it helped to run
dmsetup remove <device>
But for these two devices, dmsetup remove reports they are still in use — and they are not configured as a shared disk on any other VM.
root@pm-01:/etc/pve# find . -iname ???.conf -exec grep vm-199 {} \;
root@pm-01:/etc/pve# find . -iname ???.conf -exec grep vm-201 {} \;
Those were GlusterFS test VMs ...
What's the best way to get rid of those? pvesm remove vg-cluster01-storage01:vm-201-disk-1?
Thanks & Cheers,
Daniel