Hi
we are running ceph rbd (squid 19.2.0) in a cluster setup with 5 nodes ( pve-8.3.3)
However, when testing the snapshot through the CLI, we could not do a rollback, while it seems to work via the GUI.
root@pve1-me:~# ceph version
ceph version 19.2.0 (3815e3391b18c593539df6fa952c9f45c37ee4d0) squid (stable)
root@pve1-me:~# pveversion
pve-manager/8.3.3/f157a38b211595d6 (running kernel: 6.8.12-6-pve)
root@pve1-me:~# rbd snap create pmoxpool01/vm-100-disk-0@snap_20250130_1
Creating snap: 100% complete...done.
root@pve1-me:~# rbd ls -l -p pmoxpool01
NAME SIZE PARENT FMT PROT LOCK
vm-100-disk-0 30 GiB 2 excl
vm-100-disk-0@snap_20250130 30 GiB 2
vm-100-disk-0@snap_20250130_1 30 GiB 2
root@pve1-me:~# rbd snap rollback pmoxpool01/vm-100-disk-0@snap_20250130_1
Rolling back to snapshot: 0% complete...failed.
rbd: rollback failed: (30) Read-only file system
The snapshot created via the CLI does not appear in '/etc/pve/qemu-server/100.conf':
root@pve1-me:~# cat /etc/pve/qemu-server/100.conf
boot: order=scsi0;ide2;net0
cores: 2
cpu: x86-64-v2-AES
ide2: none,media=cdrom
machine: q35
memory: 4096
meta: creation-qemu=9.0.2,ctime=1738159040
name: tommie
net0: virtio=BC:24:11:32:51:C6,bridge=vmbr0v249
numa: 0
ostype: l26
parent: snap_20250130
scsi0: pmoxpool01:vm-100-disk-0,discard=on,mbps_wr=20,size=30G,ssd=1
scsihw: virtio-scsi-pci
smbios1: uuid=75333f37-e91a-4894-afad-66a6a47460bc
sockets: 1
unused0: VM-VMware:100/vm-100-disk-0.vmdk
vmgenid: a9ebfdb6-d068-4df3-b324-4b4cb6b60f5a
[snap_20250130]
boot: order=scsi0;ide2;net0
cores: 2
cpu: x86-64-v2-AES
ide2: none,media=cdrom
machine: q35
memory: 4096
meta: creation-qemu=9.0.2,ctime=1738159040
name: tommie
net0: virtio=BC:24:11:32:51:C6,bridge=vmbr0v249
numa: 0
ostype: l26
scsi0: pmoxpool01:vm-100-disk-0,discard=on,mbps_wr=20,size=30G,ssd=1
scsihw: virtio-scsi-pci
smbios1: uuid=75333f37-e91a-4894-afad-66a6a47460bc
snaptime: 1738241169
sockets: 1
vmgenid: d0ff4518-c326-4eb2-84f0-96b802740c70