VM starting error if conf has a lot of entries since latest update

Erik_ch

Renowned Member
Jul 3, 2013
10
1
68
www.guggach.com
We have a VM with 16 network interfaces. Therefore the entry list has a lot of characters. With this VM we never had an issue. Since the last update to the current version (see below) the VM stops starting with the error "failed: got timeout". If I delete some networks or another line (e.g. ide2 CD) then it starts. I assume that the starting string is too long. If I start the VM on another node with an older version, it works. I can even migrate the running VM to the node with the new version.

Does anybody else have the same issue? What could be the reason?

Regards,
Enrica

Here is my log:

TASK ERROR: start failed: command '/usr/bin/kvm -id 1202 -name 'Firewall1,debug-threads=on' -no-shutdown -chardev 'socket,id=qmp,path=/var/run/qemu-server/1202.qmp,server=on,wait=off' -mon 'chardev=qmp,mode=control' -chardev 'socket,id=qmp-event,path=/var/run/qmeventd.sock,reconnect=5' -mon 'chardev=qmp-event,mode=control' -pidfile /var/run/qemu-server/1202.pid -daemonize -smbios 'type=1,uuid=c7739cc8-e06a-4fe1-a565-2f6c2078ab94' -smp '4,sockets=1,cores=4,maxcpus=4' -nodefaults -boot 'menu=on,strict=on,reboot-timeout=1000,splash=/usr/share/qemu-server/bootsplash.jpg' -vnc 'unix:/var/run/qemu-server/1202.vnc,password=on' -cpu qemu64,+aes,enforce,+kvm_pv_eoi,+kvm_pv_unhalt,+pni,+popcnt,+sse4.1,+sse4.2,+ssse3 -m 4096 -object 'iothread,id=iothread-virtio0' -device 'pci-bridge,id=pci.1,chassis_nr=1,bus=pci.0,addr=0x1e' -device 'pci-bridge,id=pci.2,chassis_nr=2,bus=pci.0,addr=0x1f' -device 'vmgenid,guid=8fa53b44-065c-4125-a04f-7d7c50b74995' -device 'piix3-usb-uhci,id=uhci,bus=pci.0,addr=0x1.0x2' -device 'usb-tablet,id=tablet,bus=uhci.0,port=1' -device 'VGA,id=vga,bus=pci.0,addr=0x2' -device 'virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x3,free-page-reporting=on' -iscsi 'initiator-name=iqn.1993-08.org.debian:01:486e5e47ce9d' -drive 'file=/mnt/pve/img-nas1/template/iso/pfSense-CE-2.7.2-RELEASE-amd64.iso,if=none,id=drive-ide2,media=cdrom,aio=io_uring' -device 'ide-cd,bus=ide.1,unit=0,drive=drive-ide2,id=ide2,bootindex=101' -drive 'file=/dev/zvol/rpool/data/vm-1202-disk-0,if=none,id=drive-virtio0,format=raw,cache=none,aio=io_uring,detect-zeroes=on' -device 'virtio-blk-pci,drive=drive-virtio0,id=virtio0,bus=pci.0,addr=0xa,iothread=iothread-virtio0,bootindex=100' -netdev 'type=tap,id=net0,ifname=tap1202i0,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:C8:10:00,netdev=net0,bus=pci.0,addr=0x12,id=net0,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256,bootindex=102' 
-netdev 'type=tap,id=net1,ifname=tap1202i1,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:0F:00:92,netdev=net1,bus=pci.0,addr=0x13,id=net1,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net2,ifname=tap1202i2,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:09:00:10,netdev=net2,bus=pci.0,addr=0x14,id=net2,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net3,ifname=tap1202i3,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:E2:00:11,netdev=net3,bus=pci.0,addr=0x15,id=net3,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net4,ifname=tap1202i4,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:BE:00:20,netdev=net4,bus=pci.0,addr=0x16,id=net4,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net5,ifname=tap1202i5,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:81:00:30,netdev=net5,bus=pci.0,addr=0x17,id=net5,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256,host_mtu=1500' -netdev 'type=tap,id=net6,ifname=tap1202i6,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:BC:00:40,netdev=net6,bus=pci.1,addr=0x1,id=net6,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net7,ifname=tap1202i7,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 
'virtio-net-pci,mac=BC:24:12:AE:00:50,netdev=net7,bus=pci.1,addr=0x2,id=net7,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net8,ifname=tap1202i8,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:FA:00:60,netdev=net8,bus=pci.1,addr=0x3,id=net8,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net9,ifname=tap1202i9,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:04:00:70,netdev=net9,bus=pci.1,addr=0x4,id=net9,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net10,ifname=tap1202i10,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:60:00:80,netdev=net10,bus=pci.1,addr=0x5,id=net10,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net11,ifname=tap1202i11,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:06:00:90,netdev=net11,bus=pci.1,addr=0x6,id=net11,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256,host_mtu=9000' -netdev 'type=tap,id=net12,ifname=tap1202i12,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:01:00:98,netdev=net12,bus=pci.1,addr=0x7,id=net12,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net13,ifname=tap1202i13,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:E1:00:99,netdev=net13,bus=pci.1,addr=0x8,id=net13,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 
'type=tap,id=net14,ifname=tap1202i14,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:E5:00:00,netdev=net14,bus=pci.1,addr=0x9,id=net14,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -netdev 'type=tap,id=net15,ifname=tap1202i15,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on,queues=8' -device 'virtio-net-pci,mac=BC:24:12:12:00:01,netdev=net15,bus=pci.1,addr=0xa,id=net15,vectors=18,mq=on,packed=on,rx_queue_size=1024,tx_queue_size=256' -machine 'type=pc+pve0'' failed: got timeout

Here is my vm.conf content:
boot: order=virtio0;ide2;net0
cores: 4
cpu: x86-64-v2-AES
ide2: img-nas1:iso/pfSense-CE-2.7.2-RELEASE-amd64.iso,media=cdrom,size=854172K
memory: 4096
meta: creation-qemu=9.0.2,ctime=1730992215
name: Firewall1
net0: virtio=BC:24:12:C8:10:00,bridge=vmbr0,queues=8
net1: virtio=BC:24:12:0F:00:92,bridge=vmbr0,queues=8,tag=92
net10: virtio=BC:24:12:60:00:80,bridge=vmbr0,queues=8,tag=80
net11: virtio=BC:24:12:06:00:90,bridge=vmbr90,mtu=1,queues=8
net12: virtio=BC:24:12:01:00:98,bridge=vmbr98,queues=8
net13: virtio=BC:24:12:E1:00:99,bridge=vmbr0,queues=8,tag=99
net14: virtio=BC:24:12:E5:00:00,bridge=vmbr1,queues=8
net15: virtio=BC:24:12:12:00:01,bridge=vmbr2,queues=8
net16: virtio=BC:24:12:A5:C8:07,bridge=vmbr3,queues=8
net2: virtio=BC:24:12:09:00:10,bridge=vmbr10,queues=8
net3: virtio=BC:24:12:E2:00:11,bridge=vmbr0,queues=8,tag=11
net4: virtio=BC:24:12:BE:00:20,bridge=vmbr0,queues=8,tag=20
net5: virtio=BC:24:12:81:00:30,bridge=vmbr30,mtu=1,queues=8
net6: virtio=BC:24:12:BC:00:40,bridge=vmbr0,queues=8,tag=40
net7: virtio=BC:24:12:AE:00:50,bridge=vmbr0,queues=8,tag=50
net8: virtio=BC:24:12:FA:00:60,bridge=vmbr60,queues=8
net9: virtio=BC:24:12:04:00:70,bridge=vmbr0,queues=8,tag=70
numa: 0
ostype: l26
scsihw: virtio-scsi-pci
smbios1: uuid=c7739cc8-e06a-4fe1-a565-2f6c2078ab94
sockets: 1
virtio0: local-zfs:vm-1202-disk-0,iothread=1,size=40G
vmgenid: 8fa53b44-065c-4125-a04f-7d7c50b74995

Version list:
proxmox-ve: 8.2.0 (running kernel: 6.8.12-3-pve)
pve-manager: 8.2.7 (running version: 8.2.7/3e0176e6bb2ade3b)
proxmox-kernel-helper: 8.1.0
pve-kernel-6.2: 8.0.5
proxmox-kernel-6.8: 6.8.12-3
proxmox-kernel-6.8.12-3-pve-signed: 6.8.12-3
proxmox-kernel-6.8.12-2-pve-signed: 6.8.12-2
proxmox-kernel-6.5.13-6-pve-signed: 6.5.13-6
proxmox-kernel-6.5: 6.5.13-6
proxmox-kernel-6.2.16-20-pve: 6.2.16-20
proxmox-kernel-6.2: 6.2.16-20
pve-kernel-6.2.16-3-pve: 6.2.16-3
ceph-fuse: 17.2.7-pve3
corosync: 3.1.7-pve3
criu: 3.17.1-2
dnsmasq: 2.89-1
frr-pythontools: 8.5.2-1+pve1
glusterfs-client: 10.3-5
ifupdown2: 3.2.0-1+pmx9
intel-microcode: 3.20240813.1~deb12u1
ksm-control-daemon: 1.5-1
libjs-extjs: 7.0.0-4
libknet1: 1.28-pve1
libproxmox-acme-perl: 1.5.1
libproxmox-backup-qemu0: 1.4.1
libproxmox-rs-perl: 0.3.4
libpve-access-control: 8.1.4
libpve-apiclient-perl: 3.3.2
libpve-cluster-api-perl: 8.0.8
libpve-cluster-perl: 8.0.8
libpve-common-perl: 8.2.5
libpve-guest-common-perl: 5.1.4
libpve-http-server-perl: 5.1.2
libpve-network-perl: 0.9.8
libpve-rs-perl: 0.8.10
libpve-storage-perl: 8.2.5
libspice-server1: 0.15.1-1
lvm2: 2.03.16-2
lxc-pve: 6.0.0-1
lxcfs: 6.0.0-pve2
novnc-pve: 1.4.0-4
openvswitch-switch: 3.1.0-2+deb12u1
proxmox-backup-client: 3.2.7-1
proxmox-backup-file-restore: 3.2.7-1
proxmox-firewall: 0.5.0
proxmox-kernel-helper: 8.1.0
proxmox-mail-forward: 0.2.3
proxmox-mini-journalreader: 1.4.0
proxmox-widget-toolkit: 4.2.4
pve-cluster: 8.0.8
pve-container: 5.2.0
pve-docs: 8.2.3
pve-edk2-firmware: 4.2023.08-4
pve-esxi-import-tools: 0.7.2
pve-firewall: 5.0.7
pve-firmware: 3.14-1
pve-ha-manager: 4.0.5
pve-i18n: 3.2.4
pve-qemu-kvm: 9.0.2-3
pve-xtermjs: 5.3.0-3
qemu-server: 8.2.4
smartmontools: 7.3-pve1
spiceterm: 3.3.0
swtpm: 0.8.0+pve1
vncterm: 1.8.0
zfsutils-linux: 2.2.6-pve1
 
Last edited:
Hi, I have the same problem mentioned by @Erik_ch.
I have a machine with 25 network interfaces.
I tried to start the VM with the --timeout 300 parameter and the VM starts correctly.

The VM needs about 45 seconds to start.
 
Is it possible to modify the default timeout (30 seconds) used to start a virtual machine directly from the Web GUI?
Or to set the parameter directly in the individual virtual machine's config file?
 
Is it possible to modify the default timeout (30 seconds) used to start a virtual machine directly from the Web GUI?
Or to set the parameter directly in the individual virtual machine's config file?
Unfortunately, no. There was a proposed patch series for such an option, but it didn't land yet, so currently there is no config or UI option.
 
I tried to start with a timeout of 300s as Fiona proposed. The VM needed 42 seconds to start. As Comune di Levico proposed, it would be good to extend the standard timeout or let us set a timeout property. Thank you very much for your hint.