Proxmox 5.4: my virtual machine can only configure these 4 PCIe GPU devices. I replaced
/usr/share/perl5/PVE/QemuServer/PCI.pm
and /usr/share/perl5/PVE/QemuServer.pm
of my Proxmox installation using the corresponding files from the Git repository (master @ 6cb7b041cec6220b1b105f2b2f22a38216f7e110). TASK ERROR: start failed: command '/usr/bin/kvm -id 101 -name test -chardev 'socket,id=qmp,path=/var/run/qemu-server/101.qmp,server,nowait' -mon 'chardev=qmp,mode=control' -chardev 'socket,id=qmp-event,path=/var/run/qmeventd.sock,reconnect=5' -mon 'chardev=qmp-event,mode=control' -pidfile /var/run/qemu-server/101.pid -daemonize -smbios 'type=1,uuid=4308a1ec-7f0e-413d-8d68-44c94a1704b3' -drive 'if=pflash,unit=0,format=raw,readonly,file=/usr/share/pve-edk2-firmware//OVMF_CODE.fd' -drive 'if=pflash,unit=1,format=raw,id=drive-efidisk0,file=/dev/zvol/rpool/data/vm-101-disk-1' -smp '16,sockets=1,cores=16,maxcpus=16' -nodefaults -boot 'menu=on,strict=on,reboot-timeout=1000,splash=/usr/share/qemu-server/bootsplash.jpg' -vga none -nographic -cpu 'host,-md-clear,-pcid,-spec-ctrl,-ibpb,-virt-ssbd,-amd-ssbd,-amd-no-ssb,+pdpe1gb,-hv-tlbflush,+kvm_pv_unhalt,+kvm_pv_eoi,kvm=off' -m 98304 -device 'vmgenid,guid=9391eca2-6181-4ca3-be2c-97588e840eb9' -readconfig /usr/share/qemu-server/pve-q35-4.0.cfg -device 'usb-tablet,id=tablet,bus=ehci.0,port=1' -device 'vfio-pci,host=1b:00.0,id=hostpci0.0,bus=ich9-pcie-port-1,addr=0x0.0,multifunction=on' -device 'vfio-pci,host=1b:00.1,id=hostpci0.1,bus=ich9-pcie-port-1,addr=0x0.1' -device 'vfio-pci,host=1c:00.0,id=hostpci1.0,bus=ich9-pcie-port-2,addr=0x0.0,multifunction=on' -device 'vfio-pci,host=1c:00.1,id=hostpci1.1,bus=ich9-pcie-port-2,addr=0x0.1' -device 'vfio-pci,host=1d:00.0,id=hostpci2.0,bus=ich9-pcie-port-3,addr=0x0.0,multifunction=on' -device 'vfio-pci,host=1d:00.1,id=hostpci2.1,bus=ich9-pcie-port-3,addr=0x0.1' -device 'vfio-pci,host=1e:00.0,id=hostpci3.0,bus=ich9-pcie-port-4,addr=0x0.0,multifunction=on' -device 'vfio-pci,host=1e:00.1,id=hostpci3.1,bus=ich9-pcie-port-4,addr=0x0.1' -device 'pcie-root-port,id=ich9-pcie-port-5,addr=10.0,x-speed=16,x-width=32,multifunction=on,bus=pcie.0,port=5,chassis=5' 
-device 'vfio-pci,host=3d:00.0,id=hostpci4.0,bus=ich9-pcie-port-5,addr=0x0.0,multifunction=on' -device 'vfio-pci,host=3d:00.1,id=hostpci4.1,bus=ich9-pcie-port-5,addr=0x0.1' -device 'vfio-pci,host=3d:00.2,id=hostpci4.2,bus=ich9-pcie-port-5,addr=0x0.2' -device 'vfio-pci,host=3d:00.3,id=hostpci4.3,bus=ich9-pcie-port-5,addr=0x0.3' -chardev 'socket,path=/var/run/qemu-server/101.qga,server,nowait,id=qga0' -device 'virtio-serial,id=qga0,bus=pci.0,addr=0x8' -device 'virtserialport,chardev=qga0,name=org.qemu.guest_agent.0' -device 'virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x3' -iscsi 'initiator-name=iqn.1993-08.org.debian:01:6675b9c82fb6' -drive 'file=/var/lib/vz/template/iso/ubuntu-18.04.3-live-server-amd64.iso,if=none,id=drive-ide2,media=cdrom,aio=threads' -device 'ide-cd,bus=ide.1,unit=0,drive=drive-ide2,id=ide2,bootindex=200' -device 'virtio-scsi-pci,id=scsihw0,bus=pci.0,addr=0x5' -drive 'file=/dev/zvol/rpool/data/vm-101-disk-0,if=none,id=drive-scsi0,cache=directsync,format=raw,aio=native,detect-zeroes=on' -device 'scsi-hd,bus=scsihw0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0,id=scsi0,bootindex=100' -netdev 'type=tap,id=net0,ifname=tap101i0,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on' -device 'virtio-net-pci,mac=0A:04:96:2A:99:E5,netdev=net0,bus=pci.0,addr=0x12,id=net0,bootindex=300' -machine 'type=q35'' failed: got timeout
agent: 1
bios: ovmf
bootdisk: scsi0
cores: 16
cpu: host,flags=-md-clear;-pcid;-spec-ctrl;-ibpb;-virt-ssbd;-amd-ssbd;-amd-no-ssb;+pdpe1gb;-hv-tlbflush
efidisk0: local-zfs:vm-101-disk-1,size=128K
ide2: local:iso/ubuntu-18.04.3-live-server-amd64.iso,media=cdrom
machine: q35
memory: 98304
name: test
net0: virtio=0A:04:96:2A:99:E5,bridge=vmbr1
numa: 0
ostype: l26
scsi0: local-zfs:vm-101-disk-0,cache=directsync,size=1T
scsihw: virtio-scsi-pci
smbios1: uuid=4308a1ec-7f0e-413d-8d68-44c94a1704b3
sockets: 1
vmgenid: 9391eca2-6181-4ca3-be2c-97588e840eb9
hostpci0: 1b:00,pcie=1,x-vga=1
hostpci1: 1c:00,pcie=1
hostpci2: 1d:00,pcie=1
hostpci3: 1e:00,pcie=1
hostpci4: 3d:00,pcie=1
auto lo
iface lo inet loopback
iface eno0 inet manual
iface ens4f1 inet manual
auto vmbr0
iface vmbr0 inet static
address <ip1>
netmask 255.255.255.0
gateway <ip2>
bridge-ports eno0
bridge-stp off
bridge-fd 0
auto vmbr1
iface vmbr1 inet static
address 192.168.0.1
netmask 24
bridge-ports none
bridge-stp off
bridge-fd 0
post-up echo 1 > /proc/sys/net/ipv4/ip_forward
post-up iptables -t nat -A POSTROUTING -s '192.168.0.0/24' -o vmbr0 -j MASQUERADE
post-down iptables -t nat -D POSTROUTING -s '192.168.0.0/24' -o vmbr0 -j MASQUERADE
post-up iptables -t nat -A PREROUTING -i vmbr0 -p tcp --dport 1000 -j DNAT --to 192.168.0.2:22
post-down iptables -t nat -D PREROUTING -i vmbr0 -p tcp --dport 1000 -j DNAT --to 192.168.0.2:22
Check the IOMMU Groups column. Your problem sounds a lot like one where the problematic GPU and your main network interface are in the same group. You can only pass through the whole group, not an individual device of the same group. I have succeeded in passing through up to 4 GPUs to a VM (Ubuntu Server 18.04.3), but trying to pass through a 5th GPU results in network problems.
The guest's network interface name enp6s18
changed to enp10s18.
Since netplan had been configured for enp6s18
during the installation with 0-4 GPUs, networking no longer worked when booting up with 4+ GPUs and the resulting unassigned enp10s18
network adapter. This can be fixed by pinning the interface name with the match,
macaddress,
and set-name
properties in the netplan configuration (see example). We use essential cookies to make this site work, and optional cookies to enhance your experience.