I have a problem with IPv6 networking on my guest VMs. The VM shows this message on boot, or when I bring the interface up:

IPv6: ens18: IPv6 duplicate address [link local address] detected!
My googling led me to a thread claiming that bonding with the round-robin method can cause this problem, but I do not use round-robin: my bonds run bond_mode 2 (balance-xor), while round-robin would be bond_mode 0 (balance-rr).
I also found a workaround where you disable DAD on the guest interface, but that is obviously not good enough, and it will not scale in a larger cluster.
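For reference, the workaround is the per-interface accept_dad sysctl inside the guest (ens18 is just the interface name from my example guest):

  # disable IPv6 duplicate address detection on this guest interface
  sysctl -w net.ipv6.conf.ens18.accept_dad=0

Doing that in every single guest is exactly the part that does not scale.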
I think my networking is pretty standard for a Proxmox node: two 10 Gbps NICs in a bond for management HA, and two 10 Gbps NICs in a bond for VM traffic. On the VM bond sits a VLAN-aware bridge, and each VM sets the tag on its own NIC to land in the right VLAN; adding ~3000 bridges is not desired. IPv4 works as expected, but IPv4 is not as critical as IPv6 nowadays. I see this problem on several nodes, using different NICs and drivers, so I do not think it is limited to a specific host NIC.
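To illustrate the tagging, a guest NIC line in the VM config looks like this (MAC and tag are made-up examples):

  net0: virtio=DE:AD:BE:EF:12:34,bridge=vmbr1,tag=123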
Proxmox is the latest version, running from the pve-enterprise repo.
Is it possible to configure the bond to avoid the IPv6 DAD issue on the guests?
Is there some network redesign that can provide HA and avoid the IPv6 DAD issue?
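For example, would moving the VM bond from balance-xor to LACP be enough to stop duplicated frames? A sketch of what I have in mind, assuming the switches support 802.3ad (which I have not verified on our gear):

  auto bond0
  iface bond0 inet manual
      slaves eno1 eno2
      bond_miimon 100
      bond_mode 802.3ad   # LACP instead of balance-xor (mode 2);
                          # active-backup (mode 1) would be the
                          # switch-independent alternative
      mtu 9000
  #vm bond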
Kind regards,
Ronny Aasen
-- info --
# cat /etc/network/interfaces
auto lo
iface lo inet loopback
iface eno1 inet manual
iface eno2 inet manual
iface eno3 inet manual
iface eno4 inet manual
auto bond0
iface bond0 inet manual
slaves eno1 eno2
bond_miimon 100
bond_mode 2
mtu 9000
#vm bond
auto bond1
iface bond1 inet manual
slaves eno3 eno4
bond_miimon 100
bond_mode 2
mtu 9000
#management bond
auto br0
iface br0 inet manual
bridge_ports bond1
bridge_stp off
bridge_fd 0
bridge_vlan_aware yes
mtu 9000
bridge_maxwait 0
bridge_maxage 0
bridge_ageing 0
#mgmt bridge
auto br0.1000
iface br0.1000 inet static
address 10.10.0.25
netmask 255.255.255.0
gateway 10.10.0.1
mtu 1500
#mgmt interface
auto br0.1010
iface br0.1010 inet static
address 10.10.10.25
netmask 255.255.255.0
mtu 1500
#ceph access interface
auto vmbr1
iface vmbr1 inet manual
bridge_ports bond0
bridge_stp off
bridge_fd 0
bridge_vlan_aware yes
mtu 9000
bridge_maxwait 0
bridge_maxage 0
bridge_ageing 0
#vlan-trunk for VM network
# pveversion --verbose
proxmox-ve: 5.2-2 (running kernel: 4.15.18-1-pve)
pve-manager: 5.2-6 (running version: 5.2-6/bcd5f008)
pve-kernel-4.15: 5.2-4
pve-kernel-4.13: 5.2-2
pve-kernel-4.15.18-1-pve: 4.15.18-17
pve-kernel-4.15.17-2-pve: 4.15.17-10
pve-kernel-4.13.16-4-pve: 4.13.16-51
pve-kernel-4.13.16-3-pve: 4.13.16-50
pve-kernel-4.13.16-1-pve: 4.13.16-46
corosync: 2.4.2-pve5
criu: 2.11.1-1~bpo90
gfs2-utils: not correctly installed
glusterfs-client: 3.8.8-1
ksm-control-daemon: 1.2-2
libjs-extjs: 6.0.1-2
libpve-access-control: 5.0-8
libpve-apiclient-perl: 2.0-5
libpve-common-perl: 5.0-37
libpve-guest-common-perl: 2.0-17
libpve-http-server-perl: 2.0-9
libpve-storage-perl: 5.0-24
libqb0: 1.0.1-1
lvm2: 2.02.168-pve6
lxc-pve: 3.0.0-3
lxcfs: 3.0.0-1
novnc-pve: 1.0.0-2
proxmox-widget-toolkit: 1.0-19
pve-cluster: 5.0-29
pve-container: 2.0-24
pve-docs: 5.2-5
pve-firewall: 3.0-13
pve-firmware: 2.0-5
pve-ha-manager: 2.0-5
pve-i18n: 1.0-6
pve-libspice-server1: 0.12.8-3
pve-qemu-kvm: 2.11.2-1
pve-xtermjs: 1.0-5
qemu-server: 5.0-30
smartmontools: 6.5+svn4324-1
spiceterm: 3.0-5
vncterm: 1.5-3
zfsutils-linux: 0.7.9-pve1~bpo9