[SOLVED] SDN EVPN Node -> VM connectivity

brauni

New Member
Dec 27, 2025
2
1
3
Greetings

I am running a 3-node cluster with an EVPN zone with multiple VNETs.
No Firewall on Host or DC.

I should be able to ping VMs from Nodes, correct?

Currently working:
Node to Node
VM to VM
Node to VM (on that Node)

Not working:
Node to VM (on another Node)

Bash:
/etc/network/interfaces.d/sdn
#version:13

auto apps
iface apps
    address 10.10.13.1/24
    post-up iptables -t nat -A POSTROUTING -s '10.10.13.0/24' -o vmbr0 -j SNAT --to-source 54.38.94.150
    post-down iptables -t nat -D POSTROUTING -s '10.10.13.0/24' -o vmbr0 -j SNAT --to-source 54.38.94.150
    post-up iptables -t raw -I PREROUTING -i fwbr+ -j CT --zone 1
    post-down iptables -t raw -D PREROUTING -i fwbr+ -j CT --zone 1
    hwaddress BC:24:11:7D:19:CB
    bridge_ports vxlan_apps
    bridge_stp off
    bridge_fd 0
    mtu 1450
    ip-forward on
    arp-accept on
    vrf vrf_zevpn

auto infra
iface infra
    address 10.10.15.1/24
    post-up iptables -t nat -A POSTROUTING -s '10.10.15.0/24' -o vmbr0 -j SNAT --to-source 54.38.94.150
    post-down iptables -t nat -D POSTROUTING -s '10.10.15.0/24' -o vmbr0 -j SNAT --to-source 54.38.94.150
    post-up iptables -t raw -I PREROUTING -i fwbr+ -j CT --zone 1
    post-down iptables -t raw -D PREROUTING -i fwbr+ -j CT --zone 1
    hwaddress BC:24:11:7D:19:CB
    bridge_ports vxlan_infra
    bridge_stp off
    bridge_fd 0
    mtu 1450
    ip-forward on
    arp-accept on
    vrf vrf_zevpn

auto k8s
iface k8s
    address 10.10.8.1/24
    post-up iptables -t nat -A POSTROUTING -s '10.10.8.0/24' -o vmbr0 -j SNAT --to-source 54.38.94.150
    post-down iptables -t nat -D POSTROUTING -s '10.10.8.0/24' -o vmbr0 -j SNAT --to-source 54.38.94.150
    post-up iptables -t raw -I PREROUTING -i fwbr+ -j CT --zone 1
    post-down iptables -t raw -D PREROUTING -i fwbr+ -j CT --zone 1
    hwaddress BC:24:11:7D:19:CB
    bridge_ports vxlan_k8s
    bridge_stp off
    bridge_fd 0
    mtu 1450
    alias talos
    ip-forward on
    arp-accept on
    vrf vrf_zevpn

auto vrf_zevpn
iface vrf_zevpn
    vrf-table auto
    post-up ip route del vrf vrf_zevpn unreachable default metric 4278198272

auto vrfbr_zevpn
iface vrfbr_zevpn
    bridge-ports vrfvx_zevpn
    bridge_stp off
    bridge_fd 0
    mtu 1450
    vrf vrf_zevpn

auto vrfvx_zevpn
iface vrfvx_zevpn
    vxlan-id 665
    vxlan-local-tunnelip 172.24.0.3
    bridge-learning off
    bridge-arp-nd-suppress on
    mtu 1450

auto vxlan_apps
iface vxlan_apps
    vxlan-id 13
    vxlan-local-tunnelip 172.24.0.3
    bridge-learning off
    bridge-arp-nd-suppress on
    mtu 1450

auto vxlan_infra
iface vxlan_infra
    vxlan-id 15
    vxlan-local-tunnelip 172.24.0.3
    bridge-learning off
    bridge-arp-nd-suppress on
    mtu 1450

auto vxlan_k8s
iface vxlan_k8s
    vxlan-id 8
    vxlan-local-tunnelip 172.24.0.3
    bridge-learning off
    bridge-arp-nd-suppress on
    mtu 1450

I can see the VM in the neighbour table, and the route seems correct?

Code:
ip neighbour show
172.24.0.2 dev ztdiyrsa75 lladdr be:6c:de:bd:fc:f5 REACHABLE
10.10.15.12 dev infra lladdr bc:24:11:4c:23:3e extern_learn NOARP proto zebra
172.24.0.2 dev vrfbr_zevpn lladdr 82:e3:94:8d:d1:c5 extern_learn NOARP proto zebra
172.24.0.1 dev ztdiyrsa75 lladdr be:f0:b1:97:3d:b9 REACHABLE

ip route show 10.10.15.12
10.10.15.12 nhid 33 via 172.24.0.2 dev vrfbr_zevpn proto bgp metric 20 onlink

Code:
cat /etc/frr/frr.conf
frr version 10.4.1
frr defaults datacenter
hostname ovhsbg3
log syslog informational
service integrated-vtysh-config
!
!
vrf vrf_zevpn
 vni 665
exit-vrf
!
router bgp 66000
 bgp router-id 172.24.0.3
 no bgp hard-administrative-reset
 no bgp default ipv4-unicast
 coalesce-time 1000
 no bgp graceful-restart notification
 neighbor VTEP peer-group
 neighbor VTEP remote-as 66000
 neighbor VTEP bfd
 neighbor 172.24.0.1 peer-group VTEP
 neighbor 172.24.0.2 peer-group VTEP
 !
 address-family ipv4 unicast
  import vrf vrf_zevpn
 exit-address-family
 !
 address-family ipv6 unicast
  import vrf vrf_zevpn
 exit-address-family
 !
 address-family l2vpn evpn
  neighbor VTEP activate
  neighbor VTEP route-map MAP_VTEP_IN in
  neighbor VTEP route-map MAP_VTEP_OUT out
  advertise-all-vni
 exit-address-family
exit
!
router bgp 66000 vrf vrf_zevpn
 bgp router-id 172.24.0.3
 no bgp hard-administrative-reset
 no bgp graceful-restart notification
 !
 address-family ipv4 unicast
  redistribute connected
 exit-address-family
 !
 address-family ipv6 unicast
  redistribute connected
 exit-address-family
 !
 address-family l2vpn evpn
  default-originate ipv4
  default-originate ipv6
 exit-address-family
exit
!
ip prefix-list only_default seq 1 permit 0.0.0.0/0
!
ipv6 prefix-list only_default_v6 seq 1 permit ::/0
!
route-map MAP_VTEP_IN deny 1
 match ip address prefix-list only_default
exit
!
route-map MAP_VTEP_IN deny 2
 match ipv6 address prefix-list only_default_v6
exit
!
route-map MAP_VTEP_IN permit 3
exit
!
route-map MAP_VTEP_OUT permit 1
exit
!
line vty
!

Any help would be appreciated. I am at my wits' end.
 
This looks like expected behavior.
Your VNET bridges are attached to a VRF, so the EVPN routes live in that VRF table and not in the main routing table.

The Proxmox host itself isn’t part of that VRF unless you explicitly put it there.

EVPN extends the VM networks across nodes, but the host networking stack stays separate unless you add policy routing or move it into the same VRF.
 
This looks like expected behavior.
Your VNET bridges are attached to a VRF, so the EVPN routes live in that VRF table and not in the main routing table.

The Proxmox host itself isn’t part of that VRF unless you explicitly put it there.

EVPN extends the VM networks across nodes, but the host networking stack stays separate unless you add policy routing or move it into the same VRF.
Correct!


Please also note that 66000 is NOT a private ASN, so I strongly suggest switching that into private ASN range (64512 - 65534).
 
  • Like
Reactions: nico1875p
Correct!


Please also note that 66000 is NOT a private ASN, so I strongly suggest switching that into private ASN range (64512 - 65534).
Whoops, thanks a lot for the hint — changed it to the default 65000.

This looks like expected behavior.
Your VNET bridges are attached to a VRF, so the EVPN routes live in that VRF table and not in the main routing table.

The Proxmox host itself isn’t part of that VRF unless you explicitly put it there.

EVPN extends the VM networks across nodes, but the host networking stack stays separate unless you add policy routing or move it into the same VRF.
Thanks for the clarification! I must have been misremembering being able to ping VMs from the nodes.
 
  • Like
Reactions: nico1875p