Proxmox 7.0 SDN beta test

I have fixed the MTU on the OVSIntPort and re-uploaded the package:
http://odisoweb1.odiso.net/libpve-network-perl_0.4-4_all.deb
Hi Spirit

After installing the above, I see that ln_vnet4040 still has an MTU of 1500.

Code:
root@pve00:~# cat /etc/network/interfaces.d/sdn
#version:57

auto ln_vnet4040
iface ln_vnet4040
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_options vlan_mode=dot1q-tunnel tag=4040

auto vmbr0
iface vmbr0
        ovs_ports ln_vnet4040

auto vnet4040
iface vnet4040
        bridge_ports ln_vnet4040
        bridge_stp off
        bridge_fd 0
        bridge-vlan-aware yes
        bridge-vids 2-4094
        mtu 9000
root@pve00:~# cat /etc/pve/sdn/zones.cfg
vlan: zvlan
        bridge vmbr0
        mtu 9000
root@pve00:~# cat /etc/pve/sdn/vnets.cfg
vnet: vnet4040
        tag 4040
        zone zvlan
        vlanaware 1

root@pve00:~# ovs-vsctl show
82d024d1-224b-4fa9-a4ad-c596570ce3d9
    Bridge "vmbr0"
        Port "vlan18"
            tag: 18
            Interface "vlan18"
                type: internal
        Port "vlan2"
            tag: 2
            Interface "vlan2"
                type: internal
        Port "bond0"
            Interface "eth0"
            Interface "eth1"
        Port "vlan21"
            tag: 21
            Interface "vlan21"
                type: internal
        Port "vmbr0"
            Interface "vmbr0"
                type: internal
        Port "tap20101i2"
            tag: 101
            Interface "tap20101i2"
        Port "tap20101i1"
            tag: 100
            Interface "tap20101i1"
        Port "ln_vnet4040"
            tag: 4040
            Interface "ln_vnet4040"
                type: internal
        Port "vlan23"
            tag: 23
            Interface "vlan23"
                type: internal
        Port "tap21001i0"
            Interface "tap21001i0"
        Port "vlan20"
            tag: 20
            Interface "vlan20"
                type: internal
        Port "tap21001i1"
            Interface "tap21001i1"
        Port "vlan1"
            Interface "vlan1"
                type: internal
        Port "tap20101i4"
            tag: 103
            Interface "tap20101i4"
        Port "tap20101i3"
            tag: 102
            Interface "tap20101i3"
        Port "tap20101i0"
            tag: 4
            Interface "tap20101i0"
    ovs_version: "2.12.0"
root@pve00:~# brctl show
bridge name     bridge id               STP enabled     interfaces
vnet4040                8000.aaa8a3efbbb9       no              ln_vnet4040
                                                        tap20101i5
root@pve00:~# ovs-vsctl list-ports switch_c | xargs -n1 ip link show  | grep mtu | column -t
ovs-vsctl: no bridge named switch_c
1:   lo:           <LOOPBACK,UP,LOWER_UP>                     mtu  65536  qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
2:   eth0:         <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  mq          master  ovs-system  state  UP       mode   DEFAULT  group  default  qlen  1000
3:   eth1:         <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  mq          master  ovs-system  state  UP       mode   DEFAULT  group  default  qlen  1000
4:   ovs-system:   <BROADCAST,MULTICAST>                      mtu  1500   qdisc  noop        state   DOWN        mode   DEFAULT  group  default  qlen   1000
5:   vmbr0:        <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
6:   vlan1:        <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
7:   vlan18:       <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
8:   vlan20:       <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
9:   vlan21:       <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
10:  vlan23:       <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
11:  vlan2:        <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
12:  bond0:        <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  1500   qdisc  noqueue     state   UNKNOWN     mode   DEFAULT  group  default  qlen   1000
13:  ln_vnet4040:  <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  1500   qdisc  noqueue     master  vnet4040    state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
14:  vnet4040:     <BROADCAST,MULTICAST,UP,LOWER_UP>          mtu  9000   qdisc  noqueue     state   UP          mode   DEFAULT  group  default  qlen   1000
15:  tap20101i0:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  pfifo_fast  master  ovs-system  state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
16:  tap20101i1:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  pfifo_fast  master  ovs-system  state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
17:  tap20101i2:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  pfifo_fast  master  ovs-system  state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
18:  tap20101i3:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  pfifo_fast  master  ovs-system  state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
19:  tap20101i4:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  pfifo_fast  master  ovs-system  state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
20:  tap20101i5:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  pfifo_fast  master  vnet4040    state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
21:  tap21001i0:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  mq          master  ovs-system  state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
22:  tap21001i1:   <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP>  mtu  9000   qdisc  mq          master  ovs-system  state  UNKNOWN  mode   DEFAULT  group  default  qlen  1000
 
I have re-uploaded ifupdown2:

http://odisoweb1.odiso.net/ifupdown2_3.0.0-1+pve1_all.deb

It was missing a patch.

You shouldn't see this anymore:

" Running /usr/bin/ovs-vsctl -- --may-exist add-br vmbr0 -- --if-exists del-port vmbr0 tap20101i3 -- --if-exists del-port vmbr0 tap20101i4 -- --if-exists del-port vmbr0 tap20101i1 -- --if-exists del-port vmbr0 tap20101i2 -- --if-exists del-port vmbr0 tap20101i0 -- --if-exists clear bridge vmbr0 auto_attach controller external-ids fail_mode flood_vlans ipfix mirrors netflow other_config protocols sflow -- --if-exists clear interface vmbr0 mtu_request external-ids other_config options -- set Interface vmbr0 mtu_request=9000"



About the MTU:
you should see "ovs_mtu ..." in the ln_vnet4040 stanza.

Are you sure you restarted pvedaemon + pveproxy and regenerated the SDN config after installing libpve-network-perl_0.4-4_all.deb?

Code:
dpkg -i libpve-network-perl_0.4-4_all.deb
systemctl restart pvedaemon
systemctl restart pveproxy
pvesh set /cluster/sdn/   # the command-line equivalent of the "Apply" button in the SDN web interface
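# As a quick check after the apply (a sketch; the interface and file names
# follow the config shown earlier in this thread), confirm the generated
# stanza now carries the MTU and that it reached the kernel interface:
grep -A6 'ln_vnet4040' /etc/network/interfaces.d/sdn
ip link show ln_vnet4040 | grep -o 'mtu [0-9]*'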
 
Hi Spirit,

I have also noticed that the default dot1q-tunnel ethertype is not 802.1Q (0x8100). After running a pcap trace on the destination VM, the packets arrived as 802.1ad. See the attached pcap traces: egress_vm.pcap is from the VM with the vnet interface, and ingress_vm.pcap is from the destination VM.

The pcap files are in the attached zip file.
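A quick way to see which outer ethertype OVS is actually using on the tunnel port (a sketch; the port and NIC names follow the earlier output in this thread):

Code:
# show the dot1q-tunnel settings on the OVS port
ovs-vsctl list port ln_vnet4040 | grep -E 'vlan_mode|other_config'
# capture on the physical side and print the ethertype (0x8100 = 802.1Q, 0x88a8 = 802.1ad)
tcpdump -e -n -i eth0 -c 10 'ether proto 0x8100 or ether proto 0x88a8'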
 

Attachments

  • pcaps.zip
    974 bytes
About the MTU:
you should see "ovs_mtu ..." in the ln_vnet4040 stanza.

Are you sure you restarted pvedaemon + pveproxy and regenerated the SDN config after installing libpve-network-perl_0.4-4_all.deb?
Hi Spirit,

I even tried rebooting the host, but I will try again:

I will remove all SDN config and re-add it, then reboot the host after installing the new ifupdown2_3.0.0-1+pve1_all.deb.
 
Hi Spirit,

I have also noticed that the default dot1q-tunnel ethertype is not 802.1Q (0x8100). After running a pcap trace on the destination VM, the packets arrived as 802.1ad. See the attached pcap traces: egress_vm.pcap is from the VM with the vnet interface, and ingress_vm.pcap is from the destination VM.

The pcap files are in the attached zip file.

I'll fix this. It's OK for the qinq plugin, but not yet for the vlan plugin. (I'll default it to 802.1q there too.)
 
Here is a new version (0.4-5), with the 802.1q dot1q-tunnel ethertype for the vlan plugin:

http://odisoweb1.odiso.net:/libpve-network-perl_0.4-5_all.deb

(you should see "other_config:qinq-ethtype=802.1q" in /etc/network/interfaces.d/sdn)
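For reference, a sketch of how the generated ln_vnet4040 stanza should then look (assuming the vnet4040 / MTU 9000 setup used in this thread):

Code:
auto ln_vnet4040
iface ln_vnet4040
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options vlan_mode=dot1q-tunnel other_config:qinq-ethtype=802.1q tag=4040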
 
Hi Spirit,

Can we also have the vlan-aware option in the GUI for both vlan and qinq zones? And the MTU option for qinq and vlan zones as well?

It's already available.

You can enable a vlan-aware vnet with any plugin (even with vxlan, to carry VLANs inside a VXLAN tunnel, or with the qinq plugin if you need a triple tag).

The MTU option is also available in every zone.
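For example, with the zvlan zone and vnet4040 vnet used earlier in this thread, the options end up in the SDN config files roughly like this (a sketch; the two files are shown together here for brevity):

Code:
# /etc/pve/sdn/zones.cfg
vlan: zvlan
        bridge vmbr0
        mtu 9000

# /etc/pve/sdn/vnets.cfg
vnet: vnet4040
        tag 4040
        zone zvlan
        vlanaware 1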
 
Hi Spirit

I have applied libpve-network-perl_0.4-5_all.deb and ifupdown2_3.0.0-1+pve1_all.deb,

rebooted the host,

and I still have network loss when running any of the commands below:

pvesh set /cluster/sdn/
ifreload -a
ifreload -a -d

Can you attach the result of 'ifreload -a -d'?
 
It's already available.

You can enable a vlan-aware vnet with any plugin (even with vxlan, to carry VLANs inside a VXLAN tunnel, or with the qinq plugin if you need a triple tag).

The MTU option is also available in every zone.
Hi Spirit,

My PVE GUI does not show these options for a vlan zone,
so currently I create the vlan zone and vnet and then manually edit the files before applying the config.

Code:
pveversion -v
proxmox-ve: 6.2-1 (running kernel: 5.4.41-1-pve)
pve-manager: 6.2-4 (running version: 6.2-4/9824574a)
pve-kernel-5.4: 6.2-2
pve-kernel-helper: 6.2-2
pve-kernel-5.4.41-1-pve: 5.4.41-1
pve-kernel-5.4.34-1-pve: 5.4.34-2
ceph-fuse: 12.2.11+dfsg1-2.1+b1
corosync: 3.0.3-pve1
criu: 3.11-3
glusterfs-client: 5.5-3
ifupdown2: 3.0.0-1+pve1
ksm-control-daemon: 1.3-1
libjs-extjs: 6.0.1-10
libknet1: 1.15-pve1
libproxmox-acme-perl: 1.0.4
libpve-access-control: 6.1-1
libpve-apiclient-perl: 3.0-3
libpve-common-perl: 6.1-2
libpve-guest-common-perl: 3.0-10
libpve-http-server-perl: 3.0-5
libpve-network-perl: 0.4-5
libpve-storage-perl: 6.1-8
libqb0: 1.0.5-1
libspice-server1: 0.14.2-4~pve6+1
lvm2: 2.03.02-pve4
lxc-pve: 4.0.2-1
lxcfs: 4.0.3-pve2
novnc-pve: 1.1.0-1
openvswitch-switch: 2.12.0-1
proxmox-mini-journalreader: 1.1-1
proxmox-widget-toolkit: 2.2-1
pve-cluster: 6.1-8
pve-container: 3.1-6
pve-docs: 6.2-4
pve-edk2-firmware: 2.20200229-1
pve-firewall: 4.1-2
pve-firmware: 3.1-1
pve-ha-manager: 3.0-9
pve-i18n: 2.1-2
pve-qemu-kvm: 5.0.0-2
pve-xtermjs: 4.3.0-1
qemu-server: 6.2-2
smartmontools: 7.1-pve2
spiceterm: 3.1-1
vncterm: 1.6-1
zfsutils-linux: 0.8.4-pve1
 
For your packet loss problem,
could you tell me where the source and destination are located? For example: 2 VMs on the same vnet? 2 VMs on different vnets? 1 VM on a vnet and 1 VM on a classic OVS bridge? 1 VM to the outside through a physical interface?
 
Can you attach the result of 'ifreload -a -d'?
Hi Spirit

Please see the attached file as requested. I did another test, and it seems the VMs do not lose connection on the vnet interface;

it seems the host is losing connection on the vlan1 interface, see pve.JPG below.

Here is the network config for the host, /etc/network/interfaces:

Code:
root@pve00:~# cat /etc/network/interfaces
# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
#
# If you want to manage parts of the network configuration manually,
# please utilize the 'source' or 'source-directory' directives to do
# so.
# PVE will preserve these directives, but will NOT read its network
# configuration from sourced files, so do not attempt to move any of
# the PVE managed interfaces into external files!

auto lo
iface lo inet loopback
        pre-up ifconfig eth0 mtu 9000
        pre-up ifconfig eth1 mtu 9000

auto eth0
iface eth0 inet manual
#eth0 - 1_eth-0-38 - 1G

auto eth1
iface eth1 inet manual
#eth1 - 2_eth-0-38 - 1G

auto vlan1
iface vlan1 inet static
        address 10.254.1.49/24
        gateway 10.254.1.1
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000

auto vlan18
iface vlan18 inet static
        address 10.255.18.9/28
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=18
#storage network

auto vlan20
iface vlan20 inet static
        address 10.255.20.9/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=20
# storage iSCSI 1

auto vlan21
iface vlan21 inet static
        address 10.255.21.9/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=21
# storage iSCSI 2

auto vlan23
iface vlan23 inet static
        address 10.255.23.9/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_options tag=23

auto vlan2
iface vlan2 inet static
        address 10.254.2.100/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=2
#vxlan to dc9_sw

auto bond0
iface bond0 inet manual
        ovs_bonds eth0 eth1
        ovs_type OVSBond
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options lacp=active bond_mode=balance-tcp other_config:lacp-time=fast
# bond0_eth0/eth1

auto vmbr0
iface vmbr0 inet manual
        ovs_type OVSBridge
        ovs_ports bond0 vlan1 vlan18 vlan20 vlan21 vlan23 vlan2
        ovs_mtu 9000
#guest network 1G

source /etc/network/interfaces.d/*
 

Attachments

  • ifreload.txt
    28.8 KB
For your packet loss problem,
could you tell me where the source and destination are located? For example: 2 VMs on the same vnet? 2 VMs on different vnets? 1 VM on a vnet and 1 VM on a classic OVS bridge? 1 VM to the outside through a physical interface?
I posted this in a previous post, but let me add it again: in this screenshot you can see me pinging 10.254.1.49; this IP is on the host (see below).
Then, from the VM console, I ping another VM via the vnet interface. The other VM has a normal vmbr0 port, to which I have added VLAN 4040 as per the vnet config.

Code:
auto vlan1
iface vlan1 inet static
        address 10.254.1.49/24
        gateway 10.254.1.1
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000

pve.JPG

Code:
root@pve00:~# cat /etc/network/interfaces.d/sdn
#version:63

auto ln_vnet4040
iface ln_vnet4040
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options vlan_mode=dot1q-tunnel other_config:qinq-ethtype=802.1q tag=4040

auto vmbr0
iface vmbr0
        ovs_ports ln_vnet4040

auto vnet4040
iface vnet4040
        bridge_ports ln_vnet4040
        bridge_stp off
        bridge_fd 0
        bridge-vlan-aware yes
        bridge-vids 2-4094
        mtu 9000
root@pve00:~# cat /etc/pve/nodes/pve00/qemu-server/
10010.conf  20101.conf  20102.conf  20103.conf  20104.conf  20105.conf  21000.conf  21001.conf
root@pve00:~# cat /etc/pve/nodes/pve00/qemu-server/20101.conf
agent: 1
bootdisk: virtio0
cores: 1
cpu: host,flags=+md-clear;+pcid;+spec-ctrl;+ssbd;+aes
memory: 256
name: LAB-1
net0: virtio=52:54:00:FF:3E:92,bridge=vmbr0,tag=4
net1: virtio=52:54:00:F9:11:DA,bridge=vmbr0,tag=100
net2: virtio=52:54:00:A9:7B:54,bridge=vmbr0,tag=101
net3: virtio=52:54:00:F6:E0:80,bridge=vmbr0,tag=102
net4: virtio=52:54:00:47:FE:4C,bridge=vmbr0,tag=103
net5: virtio=52:54:00:42:87:7B,bridge=vnet4040
numa: 1
onboot: 0
ostype: l26
smbios1: uuid=dd0a2457-75a0-48d7-a65d-2243957e153d
sockets: 1
tablet: 0
virtio0: local-lvm:vm-201-disk-0,cache=none,size=128M
vmgenid: 51daab4a-a7b5-4098-a0e4-1416c391efb8
root@pve00:~# cat /etc/pve/nodes/pve00/qemu-server/21001.conf
agent: 1
bootdisk: virtio0
cores: 2
cpu: IvyBridge,flags=+md-clear;+pcid;+spec-ctrl;+ssbd;+aes
memory: 256
name: chr-v7-beta
net0: virtio=52:54:00:89:14:19,bridge=vmbr0,queues=2
net1: virtio=52:54:00:A6:10:64,bridge=vmbr0,queues=2
numa: 1
onboot: 0
ostype: l26
smbios1: uuid=2a9ad897-8d96-41df-8655-99061d3f715a
sockets: 1
tablet: 0
virtio0: local-lvm:vm-101-disk-0,cache=none,size=128M
vmgenid: 2813354f-61ca-4e4e-af89-c890d085f848
 
Hi, sorry I don't understand your test.

I don't understand how you can ping from VM 20101 on vnet4040 (so, with only the single VLAN tag 4040? no other VLAN coming from inside the VM guest?) to vlan1 (the default VLAN, 10.254.1.1)?
They don't have the same VLAN tag, so I don't see how it can work.
 
Hi, sorry I don't understand your test.

I don't understand how you can ping from VM 20101 on vnet4040 (so, with only the single VLAN tag 4040? no other VLAN coming from inside the VM guest?) to vlan1 (the default VLAN, 10.254.1.1)?
They don't have the same VLAN tag, so I don't see how it can work.
Hi Spirit,

Both of the VMs below are MikroTik CHR VMs.

VM 20101 has vnet4040; this vnet interface is labeled e6 on the router, with IP 10.0.0.1/31. I also have 2 VLANs under e6, VLAN 20 and VLAN 30, with IPs
10.20.0.1/31 and 10.30.0.1/31.

vm20101.JPG

Then VM 21001 is another MikroTik with 2 ports connected to vmbr0, and here I also add VLANs attached to ether2_agg2:

VLAN 4040, so it is on the same VLAN as vnet4040 on VM 20101 above, and under vlan4040 I also have VLAN 20 and 30. The IPs on these interfaces are
10.0.0.2/31 on vlan4040, 10.20.0.2/31 on vlan20 and 10.30.0.2/31 on vlan30.
vm21001.JPG

But in any case, as per my previous message with the ifreload, it seems it is not the VM-to-VM traffic that is dropping; it is the traffic to the host's management VLAN (10.254.1.49). Below is the host network config.
Code:
root@pve00:~# cat /etc/network/interfaces
# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
#
# If you want to manage parts of the network configuration manually,
# please utilize the 'source' or 'source-directory' directives to do
# so.
# PVE will preserve these directives, but will NOT read its network
# configuration from sourced files, so do not attempt to move any of
# the PVE managed interfaces into external files!

auto lo
iface lo inet loopback
        pre-up ifconfig eth0 mtu 9000
        pre-up ifconfig eth1 mtu 9000

auto eth0
iface eth0 inet manual
#eth0 - 1_eth-0-38 - 1G

auto eth1
iface eth1 inet manual
#eth1 - 2_eth-0-38 - 1G

auto vlan1
iface vlan1 inet static
        address 10.254.1.49/24
        gateway 10.254.1.1
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000

auto vlan18
iface vlan18 inet static
        address 10.255.18.9/28
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=18
#storage network

auto vlan20
iface vlan20 inet static
        address 10.255.20.9/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=20
# storage iSCSI 1

auto vlan21
iface vlan21 inet static
        address 10.255.21.9/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=21
# storage iSCSI 2

auto vlan23
iface vlan23 inet static
        address 10.255.23.9/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_options tag=23

auto vlan2
iface vlan2 inet static
        address 10.254.2.100/24
        ovs_type OVSIntPort
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options tag=2
#vxlan to dc9_sw

auto bond0
iface bond0 inet manual
        ovs_bonds eth0 eth1
        ovs_type OVSBond
        ovs_bridge vmbr0
        ovs_mtu 9000
        ovs_options lacp=active bond_mode=balance-tcp other_config:lacp-time=fast
# bond0_eth0/eth1

auto vmbr0
iface vmbr0 inet manual
        ovs_type OVSBridge
        ovs_ports bond0 vlan1 vlan18 vlan20 vlan21 vlan23 vlan2
        ovs_mtu 9000
#guest network 1G

source /etc/network/interfaces.d/*

Below is the screenshot again, showing the host management VLAN dropping:
pve.JPG
 
OK,
if I can summarize your setup:

Code:
vmbr0------(tag4040)----vnet4040-----VM 20101(tag20|30)
           --------- vm 21001 (tag4040 . tag20|30)
          --------vlan1

And on 21001 you also have an IP without a VLAN (10.254.1.251), and you try to ping from there to 10.254.1.49, right? (So that works; it's vlan1.)



Could you try a simple "ifup vlan1 -d"? It should do the same as the reload, but only for the vlan1 interface.

Maybe also try "ifup vmbr0 -d".

Then send the result, and tell me if you have network loss.
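One way to capture this (a sketch; the output file name is just an example) is to keep a ping to the host's management IP running in a second session while saving the debug output:

Code:
# terminal 1: watch for loss toward the host's management IP
ping 10.254.1.49
# terminal 2: reload only vlan1 and keep the debug output
ifup vlan1 -d 2>&1 | tee ifup_vlan1.txt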
 
Hi Spirit
Could you try a simple "ifup vlan1 -d"? It should do the same as the reload, but only for the vlan1 interface.

Maybe also try "ifup vmbr0 -d".

Then send the result, and tell me if you have network loss.
Please see the tests below for "ifup vlan1 -d" and "ifup vmbr0 -d"; neither of them caused any packet loss.

See the attached ifup vlan1.txt.

I did one more test to see where the screen stops and where the packet loss starts while running ifreload -a -d.
Below is where the screen stops responding and the packet loss starts:
ifreload.JPG
I see quite a few places like the ones below where it seems to try to set the MTU to 1500, which it shouldn't, because the MTU is set to 9000 everywhere in my config (see the attached file for the /etc/network/interfaces config). Maybe the issue is because I'm using a bond interface?

info: address: using default mtu 1500
info: writing "1500" to file /sys/class/net/eth0/mtu
info: writing "1500" to file /sys/class/net/eth1/mtu
info: writing "1500" to file /sys/class/net/vmbr0/mtu
info: writing "1500" to file /sys/class/net/vlan1/mtu
info: writing "1500" to file /sys/class/net/vlan18/mtu
info: writing "1500" to file /sys/class/net/vlan20/mtu
info: writing "1500" to file /sys/class/net/vlan21/mtu
info: writing "1500" to file /sys/class/net/vlan23/mtu
info: writing "1500" to file /sys/class/net/vlan2/mtu
info: writing "1500" to file /sys/class/net/bond0/mtu
info: writing "1500" to file /sys/class/net/ln_vnet4040/mtu

I checked, and I lose network to the VMs as well, on all interfaces. I moved the IP 10.0.0.2/31 on vlan4040 from the previous post to an external VM on another cluster/host and ran the ping test again while running ifreload -a -d, and it also loses connection. See the screenshot below: on the left is the SSH session to 10.254.1.49 (the host), and on the right is the external VM pinging via vnet4040.

external_vm.JPG

debug: eth1: pre-up : running module ethtool
debug: eth1: pre-up : running module address
info: executing /sbin/sysctl net.mpls.conf.eth1.input=0
 

Attachments

  • ifreload.txt
    28.8 KB
  • interfaces.txt
    2.3 KB
  • ifup vlan1.txt
    18.2 KB
