VLAN 1 NIC for VMs and administration

Jan 16, 2014
40
0
26
Hi, I'm trying to configure 2 nodes of a cluster. Each node has 2 NICs.
I've thought to use NIC 1 of each node for internal communication (gluster, etc.); NIC 2 of each node should be the bridge for the virtual machines.
Since the VMs are on different LANs, I need to configure NIC 2 with VLAN support; I've thought to use another VLAN for managing the nodes.
Here the configuration:

Code:
auto lo
iface lo inet loopback

auto bond0
iface bond0 inet manual
    slaves eth1
    bond_mode 0
    bond_xmit_hash_policy layer2
    bond_miimon 100
    bond_downdelay 0
    bond_updelay 0

#bridge for VMs
#auto vmbr0
#iface vmbr0 inet manual
 #   bridge_ports bond0
 #   bridge_stp off
 #   bridge_fd 0

# Management vlan
auto vmbr0v99
iface vmbr0v99 inet static
    address 192.168.99.3
    netmask 255.255.255.0
    network 192.168.99.0
    gateway 192.168.99.254
    bridge_ports bond0.99
    bridge_stp off
    bridge_fd 0

#Internal communication
auto eth0
iface eth0 inet static  
    address 192.168.98.3
    netmask 255.255.255.0

With this configuration I can reach the node and from the node I can reach internet correctly (so the switch has the port tagged correctly).
But the VMs need a bridge like vmbr0 and If I enable this then I can't reach the node.

Furthermore from this http://188.165.151.221/threads/12850-Problems-with-VLAN-s
Code:
... 
and if you create a vm, with network interface on vmbr1 + vlan tag 4, proxmox create a

vmbr1v4
iface vmbr1v4 inet manual
    bridge_ports bond0.4
    bridge_stp off
    bridge_fd 0

(not in /etc/network/interfaces, but directly in memory)

Is there any way to have a bonding vlan bridge with a management vlan for nodes?
Thanks.
 
You didn't set up the bond0.99 vlan interface. You should have something like:
auto bond0.99
iface bond0.99 inet manual

before bridging.. if you issue: ifconfig bond0.99 does it show anything now?
 
Do you mean I should add the lines?:

Code:
auto bond0.99
iface bond0.99 inet manual
    slaves eth1
    bond_mode 0
    bond_xmit_hash_policy layer2
    bond_miimon 100
    bond_downdelay 0
    bond_updelay 0

With these lines I get errors about "the interface already exists".

Furthermore, with the previous configuration, ifconfig already return something about bond0.99:

Code:
[I][B]#ifconfig[/B][/I]
bond0     Link encap:Ethernet  HWaddr 00:05:5d:2c:b6:d3
          inet6 addr: fe80::205:5dff:fe2c:b6d3/64 Scope:Link
          UP BROADCAST RUNNING MASTER MULTICAST  MTU:1500  Metric:1
          RX packets:15772 errors:0 dropped:0 overruns:0 frame:0
          TX packets:9853 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:2811046 (2.6 MiB)  TX bytes:814260 (795.1 KiB)
[B]
bond0.99  Link encap:Ethernet  HWaddr 00:05:5d:2c:b6:d3
          UP BROADCAST RUNNING MASTER MULTICAST  MTU:1500  Metric:1
          RX packets:4541 errors:0 dropped:0 overruns:0 frame:0
          TX packets:9846 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:804509 (785.6 KiB)  TX bytes:723040 (706.0 KiB)[/B]

eth0      Link encap:Ethernet  HWaddr 00:26:18:30:ba:70
          inet addr:192.168.98.3  Bcast:192.168.98.255  Mask:255.255.255.0
          inet6 addr: fe80::226:18ff:fe30:ba70/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:6857349 errors:0 dropped:0 overruns:0 frame:0
          TX packets:6588232 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:1365993341 (1.2 GiB)  TX bytes:1289132226 (1.2 GiB)
          Interrupt:20 Base address:0xc000

eth1      Link encap:Ethernet  HWaddr 00:05:5d:2c:b6:d3
          UP BROADCAST RUNNING SLAVE MULTICAST  MTU:1500  Metric:1
          RX packets:15772 errors:0 dropped:0 overruns:0 frame:0
          TX packets:9853 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:2811046 (2.6 MiB)  TX bytes:814260 (795.1 KiB)
          Interrupt:18 Base address:0x8c00

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:445995 errors:0 dropped:0 overruns:0 frame:0
          TX packets:445995 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:23816681 (22.7 MiB)  TX bytes:23816681 (22.7 MiB)

venet0    Link encap:UNSPEC  HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00
          inet6 addr: fe80::1/128 Scope:Link
          UP BROADCAST POINTOPOINT RUNNING NOARP  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:3 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

vmbr0v99  Link encap:Ethernet  HWaddr 00:05:5d:2c:b6:d3
          inet addr:192.168.99.3  Bcast:192.168.99.255  Mask:255.255.255.0
          inet6 addr: fe80::205:5dff:fe2c:b6d3/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:4541 errors:0 dropped:0 overruns:0 frame:0
          TX packets:4459 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:786345 (767.9 KiB)  TX bytes:367630 (359.0 KiB)

So where am I wrong? Tnx
 
I've partially fixed the problem.

With that configuration:
Code:
auto lo
iface lo inet loopback
iface eth0 inet manual
iface eth1 inet manual

auto bond0
iface bond0 inet manual
    slaves eth1
    bond_mode 0
    bond_xmit_hash_policy layer2
    bond_miimon 100
    bond_downdelay 0
    bond_updelay 0

auto vmbr99
iface vmbr99 inet static
    address 192.168.99.3
    netmask 255.255.255.0
    network 192.168.99.0
    gateway 192.168.99.254
    bridge_ports bond0.99
    bridge_stp off
    bridge_fd 0
 
auto vmbr100
iface vmbr100 inet manual
    bridge_ports bond0.100
    bridge_stp off
    bridge_fd 0
    
auto eth0
iface eth0 inet static  
    address 192.168.98.3
    netmask 255.255.255.0

Now I can ping every subnet from the node.

But into a vm that has this configuration:
Schermata 2014-03-07 alle 09.46.46.pngSchermata 2014-03-07 alle 10.05.07.png

I can't reach the gateway 192.168.100.254.

Where could the problem be?
Tnx
 
Last edited:
your interfaces file shows some kind of misconfiguration
I mean eth0 is defined twice... both as static and manual... and your vmbr bridges use bond interfaces that are not defined...
if you're learning linux networking, try to learn by little steps... configure pve as simple as possible, then create vms to play around and learn networking there...
starting here http://pve.proxmox.com/wiki/Network_Model

Marco
 
I've read all the pve wiki pages about network/bond configuration. I've defined the vmbr in the "direct" manner (as written in http://pve.proxmox.com/wiki/Network_Model): indeed, from that node I can ping every VLAN. The problem is pinging from the VMs: I can't even reach the gateway (the VMs are bridged by vmbr100).

From http://pve.proxmox.com/wiki/Network_Model I have the files:

Code:
# cat /etc/network/interfaces
auto lo
iface lo inet loopback

auto bond0
iface bond0 inet manual
    slaves eth1
    bond_mode 0
    bond_xmit_hash_policy layer2
    bond_miimon 100
    bond_downdelay 0
    bond_updelay 0

auto vmbr99
iface vmbr99 inet static
    address 192.168.99.3
    netmask 255.255.255.0
    network 192.168.99.0
    gateway 192.168.99.254
    bridge_ports bond0.99
    bridge_stp off
    bridge_fd 0
 
auto vmbr100
iface vmbr100 inet manual
    bridge_ports bond0.100
    bridge_stp off
    bridge_fd 0
   [B] # May be the problem is here
    # I always get an error when I enable these two rows but they seem to be ok[/B]
    [B]post-up ip route add table bond0.100 default via 192.168.100.254 dev vmbr100
    post-up ip rule add from 192.168.100.0/24 table bond0.100
    post-down ip route del table bond0.100 default via 192.168.100.254 dev vmbr100
    post-down ip rule del from 192.168.100.0/24 table bond0.100 [/B]

    
auto eth0
iface eth0 inet static  
    address 192.168.98.3
    netmask 255.255.255.0

Code:
# cat /etc/iproute2/rt_tables
#
# reserved values
#
255    local
254    main
253    default
0    unspec
#
# local
#
#1    inr.ruhep
100     bond0.100
99       bond0.99

Code:
# cat /proc/net/vlan/config 
VLAN Dev name     | VLAN ID
Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
bond0.99       | 99  | bond0
bond0.100      | 100  | bond0
 
sorry I never configured like that.
in your new /etc/network/interfaces

your bond0 has "slaves eth1"
but eth1 is not defined?
I see only eth0 as last defined... ?
is it right?
Marco
 
In my purpose I reserve eth1 only for the bond. I can already reach the physical node via the vmbr99 subnet. I thought to reserve vmbr100 only for VMs (so I don't need to define another IP for the physical node). That's what I thought...
 
sorry, having no experience with such kind of config, I give up... I even thought you were a beginner, at first, but for sure you know more than I do in this field.. :D
maybe others can help you. Good luck.

Marco