In my case, the VLAN-tagged interface doesn't work.
#cat 100.conf
bootdisk: virtio0
cores: 4
ide2: none,media=cdrom
memory: 8192
name: test
net0: virtio=9E:62:1E:83:BF:75,bridge=vmbr4,tag=40
net1: virtio=92:4C:CD:2A:68:0D,bridge=vmbr3
ostype: l26
sockets: 1
virtio0: local:100/vm-100-disk-1.raw
net0 use vlan mode
net1 use bridge mode
#cat /etc/network/interfaces
--------------------------------------
..........
auto vmbr3
iface vmbr3 inet static
address 192.168.0.1
netmask 255.255.0.0
bridge_ports eth3 <---the switch port mode is access
bridge_stp off
bridge_fd 0
auto vmbr4
iface vmbr4 inet manual
bridge_ports eth4 <--- the switch port mode is trunk, and it carries VLAN id 40
bridge_stp off
bridge_fd 0
--------------------------------------
vm net conf:
# ifconfig |grep "inet addr"
inet addr:172.16.0.20 Bcast:172.16.255.255 Mask:255.255.0.0
inet addr:192.168.0.20 Bcast:192.168.255.255 Mask:255.255.0.0
inet addr:127.0.0.1 Mask:255.0.0.0
172.16.0.0/16 ---> gateway 172.16.0.254; the gateway is not configured on eth0, it is just on the same VLAN network;
192.168.0.0/16 ---> gateway 192.168.0.254; the gateway is not configured on eth1, it is just on the same VLAN network;
The gateways (192.168.0.254 / 172.16.0.254) are VMs using bridged networking, on a different switch port and VLAN.
result:
# ping -f -c 4 192.168.0.254
PING 192.168.0.254 (192.168.0.254) 56(84) bytes of data.
--- 192.168.0.254 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.030/0.415/0.653/0.236 ms, ipg/ewma 0.579/0.431 ms
# ping -f -c 4 172.16.0.254
PING 172.16.0.254 (172.16.0.254) 56(84) bytes of data.
...E
--- 172.16.0.254 ping statistics ---
4 packets transmitted, 0 received, +3 errors, 100% packet loss, time 10048ms
pipe 3
# pveversion -v
pve-manager: 2.1-12 (pve-manager/2.1/be112d89)
running kernel: 2.6.32-13-pve
proxmox-ve-2.6.32: 2.1-71
pve-kernel-2.6.32-13-pve: 2.6.32-71
lvm2: 2.02.95-1pve2
clvm: 2.02.95-1pve2
corosync-pve: 1.4.3-1
openais-pve: 1.1.4-2
libqb: 0.10.1-2
redhat-cluster-pve: 3.1.92-2
resource-agents-pve: 3.9.2-3
fence-agents-pve: 3.1.8-1
pve-cluster: 1.0-27
qemu-server: 2.0-45
pve-firmware: 1.0-17
libpve-common-perl: 1.0-28
libpve-access-control: 1.0-24
libpve-storage-perl: 2.0-25
vncterm: 1.0-2
vzctl: 3.0.30-2pve5
vzprocps: 2.0.11-2
vzquota: 3.0.12-3
pve-qemu-kvm: 1.1-6
ksm-control-daemon: 1.1-1
Does anybody have the same problem? I'm confused.
#cat 100.conf
bootdisk: virtio0
cores: 4
ide2: none,media=cdrom
memory: 8192
name: test
net0: virtio=9E:62:1E:83:BF:75,bridge=vmbr4,tag=40
net1: virtio=92:4C:CD:2A:68:0D,bridge=vmbr3
ostype: l26
sockets: 1
virtio0: local:100/vm-100-disk-1.raw
net0 use vlan mode
net1 use bridge mode
#cat /etc/network/interfaces
--------------------------------------
..........
auto vmbr3
iface vmbr3 inet static
address 192.168.0.1
netmask 255.255.0.0
bridge_ports eth3 <---the switch port mode is access
bridge_stp off
bridge_fd 0
auto vmbr4
iface vmbr4 inet manual
bridge_ports eth4 <--- the switch port mode is trunk, and it carries VLAN id 40
bridge_stp off
bridge_fd 0
--------------------------------------
vm net conf:
# ifconfig |grep "inet addr"
inet addr:172.16.0.20 Bcast:172.16.255.255 Mask:255.255.0.0
inet addr:192.168.0.20 Bcast:192.168.255.255 Mask:255.255.0.0
inet addr:127.0.0.1 Mask:255.0.0.0
172.16.0.0/16 ---> gateway 172.16.0.254; the gateway is not configured on eth0, it is just on the same VLAN network;
192.168.0.0/16 ---> gateway 192.168.0.254; the gateway is not configured on eth1, it is just on the same VLAN network;
The gateways (192.168.0.254 / 172.16.0.254) are VMs using bridged networking, on a different switch port and VLAN.
result:
# ping -f -c 4 192.168.0.254
PING 192.168.0.254 (192.168.0.254) 56(84) bytes of data.
--- 192.168.0.254 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.030/0.415/0.653/0.236 ms, ipg/ewma 0.579/0.431 ms
# ping -f -c 4 172.16.0.254
PING 172.16.0.254 (172.16.0.254) 56(84) bytes of data.
...E
--- 172.16.0.254 ping statistics ---
4 packets transmitted, 0 received, +3 errors, 100% packet loss, time 10048ms
pipe 3
# pveversion -v
pve-manager: 2.1-12 (pve-manager/2.1/be112d89)
running kernel: 2.6.32-13-pve
proxmox-ve-2.6.32: 2.1-71
pve-kernel-2.6.32-13-pve: 2.6.32-71
lvm2: 2.02.95-1pve2
clvm: 2.02.95-1pve2
corosync-pve: 1.4.3-1
openais-pve: 1.1.4-2
libqb: 0.10.1-2
redhat-cluster-pve: 3.1.92-2
resource-agents-pve: 3.9.2-3
fence-agents-pve: 3.1.8-1
pve-cluster: 1.0-27
qemu-server: 2.0-45
pve-firmware: 1.0-17
libpve-common-perl: 1.0-28
libpve-access-control: 1.0-24
libpve-storage-perl: 2.0-25
vncterm: 1.0-2
vzctl: 3.0.30-2pve5
vzprocps: 2.0.11-2
vzquota: 3.0.12-3
pve-qemu-kvm: 1.1-6
ksm-control-daemon: 1.1-1
Does anybody have the same problem? I'm confused.