Hello,
I'm testing out an HA Proxmox environment and seem to be having issues with the floating IP since switching the nodes to OVS. I have a working fence and cluster configuration and have also confirmed that multicast is working using ssmpingd/asmping. Host IPs are 10.22.2.101-103 and I'm trying to use 10.22.2.100 as the floating IP. I've searched the threads but haven't seen anyone report issues with the floating IP when fencing and clustering are both working. Does anyone have ideas on what I might look into next? Any help is much appreciated!
Thanks,
-Mike
Fence/PVE Output and cluster.conf contents below:
fence_tool ls output:
fence domain
member count 3
victim count 0
victim now 0
master nodeid 1
wait state none
members 1 2 3
pvecm nodes output:
Node Sts Inc Joined Name
1 M 656 2014-12-05 13:27:08 host1
2 M 660 2014-12-05 13:27:23 host2
3 M 676 2014-12-05 16:12:23 host3
pvecm status output:
root@central1:~# pvecm status
Version: 6.2.0
Config Version: 44
Cluster Name: CLTest
Cluster Id: 4489
Cluster Member: Yes
Cluster Generation: 676
Membership state: Cluster-Member
Nodes: 3
Expected votes: 3
Total votes: 3
Node votes: 1
Quorum: 2
Active subsystems: 3
Flags:
Ports Bound: 0
Node name: host1
Node ID: 1
Multicast addresses: 239.192.17.154
Node addresses: 10.22.2.101
/etc/pve/cluster.conf:
<?xml version="1.0"?>
<cluster config_version="44" name="CLTest">
<cman keyfile="/var/lib/pve-cluster/corosync.authkey"/>
<fencedevices>
<fencedevice agent="fence_ipmilan" ipaddr="10.77.1.11" login="CHANGEME" name="host1-ipmi" passwd="CHANGEME" power_wait="10"/>
<fencedevice agent="fence_ipmilan" ipaddr="10.77.1.12" login="CHANGEME" name="host2-ipmi" passwd="CHANGEME" power_wait="10"/>
<fencedevice agent="fence_ipmilan" ipaddr="10.77.1.13" login="CHANGEME" name="host3-ipmi" passwd="CHANGEME" power_wait="10"/>
</fencedevices>
<clusternodes>
<clusternode name="host1" nodeid="1" votes="1">
<fence>
<method name="1">
<device name="host1-ipmi"/>
</method>
</fence>
</clusternode>
<clusternode name="host2" nodeid="2" votes="1">
<fence>
<method name="1">
<device name="host2-ipmi"/>
</method>
</fence>
</clusternode>
<clusternode name="host3" nodeid="3" votes="1">
<fence>
<method name="1">
<device name="host3-ipmi"/>
</method>
</fence>
</clusternode>
</clusternodes>
<rm>
<service autostart="1" exclusive="0" name="PVEIP1" recovery="relocate">
<ip address="10.22.2.100"/>
</service>
</rm>
</cluster>
/etc/network/interfaces:
# network interface settings
allow-vmbr0 zzz_core
iface zzz_core inet static
address 10.22.2.101
netmask 255.255.252.0
gateway 10.22.0.1
ovs_type OVSIntPort
ovs_bridge vmbr0
ovs_options tag=722
allow-vmbr1 zzz_corestor
iface zzz_corestor inet static
address 10.44.2.101
netmask 255.255.252.0
ovs_type OVSIntPort
ovs_bridge vmbr1
ovs_options tag=744
auto lo
iface lo inet loopback
allow-vmbr0 eth0
iface eth0 inet manual
ovs_type OVSPort
ovs_bridge vmbr0
allow-vmbr1 eth1
iface eth1 inet manual
ovs_type OVSPort
ovs_bridge vmbr1
auto vmbr0
iface vmbr0 inet manual
ovs_type OVSBridge
ovs_ports eth0 zzz_core
auto vmbr1
iface vmbr1 inet manual
ovs_type OVSBridge
ovs_ports eth1 zzz_corestor
auto vmbr11
iface vmbr11 inet manual
ovs_type OVSBridge
auto vmbr12
iface vmbr12 inet manual
ovs_type OVSBridge
I'm testing out an HA Proxmox environment and seem to be having issues with the floating IP since switching the nodes to OVS. I have a working fence and cluster configuration and have also confirmed that multicast is working using ssmpingd/asmping. Host IPs are 10.22.2.101-103 and I'm trying to use 10.22.2.100 as the floating IP. I've searched the threads but haven't seen anyone report issues with the floating IP when fencing and clustering are both working. Does anyone have ideas on what I might look into next? Any help is much appreciated!
Thanks,
-Mike
Fence/PVE Output and cluster.conf contents below:
fence_tool ls output:
fence domain
member count 3
victim count 0
victim now 0
master nodeid 1
wait state none
members 1 2 3
pvecm nodes output:
Node Sts Inc Joined Name
1 M 656 2014-12-05 13:27:08 host1
2 M 660 2014-12-05 13:27:23 host2
3 M 676 2014-12-05 16:12:23 host3
pvecm status output:
root@central1:~# pvecm status
Version: 6.2.0
Config Version: 44
Cluster Name: CLTest
Cluster Id: 4489
Cluster Member: Yes
Cluster Generation: 676
Membership state: Cluster-Member
Nodes: 3
Expected votes: 3
Total votes: 3
Node votes: 1
Quorum: 2
Active subsystems: 3
Flags:
Ports Bound: 0
Node name: host1
Node ID: 1
Multicast addresses: 239.192.17.154
Node addresses: 10.22.2.101
/etc/pve/cluster.conf:
<?xml version="1.0"?>
<cluster config_version="44" name="CLTest">
<cman keyfile="/var/lib/pve-cluster/corosync.authkey"/>
<fencedevices>
<fencedevice agent="fence_ipmilan" ipaddr="10.77.1.11" login="CHANGEME" name="host1-ipmi" passwd="CHANGEME" power_wait="10"/>
<fencedevice agent="fence_ipmilan" ipaddr="10.77.1.12" login="CHANGEME" name="host2-ipmi" passwd="CHANGEME" power_wait="10"/>
<fencedevice agent="fence_ipmilan" ipaddr="10.77.1.13" login="CHANGEME" name="host3-ipmi" passwd="CHANGEME" power_wait="10"/>
</fencedevices>
<clusternodes>
<clusternode name="host1" nodeid="1" votes="1">
<fence>
<method name="1">
<device name="host1-ipmi"/>
</method>
</fence>
</clusternode>
<clusternode name="host2" nodeid="2" votes="1">
<fence>
<method name="1">
<device name="host2-ipmi"/>
</method>
</fence>
</clusternode>
<clusternode name="host3" nodeid="3" votes="1">
<fence>
<method name="1">
<device name="host3-ipmi"/>
</method>
</fence>
</clusternode>
</clusternodes>
<rm>
<service autostart="1" exclusive="0" name="PVEIP1" recovery="relocate">
<ip address="10.22.2.100"/>
</service>
</rm>
</cluster>
/etc/network/interfaces:
# network interface settings
allow-vmbr0 zzz_core
iface zzz_core inet static
address 10.22.2.101
netmask 255.255.252.0
gateway 10.22.0.1
ovs_type OVSIntPort
ovs_bridge vmbr0
ovs_options tag=722
allow-vmbr1 zzz_corestor
iface zzz_corestor inet static
address 10.44.2.101
netmask 255.255.252.0
ovs_type OVSIntPort
ovs_bridge vmbr1
ovs_options tag=744
auto lo
iface lo inet loopback
allow-vmbr0 eth0
iface eth0 inet manual
ovs_type OVSPort
ovs_bridge vmbr0
allow-vmbr1 eth1
iface eth1 inet manual
ovs_type OVSPort
ovs_bridge vmbr1
auto vmbr0
iface vmbr0 inet manual
ovs_type OVSBridge
ovs_ports eth0 zzz_core
auto vmbr1
iface vmbr1 inet manual
ovs_type OVSBridge
ovs_ports eth1 zzz_corestor
auto vmbr11
iface vmbr11 inet manual
ovs_type OVSBridge
auto vmbr12
iface vmbr12 inet manual
ovs_type OVSBridge
Last edited: