Got a non-standard corosync config here with a static nodelist and two rings, each running across its own redundant switch network:
This poses some issues when trying to use the firewall, as the PVEFW-HOST-IN/OUT chains say:
Could I modify these automatically created standard chains to allow for both of our corosync rings and the ports they use?
nodelist {
node {
nodeid: 1
quorum_votes: 1
ring1_addr: n1.pve
ring0_addr: n1
}
...
}
quorum {
provider: corosync_votequorum
}
totem {
cluster_name: pve-clst
config_version: 19
ip_version: ipv4
secauth: off
rrp_mode: active
version: 2
interface {
bindnetaddr: 193.162.153.240
ringnumber: 1
broadcast: yes
mcastport: 5405
transport: udp
netmtu: 9000
}
interface {
bindnetaddr: 10.45.71.0
ringnumber: 0
broadcast: yes
mcastport: 5407
transport: udp
netmtu: 9000
}
}
root@n7:~# corosync-cfgtool -s
Printing ring status.
Local node ID 7
RING ID 0
id = 10.45.71.7
status = ring 0 active with no faults
RING ID 1
id = 193.162.153.250
status = ring 1 active with no faults
root@n7:~# pve-firewall localnet
local hostname: n7
local IP address: 10.45.71.7
network auto detect: 10.45.71.0/24
using detected local_network: 10.45.71.0/24
This poses some issues when trying to use the firewall, as the PVEFW-HOST-IN/OUT chains say:
RETURN udp -- 10.45.71.0/24 10.45.71.0/24 udp dpts:5404:5405
RETURN udp -- 10.45.71.0/24 anywhere ADDRTYPE match dst-type MULTICAST udp dpts:5404:5405
RETURN udp -- anywhere 10.45.71.0/24 udp dpts:5404:5405
RETURN udp -- anywhere anywhere ADDRTYPE match dst-type MULTICAST udp dpts:5404:5405
Could I modify these automatically created standard chains to allow for both of our corosync rings and the ports they use?