Hello, I am facing an issue with cluster/corosync.
If one node in the cluster is unavailable for roughly 30 minutes, because of a lost internet connection, hardware maintenance or something else, that node won't rejoin the cluster after it comes back online.
The only solution I have found is to restart corosync on all nodes, but that is not a reliable solution, because someone has to do it manually.
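For reference, this is a sketch of that manual workaround, run on every node once the failed node is reachable again (the two status commands are only a sanity check I added here to show how I verify the node rejoined):

root@havirov-prox1:~# systemctl restart corosync
root@havirov-prox1:~# corosync-cfgtool -s   # link/ring status as seen by corosync
root@havirov-prox1:~# pvecm status          # quorum and member list as seen by Proxmox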
There is also something odd with the corosync config located in /etc/pve/corosync.conf: I am not able to open that one, only the copy located in /etc/corosync/corosync.conf.
When opening it in nano I get this error: [ Error reading lock file /etc/pve/.corosync.conf.swp: Not enough data read ]
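My assumption is that the nano lock-file error has to do with /etc/pve being the pmxcfs FUSE mount, which becomes read-only when the node has no quorum, but I am not sure. These are the checks I can run to see whether the mount is there and the node is quorate:

root@havirov-prox1:~# cat /etc/pve/corosync.conf   # read-only view of the cluster-wide config
root@havirov-prox1:~# mount | grep /etc/pve        # the pmxcfs FUSE mount should be listed here
root@havirov-prox1:~# pvecm status                 # shows whether this node currently has quorum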
I will be glad for any ideas.
Thank you.
Below is some information about my pveversion output, corosync configuration and logs.
root@havirov-prox1:~# pveversion -v
proxmox-ve: 6.1-2 (running kernel: 5.4.22-1-pve)
pve-manager: 6.1-8 (running version: 6.1-8/806edfe1)
pve-kernel-5.4: 6.1-7
pve-kernel-helper: 6.1-7
pve-kernel-5.3: 6.1-5
pve-kernel-5.4.24-1-pve: 5.4.24-1
pve-kernel-5.4.22-1-pve: 5.4.22-1
pve-kernel-5.3.18-2-pve: 5.3.18-2
pve-kernel-4.13.13-2-pve: 4.13.13-33
ceph-fuse: 12.2.11+dfsg1-2.1+b1
corosync: 3.0.3-pve1
criu: 3.11-3
glusterfs-client: 5.5-3
ifupdown: 0.8.35+pve1
ksm-control-daemon: 1.3-1
libjs-extjs: 6.0.1-10
libknet1: 1.15-pve1
libpve-access-control: 6.0-6
libpve-apiclient-perl: 3.0-3
libpve-common-perl: 6.0-17
libpve-guest-common-perl: 3.0-5
libpve-http-server-perl: 3.0-5
libpve-storage-perl: 6.1-5
libqb0: 1.0.5-1
libspice-server1: 0.14.2-4~pve6+1
lvm2: 2.03.02-pve4
lxc-pve: 3.2.1-1
lxcfs: 3.0.3-pve60
novnc-pve: 1.1.0-1
proxmox-mini-journalreader: 1.1-1
proxmox-widget-toolkit: 2.1-3
pve-cluster: 6.1-4
pve-container: 3.0-22
pve-docs: 6.1-6
pve-edk2-firmware: 2.20200229-1
pve-firewall: 4.0-10
pve-firmware: 3.0-6
pve-ha-manager: 3.0-9
pve-i18n: 2.0-4
pve-qemu-kvm: 4.1.1-4
pve-xtermjs: 4.3.0-1
qemu-server: 6.1-7
smartmontools: 7.1-pve2
spiceterm: 3.1-1
vncterm: 1.6-1
zfsutils-linux: 0.8.3-pve1
logging {
  debug: off
  to_syslog: yes
}

nodelist {
  node {
    name: backup
    nodeid: 5
    quorum_votes: 1
    ring0_addr: backup
  }
  node {
    name: havirov-prox1
    nodeid: 8
    quorum_votes: 1
    ring0_addr: havirov-prox1
  }
  node {
    name: prox1
    nodeid: 2
    quorum_votes: 1
    ring0_addr: prox1
  }
  node {
    name: prox1-brno
    nodeid: 9
    quorum_votes: 1
    ring0_addr: prox1-brno
  }
  node {
    name: prox2
    nodeid: 3
    quorum_votes: 1
    ring0_addr: prox2
  }
  node {
    name: prox2-brno
    nodeid: 7
    quorum_votes: 1
    ring0_addr: prox2-brno
  }
  node {
    name: prox3
    nodeid: 4
    quorum_votes: 1
    ring0_addr: prox3
  }
  node {
    name: prox4
    nodeid: 6
    quorum_votes: 1
    ring0_addr: prox4
  }
  node {
    name: pve
    nodeid: 1
    quorum_votes: 1
    ring0_addr: pve
  }
}

quorum {
  provider: corosync_votequorum
}

totem {
  cluster_name: cutter-pv
  config_version: 42
  interface {
    ringnumber: 0
    knet_transport: sctp
  }
  ip_version: ipv4
  secauth: on
  version: 2
  token: 10000
}