Log JAM :: /dev/mapper/pve-root is 100% full

Filesystem Size Used Avail Use% Mounted on
udev 126G 0 126G 0% /dev
tmpfs 26G 2.6G 23G 10% /run
/dev/mapper/pve-root 94G 94G 0 100% /
tmpfs 126G 60M 126G 1% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
efivarfs 304K 72K 228K 24% /sys/firmware/efi/efivars
/dev/sda2 511M 352K 511M 1% /boot/efi
zpool-ha 825G 128K 825G 1% /zpool-ha
/dev/fuse 128M 56K 128M 1% /etc/pve
tmpfs 126G 28K 126G 1% /var/lib/ceph/osd/ceph-4
tmpfs 126G 28K 126G 1% /var/lib/ceph/osd/ceph-5
192.168.0.100:/mnt/ser-hdd/pve 14T 1.6T 13T 12% /mnt/pve/NFS
192.168.0.44:/mnt/NFSShare 3.6T 1.6T 2.0T 45% /mnt/pve/NFS-Storage
tmpfs 26G 0 26G 0% /run/user/0

The error message is:

connection failed (Error 500: closing file '/var/tmp/pve-reserved-ports.tmp.650163' failed - No space left on device)

total 83469464
drwxr-xr-x 17 root root 4096 Nov 27 00:00 .
drwxr-xr-x 11 root root 4096 Nov 22 2022 ..
-rw-r--r-- 1 root root 825 Nov 26 01:05 alternatives.log
-rw-r--r-- 1 root root 15677 Nov 25 23:50 alternatives.log.1
-rw-r--r-- 1 root root 119 Aug 13 08:50 alternatives.log.2.gz
-rw-r--r-- 1 root root 496 May 28 2023 alternatives.log.3.gz
-rw-r--r-- 1 root root 204 Feb 15 2023 alternatives.log.4.gz
-rw-r--r-- 1 root root 525 Jan 31 2023 alternatives.log.5.gz
-rw-r--r-- 1 root root 2589 Dec 31 2022 alternatives.log.6.gz
drwxr-xr-x 2 root root 4096 Nov 26 00:00 apt
-rw-r----- 1 root adm 28672 Nov 28 07:58 auth.log
-rw-r----- 1 root adm 57317 Nov 26 00:00 auth.log.1
-rw-r----- 1 root adm 2935 Nov 18 23:17 auth.log.2.gz
-rw-r----- 1 root adm 2978 Nov 11 23:17 auth.log.3.gz
-rw-r----- 1 root adm 3740 Nov 4 23:17 auth.log.4.gz
-rw-rw---- 1 root utmp 0 Nov 1 00:00 btmp
-rw-rw---- 1 root utmp 0 Oct 1 00:00 btmp.1
drwxrws--T 2 ceph ceph 4096 Nov 27 00:00 ceph
drwxr-x--- 2 _chrony _chrony 4096 Dec 1 2022 chrony
drwxr-xr-x 2 root root 4096 Nov 16 2022 corosync
-rw-r----- 1 root adm 7936 Nov 28 07:17 cron.log
-rw-r----- 1 root adm 372 Nov 26 00:00 cron.log.1
-rw-r----- 1 root adm 3847354 Nov 25 23:49 daemon.log
-rw-r----- 1 root adm 3635120 Nov 19 00:00 daemon.log.1
-rw-r----- 1 root adm 71446 Nov 12 00:00 daemon.log.2.gz
-rw-r----- 1 root adm 80483 Nov 5 00:00 daemon.log.3.gz
-rw-r----- 1 root adm 77526 Oct 29 00:00 daemon.log.4.gz
-rw-r----- 1 root adm 4537 Nov 25 23:24 debug
-rw-r----- 1 root adm 4537 Aug 13 08:50 debug.1
-rw-r----- 1 root adm 1002 May 28 2023 debug.2.gz
-rw-r----- 1 root adm 2181 Feb 15 2023 debug.3.gz
-rw-r----- 1 root adm 2954 Feb 10 2023 debug.4.gz
-rw-r--r-- 1 root root 0 Nov 26 00:00 dpkg.log
-rw-r--r-- 1 root root 503141 Nov 25 23:54 dpkg.log.1
-rw-r--r-- 1 root root 1800 Aug 13 08:16 dpkg.log.2.gz
-rw-r--r-- 1 root root 5408 May 28 2023 dpkg.log.3.gz
-rw-r--r-- 1 root root 1997 Feb 19 2023 dpkg.log.4.gz
-rw-r--r-- 1 root root 1401 Jan 30 2023 dpkg.log.5.gz
-rw-r--r-- 1 root root 35230 Dec 15 2022 dpkg.log.6.gz
-rw-r--r-- 1 root root 2049472 Nov 25 23:49 faillog
-rw-r--r-- 1 root root 1364 Nov 25 23:49 fontconfig.log
drwxr-xr-x 2 root root 4096 May 18 2021 glusterfs
drwxr-xr-x 19 root root 4096 Nov 27 08:54 ifupdown2
drwxr-sr-x+ 3 root systemd-journal 4096 Dec 1 2022 journal
-rw-r----- 1 root adm 42669842432 Nov 28 07:59 kern.log
-rw-r----- 1 root adm 2543624 Nov 26 00:00 kern.log.1
-rw-r----- 1 root adm 1199 Nov 3 08:12 kern.log.2.gz
-rw-r----- 1 root adm 6602 Oct 28 19:41 kern.log.3.gz
-rw-r----- 1 root adm 6316 Oct 21 23:58 kern.log.4.gz
-rw-rw-r-- 1 root utmp 18701432 Nov 28 07:58 lastlog
drwxr-xr-x 2 root root 4096 Jul 4 2022 lxc
-rw-r----- 1 root adm 16738 Nov 25 23:48 mail.info
-rw-r----- 1 root adm 14569 Nov 18 04:20 mail.info.1
-rw-r----- 1 root adm 2165 Nov 11 04:20 mail.info.2.gz
-rw-r----- 1 root adm 2490 Nov 4 04:20 mail.info.3.gz
-rw-r----- 1 root adm 2523 Oct 28 04:20 mail.info.4.gz
-rw-r----- 1 root adm 63061 Nov 27 08:54 mail.log
-rw-r----- 1 root adm 18513 Nov 26 00:00 mail.log.1
-rw-r----- 1 root adm 2185 Nov 18 04:20 mail.log.2.gz
-rw-r----- 1 root adm 2165 Nov 11 04:20 mail.log.3.gz
-rw-r----- 1 root adm 2490 Nov 4 04:20 mail.log.4.gz
-rw-r----- 1 root adm 221706 Nov 25 23:49 messages
-rw-r----- 1 root adm 147 Nov 12 00:10 messages.1
-rw-r----- 1 root adm 142 Nov 5 00:10 messages.2.gz
-rw-r----- 1 root adm 1301 Nov 3 08:12 messages.3.gz
-rw-r----- 1 root adm 6720 Oct 28 19:41 messages.4.gz
drwxr-xr-x 2 root root 4096 Nov 27 00:00 openvswitch
drwx------ 2 root root 4096 Dec 1 2022 private
drwxr-xr-x 3 root root 4096 Dec 1 2022 pve
-rw-r--r-- 1 root root 40784 Nov 28 05:29 pveam.log
-rw-r--r-- 1 root root 51925 Oct 28 01:37 pveam.log.0
-rw-r----- 1 root adm 55 Nov 27 00:00 pve-firewall.log
-rw-r----- 1 root adm 537 Nov 27 00:00 pve-firewall.log.1
-rw-r----- 1 root adm 189 Nov 26 00:00 pve-firewall.log.2.gz
-rw-r----- 1 root adm 121 Nov 25 00:00 pve-firewall.log.3.gz
-rw-r----- 1 root adm 121 Nov 24 00:00 pve-firewall.log.4.gz
-rw-r----- 1 root adm 121 Nov 23 00:00 pve-firewall.log.5.gz
-rw-r----- 1 root adm 121 Nov 22 00:00 pve-firewall.log.6.gz
-rw-r----- 1 root adm 124 Nov 21 00:00 pve-firewall.log.7.gz
drwx------ 2 www-data www-data 4096 Nov 27 00:00 pveproxy
lrwxrwxrwx 1 root root 39 Nov 25 23:48 README -> ../../usr/share/doc/systemd/README.logs
drwxr-xr-x 3 root root 4096 Dec 1 2022 runit
drwxr-x--- 2 root adm 4096 Aug 10 2022 samba
-rw-r----- 1 root adm 42783801344 Nov 28 07:59 syslog
-rw-r----- 1 root adm 6693195 Nov 26 00:00 syslog.1
-rw-r----- 1 root adm 88294 Nov 19 00:00 syslog.2.gz
-rw-r----- 1 root adm 80929 Nov 12 00:00 syslog.3.gz
-rw-r----- 1 root adm 93229 Nov 5 00:00 syslog.4.gz
-rw-r----- 1 root adm 2219 Nov 26 01:05 user.log
-rw-r----- 1 root adm 1540 Nov 26 00:00 user.log.1
-rw-r----- 1 root adm 84 Aug 13 08:50 user.log.2.gz
-rw-r----- 1 root adm 568 May 28 2023 user.log.3.gz
-rw-r----- 1 root adm 118 Feb 15 2023 user.log.4.gz
drwxr-xr-x 2 root root 4096 Nov 3 08:11 vzdump
-rw-rw-r-- 1 root utmp 200448 Nov 28 07:58 wtmp

This is the /var/log directory listing. I think syslog and kern.log are consuming the disk. I tried to delete these two files, but got access denied... need help... This happened after I upgraded the server to 8.1.
 
First, post command output in CODE tags so that it's readable.

I think syslog and kern.log are consuming the disk
Yes, the files you identified filled up your disk. Inspect them, find out what the culprit is and fix it, so that nothing more gets written.
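
For example, something along these lines (paths assuming the standard /var/log layout) shows what is growing and what is currently being written:

# biggest items under /var, staying on this filesystem
du -xh --max-depth=2 /var 2>/dev/null | sort -h | tail -n 15

# look at the newest entries of the suspected files
tail -n 50 /var/log/syslog
tail -n 50 /var/log/kern.log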

but got access denied... need help
Post what you tried, so that we can identify what you did wrong.
 
Hi, thanks for the reply. Last Sunday I upgraded the server (third node of a three-node Ceph cluster, Dell R750) from 7.x to 8.1. After the upgrade, 6 of the Ethernet interfaces changed their names and I lost connectivity on the Ceph storage. There are 4x 10G fibre and 4x 10G RJ45 ports in the system; it seems the embedded NICs did not change their names after the upgrade, only the add-in cards did. It took me some time to figure out what went wrong, and in that time these logs kept building up. Now I am unable to write changes to the interfaces file because the disk is full, and the cluster's third node is down. I tried to delete the syslog file with rm -r syslog, but it shows access denied. Or is it safe to directly delete these two large log files? Kindly help me free up the consumed disk space.

or "journalctl --vacuum-size/time=xxx" is safe to use in proxmox

Kindly help... I am stuck now.
 
Last Sunday I upgraded the server (third node of a three-node Ceph cluster, Dell R750) from 7.x to 8.1. After the upgrade, 6 of the Ethernet interfaces changed their names and I lost connectivity on the Ceph storage. There are 4x 10G fibre and 4x 10G RJ45 ports in the system
Oh yeah .. saw that thread.


I tried to delete the syslog file with rm -r syslog, but it shows access denied
PLEASE: post what you did, including the prompt, the command and the error message; do not just describe from memory what you did or saw.
 
You can usually completely empty log files that are currently being kept open by processes with the following command: truncate -s 0 filename

The parameter -s 0 can also be -s 5M, for example, if you want to keep the first 5 MB of the file instead of emptying it completely.
 
It seems you don't want to keep any log lines, so you can just use my command.
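
In your case that would be roughly (run as root, file names taken from your listing above):

truncate -s 0 /var/log/syslog     # empties the file without deleting it, so rsyslog keeps writing to it
truncate -s 0 /var/log/kern.log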
 
syslog

2023-11-30T07:46:34.201393+05:30 pve-3 kernel: [369661.410755] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T07:46:34.201394+05:30 pve-3 kernel: [369661.410795] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T07:46:34.201395+05:30 pve-3 kernel: [369661.410828] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T07:46:34.201395+05:30 pve-3 kernel: [369661.410859] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T07:46:34.201396+05:30 pve-3 kernel: [369661.412442] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T07:46:34.201397+05:30 pve-3 kernel: [369661.412492] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T07:46:34.201398+05:30 pve-3 kernel: [369661.412537] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode
2023-11-30T07:46:34.201398+05:30 pve-3 kernel: [369661.412576] i40e 0000:b1:00.1 ens5f1: entered promiscuous mode
2023-11-30T07:46:34.201399+05:30 pve-3 kernel: [369661.413386] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T07:46:34.201400+05:30 pve-3 kernel: [369661.413426] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T07:46:34.201400+05:30 pve-3 kernel: [369661.413458] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T07:46:34.205386+05:30 pve-3 kernel: [369661.413522] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T07:46:34.205387+05:30 pve-3 kernel: [369661.415121] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T07:46:34.205388+05:30 pve-3 kernel: [369661.415170] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T07:46:34.205393+05:30 pve-3 kernel: [369661.415217] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode
2023-11-30T07:46:34.205394+05:30 pve-3 kernel: [369661.415256] i40e 0000:b1:00.1 ens5f1: entered promiscuous mode
2023-11-30T07:46:34.205395+05:30 pve-3 kernel: [369661.416091] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T07:46:34.205395+05:30 pve-3 kernel: [369661.416129] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T07:46:34.205396+05:30 pve-3 kernel: [369661.416169] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T07:46:34.205396+05:30 pve-3 kernel: [369661.416206] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T07:46:34.209387+05:30 pve-3 kernel: [369661.417789] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T07:46:34.209389+05:30 pve-3 kernel: [369661.417839] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T07:46:34.209390+05:30 pve-3 kernel: [369661.417884] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode
2023-11-30T07:46:34.209391+05:30 pve-3 kernel: [369661.417924] i40e 0000:b1:00.1 ens5f1: entered promiscuous mode
2023-11-30T07:46:34.209392+05:30 pve-3 kernel: [369661.418759] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T07:46:34.209394+05:30 pve-3 kernel: [369661.418797] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T07:46:34.209395+05:30 pve-3 kernel: [369661.418836] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T07:46:34.209395+05:30 pve-3 kernel: [369661.418874] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T07:46:34.209396+05:30 pve-3 kernel: [369661.420455] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T07:46:34.209396+05:30 pve-3 kernel: [369661.420506] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T07:46:34.209397+05:30 pve-3 kernel: [369661.420551] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode

kern.log

2023-11-30T08:03:36.661386+05:30 pve-3 kernel: [370683.875784] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T08:03:36.661387+05:30 pve-3 kernel: [370683.875838] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T08:03:36.661388+05:30 pve-3 kernel: [370683.875884] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode
2023-11-30T08:03:36.661388+05:30 pve-3 kernel: [370683.875925] i40e 0000:b1:00.1 ens5f1: entered promiscuous mode
2023-11-30T08:03:36.661389+05:30 pve-3 kernel: [370683.876837] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T08:03:36.661390+05:30 pve-3 kernel: [370683.876880] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T08:03:36.661394+05:30 pve-3 kernel: [370683.876913] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T08:03:36.661394+05:30 pve-3 kernel: [370683.876944] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T08:03:36.665388+05:30 pve-3 kernel: [370683.878581] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T08:03:36.665390+05:30 pve-3 kernel: [370683.878635] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T08:03:36.665391+05:30 pve-3 kernel: [370683.878680] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode
2023-11-30T08:03:36.665391+05:30 pve-3 kernel: [370683.878722] i40e 0000:b1:00.1 ens5f1: entered promiscuous mode
2023-11-30T08:03:36.665392+05:30 pve-3 kernel: [370683.879644] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T08:03:36.665392+05:30 pve-3 kernel: [370683.879685] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T08:03:36.665393+05:30 pve-3 kernel: [370683.879721] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T08:03:36.665394+05:30 pve-3 kernel: [370683.879753] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T08:03:36.665398+05:30 pve-3 kernel: [370683.881381] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T08:03:36.665399+05:30 pve-3 kernel: [370683.881433] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T08:03:36.665399+05:30 pve-3 kernel: [370683.881480] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode
2023-11-30T08:03:36.665400+05:30 pve-3 kernel: [370683.881521] i40e 0000:b1:00.1 ens5f1: entered promiscuous mode
2023-11-30T08:03:36.665401+05:30 pve-3 kernel: [370683.882373] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T08:03:36.665403+05:30 pve-3 kernel: [370683.882427] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T08:03:36.669386+05:30 pve-3 kernel: [370683.882463] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T08:03:36.669387+05:30 pve-3 kernel: [370683.882496] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T08:03:36.669388+05:30 pve-3 kernel: [370683.884173] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T08:03:36.669388+05:30 pve-3 kernel: [370683.884224] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T08:03:36.669389+05:30 pve-3 kernel: [370683.884306] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode
2023-11-30T08:03:36.669390+05:30 pve-3 kernel: [370683.884371] i40e 0000:b1:00.1 ens5f1: entered promiscuous mode
2023-11-30T08:03:36.669390+05:30 pve-3 kernel: [370683.885240] bnxt_en 0000:b2:00.0 ens4f0np0: left promiscuous mode
2023-11-30T08:03:36.669394+05:30 pve-3 kernel: [370683.885281] bnxt_en 0000:b2:00.1 ens4f1np1: left promiscuous mode
2023-11-30T08:03:36.669395+05:30 pve-3 kernel: [370683.885319] i40e 0000:b1:00.0 ens5f0: left promiscuous mode
2023-11-30T08:03:36.669396+05:30 pve-3 kernel: [370683.885356] i40e 0000:b1:00.1 ens5f1: left promiscuous mode
2023-11-30T08:03:36.673388+05:30 pve-3 kernel: [370683.887000] bnxt_en 0000:b2:00.0 ens4f0np0: entered promiscuous mode
2023-11-30T08:03:36.673390+05:30 pve-3 kernel: [370683.887051] bnxt_en 0000:b2:00.1 ens4f1np1: entered promiscuous mode
2023-11-30T08:03:36.673390+05:30 pve-3 kernel: [370683.887097] i40e 0000:b1:00.0 ens5f0: entered promiscuous mode


It seems to me this is all related to the network interface name changes or a driver mismatch... not sure.
 
Did you really just upgrade and didn't activate any other scripts or anything like that?

Have you installed the latest updates and seen whether that might solve your problem? Or generally tried a reboot and then checked?

You have a cluster; does this happen on all nodes?
 
It was upgraded on Sunday night, and I did not activate any other scripts. The only thing I noticed is that out of 10 NICs, 6 changed their names after the upgrade. I am now trying to fix them, but the Apply Configuration button is not working with ifupdown2, so I am changing things manually and removing the old entries from the interfaces file. I rebooted the server 3 times without result. This is the third node of a three-node Ceph cluster; the other two are not upgraded yet, and I will upgrade them once the third node is stable on 8.1.

I also noticed that after the upgrade the root partition is using 15 GB+ of disk even after cleaning the logs... a bit unusual... something else may be happening inside that I still cannot find. Any other advice on what I can explore?
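
Would it be safe to run something like this to see where that extra space went (just a guess at the right command)?

# biggest directories on the root filesystem only (-x keeps du from descending into other mounts)
du -xh --max-depth=2 / 2>/dev/null | sort -h | tail -n 25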
 
but the Apply Configuration button is not working with ifupdown2
It seems rather unlikely to me that it would suddenly stop working. Are you sure there aren't any errors in your changes that prevent them from being applied?

Please do not just describe in your own words what things look like and what doesn't work. Always provide screenshots or log excerpts; that is the only way we can identify an error and help you.
 
root@pve-3:/var/log# df -h
Filesystem Size Used Avail Use% Mounted on
udev 126G 0 126G 0% /dev
tmpfs 26G 2.8M 26G 1% /run
/dev/mapper/pve-root 94G 18G 72G 20% /
tmpfs 126G 66M 126G 1% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
efivarfs 304K 72K 228K 24% /sys/firmware/efi/efivars
/dev/sda2 511M 352K 511M 1% /boot/efi
zpool-ha 825G 128K 825G 1% /zpool-ha
/dev/fuse 128M 52K 128M 1% /etc/pve
tmpfs 126G 28K 126G 1% /var/lib/ceph/osd/ceph-4
tmpfs 126G 28K 126G 1% /var/lib/ceph/osd/ceph-5
192.168.0.100:/mnt/ser-hdd/pve 14T 1.6T 13T 12% /mnt/pve/NFS
192.168.0.44:/mnt/NFSShare 3.6T 1.6T 2.0T 45% /mnt/pve/NFS-Storage
tmpfs 26G 0 26G 0% /run/user/0

This is after trimming the big syslog and kern.log.

I am looking for errors in the interfaces file. One thing I want to know: is a line like

"auto enp177s0f1" (v7 NIC name)

still required in the interfaces file in the new version? The newly detected interfaces, like

"iface ens4f0np0 inet manual" (v8 NIC name)

do not have this "auto xxxx" line associated with them in the interfaces file. I am attaching the file for your reference.
Is this causing any trouble?
However, "systemctl restart networking" is working.

As this cluster is in production and people are working on it, I am scared to do any experiments.
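
For example, do I need to add the auto line myself for the renamed ports, something like this (not applied yet, just illustrating what I mean)?

auto ens4f0np0
iface ens4f0np0 inet manual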





# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
#
# If you want to manage parts of the network configuration manually,
# please utilize the 'source' or 'source-directory' directives to do
# so.
# PVE will preserve these directives, but will NOT read its network
# configuration from sourced files, so do not attempt to move any of
# the PVE managed interfaces into external files!

auto lo
iface lo inet loopback

auto eno12399np0
iface eno12399np0 inet manual

auto enp177s0f2
iface enp177s0f2 inet manual

auto enp177s0f3
iface enp177s0f3 inet manual

auto eno8303
iface eno8303 inet manual

auto eno8403
iface eno8403 inet manual

auto eno12409np1
iface eno12409np1 inet manual

auto enp178s0f0np0
iface enp178s0f0np0 inet manual

auto enp178s0f1np1
iface enp178s0f1np1 inet manual

auto enp177s0f0
iface enp177s0f0 inet manual

auto enp177s0f1
iface enp177s0f1 inet manual

iface ens4f0np0 inet manual

iface ens4f1np1 inet manual

iface ens5f0 inet manual

iface ens5f1 inet manual

iface ens5f2 inet manual

iface ens5f3 inet manual

auto mgnt
iface mgnt inet static
address 192.168.137.112/24
gateway 192.168.137.1
ovs_type OVSIntPort
ovs_bridge vmbr0

auto data
iface data inet static
address 192.168.137.119/24
ovs_type OVSIntPort
ovs_bridge vmbr1
ovs_mtu 9000

auto pvecluster1
iface pvecluster1 inet static
address 192.168.30.112/24
ovs_type OVSIntPort
ovs_bridge vmbr0
ovs_mtu 9000
ovs_options tag=30
#pvecluster-1

auto pvecluster2
iface pvecluster2 inet static
address 10.10.30.112/24
ovs_type OVSIntPort
ovs_bridge vmbr1
ovs_mtu 9000
ovs_options tag=30
#pvecluster-2

auto pvecluster3
iface pvecluster3 inet static
address 192.168.30.12/24
ovs_type OVSIntPort
ovs_bridge vmbr2
ovs_mtu 9000
ovs_options tag=30
#pvecluster-3

auto pveceph
iface pveceph inet static
address 192.168.20.112/24
ovs_type OVSIntPort
ovs_bridge vmbr2
ovs_mtu 9000
ovs_options tag=20
#ceph-cluster

auto migration
iface migration inet static
address 10.10.50.112/24
ovs_type OVSIntPort
ovs_bridge vmbr2
ovs_mtu 9000
ovs_options tag=50
#HA-Migration

auto ups
iface ups inet static
address 192.168.50.112/24
ovs_type OVSIntPort
ovs_bridge vmbr3

auto wan
iface wan inet manual
ovs_type OVSIntPort
ovs_bridge vmbr3
ovs_options tag=10
#BSNL

auto airtel
iface airtel inet manual
ovs_type OVSIntPort
ovs_bridge vmbr3
#Airtel

auto nas
iface nas inet static
address 192.168.0.112/24
ovs_type OVSIntPort
ovs_bridge vmbr0
ovs_mtu 9000

auto bond0
iface bond0 inet manual
ovs_bonds eno12399np0 eno12409np1
ovs_type OVSBond
ovs_bridge vmbr0
ovs_mtu 9000
ovs_options bond_mode=balance-tcp lacp=active

auto bond1
iface bond1 inet manual
ovs_bonds enp177s0f0 enp177s0f1
ovs_type OVSBond
ovs_bridge vmbr1
ovs_mtu 9000
ovs_options lacp=active bond_mode=balance-tcp

auto bond2
iface bond2 inet manual
ovs_bonds enp178s0f0np0 enp178s0f1np1
ovs_type OVSBond
ovs_bridge vmbr2
ovs_mtu 9000
ovs_options lacp=active bond_mode=balance-tcp

auto bond3
iface bond3 inet static
address 10.10.20.112/24
bond-slaves enp177s0f2 enp177s0f3
bond-miimon 100
bond-mode broadcast
mtu 9000
#ceph-public

auto bond4
iface bond4 inet manual
ovs_bonds eno8303 eno8403
ovs_type OVSBond
ovs_bridge vmbr3
ovs_mtu 9000
ovs_options lacp=active bond_mode=balance-tcp
#1G

auto vmbr0
iface vmbr0 inet manual
ovs_type OVSBridge
ovs_ports bond0 mgnt pvecluster1 nas
ovs_mtu 9000

auto vmbr1
iface vmbr1 inet manual
ovs_type OVSBridge
ovs_ports bond1 data pvecluster2
ovs_mtu 9000

auto vmbr2
iface vmbr2 inet manual
ovs_type OVSBridge
ovs_ports bond2 pvecluster3 pveceph migration
ovs_mtu 9000

auto vmbr3
iface vmbr3 inet manual
ovs_type OVSBridge
ovs_ports bond4 ups wan airtel
ovs_mtu 9000
 

About

The Proxmox community has been around for many years and offers help and support for Proxmox VE, Proxmox Backup Server, and Proxmox Mail Gateway.
We think our community is one of the best thanks to people like you!

Get your subscription!

The Proxmox team works very hard to make sure you are running the best software and getting stable updates and security enhancements, as well as quick enterprise support. Tens of thousands of happy customers have a Proxmox subscription. Get yours easily in our online shop.

Buy now!