Hello,
my Ceph dashboard shows duplicate entries for the Ceph mon/mgr on node01 for some reason. It doesn't seem to affect anything, but I'd like to get rid of it.

pveversion: 9.0.10
ceph: 19.2.3
I found this thread with almost the same problem as mine; changing /etc/hostname from the FQDN to the short node name (node01.url.tld -> node01) and rebooting didn't seem to do the trick for me, though.
Please let me know if this is enough information to work with.
FYI: this issue was already present before I upgraded to PVE 9 and Squid 19.
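For reference, these are the checks I ran to confirm the hostname change actually took effect (standard hostname/getent tooling; I can post the actual output if it helps):
Code:
cat /etc/hostname                     # should contain only the short name
hostname                              # short name
hostname -f                           # FQDN, resolved via /etc/hosts or DNS
getent hosts node01 node01.url.tld    # both should resolve to the same address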
Code:
root@node01:~# ceph -s
  cluster:
    id:     4589d16f-fc90-4b1f-9895-d32b282142ad
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node03,node02,node01 (age 8m)
    mgr: node03(active, since 95m), standbys: node02, node01
    osd: 30 osds: 30 up (since 71m), 30 in (since 14M)

  data:
    pools:   2 pools, 513 pgs
    objects: 2.01M objects, 7.7 TiB
    usage:   23 TiB used, 29 TiB / 52 TiB avail
    pgs:     513 active+clean

  io:
    client: 15 MiB/s rd, 98 MiB/s wr, 917 op/s rd, 785 op/s wr
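In case the dashboard builds its list from the daemon metadata rather than the monmap (just an assumption on my part), I can also post the output of these:
Code:
ceph mon metadata
ceph mgr metadata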
ceph versions:
Code:
{
    "mon": {
        "ceph version 19.2.3 (2f03f1cd83e5d40cdf1393cb64a662a8e8bb07c6) squid (stable)": 3
    },
    "mgr": {
        "ceph version 19.2.3 (2f03f1cd83e5d40cdf1393cb64a662a8e8bb07c6) squid (stable)": 3
    },
    "osd": {
        "ceph version 19.2.3 (2f03f1cd83e5d40cdf1393cb64a662a8e8bb07c6) squid (stable)": 30
    },
    "overall": {
        "ceph version 19.2.3 (2f03f1cd83e5d40cdf1393cb64a662a8e8bb07c6) squid (stable)": 36
    }
}
ceph mon dump --format json-pretty:
Code:
{
    "epoch": 13,
    "fsid": "4589d16f-fc90-4b1f-9895-d32b282142ad",
    "modified": "2025-11-05T20:38:14.825678Z",
    "created": "2021-04-30T20:04:24.551678Z",
    "min_mon_release": 19,
    "min_mon_release_name": "squid",
    "election_strategy": 1,
    "disallowed_leaders": "",
    "stretch_mode": false,
    "tiebreaker_mon": "",
    "removed_ranks": "",
    "features": {
        "persistent": [
            "kraken",
            "luminous",
            "mimic",
            "osdmap-prune",
            "nautilus",
            "octopus",
            "pacific",
            "elector-pinging",
            "quincy",
            "reef",
            "squid"
        ],
        "optional": []
    },
    "mons": [
        {
            "rank": 0,
            "name": "node03",
            "public_addrs": {
                "addrvec": [
                    {
                        "type": "v2",
                        "addr": "172.16.100.13:3300",
                        "nonce": 0
                    },
                    {
                        "type": "v1",
                        "addr": "172.16.100.13:6789",
                        "nonce": 0
                    }
                ]
            },
            "addr": "172.16.100.13:6789/0",
            "public_addr": "172.16.100.13:6789/0",
            "priority": 0,
            "weight": 0,
            "crush_location": "{}"
        },
        {
            "rank": 1,
            "name": "node02",
            "public_addrs": {
                "addrvec": [
                    {
                        "type": "v2",
                        "addr": "172.16.100.12:3300",
                        "nonce": 0
                    },
                    {
                        "type": "v1",
                        "addr": "172.16.100.12:6789",
                        "nonce": 0
                    }
                ]
            },
            "addr": "172.16.100.12:6789/0",
            "public_addr": "172.16.100.12:6789/0",
            "priority": 0,
            "weight": 0,
            "crush_location": "{}"
        },
        {
            "rank": 2,
            "name": "node01",
            "public_addrs": {
                "addrvec": [
                    {
                        "type": "v2",
                        "addr": "172.16.100.11:3300",
                        "nonce": 0
                    },
                    {
                        "type": "v1",
                        "addr": "172.16.100.11:6789",
                        "nonce": 0
                    }
                ]
            },
            "addr": "172.16.100.11:6789/0",
            "public_addr": "172.16.100.11:6789/0",
            "priority": 0,
            "weight": 0,
            "crush_location": "{}"
        }
    ],
    "quorum": [
        0,
        1,
        2
    ]
}
dumped monmap epoch 13
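The monmap above only covers the mons; if the mgr side is relevant, I can post the parallel dump as well:
Code:
ceph mgr dump --format json-pretty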
ceph config dump:
Code:
WHO     MASK  LEVEL     OPTION                                   VALUE         RO
mon           advanced  auth_allow_insecure_global_id_reclaim    false
osd.0         basic     osd_mclock_max_capacity_iops_ssd         39392.227822
osd.1         basic     osd_mclock_max_capacity_iops_ssd         40240.594221
osd.10        basic     osd_mclock_max_capacity_iops_ssd         38153.925373
osd.11        basic     osd_mclock_max_capacity_iops_ssd         46128.366973
osd.12        basic     osd_mclock_max_capacity_iops_ssd         34755.987423
osd.13        basic     osd_mclock_max_capacity_iops_ssd         38312.714686
osd.14        basic     osd_mclock_max_capacity_iops_ssd         59460.894235
osd.15        basic     osd_mclock_max_capacity_iops_ssd         39437.606066
osd.16        basic     osd_mclock_max_capacity_iops_ssd         37157.497515
osd.17        basic     osd_mclock_max_capacity_iops_ssd         46218.426094
osd.18        basic     osd_mclock_max_capacity_iops_ssd         39305.290005
osd.19        basic     osd_mclock_max_capacity_iops_ssd         43526.675149
osd.2         basic     osd_mclock_max_capacity_iops_ssd         43537.771956
osd.20        basic     osd_mclock_max_capacity_iops_ssd         36363.348983
osd.21        basic     osd_mclock_max_capacity_iops_ssd         41469.474133
osd.22        basic     osd_mclock_max_capacity_iops_ssd         40192.907198
osd.23        basic     osd_mclock_max_capacity_iops_ssd         45012.648104
osd.24        basic     osd_mclock_max_capacity_iops_ssd         36763.396186
osd.25        basic     osd_mclock_max_capacity_iops_ssd         38148.098038
osd.26        basic     osd_mclock_max_capacity_iops_ssd         43338.759057
osd.27        basic     osd_mclock_max_capacity_iops_ssd         39116.310903
osd.28        basic     osd_mclock_max_capacity_iops_ssd         38898.520451
osd.29        basic     osd_mclock_max_capacity_iops_ssd         42401.277036
osd.3         basic     osd_mclock_max_capacity_iops_ssd         38871.446839
osd.4         basic     osd_mclock_max_capacity_iops_ssd         40708.145879
osd.5         basic     osd_mclock_max_capacity_iops_ssd         47969.984605
osd.6         basic     osd_mclock_max_capacity_iops_ssd         40917.870768
osd.7         basic     osd_mclock_max_capacity_iops_ssd         41020.438091
osd.8         basic     osd_mclock_max_capacity_iops_ssd         51706.186969
osd.9         basic     osd_mclock_max_capacity_iops_ssd         43111.491418
cat /etc/pve/ceph.conf:
Code:
[global]
	auth_client_required = cephx
	auth_cluster_required = cephx
	auth_service_required = cephx
	cluster_network = 172.16.100.0/24
	fsid = 4589d16f-fc90-4b1f-9895-d32b282142ad
	mon_allow_pool_delete = true
	mon_host = 172.16.100.13 172.16.100.12 172.16.100.11
	ms_bind_ipv4 = true
	osd_pool_default_min_size = 2
	osd_pool_default_size = 3
	public_network = 172.16.100.0/24

[client]
	keyring = /etc/pve/priv/$cluster.$name.keyring

[client.crash]
	keyring = /etc/pve/ceph/$cluster.$name.keyring

[mon.node01]
	public_addr = 172.16.100.11

[mon.node02]
	public_addr = 172.16.100.12

[mon.node03]
	public_addr = 172.16.100.13
ls -lh /etc/systemd/system/ceph-*.target.wants/*:
Code:
lrwxrwxrwx 1 root root 37 Mar 7 2024 /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@node01.service -> /lib/systemd/system/ceph-mgr@.service
lrwxrwxrwx 1 root root 37 Mar 7 2024 /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node01.service -> /lib/systemd/system/ceph-mon@.service
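To rule out leftovers from when the node still used the FQDN (just a hunch, I haven't confirmed the dashboard looks there), I also plan to check for stray unit instances and daemon directories:
Code:
systemctl list-units --all 'ceph-mon@*' 'ceph-mgr@*'   # any unit instance besides @node01?
ls /var/lib/ceph/mon /var/lib/ceph/mgr                 # any leftover ceph-node01.url.tld directory?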