Hi,
I noticed that the following command produces no output apart from the header row:
root@xxxxxxxx:~# ceph config dump
WHO MASK LEVEL OPTION VALUE RO
Any idea why this is empty?
root@xxxxxxx:~# cat /etc/ceph/ceph.conf
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
cluster_network = 10.12.12.0/24
fsid = 9363399b-d2e6-4d2b-8887-0ad821c88ba8
mon_allow_pool_delete = true
mon_host = 10.12.12.13 10.12.12.15 10.12.12.18
osd_pool_default_min_size = 2
osd_pool_default_size = 3
public_network = 10.12.12.0/24
[client]
keyring = /etc/pve/priv/$cluster.$name.keyring
root@xxxxxx:~# pveversion -v
proxmox-ve: 6.1-2 (running kernel: 5.3.18-2-pve)
pve-manager: 6.1-8 (running version: 6.1-8/806edfe1)
pve-kernel-helper: 6.1-7
pve-kernel-5.3: 6.1-5
pve-kernel-5.3.18-2-pve: 5.3.18-2
pve-kernel-5.3.10-1-pve: 5.3.10-1
ceph: 14.2.8-pve1
ceph-fuse: 14.2.8-pve1
corosync: 3.0.3-pve1
criu: 3.11-3
glusterfs-client: 5.5-3
ifupdown: 0.8.35+pve1
ksm-control-daemon: 1.3-1
libjs-extjs: 6.0.1-10
libknet1: 1.15-pve1
libpve-access-control: 6.0-6
libpve-apiclient-perl: 3.0-3
libpve-common-perl: 6.0-17
libpve-guest-common-perl: 3.0-5
libpve-http-server-perl: 3.0-5
libpve-storage-perl: 6.1-5
libqb0: 1.0.5-1
libspice-server1: 0.14.2-4~pve6+1
lvm2: 2.03.02-pve4
lxc-pve: 3.2.1-1
lxcfs: 3.0.3-pve60
novnc-pve: 1.1.0-1
proxmox-mini-journalreader: 1.1-1
proxmox-widget-toolkit: 2.1-3
pve-cluster: 6.1-4
pve-container: 3.0-22
pve-docs: 6.1-6
pve-edk2-firmware: 2.20200229-1
pve-firewall: 4.0-10
pve-firmware: 3.0-6
pve-ha-manager: 3.0-9
pve-i18n: 2.0-4
pve-qemu-kvm: 4.1.1-4
pve-xtermjs: 4.3.0-1
qemu-server: 6.1-7
smartmontools: 7.1-pve2
spiceterm: 3.1-1
vncterm: 1.6-1
zfsutils-linux: 0.8.3-pve1
root@xxxxxx:~# ceph -s
cluster:
id: 9363399b-d2e6-4d2b-8887-0ad821c88ba8
health: HEALTH_OK
services:
mon: 3 daemons, quorum xxxx,xxxxxx,xxxxxx (age 40h)
mgr: xxxxxx(active, since 40h), standbys: xxxxxx, xxxxxx
osd: 13 osds: 13 up (since 40h), 13 in (since 2w)
data:
pools: 1 pools, 512 pgs
objects: 767.11k objects, 1.0 TiB
usage: 2.8 TiB used, 6.6 TiB / 9.5 TiB avail
pgs: 512 active+clean
io:
client: 7.1 MiB/s rd, 3.5 MiB/s wr, 915 op/s rd, 277 op/s wr