root@rep-srv10-a:~# ceph osd df tree
ID  CLASS     WEIGHT  REWEIGHT     SIZE  RAW USE     DATA      OMAP     META    AVAIL   %USE   VAR  PGS  STATUS  TYPE NAME
-1         120.07584         -  120 TiB   94 TiB   94 TiB    19 MiB  149 GiB   26 TiB  78.16  1.00    -          root default
-3          60.03792         -   60 TiB   47 TiB   47 TiB   9.8 MiB   76 GiB   13 TiB  78.16  1.00    -          host rep-srv10-a
 0  hdd      5.45799   0.90002  5.5 TiB  4.7 TiB  4.7 TiB   1.2 MiB  7.8 GiB  802 GiB  85.66  1.10  207      up  osd.0
 2  hdd      5.45799   1.00000  5.5 TiB  4.2 TiB  4.2 TiB   911 KiB  6.6 GiB  1.3 TiB  76.48  0.98  174      up  osd.2
 3  hdd      5.45799   0.95001  5.5 TiB  4.9 TiB  4.9 TiB   1.0 MiB  8.3 GiB  576 GiB  89.69  1.15  191      up  osd.3
 5  hdd      5.45799   1.00000  5.5 TiB  4.4 TiB  4.4 TiB   929 KiB  7.1 GiB  1.1 TiB  79.84  1.02  203      up  osd.5
 8  hdd      5.45799   1.00000  5.5 TiB  4.1 TiB  4.1 TiB   1.1 MiB  6.5 GiB  1.4 TiB  74.69  0.96  189      up  osd.8
 9  hdd      5.45799   1.00000  5.5 TiB  4.6 TiB  4.6 TiB   1.1 MiB  7.1 GiB  849 GiB  84.80  1.08  200      up  osd.9
11  hdd      5.45799   1.00000  5.5 TiB  3.5 TiB  3.5 TiB   872 KiB  5.8 GiB  1.9 TiB  64.68  0.83  170      up  osd.11
13  hdd      5.45799   1.00000  5.5 TiB  4.0 TiB  4.0 TiB   949 KiB  6.6 GiB  1.5 TiB  73.06  0.93  179      up  osd.13
15  hdd      5.45799   1.00000  5.5 TiB  4.4 TiB  4.3 TiB   874 KiB  6.9 GiB  1.1 TiB  79.74  1.02  176      up  osd.15
19  hdd      5.45799   1.00000  5.5 TiB  4.2 TiB  4.2 TiB   885 KiB  7.1 GiB  1.2 TiB  77.11  0.99  178      up  osd.19
20  hdd      5.45799   1.00000  5.5 TiB  4.0 TiB  4.0 TiB    75 KiB  5.8 GiB  1.4 TiB  74.02  0.95  181      up  osd.20
-5          60.03792         -   60 TiB   47 TiB   47 TiB   9.5 MiB   74 GiB   13 TiB  78.16  1.00    -          host rep-srv10-b
 1  hdd      5.45799   1.00000  5.5 TiB  4.4 TiB  4.4 TiB   922 KiB  7.0 GiB  1.1 TiB  80.52  1.03  196      up  osd.1
 4  hdd      5.45799   1.00000  5.5 TiB  3.1 TiB  3.1 TiB   845 KiB  5.2 GiB  2.4 TiB  56.21  0.72  159      up  osd.4
 6  hdd      5.45799   1.00000  5.5 TiB  4.6 TiB  4.6 TiB   984 KiB  7.3 GiB  843 GiB  84.91  1.09  198      up  osd.6
 7  hdd      5.45799   1.00000  5.5 TiB  4.4 TiB  4.3 TiB  1013 KiB  6.8 GiB  1.1 TiB  79.70  1.02  204      up  osd.7
10  hdd      5.45799   1.00000  5.5 TiB  4.3 TiB  4.3 TiB   946 KiB  6.8 GiB  1.1 TiB  78.99  1.01  186      up  osd.10
12  hdd      5.45799   1.00000  5.5 TiB  4.2 TiB  4.2 TiB   946 KiB  6.7 GiB  1.3 TiB  76.46  0.98  187      up  osd.12
14  hdd      5.45799   1.00000  5.5 TiB  4.4 TiB  4.4 TiB   974 KiB  6.8 GiB  1.1 TiB  80.59  1.03  185      up  osd.14
16  hdd      5.45799   1.00000  5.5 TiB  5.0 TiB  5.0 TiB   1.0 MiB  7.7 GiB  473 GiB  91.54  1.17  187      up  osd.16
17  hdd      5.45799   1.00000  5.5 TiB  4.2 TiB  4.2 TiB   901 KiB  6.7 GiB  1.3 TiB  76.37  0.98  190      up  osd.17
18  hdd      5.45799   1.00000  5.5 TiB  4.5 TiB  4.5 TiB   1.1 MiB  7.2 GiB  988 GiB  82.32  1.05  193      up  osd.18
21  hdd      5.45799   1.00000  5.5 TiB  3.9 TiB  3.9 TiB    55 KiB  5.6 GiB  1.5 TiB  72.13  0.92  163      up  osd.21
                         TOTAL  120 TiB   94 TiB   94 TiB    19 MiB  149 GiB   26 TiB  78.16
MIN/MAX VAR: 0.72/1.17  STDDEV: 7.56
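
The MIN/MAX VAR line is the red flag: all 22 OSDs are identical 5.5 TiB drives, yet utilization ranges from 56.21 % on osd.4 (VAR 0.72) to 91.54 % on osd.16 (VAR 1.17), with a standard deviation of 7.56. The REWEIGHT overrides on osd.0 (0.90002) and osd.3 (0.95001) look like earlier attempts to chase the imbalance with the legacy reweight mechanism, e.g. ceph osd reweight or a run of reweight-by-utilization; the dry-run variant previews the changes without applying anything:

ceph osd test-reweight-by-utilization
ceph osd reweight-by-utilization

That approach only papers over the skew, though; the upmap balancer enabled further below is the structural fix.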
root@rep-srv10-a:~# ceph -w
  cluster:
    id:     cabe0579-127c-4b0b-8f86-00c1a702618b
    health: HEALTH_WARN
            1 backfillfull osd(s)
            2 nearfull osd(s)
            2 pool(s) backfillfull

  services:
    mon: 3 daemons, quorum rep-srv10-a,rep-srv10-b,rep-srv11 (age 4w)
    mgr: rep-srv10-a(active, since 4w), standbys: rep-srv10-b
    osd: 22 osds: 22 up (since 4d), 22 in (since 4d)

  data:
    pools:   2 pools, 2048 pgs
    objects: 12.49M objects, 48 TiB
    usage:   94 TiB used, 26 TiB / 120 TiB avail
    pgs:     2048 active+clean

  io:
    client:   6.4 MiB/s rd, 3.1 MiB/s wr, 10 op/s rd, 68 op/s wr
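
The warnings map onto the mon utilization thresholds, which default to 0.85 (nearfull), 0.90 (backfillfull) and 0.95 (full): osd.16 at 91.54 % is the backfillfull OSD, and the two nearfull ones are presumably osd.0 (85.66 %) and osd.3 (89.69 %). ceph health detail names them explicitly, and the active ratios can be read from the OSD map; the backfillfull ratio can also be raised at runtime as a stopgap (the value below is illustrative), but only rebalancing actually restores headroom:

ceph health detail
ceph osd dump | grep ratio
ceph osd set-backfillfull-ratio 0.92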
Before moving any data, check the pools' replication settings:

ceph osd pool get <pool> size
ceph osd pool get <pool> min_size
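
The cluster-wide numbers hint at replicated size 2: 48 TiB of logical data occupies 94 TiB raw, mirrored across the two OSD hosts. Size 2 defaults to min_size 1, meaning a single OSD failure during the coming data movement would leave PGs with only one copy, which is worth knowing before kicking off a large rebalance. An illustrative invocation, with a hypothetical pool name and the output the raw/data ratio suggests:

ceph osd pool get rbd size
size: 2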
Then enable the balancer module, and select the mode before switching the balancer on, so its first optimization pass already runs as upmap:

ceph mgr module enable balancer
ceph balancer mode upmap
ceph balancer on
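
Upmap entries are only understood by Luminous and newer clients, so if the mode command is refused, raise the minimum client compatibility first. It is also commonly recommended to return the manual reweights on osd.0 and osd.3 to 1.00000 once upmap takes over, since they fight the balancer's own placement overrides. Progress can then be watched with the balancer's status and scoring commands:

ceph osd set-require-min-compat-client luminous
ceph osd reweight 0 1.0
ceph osd reweight 3 1.0
ceph balancer status
ceph balancer eval

Once the moves complete, ceph osd df tree should show the VAR column converging toward 1.00 and the nearfull/backfillfull warnings clearing.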