I have 5 nodes in the cluster, so far I only had disks in 4, I added 4 SAS-SSD disks to the 5th node, the cepch pool has not expanded. Should I do anything else after adding the OSD? in each node I have 4 disks of the same type assigned to the same cruchmap
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54
# devices
device 0 osd.0 class NVMe-U.2
device 1 osd.1 class NVMe-U.2
device 2 osd.2 class NVMe-U.2
device 3 osd.3 class NVMe-U.2
device 4 osd.4 class SAS-SSD
device 5 osd.5 class SAS-SSD
device 6 osd.6 class SAS-SSD
device 7 osd.7 class SAS-SSD
device 8 osd.8 class NVMe-U.2
device 9 osd.9 class NVMe-U.2
device 10 osd.10 class NVMe-U.2
device 11 osd.11 class NVMe-U.2
device 12 osd.12 class SAS-SSD
device 13 osd.13 class SAS-SSD
device 14 osd.14 class SAS-SSD
device 15 osd.15 class SAS-SSD
device 16 osd.16 class NVMe-U.2
device 17 osd.17 class NVMe-U.2
device 18 osd.18 class NVMe-U.2
device 19 osd.19 class NVMe-U.2
device 20 osd.20 class SAS-SSD
device 21 osd.21 class SAS-SSD
device 22 osd.22 class SAS-SSD
device 23 osd.23 class SAS-SSD
device 24 osd.24 class NVMe-U.2
device 25 osd.25 class NVMe-U.2
device 26 osd.26 class NVMe-U.2
device 27 osd.27 class NVMe-U.2
device 28 osd.28 class SAS-SSD
device 29 osd.29 class SAS-SSD
device 30 osd.30 class SAS-SSD
device 31 osd.31 class SAS-SSD
device 32 osd.32 class SAS-SSD
device 33 osd.33 class SAS-SSD
device 34 osd.34 class SAS-SSD
device 35 osd.35 class SAS-SSD
# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 zone
type 10 region
type 11 root
# buckets
host prox02 {
id -3 # do not change unnecessarily
id -2 class NVMe-U.2 # do not change unnecessarily
id -11 class SAS-SSD # do not change unnecessarily
# weight 12.22589
alg straw2
hash 0 # rjenkins1
item osd.0 weight 1.60100
item osd.1 weight 1.60100
item osd.2 weight 1.60100
item osd.3 weight 1.60100
item osd.4 weight 1.45547
item osd.5 weight 1.45547
item osd.6 weight 1.45547
item osd.7 weight 1.45547
}
host prox03 {
id -5 # do not change unnecessarily
id -4 class NVMe-U.2 # do not change unnecessarily
id -12 class SAS-SSD # do not change unnecessarily
# weight 12.22591
alg straw2
hash 0 # rjenkins1
item osd.8 weight 1.60100
item osd.9 weight 1.60100
item osd.10 weight 1.60100
item osd.11 weight 1.60100
item osd.13 weight 1.45547
item osd.14 weight 1.45547
item osd.15 weight 1.45547
item osd.35 weight 1.45549
}
host prox04 {
id -7 # do not change unnecessarily
id -6 class NVMe-U.2 # do not change unnecessarily
id -13 class SAS-SSD # do not change unnecessarily
# weight 12.22589
alg straw2
hash 0 # rjenkins1
item osd.16 weight 1.60100
item osd.17 weight 1.60100
item osd.18 weight 1.60100
item osd.19 weight 1.60100
item osd.20 weight 1.45547
item osd.21 weight 1.45547
item osd.22 weight 1.45547
item osd.23 weight 1.45547
}
host prox05 {
id -9 # do not change unnecessarily
id -8 class NVMe-U.2 # do not change unnecessarily
id -14 class SAS-SSD # do not change unnecessarily
# weight 12.22589
alg straw2
hash 0 # rjenkins1
item osd.28 weight 1.45547
item osd.29 weight 1.45547
item osd.30 weight 1.45547
item osd.31 weight 1.45547
item osd.24 weight 1.60100
item osd.25 weight 1.60100
item osd.26 weight 1.60100
item osd.27 weight 1.60100
}
host prox01 {
id -16 # do not change unnecessarily
id -17 class NVMe-U.2 # do not change unnecessarily
id -18 class SAS-SSD # do not change unnecessarily
# weight 5.82196
alg straw2
hash 0 # rjenkins1
item osd.12 weight 1.45549
item osd.32 weight 1.45549
item osd.33 weight 1.45549
item osd.34 weight 1.45549
}
root default {
id -1 # do not change unnecessarily
id -10 class NVMe-U.2 # do not change unnecessarily
id -15 class SAS-SSD # do not change unnecessarily
# weight 54.72568
alg straw2
hash 0 # rjenkins1
item prox02 weight 12.22594
item prox03 weight 12.22591
item prox04 weight 12.22594
item prox05 weight 12.22594
item prox01 weight 5.82196
}
# rules
rule replicated_SAS-SSD {
id 0
type replicated
step take default class NVMe-U.2
step chooseleaf firstn 0 type host
step emit
}
rule replicated_NVMe-U.2 {
id 1
type replicated
step take default class SAS-SSD
step chooseleaf firstn 0 type host
step emit
}
# end crush map
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
cluster_network = 10.0.20.0/24
fsid = 49e07b17-2997-4148-825c-672fb010357b
mon_allow_pool_delete = true
mon_host = 10.0.20.62 10.0.20.64 10.0.20.66 10.0.20.68 10.0.20.60
ms_bind_ipv4 = true
ms_bind_ipv6 = false
osd_pool_default_min_size = 2
osd_pool_default_size = 3
public_network = 10.0.20.0/24
[client]
keyring = /etc/pve/priv/$cluster.$name.keyring
[osd]
osd_class_update_on_start = false
[mon.prox01]
public_addr = 10.0.20.60
[mon.prox02]
public_addr = 10.0.20.62
[mon.prox03]
public_addr = 10.0.20.64
[mon.prox04]
public_addr = 10.0.20.66
[mon.prox05]
public_addr = 10.0.20.68
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54
# devices
device 0 osd.0 class NVMe-U.2
device 1 osd.1 class NVMe-U.2
device 2 osd.2 class NVMe-U.2
device 3 osd.3 class NVMe-U.2
device 4 osd.4 class SAS-SSD
device 5 osd.5 class SAS-SSD
device 6 osd.6 class SAS-SSD
device 7 osd.7 class SAS-SSD
device 8 osd.8 class NVMe-U.2
device 9 osd.9 class NVMe-U.2
device 10 osd.10 class NVMe-U.2
device 11 osd.11 class NVMe-U.2
device 12 osd.12 class SAS-SSD
device 13 osd.13 class SAS-SSD
device 14 osd.14 class SAS-SSD
device 15 osd.15 class SAS-SSD
device 16 osd.16 class NVMe-U.2
device 17 osd.17 class NVMe-U.2
device 18 osd.18 class NVMe-U.2
device 19 osd.19 class NVMe-U.2
device 20 osd.20 class SAS-SSD
device 21 osd.21 class SAS-SSD
device 22 osd.22 class SAS-SSD
device 23 osd.23 class SAS-SSD
device 24 osd.24 class NVMe-U.2
device 25 osd.25 class NVMe-U.2
device 26 osd.26 class NVMe-U.2
device 27 osd.27 class NVMe-U.2
device 28 osd.28 class SAS-SSD
device 29 osd.29 class SAS-SSD
device 30 osd.30 class SAS-SSD
device 31 osd.31 class SAS-SSD
device 32 osd.32 class SAS-SSD
device 33 osd.33 class SAS-SSD
device 34 osd.34 class SAS-SSD
device 35 osd.35 class SAS-SSD
# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 zone
type 10 region
type 11 root
# buckets
host prox02 {
id -3 # do not change unnecessarily
id -2 class NVMe-U.2 # do not change unnecessarily
id -11 class SAS-SSD # do not change unnecessarily
# weight 12.22589
alg straw2
hash 0 # rjenkins1
item osd.0 weight 1.60100
item osd.1 weight 1.60100
item osd.2 weight 1.60100
item osd.3 weight 1.60100
item osd.4 weight 1.45547
item osd.5 weight 1.45547
item osd.6 weight 1.45547
item osd.7 weight 1.45547
}
host prox03 {
id -5 # do not change unnecessarily
id -4 class NVMe-U.2 # do not change unnecessarily
id -12 class SAS-SSD # do not change unnecessarily
# weight 12.22591
alg straw2
hash 0 # rjenkins1
item osd.8 weight 1.60100
item osd.9 weight 1.60100
item osd.10 weight 1.60100
item osd.11 weight 1.60100
item osd.13 weight 1.45547
item osd.14 weight 1.45547
item osd.15 weight 1.45547
item osd.35 weight 1.45549
}
host prox04 {
id -7 # do not change unnecessarily
id -6 class NVMe-U.2 # do not change unnecessarily
id -13 class SAS-SSD # do not change unnecessarily
# weight 12.22589
alg straw2
hash 0 # rjenkins1
item osd.16 weight 1.60100
item osd.17 weight 1.60100
item osd.18 weight 1.60100
item osd.19 weight 1.60100
item osd.20 weight 1.45547
item osd.21 weight 1.45547
item osd.22 weight 1.45547
item osd.23 weight 1.45547
}
host prox05 {
id -9 # do not change unnecessarily
id -8 class NVMe-U.2 # do not change unnecessarily
id -14 class SAS-SSD # do not change unnecessarily
# weight 12.22589
alg straw2
hash 0 # rjenkins1
item osd.28 weight 1.45547
item osd.29 weight 1.45547
item osd.30 weight 1.45547
item osd.31 weight 1.45547
item osd.24 weight 1.60100
item osd.25 weight 1.60100
item osd.26 weight 1.60100
item osd.27 weight 1.60100
}
host prox01 {
id -16 # do not change unnecessarily
id -17 class NVMe-U.2 # do not change unnecessarily
id -18 class SAS-SSD # do not change unnecessarily
# weight 5.82196
alg straw2
hash 0 # rjenkins1
item osd.12 weight 1.45549
item osd.32 weight 1.45549
item osd.33 weight 1.45549
item osd.34 weight 1.45549
}
root default {
id -1 # do not change unnecessarily
id -10 class NVMe-U.2 # do not change unnecessarily
id -15 class SAS-SSD # do not change unnecessarily
# weight 54.72568
alg straw2
hash 0 # rjenkins1
item prox02 weight 12.22594
item prox03 weight 12.22591
item prox04 weight 12.22594
item prox05 weight 12.22594
item prox01 weight 5.82196
}
# rules
rule replicated_SAS-SSD {
id 0
type replicated
step take default class NVMe-U.2
step chooseleaf firstn 0 type host
step emit
}
rule replicated_NVMe-U.2 {
id 1
type replicated
step take default class SAS-SSD
step chooseleaf firstn 0 type host
step emit
}
# end crush map
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
cluster_network = 10.0.20.0/24
fsid = 49e07b17-2997-4148-825c-672fb010357b
mon_allow_pool_delete = true
mon_host = 10.0.20.62 10.0.20.64 10.0.20.66 10.0.20.68 10.0.20.60
ms_bind_ipv4 = true
ms_bind_ipv6 = false
osd_pool_default_min_size = 2
osd_pool_default_size = 3
public_network = 10.0.20.0/24
[client]
keyring = /etc/pve/priv/$cluster.$name.keyring
[osd]
osd_class_update_on_start = false
[mon.prox01]
public_addr = 10.0.20.60
[mon.prox02]
public_addr = 10.0.20.62
[mon.prox03]
public_addr = 10.0.20.64
[mon.prox04]
public_addr = 10.0.20.66
[mon.prox05]
public_addr = 10.0.20.68