I reinstalled one of the three Proxmox Ceph nodes with a new name and a new IP.
I removed all LVM data and wiped the filesystems of the old disks with:
Code:
dmsetup remove_all
wipefs -af /dev/sda
ceph-volume lvm zap /dev/sda
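For reference, a quick way to double-check that the disks really come up empty before recreating the OSDs (assuming the ceph-volume on this release already has the inventory subcommand):
Code:
# confirm no partitions or LVs are left on the disk
lsblk /dev/sda
# what ceph-volume still sees on the device / in LVM
ceph-volume inventory /dev/sda
ceph-volume lvm list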
Now when I create OSDs via the GUI or the CLI they always come up as filestore, and I don't get it; according to the documentation the default should be bluestore.
This is the command I run:
Code:
pveceph osd create /dev/sda -db_dev /dev/nvme0n1 -db_size 75
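To check what an OSD actually ends up as, the OSD metadata reports the objectstore type; the id 18 below is only a placeholder for whatever id the newly created OSD gets:
Code:
# prints "osd_objectstore": "bluestore" or "filestore"
ceph osd metadata 18 | grep osd_objectstore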
My ceph.conf looks like this:
Code:
root@pve-04:/# cat /etc/pve/ceph.conf
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
cluster_network = 10.6.6.221/24
fsid = aeb02f58-8b3d-4b5d-8cf2-a16af4959bbf
mon_allow_pool_delete = true
mon_host = 10.5.5.221 10.5.5.223 10.5.5.225
osd_pool_default_min_size = 2
osd_pool_default_size = 3
public_network = 10.5.5.221/24
[client]
keyring = /etc/pve/priv/$cluster.$name.keyring
[mds]
keyring = /var/lib/ceph/mds/ceph-$id/keyring
[mds.pve-01]
host = pve-01
mds_standby_for_name = pve
[mds.pve-03]
host = pve-03
mds_standby_for_name = pve
[mon.pve-01]
public_addr = 10.5.5.221
[mon.pve-03]
public_addr = 10.5.5.223
[mon.pve-04]
public_addr = 10.5.5.225
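Nothing in that config sets osd_objectstore, so the built-in default should apply. Assuming the release is new enough for the centralized config commands, the effective value can be read with:
Code:
ceph config get osd osd_objectstore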
And the decompiled crush map:
Code:
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54
# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class hdd
device 4 osd.4 class hdd
device 5 osd.5 class hdd
device 11 osd.11 class hdd
device 13 osd.13 class hdd
device 14 osd.14 class hdd
device 15 osd.15 class hdd
device 16 osd.16 class hdd
device 17 osd.17 class hdd
# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 zone
type 10 region
type 11 root
# buckets
host pve-01 {
id -3 # do not change unnecessarily
id -4 class hdd # do not change unnecessarily
# weight 5.898
alg straw2
hash 0 # rjenkins1
item osd.0 weight 0.983
item osd.1 weight 0.983
item osd.2 weight 0.983
item osd.3 weight 0.983
item osd.4 weight 0.983
item osd.5 weight 0.983
}
host pve-03 {
id -7 # do not change unnecessarily
id -8 class hdd # do not change unnecessarily
# weight 5.898
alg straw2
hash 0 # rjenkins1
item osd.11 weight 0.983
item osd.13 weight 0.983
item osd.14 weight 0.983
item osd.15 weight 0.983
item osd.16 weight 0.983
item osd.17 weight 0.983
}
host pve-04 {
id -5 # do not change unnecessarily
id -6 class hdd # do not change unnecessarily
# weight 0.000
alg straw2
hash 0 # rjenkins1
}
root default {
id -1 # do not change unnecessarily
id -2 class hdd # do not change unnecessarily
# weight 11.794
alg straw2
hash 0 # rjenkins1
item pve-01 weight 5.897
item pve-03 weight 5.897
item pve-04 weight 0.000
}
# rules
rule replicated_rule {
id 0
type replicated
min_size 1
max_size 10
step take default
step chooseleaf firstn 0 type host
step emit
}
# end crush map
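The same hierarchy (pve-04 in the tree, but with no OSDs and weight 0) also shows up with:
Code:
ceph osd tree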