Hello,
I need some guidance on tuning RBD performance. I've followed the instructions and gone through the documentation multiple times, but I can't get disk performance as high as I expect it to be.
My current setup is as follows:
3 similarly configured nodes, each with:
- 6x 8TB SATA HDD (leaving room for an additional 6x 8TB drives in the future)
- 2x 960GB Intel Optane 905p
- Ceph network over 10GbE in a full-mesh setup (will convert to a 10GbE switch when the 4th node is added), with jumbo frames enabled; see the network check below
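To rule out the network side, this is the kind of check I'd run on the mesh links (IPs are examples, node 1 to node 2; an iperf3 server would need to be running on the peer node):
Code:
# Check that 9000-byte jumbo frames actually pass between nodes
# (8972 = 9000 MTU - 20 bytes IP header - 8 bytes ICMP header)
ping -M do -s 8972 10.xx.xx.22
# Raw TCP throughput across the mesh link (run `iperf3 -s` on 10.xx.xx.22 first)
iperf3 -c 10.xx.xx.22 -t 30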
I'm not sure if I missed something in the configuration, but I can't get the disks to perform above 200MB/s on writes, and inside Linux VMs it's even worse. I have writeback caching enabled on the VMs' hard disks.
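For reference, this is roughly the kind of sequential-write test I mean (the pool name "rbd" is just a placeholder):
Code:
# 60s of 4 MiB sequential writes with 16 concurrent ops, keeping the
# objects so they can be read back afterwards
rados bench -p rbd 60 write -b 4194304 -t 16 --no-cleanup
# Sequential read of the objects written above, then clean up
rados bench -p rbd 60 seq -t 16
rados -p rbd cleanup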
Here's my Ceph configuration:
Code:
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
cluster_network = 10.xx.xx.21/24
fsid = xxxxxxx
mon_allow_pool_delete = true
mon_host = 10.xx.xx.21 10.xx.xx.22 10.xx.xx.23
ms_bind_ipv4 = true
ms_bind_ipv6 = false
osd_pool_default_min_size = 2
osd_pool_default_size = 3
public_network = 10.xx.xx.21/24
[client]
keyring = /etc/pve/priv/$cluster.$name.keyring
[mds]
keyring = /var/lib/ceph/mds/ceph-$id/keyring
[mds.xx-xxx-01]
host = xx-xxx-01
mds_standby_for_name = pve
[mds.xx-xxx-02]
host = xx-xxx-02
mds_standby_for_name = pve
[mds.xx-xxx-03]
host = xx-xxx-03
mds_standby_for_name = pve
[mon.xx-xxx-01]
public_addr = 10.xx.xx.21
[mon.xx-xxx-02]
public_addr = 10.xx.xx.22
[mon.xx-xxx-03]
public_addr = 10.xx.xx.23
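In case the daemons aren't picking these values up, a quick way to confirm what a running daemon actually uses (osd.0 as an example) would be:
Code:
# Show the network settings the running OSD daemon is actually using
ceph config show osd.0 | grep -E 'cluster_network|public_network'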
Here's the CRUSH map:
Code:
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54
# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class hdd
device 4 osd.4 class hdd
device 5 osd.5 class hdd
device 6 osd.6 class hdd
device 7 osd.7 class hdd
device 8 osd.8 class hdd
device 9 osd.9 class hdd
device 10 osd.10 class hdd
device 11 osd.11 class hdd
device 12 osd.12 class hdd
device 13 osd.13 class hdd
device 14 osd.14 class hdd
device 15 osd.15 class hdd
device 16 osd.16 class hdd
device 17 osd.17 class hdd
# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 zone
type 10 region
type 11 root
# buckets
host xx-xxx-01 {
id -3 # do not change unnecessarily
id -4 class hdd # do not change unnecessarily
# weight 44.537
alg straw2
hash 0 # rjenkins1
item osd.0 weight 7.423
item osd.1 weight 7.423
item osd.2 weight 7.423
item osd.3 weight 7.423
item osd.4 weight 7.423
item osd.5 weight 7.423
}
host xx-xxx-02 {
id -5 # do not change unnecessarily
id -6 class hdd # do not change unnecessarily
# weight 44.537
alg straw2
hash 0 # rjenkins1
item osd.6 weight 7.423
item osd.7 weight 7.423
item osd.8 weight 7.423
item osd.12 weight 7.423
item osd.13 weight 7.423
item osd.14 weight 7.423
}
host xx-xxx-03 {
id -7 # do not change unnecessarily
id -8 class hdd # do not change unnecessarily
# weight 44.537
alg straw2
hash 0 # rjenkins1
item osd.9 weight 7.423
item osd.10 weight 7.423
item osd.11 weight 7.423
item osd.15 weight 7.423
item osd.16 weight 7.423
item osd.17 weight 7.423
}
root default {
id -1 # do not change unnecessarily
id -2 class hdd # do not change unnecessarily
# weight 133.612
alg straw2
hash 0 # rjenkins1
item xx-xxx-01 weight 44.537
item xx-xxx-02 weight 44.537
item xx-xxx-03 weight 44.537
}
# rules
rule replicated_rule {
id 0
type replicated
min_size 1
max_size 10
step take default
step chooseleaf firstn 0 type host
step emit
}
# end crush map
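The pool is, as far as I can tell, a standard 3x replicated pool using this rule; to double-check its settings and how the PGs are spread across the OSDs, I'd look at:
Code:
# Pool size/min_size, pg_num and the CRUSH rule each pool uses
ceph osd pool ls detail
# Per-OSD utilization and PG counts, grouped by host
ceph osd df tree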
Here's the ceph osd metadata output, trimmed to the relevant fields:
Code:
"id": 0,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sdc=ST8000NM000A-2KE101_xxxx",
"id": 1,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sdd=ST8000NM000A-2KE101_xxxx",
"id": 2,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sde=ST8000NM000A-2KE101_xxxx",
"id": 3,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdf=ST8000NM000A-2KE101_xxxx",
"id": 4,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdg=ST8000NM000A-2KE101_xxxx",
"id": 5,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdh=ST8000NM000A-2KE101_xxxx",
"id": 6,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sdc=ST8000NM000A-2KE101_xxxx",
"id": 7,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sdd=ST8000NM000A-2KE101_xxxx",
"id": 8,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sde=ST8000NM000A-2KE101_xxxx",
"id": 9,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sdc=ST8000NM000A-2KE101_xxxx",
"id": 10,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sdd=ST8000NM000A-2KE101_xxxx",
"id": 11,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme0n1=INTEL_SSDPE21D960GA_xxxx,sde=ST8000NM000A-2KE101_xxxx",
"id": 12,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdf=ST8000NM000A-2KE101_xxxx",
"id": 13,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdg=ST8000NM000A-2KE101_xxxx",
"id": 14,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdh=ST8000NM000A-2KE101_xxxx",
"id": 15,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdf=ST8000NM000A-2KE101_xxxx",
"id": 16,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdg=ST8000NM000A-2KE101_xxxx",
"id": 17,
"bluefs_db_block_size": "4096",
"bluefs_db_size": "159970754560",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_size": "8001545043968",
"device_ids": "nvme1n1=INTEL_SSDPE21D960GA_xxxx,sdh=ST8000NM000A-2KE101_xxxx",
Any insights on this would be appreciated.
Thank you.