We thought it might be a known problem, but I am happy to provide more information.
Three Ceph nodes running Ceph Nautilus 14.2.1 serve as MONs and MDS; each node has four OSDs (10 TB each) using BlueStore, with 50 GB per OSD on an NVMe device.
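For context, the OSDs were set up roughly along these lines (a sketch only, not our exact deployment commands; the device names are placeholders):

# One OSD with its BlueStore DB on a 50 GB NVMe partition, created with ceph-volume.
# /dev/sdb and /dev/nvme0n1p1 are placeholder device names.
ceph-volume lvm create --bluestore --data /dev/sdb --block.db /dev/nvme0n1p1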
Ceph.conf:
[global]
fsid = 2a53dc49-4a00-4ce6-a261-e7796b53d9ff
mon_initial_members = ceph01, ceph02, ceph03
mon_host = 192.168.8.10,192.168.8.11,192.168.8.12
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 192.168.8.0/24
cluster network = 10.10.22.0/24
osd journal size = 20000
osd pool default size = 2
osd pool default min size = 1
rbd default features = 3
[mon]
mgr initial modules = balancer dashboard
mon allow pool delete = True
mon health preluminous compat = True
mon osd down out interval = 300
[mgr]
mgr_modules = dashboard balancer
[mds.1]
host = ceph01
[mds.2]
host = ceph02
[mds.3]
host = ceph03
[osd]
bluestore cache autotune = 0
bluestore cache kv ratio = 0.2
bluestore cache meta ratio = 0.8
bluestore cache size ssd = 8G
bluestore csum type = none
bluestore extent map shard max size = 200
bluestore extent map shard min size = 50
bluestore extent map shard target size = 100
bluestore rocksdb options = compression=kNoCompression,max_write_buffer_number=32,min_write_buffer_number_to_merge=2,recycle_log_file_num=32,compaction_style=kCompactionStyleLevel,write_buffer_size=67108864,target_file_size_base=67108864,max_background_compactions=31,level0_file_num_compaction_trigger=8,level0_slowdown_writes_trigger=32,level0_stop_writes_trigger=64,max_bytes_for_level_base=536870912,compaction_threads=32,max_bytes_for_level_multiplier=8,flusher_threads=8,compaction_readahead_size=2MB
osd map share max epochs = 100
osd max backfills = 5
osd memory target = 4294967296
osd op num shards = 8
osd op num threads per shard = 2
osd min pg log entries = 10
osd max pg log entries = 10
osd pg log dups tracked = 10
osd pg log trim min = 10
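To confirm that the running OSDs actually picked up these values (and not just what is in the file), we can query a daemon directly; osd.0 is just an example ID here:

# Query a running OSD via its admin socket for the values it is actually using.
ceph daemon osd.0 config get osd_memory_target
ceph daemon osd.0 config show | grep -E 'bluestore_cache|pg_log'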
If any information is still missing, please ask.