[SOLVED] latest pve-test with ceph-jewel seems to ignore "osd_crush_update_on_start = false"

markusd

Hi,
I have created a custom CRUSH map with different root entries for different HDDs/SSDs, as described here:
http://docs.ceph.com/docs/master/ra...ap/#placing-different-pools-on-different-osds
Code:
ceph osd crush tree
[
    {
        "id": -11,
        "name": "hdd-root",
        "type": "root",
        "type_id": 11,
        "items": []
    },
    {
        "id": -2,
        "name": "ssd-root",
        "type": "root",
        "type_id": 11,
        "items": [
            {
                "id": -13,
                "name": "virt01-ssd",
                "type": "host",
                "type_id": 2,
                "items": [
                    {
                        "id": 11,
                        "name": "osd.11",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 2
                    }
                ]
            },
            {
                "id": -14,
                "name": "virt02-ssd",
                "type": "host",
                "type_id": 2,
                "items": [
                    {
                        "id": 12,
                        "name": "osd.12",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 2
                    }
                ]
            },
            {
                "id": -3,
                "name": "storage01-ssd",
                "type": "host",
                "type_id": 2,
                "items": []
            }
        ]
    },
    {
        "id": -1,
        "name": "default",
        "type": "root",
        "type_id": 11,
        "items": [
            {
                "id": -4,
                "name": "storage01",
                "type": "host",
                "type_id": 2,
                "items": [
                    {
                        "id": 14,
                        "name": "osd.14",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.899994,
                        "depth": 2
                    },
                    {
                        "id": 15,
                        "name": "osd.15",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.899994,
                        "depth": 2
                    },
                    {
                        "id": 16,
                        "name": "osd.16",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.899994,
                        "depth": 2
                    },
                    {
                        "id": 17,
                        "name": "osd.17",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.899994,
                        "depth": 2
                    },
                    {
                        "id": 18,
                        "name": "osd.18",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.899994,
                        "depth": 2
                    },
                    {
                        "id": 19,
                        "name": "osd.19",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.899994,
                        "depth": 2
                    },
                    {
                        "id": 20,
                        "name": "osd.20",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.899994,
                        "depth": 2
                    }
                ]
            },
            {
                "id": -5,
                "name": "virt02",
                "type": "host",
                "type_id": 2,
                "items": [
                    {
                        "id": 0,
                        "name": "osd.0",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 2,
                        "name": "osd.2",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 1,
                        "name": "osd.1",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 3,
                        "name": "osd.3",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 4,
                        "name": "osd.4",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    }
                ]
            },
            {
                "id": -6,
                "name": "virt01",
                "type": "host",
                "type_id": 2,
                "items": [
                    {
                        "id": 5,
                        "name": "osd.5",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 6,
                        "name": "osd.6",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 7,
                        "name": "osd.7",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 8,
                        "name": "osd.8",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    },
                    {
                        "id": 9,
                        "name": "osd.9",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 0.907990,
                        "depth": 2
                    }
                ]
            }
        ]
    }
]
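For reference, the extra roots and hosts were created roughly like this (just a sketch from memory; the rule name and the pool placeholders below are only examples, storage01-ssd is done the same way):
Code:
# create the extra root and per-host buckets for the SSDs
ceph osd crush add-bucket ssd-root root
ceph osd crush add-bucket virt01-ssd host
ceph osd crush add-bucket virt02-ssd host
ceph osd crush move virt01-ssd root=ssd-root
ceph osd crush move virt02-ssd root=ssd-root
# place the SSD OSDs under their new hosts
ceph osd crush set osd.11 1.0 host=virt01-ssd
ceph osd crush set osd.12 1.0 host=virt02-ssd
# rule that only selects from ssd-root, then assign it to the SSD pool (Jewel still uses crush_ruleset)
ceph osd crush rule create-simple ssd-rule ssd-root host
ceph osd pool set <ssd-pool> crush_ruleset <ruleset-id>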
To prevent Ceph from moving all devices back to the "default" root at boot, I placed
"osd crush update on start = false"
in /etc/pve/ceph.conf.
This worked well.
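That is, in the [osd] section of /etc/pve/ceph.conf, roughly like this (minimal snippet, full conf is below):
Code:
[osd]
         osd_crush_update_on_start = false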
Now, after the latest updates, I found osd.11 and osd.12 (the SSDs from "ssd-root") back under the default root.
So it seems
"osd crush update on start = false"
is ignored now.
I think a start script has changed.

Am I missing something, or is it worth filing a bug report?

Thank you for your thoughts!

Markus
 
Hi,
does it work if you define the CRUSH location in ceph.conf (perhaps as a workaround), like I do:
Code:
[osd.62]
host = ceph-03-ssd
public_addr = 172.16.1.13
cluster_addr = 192.168.1.13
osd_crush_location = "host=ceph-03-ssd root=ssd"
Udo
 
Hi Udo,
thank you, good idea!
I have put this in ceph.conf:
Code:
[osd.11]
host = virt01-ssd
public_addr = 192.168.0.15
cluster_addr = 192.168.190.15
osd_crush_location = "host=virt01-ssd root=ssd-root"
[osd.12]
host = virt02-ssd
public_addr = 192.168.0.16
cluster_addr = 192.168.190.16
osd_crush_location = "host=virt02-ssd root=ssd-root"

But it didn't work.
After rebooting:

Code:
root@virt02:~# ceph osd tree
ID  WEIGHT   TYPE NAME              UP/DOWN REWEIGHT PRIMARY-AFFINITY
-11        0 root hdd-root                                          
-2  1.00000 root ssd-root                                          
-13  1.00000     host virt01-ssd                                    
11  1.00000         osd.11              up  1.00000          1.00000
-14        0     host virt02-ssd                                    
-3        0     host storage01-ssd                                  
-1 16.37985 root default                                            
-4  6.29996     host storage01                                      
[...]
-5  5.53995     host virt02                                        
[...]
12  1.00000         osd.12              up  1.00000          1.00000
-6  4.53995     host virt01                                        
[...]
Then I corrected this with:
Code:
# ceph osd crush set osd.12 1.0 host=virt02-ssd
My conf:
Code:
[global]
         auth_client_required = none
         auth_cluster_required = none
         auth_service_required = none
         cephx_sign_messages = false
         cephx_require_signatures = false
         cluster_network = 192.168.190.0/24
         debug_asok = 0/0
         debug_auth = 0/0
         debug_buffer = 0/0
         debug_client = 0/0
         debug_context = 0/0
         debug_crush = 0/0
         debug_filer = 0/0
         debug_filestore = 0/0
         debug_finisher = 0/0
         debug_heartbeatmap = 0/0
         debug_journal = 0/0
         debug_journaler = 0/0
         debug_lockdep = 0/0
         debug_mon = 0/0
         debug_monc = 0/0
         debug_ms = 0/0
         debug_objclass = 0/0
         debug_objectcatcher = 0/0
         debug_objecter = 0/0
         debug_optracker = 0/0
         debug_osd = 0/0
         debug_paxos = 0/0
         debug_perfcounter = 0/0
         debug_rados = 0/0
         debug_rbd = 0/0
         debug_rgw = 0/0
         debug_throttle = 0/0
         debug_timer = 0/0
         debug_tp = 0/0
         osd_op_threads = 4
         osd_op_num_threads_per_shard = 1
         osd_op_num_shards = 25
         osd_max_backfills = 1
         osd_recovery_max_active = 1
         osd_disk_threads = 1
         filestore_op_threads = 4
         filestore_fd_cache_size = 64
         filestore_fd_cache_shards = 32
         ms_nocrc = true
         ms_dispatch_throttle_bytes = 0
         throttler_perf_counter = false
         fsid = 79feee35-196d-4f72-bf90-c34cd1d85cb5
         keyring = /etc/pve/priv/$cluster.$name.keyring
         osd_journal_size = 5120
         osd_pool_default_min_size = 1
         public_network = 192.168.0.0/24

[osd]
         keyring = /var/lib/ceph/osd/ceph-$id/keyring
         osd_client_message_size_cap = 0
         osd_client_message_cap = 0
         osd_enable_op_tracker = false
         osd_crush_update_on_start = false

[osd.11]
         host = virt01-ssd
         public_addr = 192.168.0.15
         cluster_addr = 192.168.190.15
         osd_crush_location = "host=virt01-ssd root=ssd-root"

[osd.12]
         host = virt02-ssd
         public_addr = 192.168.0.16
         cluster_addr = 192.168.190.16
         osd_crush_location = "host=virt02-ssd root=ssd-root"

[mon]  
         mon_osd_allow_primary_affinity = true

[mon.1]
         host = virt01
         mon_addr = 192.168.0.15:6789

[mon.0]
         host = storage01
         mon_addr = 192.168.0.5:6789

[mon.2]
         host = virt02
         mon_addr = 192.168.0.16:6789
Hmm, maybe something has changed in
/usr/lib/ceph/ceph-osd-prestart.sh
It contains these lines:
Code:
update="$(ceph-conf --cluster=${cluster:-ceph} --name=osd.$id --lookup osd_crush_update_on_start || :)"

if [ "${update:-1}" = "1" -o "${update:-1}" = "true" ]; then
    # update location in crush
    hook="$(ceph-conf --cluster=${cluster:-ceph} --name=osd.$id --lookup osd_crush_location_hook || :)"
    if [ -z "$hook" ]; then
        hook="/usr/bin/ceph-crush-location"
    fi
So it should work? But I'm no programmer...
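If anyone wants to double-check what the prestart script actually sees, the lookup can be run by hand (just a sketch, assuming the default cluster name "ceph" and osd.11):
Code:
# should print "false" if the [osd] section is picked up for this daemon
ceph-conf --cluster=ceph --name=osd.11 --lookup osd_crush_update_on_start
# and this is the location the default hook would report
/usr/bin/ceph-crush-location --cluster ceph --id 11 --type osd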

Thanks for your thoughts...

Markus
 
