Yet another Ceph tuning question (comparing to Dell SAN)

This is all pretty much the defaults that Proxmox/Ceph creates:

Code:
# cat /etc/pve/ceph.conf

[global]
         auth_client_required = cephx
         auth_cluster_required = cephx
         auth_service_required = cephx
         cluster_network = 192.168.1.81/24
         fsid = 84681487-a5e1-431f-8741-95694c39d8ac
         mon_allow_pool_delete = true
         mon_host = 192.168.1.81 192.168.1.82 192.168.1.83 192.168.1.84
         ms_bind_ipv4 = true
         ms_bind_ipv6 = false
         osd_pool_default_min_size = 2
         osd_pool_default_size = 3
         public_network = 192.168.1.81/24

[client]
         keyring = /etc/pve/priv/$cluster.$name.keyring

[mds]
         keyring = /var/lib/ceph/mds/ceph-$id/keyring

[mds.pve1-cpu1]
         host = pve1-cpu1
         mds_standby_for_name = pve

[mds.pve1-cpu2]
         host = pve1-cpu2
         mds_standby_for_name = pve

[mds.pve1-cpu3]
         host = pve1-cpu3
         mds_standby_for_name = pve

[mds.pve1-cpu4]
         host = pve1-cpu4
         mds_standby_for_name = pve

[mon.pve1-cpu1]
         public_addr = 192.168.1.81

[mon.pve1-cpu2]
         public_addr = 192.168.1.82

[mon.pve1-cpu3]
         public_addr = 192.168.1.83

[mon.pve1-cpu4]
         public_addr = 192.168.1.84



# cat /etc/network/interfaces

# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
#
# If you want to manage parts of the network configuration manually,
# please utilize the 'source' or 'source-directory' directives to do
# so.
# PVE will preserve these directives, but will NOT read its network
# configuration from sourced files, so do not attempt to move any of
# the PVE managed interfaces into external files!

auto lo
iface lo inet loopback

auto eno1
iface eno1 inet manual

auto eno2
iface eno2 inet manual

auto enp130s0f0
iface enp130s0f0 inet static
        address 10.10.1.81/24
#SAN+HA1

auto enp130s0f1
iface enp130s0f1 inet static
        address 10.10.2.81/24
#SAN+HA2

auto ibp5s0
iface ibp5s0 inet manual
        pre-up modprobe ib_ipoib
        pre-up echo connected > /sys/class/net/ibp5s0/mode
        mtu 65520
#CEPH-Net40g1

auto ibp5s0d1
iface ibp5s0d1 inet manual
        pre-up modprobe ib_ipoib
        pre-up echo connected > /sys/class/net/ibp5s0d1/mode
        mtu 65520
#CEPH-Net40g2

auto bond0
iface bond0 inet manual
        bond-slaves eno1 eno2
        bond-miimon 100
        bond-mode 802.3ad
        bond-xmit-hash-policy layer2+3
#Mgmt+INet

auto bond1
iface bond1 inet static
        address 192.168.1.81/24
        bond-slaves ibp5s0 ibp5s0d1
        bond-miimon 100
        bond-mode active-backup
        bond-primary ibp5s0
        mtu 65520
#CEPH-Failover-Bond

auto vmbr0
iface vmbr0 inet manual
        bridge-ports bond0
        bridge-stp off
        bridge-fd 0
        bridge-vlan-aware yes
        bridge-vids 2-4094
        vlan-aware yes

auto vmbr0.102
iface vmbr0.102 inet static
        address 10.2.2.81/16
        gateway 10.2.0.1
#Management Access

auto vmbr0.740
iface vmbr0.740 inet manual
 
Ahhhh, lightbulb moment - so Ceph is doing its cluster traffic on the same network the nodes are using to serve the VMs. Never noticed that in the config, and never thought about it either.

How hard is that to change? Get rid of the active/backup bond, configure the 2nd NIC on 192.168.2.0/24, and apply that to the Ceph config. How much will that break on a live Ceph setup?
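For illustration, a rough sketch of what that split could look like in /etc/network/interfaces on the first node, dropping bond1 and addressing the two InfiniBand ports directly (the 192.168.2.81 address is an assumption based on the 192.168.2.0/24 idea above; the interface names and other settings are carried over from the config as posted):

Code:
auto ibp5s0
iface ibp5s0 inet static
        address 192.168.1.81/24
        pre-up modprobe ib_ipoib
        pre-up echo connected > /sys/class/net/ibp5s0/mode
        mtu 65520
#CEPH public network

auto ibp5s0d1
iface ibp5s0d1 inet static
        address 192.168.2.81/24
        pre-up modprobe ib_ipoib
        pre-up echo connected > /sys/class/net/ibp5s0d1/mode
        mtu 65520
#CEPH cluster (replication) network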

Thanks!
 
Not very. First make your networking changes, then change the settings in ceph.conf. Restart the OSDs, monitors, and managers (the MDSs as well, in your case). THIS APPLIES TO ALL NODES.

I do want to reiterate that it's not likely to make any real difference in your actual application performance.

Also, you don't need so many monitors, and certainly not that many MDSs; you need 3 monitors and 2 MDSs per namespace (one active and one standby - and with Quincy you can even have multiple namespaces per MDS).
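As a rough sketch of that sequence (not a tested recipe - the subnet and node name below are assumptions carried over from the configs above; run the restart step on every node, one node at a time, waiting for HEALTH_OK in between):

Code:
# /etc/pve/ceph.conf is shared cluster-wide, so edit it once, e.g.:
#   cluster_network = 192.168.2.81/24
# then on each node restart the Ceph daemons and check health before moving on
systemctl restart ceph-mon.target ceph-mgr.target ceph-osd.target ceph-mds.target
ceph -s

# optionally trim the surplus monitor/MDS instances, e.g. on the 4th node:
pveceph mon destroy pve1-cpu4
pveceph mds destroy pve1-cpu4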
 
