Hi,
I have completed the setup of a 6-node cluster running PVE and Ceph.
This is my Ceph configuration:
root@ld3955:~# more /etc/pve/ceph.conf
[global]
auth client required = cephx
auth cluster required = cephx
auth service required = cephx
cluster network = 192.168.1.0/27
fsid = 6b1b5117-6e08-4843-93d6-xxxxxxxxxxxx
keyring = /etc/pve/priv/$cluster.$name.keyring
mon allow pool delete = true
osd journal size = 5120
osd pool default min size = 2
osd pool default size = 3
public network = 10.97.206.0/24
osd crush update on start = false
[mds]
keyring = /var/lib/ceph/mds/ceph-$id/keyring
[osd]
keyring = /var/lib/ceph/osd/ceph-$id/keyring
[mds.ld3955]
host = ld3955
mds standby for name = pve
[mds.ld3976]
host = ld3976
mds standby for name = pve
[mon.ld5505]
host = ld5505
mon addr = 10.97.206.93:6789
[mon.ld5506]
host = ld5506
mon addr = 10.97.206.94:6789
[mon.ld5507]
host = ld5507
mon addr = 10.97.206.95:6789
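(In case it is relevant: to confirm that the running daemons really picked up both networks, they can be queried over the admin socket; osd.0 on ld5505 is just an example here.)
root@ld5505:~# ceph daemon osd.0 config get public_network
root@ld5505:~# ceph daemon osd.0 config get cluster_network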
The public network is configured on a 10GBit NIC, and the cluster network on a 40GBit NIC.
All OSDs are connected only to these nodes:
ld5505
ld5506
ld5507
ld5508
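(The host/OSD layout can be confirmed with ceph osd tree; the output only lists these four hosts with OSDs underneath.)
root@ld3955:~# ceph osd tree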
I have mounted an NFS share on ld3955 using a 40GBit NIC.
When I transfer data from the remote location to an RBD, I can observe a maximum throughput of 10GBit on these interfaces:
vmbr0 (= public network)
bond0 (= cluster network)
My assumption was that all data that must be distributed across the OSDs would be transferred over the cluster network.
But this seems to be wrong; otherwise, why do I see the following on vmbr0 with iftop?
353Mb 707Mb 1,04Gb 1,38Gb 1,73Gb
+---------------------------+----------------------------+----------------------------+----------------------------+----------------------------
ld3955 => ld5505 973Mb 1,17Gb 1,15Gb
<= 881Kb 1,16Mb 1,08Mb
ld3955 => ld5506 1,20Gb 1,11Gb 1,12Gb
<= 1,49Mb 1,61Mb 1,53Mb
ld3955 => ld5508 1,06Gb 1,07Gb 1,08Gb
<= 1,31Mb 1,30Mb 1,25Mb
ld3955 => ld5507 1,13Gb 1,04Gb 1,03Gb
<= 1,34Mb 1,33Mb 1,28Mb
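If it helps, this is how the addresses each OSD advertises on both networks can be listed (the grep pattern just filters the per-OSD lines); every osd.N line should show one 10.97.206.x address and one 192.168.1.x address:
root@ld3955:~# ceph osd dump | grep '^osd\.'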
Please comment and advise.
THX