Sorry - newbie issue with Ceph

stefws

Member
Jan 29, 2015
First I did a minor PoC on VMware guests; it worked like a charm, even with live migration.
Now I am trying to scale the PoC up on top of retired hardware, but I cannot get Ceph to work the same way in a 4-node cluster.
Any advice is appreciated, as I have by now spent countless hours googling and reading, only getting more and more :confused:

Let me know if you need any further info from me to assist. TIA!
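In case my storage setup is the culprit: as far as I understand the wiki, the RBD entry in /etc/pve/storage.cfg should look roughly like this (the storage ID "ceph_vm" is just a placeholder name I picked; the monitor IPs and pool name are from the outputs below):

rbd: ceph_vm
        monhost 10.0.3.1;10.0.3.2;10.0.3.3
        pool vm_images
        content images
        username admin

and the client keyring apparently has to be copied under a name matching the storage ID so PVE can authenticate:

# "ceph_vm" must match the storage ID from storage.cfg;
# the source path may differ depending on where your admin keyring ended up
mkdir -p /etc/pve/priv/ceph
cp /etc/ceph/ceph.client.admin.keyring /etc/pve/priv/ceph/ceph_vm.keyring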

root@node1:/etc/pve# pveceph lspools
Name       size  pg_num  used
rbd_data      3     256     0
vm_images     3     256     0
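(Both pools are size 3 with 256 PGs each, so 2 x 256 = 512 PGs total, which matches the pgmap further down. If it helps, I assume the per-pool settings can also be read back like this:)

# read back replication size and PG count for one of the pools
ceph osd pool get vm_images size
ceph osd pool get vm_images pg_num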
root@node1:/etc/pve# pveceph status
{
  "monmap" : {
    "mons" : [
      {
        "name" : "2",
        "addr" : "10.0.3.1:6789/0",
        "rank" : 0
      },
      {
        "name" : "1",
        "addr" : "10.0.3.2:6789/0",
        "rank" : 1
      },
      {
        "name" : "3",
        "addr" : "10.0.3.3:6789/0",
        "rank" : 2
      },
      {
        "name" : "0",
        "addr" : "10.0.3.4:6789/0",
        "rank" : 3
      }
    ],
    "created" : "2015-01-29 10:45:53.727024",
    "epoch" : 4,
    "modified" : "2015-01-29 10:47:06.598083",
    "fsid" : "16fe2dcf-2629-422f-a649-871deba78bcd"
  },
  "election_epoch" : 56,
  "health" : {
    "detail" : [],
    "overall_status" : "HEALTH_OK",
    "summary" : [],
    "timechecks" : {
      "mons" : [
        {
          "name" : "2",
          "latency" : "0.000000",
          "skew" : "0.000000",
          "health" : "HEALTH_OK"
        },
        {
          "name" : "1",
          "latency" : "0.007205",
          "skew" : "-0.008600",
          "health" : "HEALTH_OK"
        },
        {
          "name" : "3",
          "latency" : "0.002361",
          "skew" : "-0.007540",
          "health" : "HEALTH_OK"
        },
        {
          "name" : "0",
          "latency" : "0.003174",
          "skew" : "-0.008190",
          "health" : "HEALTH_OK"
        }
      ],
      "epoch" : 56,
      "round_status" : "finished",
      "round" : 20
    },
    "health" : {
      "health_services" : [
        {
          "mons" : [
            {
              "kb_used" : 1221960,
              "last_updated" : "2015-01-30 05:48:20.321321",
              "name" : "2",
              "health" : "HEALTH_OK",
              "kb_total" : 17546044,
              "kb_avail" : 15432796,
              "store_stats" : {
                "bytes_total" : 18355284,
                "last_updated" : "0.000000",
                "bytes_misc" : 72713,
                "bytes_sst" : 18217035,
                "bytes_log" : 65536
              },
              "avail_percent" : 87
            },
            {
              "kb_used" : 4528924,
              "last_updated" : "2015-01-30 05:48:52.363961",
              "name" : "1",
              "health" : "HEALTH_OK",
              "kb_total" : 35092160,
              "kb_avail" : 28780660,
              "store_stats" : {
                "bytes_total" : 18354377,
                "last_updated" : "0.000000",
                "bytes_misc" : 71815,
                "bytes_sst" : 18217026,
                "bytes_log" : 65536
              },
              "avail_percent" : 82
            },
            {
              "kb_used" : 1268184,
              "last_updated" : "2015-01-30 05:48:48.951639",
              "name" : "3",
              "health" : "HEALTH_OK",
              "kb_total" : 17546044,
              "kb_avail" : 15386572,
              "store_stats" : {
                "bytes_total" : 18354639,
                "last_updated" : "0.000000",
                "bytes_misc" : 71816,
                "bytes_sst" : 18217287,
                "bytes_log" : 65536
              },
              "avail_percent" : 87
            },
            {
              "kb_used" : 1369764,
              "last_updated" : "2015-01-30 05:48:30.246012",
              "name" : "0",
              "health" : "HEALTH_OK",
              "kb_total" : 17546044,
              "kb_avail" : 15284992,
              "store_stats" : {
                "bytes_total" : 18353311,
                "last_updated" : "0.000000",
                "bytes_misc" : 71233,
                "bytes_sst" : 18216542,
                "bytes_log" : 65536
              },
              "avail_percent" : 87
            }
          ]
        }
      ]
    }
  },
  "osdmap" : {
    "osdmap" : {
      "num_in_osds" : 16,
      "epoch" : 188,
      "nearfull" : false,
      "num_up_osds" : 16,
      "full" : false,
      "num_osds" : 16
    }
  },
  "mdsmap" : {
    "epoch" : 1,
    "by_rank" : [],
    "in" : 0,
    "max" : 0,
    "up" : 0
  },
  "pgmap" : {
    "bytes_total" : 2261429428224,
    "pgs_by_state" : [
      {
        "count" : 512,
        "state_name" : "active+clean"
      }
    ],
    "data_bytes" : 0,
    "num_pgs" : 512,
    "version" : 511,
    "bytes_avail" : 2260786995200,
    "bytes_used" : 642433024
  },
  "quorum" : [
    0,
    1,
    2,
    3
  ],
  "quorum_names" : [
    "2",
    "1",
    "3",
    "0"
  ],
  "fsid" : "16fe2dcf-2629-422f-a649-871deba78bcd"
}


root@node1:/etc/pve# ceph -s
    cluster 16fe2dcf-2629-422f-a649-871deba78bcd
     health HEALTH_OK
     monmap e4: 4 mons at {0=10.0.3.4:6789/0,1=10.0.3.2:6789/0,2=10.0.3.1:6789/0,3=10.0.3.3:6789/0}, election epoch 56, quorum 0,1,2,3 2,1,3,0
     osdmap e188: 16 osds: 16 up, 16 in
      pgmap v511: 512 pgs, 2 pools, 0 bytes data, 0 objects
            612 MB used, 2105 GB / 2106 GB avail
                 512 active+clean
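One thing I keep wondering about: I am running 4 monitors, and everything I have read recommends an odd number (3 or 5) so elections cannot tie. If that could be related, I suppose I could drop one of them with something like this (untested on my side, so please correct me):

# remove the monitor with ID 0 (the one on 10.0.3.4), leaving 3 mons
pveceph destroymon 0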

root@node1:/etc/pve# ceph osd tree
# id    weight  type name           up/down reweight
-1      2.08    root default
-2      0.52        host node1
4       0.13            osd.4       up      1
5       0.13            osd.5       up      1
6       0.13            osd.6       up      1
7       0.13            osd.7       up      1
-3      0.52        host node2
0       0.13            osd.0       up      1
8       0.13            osd.8       up      1
9       0.13            osd.9       up      1
10      0.13            osd.10      up      1
-4      0.52        host node3
1       0.13            osd.1       up      1
2       0.13            osd.2       up      1
3       0.13            osd.3       up      1
11      0.13            osd.11      up      1
-5      0.52        host node4
12      0.13            osd.12      up      1
13      0.13            osd.13      up      1
14      0.13            osd.14      up      1
15      0.13            osd.15      up      1
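The tree looks evenly balanced to me (4 OSDs per host, weight 0.13 each). If useful, I can also dump the CRUSH rules or test where an object would be placed; I believe the commands are:

# show the CRUSH rules in use (the default should replicate across hosts)
ceph osd crush rule dump
# show which PG/OSD set a test object name would map to ("testobject" is arbitrary)
ceph osd map vm_images testobject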

root@node1:/etc/pve# pvecm nodes
Node  Sts   Inc   Joined                Name
   1   M    136   2015-01-30 05:31:55   node3
   2   M    116   2015-01-29 23:21:37   node2
   3   M    108   2015-01-29 23:21:09   node1
   4   M    116   2015-01-29 23:21:37   node4
root@node1:/etc/pve# pvecm status
Version: 6.2.0
Config Version: 6
Cluster Name: sprawlcl
Cluster Id: 28778
Cluster Member: Yes
Cluster Generation: 136
Membership state: Cluster-Member
Nodes: 4
Expected votes: 4
Total votes: 4
Node votes: 1
Quorum: 3
Active subsystems: 5
Flags:
Ports Bound: 0
Node name: node1
Node ID: 3
Multicast addresses: 239.192.112.218
Node addresses: 1.2.3.4 # <- hmm, only shows one public IP here
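Since the PVE cluster relies on multicast between the nodes, I also meant to verify that with the test from the Proxmox wiki (run on all four nodes at the same time):

# ~10 seconds of rapid multicast probes between all cluster nodes
omping -c 10000 -i 0.001 -F -q node1 node2 node3 node4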
 
