Could you please post the output for:
# ceph -s
# ceph osd tree
From node 2:
root@pmc2:~# ceph -s
    cluster 773e19fe-60e7-427d-bdc9-6cbcc1301f6e
     health HEALTH_WARN
            1 mons down, quorum 1,2,3 1,2,3
     monmap e4: 4 mons at {0=10.10.10.1:6789/0,1=10.10.10.2:6789/0,2=10.10.10.3:6789/0,3=10.10.10.4:6789/0}
            election epoch 120, quorum 1,2,3 1,2,3
     osdmap e6726: 12 osds: 9 up, 9 in
      pgmap v6419672: 1088 pgs, 2 pools, 2537 GB data, 640 kobjects
            5061 GB used, 2937 GB / 7999 GB avail
                1088 active+clean
  client io 2994 kB/s rd, 1206 kB/s wr, 254 op/s
root@pmc2:~#
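The monmap above lists four monitors (0 through 3), but the quorum is only 1,2,3, so the monitor that is down is mon.0 at 10.10.10.1, the same host that carries the down OSDs in the tree below. If it helps, these standard Ceph CLI commands (nothing specific to this cluster assumed) print exactly which daemons are affected:
# ceph health detail
# ceph mon stat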
root@pmc2:~# ceph osd tree
2015-09-04 05:57:22.736976 7fad6c616700 0 -- :/1667490 >> 10.10.10.1:6789/0 pipe(0x15ea050 sd=3 :0 s=1 pgs=0 cs=0 l=1 c=0x15e70d0).fault
ID WEIGHT   TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 10.43994 root default
-2  2.60999     host pmc1
 0  0.87000         osd.0     down        0          1.00000
 1  0.87000         osd.1     down        0          1.00000
 2  0.87000         osd.2     down        0          1.00000
-3  2.60999     host pmc2
 3  0.87000         osd.3       up  1.00000          1.00000
 4  0.87000         osd.4       up  1.00000          1.00000
 5  0.87000         osd.5       up  1.00000          1.00000
-4  2.60999     host pmc3
 6  0.87000         osd.6       up  1.00000          1.00000
 7  0.87000         osd.7       up  1.00000          1.00000
 8  0.87000         osd.8       up  1.00000          1.00000
-5  2.60999     host pmc4
 9  0.87000         osd.9       up  1.00000          1.00000
10  0.87000         osd.10      up  1.00000          1.00000
11  0.87000         osd.11      up  1.00000          1.00000
root@pmc2:~#
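All three down OSDs (osd.0, osd.1, osd.2) sit under host pmc1, and the unreachable monitor address 10.10.10.1 belongs to pmc1 as well, so this looks like the whole node being down or cut off from the 10.10.10.x network rather than individual disk failures. Once pmc1 is reachable again, a rough sketch of the first checks from pmc2 would be something like the following (this assumes the sysvinit-style ceph init script of that Ceph generation; adjust the service commands to whatever init system the node actually uses):
# ping -c 3 10.10.10.1
# ssh pmc1 '/etc/init.d/ceph status'
# ssh pmc1 '/etc/init.d/ceph start mon.0'
# ssh pmc1 '/etc/init.d/ceph start osd'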