I know this is long, but I have tried to include every command possible to help with troubleshooting.
I cannot run `systemctl start ceph-mon.<MON-ID>.service` — the unit does not exist.
I needed to upgrade from Hammer to Jewel in order to install a new node.
I've done this several times before. I am trying my best to follow the wiki.
I have read https://pve.proxmox.com/wiki/Ceph_Hammer_to_Jewel over ten times.
But I CANNOT figure this out.
I think I may have stumbled upon some kind of BUG.
I will list my version below, and list every command that I think will be helpful to you. I have a lot of data, and I don't think that I did anything wrong outside of the commands of the wiki.
Starting from the beginning of the wiki.
At this point, systemctl start ceph-mon.3.service does not work, nor does tab completion of systemctl start ceph-mon
I seem to have lost my ceph monitor.
I've rebooted several times since then because I did lose 2 hard disks during the ceph ownership change which took 2 days.
I saw these troubleshooting steps in other threads, so I will list their output here.
I cannot run `systemctl start ceph-mon.<MON-ID>.service` — the unit does not exist.
I needed to upgrade from Hammer to Jewel in order to install a new node.
I've done this several times before. I am trying my best to follow the wiki.
I have read https://pve.proxmox.com/wiki/Ceph_Hammer_to_Jewel over ten times.
But I CANNOT figure this out.
I think I may have stumbled upon some kind of BUG.
I will list my version below, and list every command that I think will be helpful to you. I have a lot of data, and I don't think that I did anything wrong outside of the commands of the wiki.
Code:
prox-e:~# pveversion
pve-manager/4.4-22/2728f613 (running kernel: 4.4.98-6-pve)
Code:
root@
prox-e:~# cat /etc/ceph/ceph.conf
[global]
auth client required = cephx
auth cluster required = cephx
auth service required = cephx
cluster network = 192.168.12.0/24
filestore xattr use omap = true
fsid = 94e14478-19e4-4b5f-89e5-aff03579632d
keyring = /etc/pve/priv/$cluster.$name.keyring
osd journal size = 5120
osd pool default min size = 1
public network = 192.168.12.0/24
[osd]
keyring = /var/lib/ceph/osd/ceph-$id/keyring
[mon.1]
host = prox-f
mon addr = 192.168.12.25:6789
[mon.3]
host = prox-e
mon addr = 192.168.12.24:6789
[mon.0]
host = prox-b
mon addr = 192.168.12.21:6789
[mon.2]
host = prox-c
mon addr = 192.168.12.22:6789
Starting from the beginning of the wiki.
Code:
prox-e:~# cat /etc/apt/sources.list.d/ceph.list
deb http://download.ceph.com/debian-jewel jessie main
Code:
prox-e:~# systemctl stop ceph-mon.3.1513810191.138222489.service
Code:
prox-e:~# df -h
Filesystem Size Used Avail Use% Mounted on
udev 10M 0 10M 0% /dev
tmpfs 4.8G 18M 4.7G 1% /run
rpool/ROOT/pve-1 217G 2.5G 214G 2% /
tmpfs 12G 45M 12G 1% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
tmpfs 12G 0 12G 0% /sys/fs/cgroup
rpool 214G 128K 214G 1% /rpool
rpool/ROOT 214G 128K 214G 1% /rpool/ROOT
rpool/data 214G 128K 214G 1% /rpool/data
/dev/fuse 30M 44K 30M 1% /etc/pve
/dev/sdl1 2.8T 2.1T 660G 77% /var/lib/ceph/osd/ceph-37
/dev/sdn1 2.8T 1.9T 930G 67% /var/lib/ceph/osd/ceph-31
/dev/sdc1 2.8T 1.9T 856G 70% /var/lib/ceph/osd/ceph-39
/dev/sdb1 2.8T 2.1T 705G 75% /var/lib/ceph/osd/ceph-36
/dev/sdj1 2.8T 2.0T 762G 73% /var/lib/ceph/osd/ceph-29
/dev/sda1 1.9T 1.2T 660G 65% /var/lib/ceph/osd/ceph-40
/dev/sdf1 2.8T 1.8T 1003G 65% /var/lib/ceph/osd/ceph-33
/dev/sdd1 2.8T 1.9T 854G 70% /var/lib/ceph/osd/ceph-30
/dev/sdh1 2.8T 2.1T 657G 77% /var/lib/ceph/osd/ceph-28
/dev/sdm1 2.8T 1.6T 1.2T 59% /var/lib/ceph/osd/ceph-38
/dev/sdi1 2.8T 1.8T 1.1T 63% /var/lib/ceph/osd/ceph-32
/dev/sdk1 2.8T 2.0T 785G 72% /var/lib/ceph/osd/ceph-34
/dev/sdg1 1.9T 1.2T 716G 62% /var/lib/ceph/osd/ceph-27
Code:
prox-e:~# readlink -f /var/lib/ceph/osd/ceph-37/journal
/dev/sdl2
Code:
prox-e:~# blkid -o udev -p /dev/sdl2
ID_PART_ENTRY_SCHEME=gpt
ID_PART_ENTRY_NAME=ceph\x20journal
ID_PART_ENTRY_UUID=d3d49ff8-6234-4f9b-ba3f-d19cbd902318
ID_PART_ENTRY_TYPE=45b0969e-9b03-4f30-b4c6-b4b80ceff106
ID_PART_ENTRY_NUMBER=2
ID_PART_ENTRY_OFFSET=2048
ID_PART_ENTRY_SIZE=10483713
ID_PART_ENTRY_DISK=8:176
Code:
root@prox-e:~# ls -halt /var/lib/ceph/
total 29K
drwxr-xr-x 2 ceph ceph 4 Mar 14 20:41 tmp
drwxr-x--- 9 ceph ceph 9 Mar 13 16:58 .
drwxr-xr-x 47 root root 48 Mar 13 16:57 ..
drwxr-xr-x 2 ceph ceph 2 Oct 4 10:17 mds
drwxr-xr-x 20 ceph ceph 20 Mar 28 2017 osd
drwxr-xr-x 2 ceph ceph 3 Mar 28 2017 bootstrap-mds
drwxr-xr-x 2 ceph ceph 3 Mar 28 2017 bootstrap-rgw
drwxr-xr-x 2 ceph ceph 3 Mar 28 2017 bootstrap-osd
drwxr-xr-x 3 ceph ceph 3 Mar 28 2017 mon
root@prox-e:~# ls -halt /var/lib/ceph/osd/
total 20K
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:18 ceph-37
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:16 ceph-28
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:15 ceph-34
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:13 ceph-29
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:11 ceph-31
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:10 ceph-32
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:08 ceph-27
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:07 ceph-33
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:05 ceph-30
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:04 ceph-40
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:02 ceph-36
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:01 ceph-38
drwxr-xr-x 3 ceph ceph 217 Mar 14 15:59 ceph-39
drwxr-x--- 9 ceph ceph 9 Mar 13 16:58 ..
drwxr-xr-x 20 ceph ceph 20 Mar 28 2017 .
drwxr-xr-x 2 root root 2 Mar 28 2017 ceph-41
drwxr-xr-x 2 ceph ceph 2 Mar 28 2017 ceph-35
drwxr-xr-x 2 ceph ceph 2 Mar 28 2017 ceph-26
drwxr-xr-x 2 ceph ceph 2 Mar 28 2017 ceph-1
drwxr-xr-x 2 ceph ceph 2 Mar 28 2017 ceph-0
root@prox-e:~# ls -halt /var/lib/ceph/osd/ceph-37/
total 65K
-rw-r--r-- 1 ceph ceph 0 Mar 14 20:19 systemd
drwxr-xr-x 3 ceph ceph 217 Mar 14 16:18 .
drwxr-xr-x 225 ceph ceph 8.0K Jan 22 10:51 current
drwxr-xr-x 20 ceph ceph 20 Mar 28 2017 ..
-rw-r--r-- 1 ceph ceph 3 Mar 28 2017 active
-rw------- 1 ceph ceph 57 Mar 28 2017 keyring
-rw-r--r-- 1 ceph ceph 6 Mar 28 2017 ready
-rw-r--r-- 1 ceph ceph 53 Mar 28 2017 superblock
-rw-r--r-- 1 ceph ceph 4 Mar 28 2017 store_version
-rw-r--r-- 1 ceph ceph 610 Mar 28 2017 activate.monmap
-rw-r--r-- 1 ceph ceph 3 Mar 28 2017 whoami
-rw-r--r-- 1 ceph ceph 21 Mar 28 2017 magic
-rw-r--r-- 1 ceph ceph 37 Mar 28 2017 journal_uuid
-rw-r--r-- 1 ceph ceph 37 Mar 28 2017 fsid
-rw-r--r-- 1 ceph ceph 37 Mar 28 2017 ceph_fsid
lrwxrwxrwx 1 ceph ceph 58 Mar 28 2017 journal -> /dev/disk/by-partuuid/d3d49ff8-6234-4f9b-ba3f-d19cbd902318
Code:
root@prox-e:~# cat /etc/systemd/system/ceph.service
[Unit]
Description=PVE activate Ceph OSD disks
After=pve-cluster.service
Requires=pve-cluster.service
[Service]
ExecStart=/usr/sbin/ceph-disk --log-stdout activate-all
Type=oneshot
[Install]
WantedBy=multi-user.target
Code:
root@prox-e:~# ls -halt /etc/systemd/system/
total 61K
drwxr-xr-x 2 root root 15 Mar 14 20:41 ceph-osd.target.wants
drwxr-xr-x 2 root root 30 Mar 14 16:16 multi-user.target.wants
-rw-r--r-- 1 root root 220 Mar 14 16:16 ceph.service
drwxr-xr-x 15 root root 18 Mar 14 16:16 .
drwxr-xr-x 2 root root 5 Mar 13 16:59 ceph.target.wants
drwxr-xr-x 6 root root 13 May 7 2017 ..
drwxr-xr-x 2 root root 4 Mar 28 2017 sockets.target.wants
drwxr-xr-x 2 root root 6 Mar 27 2017 sysinit.target.wants
drwxr-xr-x 2 root root 4 Mar 27 2017 getty.target.wants
lrwxrwxrwx 1 root root 31 Mar 27 2017 sshd.service -> /lib/systemd/system/ssh.service
lrwxrwxrwx 1 root root 35 Mar 27 2017 syslog.service -> /lib/systemd/system/rsyslog.service
drwxr-xr-x 2 root root 4 Dec 9 2016 local-fs.target.wants
drwxr-xr-x 2 root root 4 Dec 9 2016 zfs-mount.service.wants
drwxr-xr-x 2 root root 6 Dec 9 2016 zfs.target.wants
drwxr-xr-x 2 root root 3 Dec 9 2016 zfs-share.service.wants
drwxr-xr-x 2 root root 3 Dec 9 2016 halt.target.wants
drwxr-xr-x 2 root root 3 Dec 9 2016 poweroff.target.wants
drwxr-xr-x 2 root root 3 Dec 9 2016 reboot.target.wants
At this point, systemctl start ceph-mon.3.service does not work, nor does tab completion of systemctl start ceph-mon
I seem to have lost my ceph monitor.
I've rebooted several times since then because I did lose 2 hard disks during the ceph ownership change which took 2 days.
Code:
root@prox-e:~# ceph -s
2018-03-14 21:14:43.777375 7f02a8415700 0 -- :/3284778895 >> 192.168.12.21:6789/0 pipe(0x7f02a405fa40 sd=3 :0 s=1 pgs=0 cs=0 l=1 c=0x7f02a405d4d0).fault
2018-03-14 21:14:46.777440 7f02a8314700 0 -- :/3284778895 >> 192.168.12.24:6789/0 pipe(0x7f0298000c80 sd=4 :0 s=1 pgs=0 cs=0 l=1 c=0x7f0298001f90).fault
2018-03-14 21:14:49.777929 7f02a8415700 0 -- :/3284778895 >> 192.168.12.22:6789/0 pipe(0x7f0298005160 sd=4 :0 s=1 pgs=0 cs=0 l=1 c=0x7f0298006420).fault
^C^CTraceback (most recent call last):
File "/usr/bin/ceph", line 954, in <module>
retval = main()
File "/usr/bin/ceph", line 858, in main
prefix='get_command_descriptions')
File "/usr/lib/python2.7/dist-packages/ceph_argparse.py", line 1308, in json_command
inbuf, timeout, verbose)
File "/usr/lib/python2.7/dist-packages/ceph_argparse.py", line 1185, in send_command_retry
return send_command(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/ceph_argparse.py", line 1239, in send_command
cluster.mon_command, cmd, inbuf, timeout)
File "/usr/lib/python2.7/dist-packages/ceph_argparse.py", line 1155, in run_in_thread
t.start()
File "/usr/lib/python2.7/threading.py", line 750, in start
self.__started.wait()
File "/usr/lib/python2.7/threading.py", line 621, in wait
self.__cond.wait(timeout)
File "/usr/lib/python2.7/threading.py", line 340, in wait
waiter.acquire()
KeyboardInterrupt
I saw these troubleshooting steps in other threads, so I will list their output here.
Code:
prox-e:~# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 1.8T 0 disk
├─sda1 8:1 0 1.8T 0 part /var/lib/ceph/osd/ceph-40
└─sda2 8:2 0 5G 0 part
sdb 8:16 0 2.7T 0 disk
├─sdb1 8:17 0 2.7T 0 part /var/lib/ceph/osd/ceph-36
└─sdb2 8:18 0 5G 0 part
sdc 8:32 0 2.7T 0 disk
├─sdc1 8:33 0 2.7T 0 part /var/lib/ceph/osd/ceph-39
└─sdc2 8:34 0 5G 0 part
sdd 8:48 0 2.7T 0 disk
├─sdd1 8:49 0 2.7T 0 part /var/lib/ceph/osd/ceph-30
└─sdd2 8:50 0 5G 0 part
sde 8:64 0 2.7T 0 disk
sdf 8:80 0 2.7T 0 disk
├─sdf1 8:81 0 2.7T 0 part /var/lib/ceph/osd/ceph-33
└─sdf2 8:82 0 5G 0 part
sdg 8:96 0 1.8T 0 disk
├─sdg1 8:97 0 1.8T 0 part /var/lib/ceph/osd/ceph-27
└─sdg2 8:98 0 5G 0 part
sdh 8:112 0 2.7T 0 disk
├─sdh1 8:113 0 2.7T 0 part /var/lib/ceph/osd/ceph-28
└─sdh2 8:114 0 5G 0 part
sdi 8:128 0 2.7T 0 disk
├─sdi1 8:129 0 2.7T 0 part /var/lib/ceph/osd/ceph-32
└─sdi2 8:130 0 5G 0 part
sdj 8:144 0 2.7T 0 disk
├─sdj1 8:145 0 2.7T 0 part /var/lib/ceph/osd/ceph-29
└─sdj2 8:146 0 5G 0 part
sdk 8:160 0 2.7T 0 disk
├─sdk1 8:161 0 2.7T 0 part /var/lib/ceph/osd/ceph-34
└─sdk2 8:162 0 5G 0 part
sdl 8:176 0 2.7T 0 disk
├─sdl1 8:177 0 2.7T 0 part /var/lib/ceph/osd/ceph-37
└─sdl2 8:178 0 5G 0 part
sdm 8:192 0 2.7T 0 disk
├─sdm1 8:193 0 2.7T 0 part /var/lib/ceph/osd/ceph-38
└─sdm2 8:194 0 5G 0 part
sdn 8:208 0 2.7T 0 disk
├─sdn1 8:209 0 2.7T 0 part /var/lib/ceph/osd/ceph-31
└─sdn2 8:210 0 5G 0 part
sdo 8:224 0 232.9G 0 disk
├─sdo1 8:225 0 1007K 0 part
├─sdo2 8:226 0 232.9G 0 part
└─sdo9 8:233 0 8M 0 part
zd0 230:0 0 8G 0 disk [SWAP]
Code:
prox-e:~# cat /etc/network/interfaces
auto lo
iface lo inet loopback
auto bond0
iface bond0 inet manual
slaves eth0 eth1
bond_miimon 100
bond_mode active-backup
auto bond1
iface bond1 inet manual
slaves eth2 eth3
bond_miimon 100
bond_mode active-backup
auto vlan600
iface vlan600 inet manual
vlan-raw-device bond0
auto vmbr0
iface vmbr0 inet static
address 10.1.12.24
netmask 255.255.252.0
gateway 10.1.12.6
bridge_ports bond0
bridge_stp off
bridge_fd 0
auto vmbr1
iface vmbr1 inet static
address 10.4.12.24
netmask 255.255.255.0
bridge_ports vlan600
bridge_stp off
bridge_fd 0
bridge_vlan_aware yes
network 10.4.12.0
auto vmbr2
iface vmbr2 inet static
address 192.168.12.24
netmask 255.255.255.0
bridge_ports bond1
bridge_stp off
bridge_fd 0
bridge_vlan_aware yes
network 192.168.12.0
Code:
root@prox-e:~# systemctl status
● prox-e
State: degraded
Jobs: 13 queued
Failed: 1 units
Since: Wed 2018-03-14 20:18:52 CDT; 58min ago
CGroup: /
├─1 /sbin/init
└─system.slice
├─ksmtuned.service
│ ├─ 3102 /bin/bash /usr/sbin/ksmtuned
│ └─23692 sleep 60
├─dbus.service
│ └─3067 /usr/bin/dbus-daemon --system --address=systemd: --nofo
├─cron.service
│ └─3275 /usr/sbin/cron -f
├─nfs-common.service
│ ├─2918 /sbin/rpc.statd
│ └─2932 /usr/sbin/rpc.idmapd
├─pve-ha-lrm.service
│ └─3975 pve-ha-lr
├─postfix.service
│ ├─3298 /usr/lib/postfix/master
│ ├─3302 pickup -l -t unix -u -c
│ └─3303 qmgr -l -t unix -u
├─spiceproxy.service
│ ├─3093 spiceprox
│ └─3095 spiceproxy worke
├─open-iscsi.service
│ ├─2905 /usr/sbin/iscsid
│ └─2908 /usr/sbin/iscsid
├─system-ceph\x2dosd.slice
│ ├─ceph-osd@39.service
│ │ └─control
│ │ ├─23030 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23336 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@32.service
│ │ └─control
│ │ ├─23046 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23358 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@40.service
│ │ └─control
│ │ ├─23034 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23357 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@34.service
│ │ └─control
│ │ ├─23043 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23341 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@28.service
│ │ └─control
│ │ ├─23024 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23313 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@36.service
│ │ └─control
│ │ ├─23020 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23275 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@38.service
│ │ └─control
│ │ ├─23021 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23290 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@31.service
│ │ └─control
│ │ ├─23032 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23342 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@33.service
│ │ └─control
│ │ ├─23038 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23354 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@27.service
│ │ └─control
│ │ ├─23050 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23323 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@29.service
│ │ └─control
│ │ ├─23026 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23315 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ ├─ceph-osd@37.service
│ │ └─control
│ │ ├─23036 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ │ └─23356 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
│ └─ceph-osd@30.service
│ └─control
│ ├─23027 /bin/sh /usr/lib/ceph/ceph-osd-prestart.sh --clust
│ └─23316 /usr/bin/python /usr/bin/ceph --cluster=ceph --nam
├─corosync.service
│ └─3332 corosync
├─pve-firewall.service
│ └─3874 pve-firewal
├─pve-cluster.service
│ └─3176 /usr/bin/pmxcfs
├─atd.service
│ └─3042 /usr/sbin/atd -f
├─systemd-journald.service
│ └─1649 /lib/systemd/systemd-journald
├─pve-ha-crm.service
│ └─3972 pve-ha-cr
├─systemd-timesyncd.service
│ └─2379 /lib/systemd/systemd-timesyncd
├─rrdcached.service
│ └─3085 /usr/bin/rrdcached -l unix:/var/run/rrdcached.sock -j /
├─pvestatd.service
│ ├─ 3921 pvestat
│ └─21184 /usr/bin/rados -p Ceph_Triple -m 192.168.12.25,192.168
├─ssh.service
│ ├─ 3127 /usr/sbin/sshd -D
│ ├─ 6001 sshd: root@pts/0
│ ├─ 6023 -bash
│ ├─23802 systemctl status
│ └─23803 pager
├─systemd-logind.service
│ └─3057 /lib/systemd/systemd-logind
├─watchdog-mux.service
│ └─3038 /usr/sbin/watchdog-mux
├─system-getty.slice
│ └─getty@tty1.service
│ ├─ 3138 /bin/login --
│ ├─10541 -bash
│ └─22313 ssh prox-f
├─pvefw-logger.service
│ └─2408 /usr/sbin/pvefw-logger
├─systemd-udevd.service
│ └─1662 /lib/systemd/systemd-udevd
├─rpcbind.service
│ └─2901 /sbin/rpcbind -w
├─rsyslog.service
│ └─3112 /usr/sbin/rsyslogd -n
├─smartd.service
│ └─3050 /usr/sbin/smartd -n
├─lxc-monitord.service
│ └─3169 /usr/lib/x86_64-linux-gnu/lxc/lxc-monitord --daemon
├─ntp.service
│ └─3094 /usr/sbin/ntpd -p /var/run/ntpd.pid -g -u 110:118
├─pveproxy.service
│ ├─3076 pveprox
│ ├─3077 pveproxy worke
│ ├─3078 pveproxy worke
│ └─3079 pveproxy worke
├─lxcfs.service
│ └─3044 /usr/bin/lxcfs /var/lib/lxcfs/
└─pvedaemon.service
├─3966 pvedaemo
├─3967 pvedaemon worke
├─3968 pvedaemon worke
└─3969 pvedaemon worke
Code:
root@prox-e:~# cat /etc/pve/storage.cfg
dir: local
path /var/lib/vz
content iso,vztmpl,backup
zfspool: local-zfs
pool rpool/data
sparse
content rootdir,images
rbd: Ceph_Triple
monhost 192.168.12.25 192.168.12.21 192.168.12.22
krbd
username admin
content images,rootdir
pool Ceph_Triple
Code:
root@prox-e:~# cat /etc/apt/sources.list
deb http://ftp.us.debian.org/debian jessie main contrib
# PVE pve-no-subscription repository provided by proxmox.com, NOT recommended for production use
deb http://download.proxmox.com/debian jessie pve-no-subscription
# security updates
deb http://security.debian.org jessie/updates main contrib
Code:
root@prox-e:~# ping 192.168.12.25
PING 192.168.12.25 (192.168.12.25) 56(84) bytes of data.
64 bytes from 192.168.12.25: icmp_seq=1 ttl=64 time=0.158 ms
64 bytes from 192.168.12.25: icmp_seq=2 ttl=64 time=0.189 ms
^C
--- 192.168.12.25 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.158/0.173/0.189/0.020 ms
root@prox-e:~# ping 192.168.12.21
PING 192.168.12.21 (192.168.12.21) 56(84) bytes of data.
64 bytes from 192.168.12.21: icmp_seq=1 ttl=64 time=0.190 ms
^C
--- 192.168.12.21 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.190/0.190/0.190/0.000 ms
root@prox-e:~# ping 192.168.12.22
PING 192.168.12.22 (192.168.12.22) 56(84) bytes of data.
64 bytes from 192.168.12.22: icmp_seq=1 ttl=64 time=0.138 ms
^C
--- 192.168.12.22 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.138/0.138/0.138/0.000 ms
Last edited: