Hi,
I'm having trouble with my SSD pool through the web GUI.
I altered /etc/hosts with the new hostnames from the CRUSH map, because when I take an SSD OSD out,
I get the error "hostname not found".
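The entries I added to /etc/hosts on each node look roughly like this (the -ssd names are the extra host buckets from my CRUSH map, pointed at the same cluster IPs as in ceph.conf):

10.10.10.2   vsrv2-ssd
10.10.10.3   vsrv3-ssd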
Since I altered the hosts file, I randomly get one of two errors:
596: ssl3_get_server_certificate: certificate verify failed
and
500: proxy loop detected
But when I run ceph osd df in the terminal, everything looks fine.
In the terminal I can also set the OSDs in and out, just not in the web GUI.
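For example, commands like these work without problems (osd.6 is one of my SSD OSDs):

ceph osd out osd.6
ceph osd in osd.6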
GLOBAL:
    SIZE       AVAIL     RAW USED     %RAW USED
    15080G     8765G     6314G        41.87
POOLS:
    NAME     ID     USED      %USED     MAX AVAIL     OBJECTS
    rbd      1      3153G     44.53     3927G         810078
    sdf      14     22385     0         106G          256
sdf is my SSD pool with 2 x 128GB SSDs, so at size 2 it has a maximum of 106G available.
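(Rough check, assuming the usual drive sizes: 128 GB is about 119 GiB raw per SSD, and with size 2 across the two SSD OSDs the pool can hold roughly one SSD's worth of data, minus what Ceph keeps reserved, which fits the ~106G MAX AVAIL shown above.)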
But first my config: 3 PVE hosts, two of them with OSDs.
ceph.conf
[global]
auth client required = cephx
auth cluster required = cephx
auth service required = cephx
cluster network = 10.10.10.0/24
filestore xattr use omap = true
fsid = ff91bdc6-76a5-4495-98d8-05bd9332f950
keyring = /etc/pve/priv/$cluster.$name.keyring
osd journal size = 5120
osd pool default min size = 1
public network = 10.10.10.0/24
mon data avail crit = 3
[mon]
mon compact on start = true
[client]
rbd cache = true
rbd cache writethrough until flush = true
[osd]
keyring = /var/lib/ceph/osd/ceph-$id/keyring
osd_max_backfills = 1
osd_recovery_max_active = 1
osd crush update on start = false
[mon.0]
host = vsrv
mon addr = 10.10.10.1:6789
[mon.1]
host = vsrv2
mon addr = 10.10.10.2:6789
[mon.2]
host = vsrv3
mon addr = 10.10.10.3:6789
My crush map:
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable straw_calc_version 1
# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7
# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root
# buckets
host vsrv3 {
    id -2    # do not change unnecessarily
    # weight 7.250
    alg straw
    hash 0    # rjenkins1
    item osd.1 weight 2.720
    item osd.2 weight 1.810
    item osd.3 weight 2.720
}
host vsrv2 {
    id -3    # do not change unnecessarily
    # weight 7.250
    alg straw
    hash 0    # rjenkins1
    item osd.0 weight 2.720
    item osd.4 weight 1.810
    item osd.5 weight 2.720
}
host vsrv {
    id -4    # do not change unnecessarily
    # weight 0.000
    alg straw
    hash 0    # rjenkins1
}
root default {
    id -1    # do not change unnecessarily
    # weight 14.500
    alg straw
    hash 0    # rjenkins1
    item vsrv3 weight 7.250
    item vsrv2 weight 7.250
    item vsrv weight 0.000
}
host vsrv2-ssd {
    id -17    # do not change unnecessarily
    # weight 0.100
    alg straw
    hash 0    # rjenkins1
    item osd.7 weight 0.100
}
host vsrv3-ssd {
    id -16    # do not change unnecessarily
    # weight 0.100
    alg straw
    hash 0    # rjenkins1
    item osd.6 weight 0.100
}
root ssd {
    id -15    # do not change unnecessarily
    # weight 0.200
    alg straw
    hash 0    # rjenkins1
    item vsrv2-ssd weight 0.100
    item vsrv3-ssd weight 0.100
}
# rules
rule replicated_ruleset {
    ruleset 0
    type replicated
    min_size 1
    max_size 10
    step take default
    step chooseleaf firstn 0 type host
    step emit
}
rule ssd {
    ruleset 1
    type replicated
    min_size 1
    max_size 10
    step take ssd
    step chooseleaf firstn 0 type host
    step emit
}
# end crush map
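For completeness, this is roughly how I edit and re-inject the map (the file names are just examples):

ceph osd getcrushmap -o crushmap.bin
crushtool -d crushmap.bin -o crushmap.txt
# edit crushmap.txt, then recompile and load it:
crushtool -c crushmap.txt -o crushmap.new
ceph osd setcrushmap -i crushmap.new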
I hope I explained my problem well enough. Thank you!
Sincerely, Bonkersdeluxe