zfs raid 10 to zfs raid 1

chalan

Member
I have 4x 1TB WD RED drives in a RAID 10 ZFS pool:

Code:
root@pve-klenova:~# zpool status
  pool: rpool
 state: ONLINE
  scan: resilvered 29.2G in 1h16m with 0 errors on Sun Dec 10 15:01:57 2017
config:

        NAME                                                STATE     READ WRITE CKSUM
        rpool                                               ONLINE       0     0     0
          mirror-0                                          ONLINE       0     0     0
            ata-WDC_WD10EFRX-68PJCN0_WD-WCC4J2021886-part2  ONLINE       0     0     0
            ata-WDC_WD10EFRX-68JCSN0_WD-WMC1U6546808-part2  ONLINE       0     0     0
          mirror-1                                          ONLINE       0     0     0
            ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J2AK75T9        ONLINE       0     0     0
            ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1JE0SFR        ONLINE       0     0     0

errors: No known data errors

  pool: vmdata
 state: ONLINE
  scan: none requested
config:

        NAME                                     STATE     READ WRITE CKSUM
        vmdata                                   ONLINE       0     0     0
          mirror-0                               ONLINE       0     0     0
            ata-WDC_WD4002FYYZ-01B7CB1_K3GN7LYL  ONLINE       0     0     0
            ata-WDC_WD4002FYYZ-01B7CB1_K7GAE87L  ONLINE       0     0     0

errors: No known data errors

Code:
root@pve-klenova:~# ls -lh /dev/disk/by-id/
total 0
lrwxrwxrwx 1 root root  9 Dec 21 18:43 ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1JE0SFR -> ../../sdd
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1JE0SFR-part1 -> ../../sdd1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J1JE0SFR-part9 -> ../../sdd9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J2AK75T9 -> ../../sdc
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J2AK75T9-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68FYTN0_WD-WCC4J2AK75T9-part9 -> ../../sdc9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 ata-WDC_WD10EFRX-68JCSN0_WD-WMC1U6546808 -> ../../sdb
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68JCSN0_WD-WMC1U6546808-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68JCSN0_WD-WMC1U6546808-part2 -> ../../sdb2
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68JCSN0_WD-WMC1U6546808-part9 -> ../../sdb9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 ata-WDC_WD10EFRX-68PJCN0_WD-WCC4J2021886 -> ../../sda
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68PJCN0_WD-WCC4J2021886-part1 -> ../../sda1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68PJCN0_WD-WCC4J2021886-part2 -> ../../sda2
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD10EFRX-68PJCN0_WD-WCC4J2021886-part9 -> ../../sda9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 ata-WDC_WD4002FYYZ-01B7CB1_K3GN7LYL -> ../../sde
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD4002FYYZ-01B7CB1_K3GN7LYL-part1 -> ../../sde1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD4002FYYZ-01B7CB1_K3GN7LYL-part9 -> ../../sde9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 ata-WDC_WD4002FYYZ-01B7CB1_K7GAE87L -> ../../sdf
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD4002FYYZ-01B7CB1_K7GAE87L-part1 -> ../../sdf1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD4002FYYZ-01B7CB1_K7GAE87L-part9 -> ../../sdf9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 ata-WDC_WD7500BPKT-80PK4T0_WD-WXA1A23M9532 -> ../../sdg
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD7500BPKT-80PK4T0_WD-WXA1A23M9532-part1 -> ../../sdg1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 ata-WDC_WD7500BPKT-80PK4T0_WD-WXA1A23M9532-part2 -> ../../sdg2
lrwxrwxrwx 1 root root  9 Dec 21 18:43 wwn-0x5000cca25cc933fe -> ../../sde
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x5000cca25cc933fe-part1 -> ../../sde1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x5000cca25cc933fe-part9 -> ../../sde9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 wwn-0x5000cca269c4bd82 -> ../../sdf
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x5000cca269c4bd82-part1 -> ../../sdf1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x5000cca269c4bd82-part9 -> ../../sdf9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 wwn-0x50014ee20cb14f8a -> ../../sdc
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee20cb14f8a-part1 -> ../../sdc1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee20cb14f8a-part9 -> ../../sdc9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 wwn-0x50014ee25f04dc16 -> ../../sda
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee25f04dc16-part1 -> ../../sda1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee25f04dc16-part2 -> ../../sda2
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee25f04dc16-part9 -> ../../sda9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 wwn-0x50014ee261fd68b6 -> ../../sdd
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee261fd68b6-part1 -> ../../sdd1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee261fd68b6-part9 -> ../../sdd9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 wwn-0x50014ee602bd2b00 -> ../../sdb
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee602bd2b00-part1 -> ../../sdb1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee602bd2b00-part2 -> ../../sdb2
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee602bd2b00-part9 -> ../../sdb9
lrwxrwxrwx 1 root root  9 Dec 21 18:43 wwn-0x50014ee6035e1da0 -> ../../sdg
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee6035e1da0-part1 -> ../../sdg1
lrwxrwxrwx 1 root root 10 Dec 21 18:43 wwn-0x50014ee6035e1da0-part2 -> ../../sdg2

and I want to remove two of them (the older ones)...

Code:
root@pve-klenova:~# smartctl /dev/sda -a | grep Power
  9 Power_On_Hours          0x0032   062   062   000    Old_age   Always       -       28441
 12 Power_Cycle_Count       0x0032   100   100   000    Old_age   Always       -       168
192 Power-Off_Retract_Count 0x0032   200   200   000    Old_age   Always       -       123
root@pve-klenova:~# smartctl /dev/sdb -a | grep Power
  9 Power_On_Hours          0x0032   039   039   000    Old_age   Always       -       44885
 12 Power_Cycle_Count       0x0032   100   100   000    Old_age   Always       -       332
192 Power-Off_Retract_Count 0x0032   200   200   000    Old_age   Always       -       249
root@pve-klenova:~# smartctl /dev/sdc -a | grep Power
  9 Power_On_Hours          0x0032   097   097   000    Old_age   Always       -       2215
 12 Power_Cycle_Count       0x0032   100   100   000    Old_age   Always       -       36
192 Power-Off_Retract_Count 0x0032   200   200   000    Old_age   Always       -       28
root@pve-klenova:~# smartctl /dev/sdd -a | grep Power
  9 Power_On_Hours          0x0032   097   097   000    Old_age   Always       -       2239
 12 Power_Cycle_Count       0x0032   100   100   000    Old_age   Always       -       37
192 Power-Off_Retract_Count 0x0032   200   200   000    Old_age   Always       -       28

So the drives to remove are /dev/sda and /dev/sdb, which should be ata-WDC_WD10EFRX-68PJCN0_WD-WCC4J2021886-part2 and ata-WDC_WD10EFRX-68JCSN0_WD-WMC1U6546808-part2. How can this be done safely, please? Thank you...
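
To be sure which physical drive is which before anything gets pulled, the serial numbers embedded in the by-id names above can be cross-checked against each device node. Just a sanity check; smartctl prints the serial in its info section:

Code:
root@pve-klenova:~# smartctl -i /dev/sda | grep -i serial
root@pve-klenova:~# smartctl -i /dev/sdb | grep -i serial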
 
Hi,

You cannot remove any disk from your RAID 10 pool, because your pool would then be in a degraded state. I guess that you want to convert your RAID 10 pool into a mirror with 2 disks - technically it is possible, but it is risky if you cannot make a backup on another system/server first. Is this what you want?
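
As a rough sketch only (not a tested procedure): since the vmdata pool is empty, a recursive snapshot of rpool could be copied over with zfs send/receive before touching anything. The snapshot and target dataset names below are just examples, and -u keeps the received copies from being mounted (they carry rpool's mountpoints, which would clash with the running system):

Code:
# rough sketch -- check free space on vmdata first; names are examples
zfs snapshot -r rpool@pre-convert
zfs send -R rpool@pre-convert | zfs receive -u vmdata/rpool-backup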
 
Yes, I need to convert RAID 10 to RAID 1, and after that I will remove two of the four drives from the server permanently...
 
Edit: the other ZFS pool vmdata with 2x 4TB WD GOLD drives is empty, so I can use it as a backup - but HOW? :) Please... should I add a new storage in Datacenter, and after that how can I make a backup before the RAID 10 to RAID 1 conversion?

Or I can install a fresh new Proxmox on the 2x 4TB WD Gold drives and after that somehow move the VMs from the "old" 4x 1TB WD RED ZFS pool and then totally remove the 4x 1TB WD RED drives... but I don't know how...
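
One common way to move guests between pools or installations is a vzdump backup followed by a restore onto the new storage. The sketch below is only an illustration, with a made-up VMID (100), a made-up backup storage ID, and a placeholder archive name:

Code:
# on the old installation: dump the guest to a storage both installs can reach
vzdump 100 --storage backupstore --mode snapshot
# on the new installation: restore it onto the ZFS storage backed by vmdata
qmrestore /path/to/vzdump-qemu-100-<timestamp>.vma.lzo 100 --storage vmdata

Alternatively, if both pools stay attached to the same running installation, adding vmdata as a ZFS storage under Datacenter > Storage and using "Move disk" on each VM disk avoids the dump/restore round trip.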
 
So, simply:

zpool offline rpool /dev/sda2
zpool offline rpool /dev/sdb2

Please advise me, I can't afford a mistake...

I need to detach the drives in mirror-0, but both drives in there are bootable... how do I make the other disks bootable too?
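
On the boot question: the generic pattern for making an additional rpool disk bootable on a legacy-BIOS ZFS install is to give it the same partition layout as an existing boot disk and then install GRUB onto it. This is only a sketch of that pattern - replicating a partition table wipes the target disk, so it could not be run against sdc/sdd while they still hold pool data; /dev/sdX stands for a blank disk:

Code:
# copy the partition layout of a known-good boot disk (here sda) onto the blank disk, then give it new GUIDs
sgdisk /dev/sda -R /dev/sdX
sgdisk -G /dev/sdX
# install the boot loader onto the new disk
grub-install /dev/sdX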
 
Detaching example

Code:
# truncate -s 1G file1
# truncate -s 1G file2
# zpool create zfs_test mirror /root/file1 /root/file2
# zpool status zfs_test
  pool: zfs_test
 state: ONLINE
  scan: none requested
config:

    NAME           STATE     READ WRITE CKSUM
    zfs_test       ONLINE       0     0     0
      /root/file1  ONLINE       0     0     0
      /root/file2  ONLINE       0     0     0

errors: No known data errors
# zpool detach zfs_test /root/file2
# zpool status zfs_test
  pool: zfs_test
 state: ONLINE
  scan: none requested
config:

    NAME           STATE     READ WRITE CKSUM
    zfs_test       ONLINE       0     0     0
      /root/file1  ONLINE       0     0     0

errors: No known data errors
 
But if I simply detach drives from the RAID 10, at the end I will have 2x HDD in striped mode - or am I wrong?
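
That can be checked without risk using the same file-vdev trick as in the example above (a throwaway sketch; the pool and file names are arbitrary):

Code:
# build a small raid10-style pool out of four sparse files
truncate -s 1G /root/f1 /root/f2 /root/f3 /root/f4
zpool create striptest mirror /root/f1 /root/f2 mirror /root/f3 /root/f4
# detach one file from each mirror
zpool detach striptest /root/f2
zpool detach striptest /root/f4
# zpool status now lists two single vdevs side by side,
# i.e. a plain stripe with no redundancy, not a two-disk mirror
zpool status striptest
zpool destroy striptest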
 
OK, so my only option is to reinstall? Is it reasonable to have the system and the storage on different ZFS pools? I can have the system on rpool on 2x 1TB WD RED SATA2 mirrored and the VM data/storage on the vmdata pool, which is 2x 4TB WD Gold SATA3... or I can have both system and storage on the 2x 4TB WD Gold drives and use the WD RED drives in a different system... what is better?
 
