I know that the strength of NVMe lies in many parallel IOs, but at least 500 MB/s should be possible inside a VM on a huge 14 x P4510 RAID10 — or am I wrong? It should also be a lot faster to clone a 750 GB VM on that array than the almost 1 hour runtime I'm seeing, with this huge CPU load during the clone.
Here is the output:
root@pve:~# zpool status -v
pool: pve1-zfsnvme
state: ONLINE
scan: resilvered 57.1G in 0 days 00:01:28 with 0 errors on Wed Jan 29 19:44:59 2020
config:
NAME STATE READ WRITE CKSUM
pve1-zfsnvme ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e496a98d5051 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e453188e5051 ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e47eae8d5051 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e4d5ba8d5051 ONLINE 0 0 0
mirror-2 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e41b5e594f51 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e4a1a38d5051 ONLINE 0 0 0
mirror-3 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e43ed0564f51 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e48c188e5051 ONLINE 0 0 0
mirror-4 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e48ea38d5051 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e4b3b4594f51 ONLINE 0 0 0
mirror-5 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e4ca5f594f51 ONLINE 0 0 0
nvme10n1 ONLINE 0 0 0
mirror-6 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e453a38d5051 ONLINE 0 0 0
nvme-eui.01000000010000005cd2e485ae8d5051 ONLINE 0 0 0
errors: No known data errors
root@pve:~# zpool list -v
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
pve1-zfsnvme 12.7T 793G 11.9T - - 0% 6% 1.00x ONLINE -
mirror 1.81T 113G 1.70T - - 0% 6.10% - ONLINE
nvme-eui.01000000010000005cd2e496a98d5051 - - - - - - - - ONLINE
nvme-eui.01000000010000005cd2e453188e5051 - - - - - - - - ONLINE
mirror 1.81T 113G 1.70T - - 0% 6.10% - ONLINE
nvme-eui.01000000010000005cd2e47eae8d5051 - - - - - - - - ONLINE
nvme-eui.01000000010000005cd2e4d5ba8d5051 - - - - - - - - ONLINE
mirror 1.81T 113G 1.70T - - 0% 6.10% - ONLINE
nvme-eui.01000000010000005cd2e41b5e594f51 - - - - - - - - ONLINE
nvme-eui.01000000010000005cd2e4a1a38d5051 - - - - - - - - ONLINE
mirror 1.81T 113G 1.70T - - 0% 6.10% - ONLINE
nvme-eui.01000000010000005cd2e43ed0564f51 - - - - - - - - ONLINE
nvme-eui.01000000010000005cd2e48c188e5051 - - - - - - - - ONLINE
mirror 1.81T 113G 1.70T - - 0% 6.09% - ONLINE
nvme-eui.01000000010000005cd2e48ea38d5051 - - - - - - - - ONLINE
nvme-eui.01000000010000005cd2e4b3b4594f51 - - - - - - - - ONLINE
mirror 1.81T 113G 1.70T - - 0% 6.10% - ONLINE
nvme-eui.01000000010000005cd2e4ca5f594f51 - - - - - - - - ONLINE
nvme10n1 - - - - - - - - ONLINE
mirror 1.81T 113G 1.70T - - 0% 6.10% - ONLINE
nvme-eui.01000000010000005cd2e453a38d5051 - - - - - - - - ONLINE
nvme-eui.01000000010000005cd2e485ae8d5051 - - - - - - - - ONLINE
root@pve:~# cat /etc/pve/qemu-server/100.conf
agent: 1
bootdisk: scsi0
cores: 8
cpu: host
ide2: none,media=cdrom
memory: 8192
name: testvm-deb10
net0: virtio=BE:A:5A:3F:B8:9E,bridge=vmbr0,firewall=1
numa: 0
ostype: l26
scsi0: pve1-zfsnvme:vm-100-disk-0,discard=on,size=750G,ssd=1
scsihw: virtio-scsi-pci
smbios1: uuid=e9fd1864-39d5-4c08-8aa7-dbce84e2748c
sockets: 1
vmgenid: 51c54816-0985-495a-baf5-444b5ffb2a78