Hi,
I have an issue with terribly slow pve-zsync container backups (compared to VM backups). Both servers (source and destination) are enterprise-grade machines running the latest version of Proxmox VE 5.4.
I performed some pve-zsync backup tests with a clean/default installation of an Ubuntu 18.04 CT and a Windows 10 VM. It took almost 36 minutes to back up the CT (~630 MB) but only about 10 minutes to back up the VM (~9.6 GB).
Any ideas?
Thanks.
Code:
root@SERVER1:~# pve-zsync sync -source 183 -dest SERVER2:hdd-pool/backup --verbose --maxsnap 10
full send of nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46 estimated size is 617M
total estimated size is 617M
TIME SENT SNAPSHOT
01:43:48 19.1M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:49 19.1M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:50 19.1M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:51 19.9M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:52 20.4M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:53 20.4M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:54 21.3M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:55 21.8M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
01:43:56 22.0M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
[...]
02:19:37 632M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
02:19:38 632M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
02:19:39 633M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
02:19:40 633M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
02:19:41 633M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
02:19:42 634M nvme-pool/subvol-183-disk-0@rep_default_2019-10-16_01:43:46
Code:
root@SERVER1:~# pve-zsync sync -source 184 -dest SERVER2:hdd-pool/backup --verbose --maxsnap 10
full send of nvme-pool/vm-184-disk-0@rep_default_2019-10-16_02:39:56 estimated size is 9.59G
total estimated size is 9.59G
TIME SENT SNAPSHOT
02:39:58 19.7M nvme-pool/vm-184-disk-0@rep_default_2019-10-16_02:39:56
02:39:59 25.2M nvme-pool/vm-184-disk-0@rep_default_2019-10-16_02:39:56
02:40:00 29.9M nvme-pool/vm-184-disk-0@rep_default_2019-10-16_02:39:56
[...]
02:49:38 9.65G nvme-pool/vm-184-disk-0@rep_default_2019-10-16_02:39:56
02:49:39 9.67G nvme-pool/vm-184-disk-0@rep_default_2019-10-16_02:39:56
02:49:40 9.68G nvme-pool/vm-184-disk-0@rep_default_2019-10-16_02:39:56
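For comparison, that works out to roughly 634 MB in ~36 minutes (about 0.3 MB/s) for the CT versus ~9.68 GB in under 10 minutes (about 17 MB/s) for the VM, so the VM stream is more than 50 times faster. To rule out the network/SSH layer I could also measure the raw local send speed of the CT dataset, something along these lines (assuming pv is installed; the @speedtest snapshot name is just an example):
Code:
root@SERVER1:~# zfs snapshot nvme-pool/subvol-183-disk-0@speedtest
root@SERVER1:~# zfs send nvme-pool/subvol-183-disk-0@speedtest | pv > /dev/null
root@SERVER1:~# zfs destroy nvme-pool/subvol-183-disk-0@speedtest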
Code:
### SERVER 1 ###
root@SERVER1:~# pveversion -v
proxmox-ve: 5.4-2 (running kernel: 4.15.18-21-pve)
pve-manager: 5.4-13 (running version: 5.4-13/aee6f0ec)
pve-kernel-4.15: 5.4-9
pve-kernel-4.15.18-21-pve: 4.15.18-48
pve-kernel-4.15.18-12-pve: 4.15.18-36
pve-kernel-4.15.18-10-pve: 4.15.18-32
corosync: 2.4.4-pve1
criu: 2.11.1-1~bpo90
glusterfs-client: 3.8.8-1
ksm-control-daemon: 1.2-2
libjs-extjs: 6.0.1-2
libpve-access-control: 5.1-12
libpve-apiclient-perl: 2.0-5
libpve-common-perl: 5.0-55
libpve-guest-common-perl: 2.0-20
libpve-http-server-perl: 2.0-14
libpve-storage-perl: 5.0-44
libqb0: 1.0.3-1~bpo9
lvm2: 2.02.168-pve6
lxc-pve: 3.1.0-7
lxcfs: 3.0.3-pve1
novnc-pve: 1.0.0-3
proxmox-widget-toolkit: 1.0-28
pve-cluster: 5.0-38
pve-container: 2.0-40
pve-docs: 5.4-2
pve-edk2-firmware: 1.20190312-1
pve-firewall: 3.0-22
pve-firmware: 2.0-7
pve-ha-manager: 2.0-9
pve-i18n: 1.1-4
pve-libspice-server1: 0.14.1-2
pve-qemu-kvm: 3.0.1-4
pve-xtermjs: 3.12.0-1
pve-zsync: 1.7-4
qemu-server: 5.0-54
smartmontools: 6.5+svn4324-1
spiceterm: 3.0-5
vncterm: 1.5-3
zfsutils-linux: 0.7.13-pve1~bpo2
### SERVER 2 ###
root@SERVER2:~# pveversion -v
proxmox-ve: 5.4-2 (running kernel: 4.15.18-21-pve)
pve-manager: 5.4-13 (running version: 5.4-13/aee6f0ec)
pve-kernel-4.15: 5.4-9
pve-kernel-4.15.18-21-pve: 4.15.18-48
pve-kernel-4.15.18-18-pve: 4.15.18-44
pve-kernel-4.15.18-13-pve: 4.15.18-37
pve-kernel-4.15.18-12-pve: 4.15.18-36
pve-kernel-4.15.18-10-pve: 4.15.18-32
corosync: 2.4.4-pve1
criu: 2.11.1-1~bpo90
glusterfs-client: 3.8.8-1
ksm-control-daemon: 1.2-2
libjs-extjs: 6.0.1-2
libpve-access-control: 5.1-12
libpve-apiclient-perl: 2.0-5
libpve-common-perl: 5.0-55
libpve-guest-common-perl: 2.0-20
libpve-http-server-perl: 2.0-14
libpve-storage-perl: 5.0-44
libqb0: 1.0.3-1~bpo9
lvm2: 2.02.168-pve6
lxc-pve: 3.1.0-7
lxcfs: 3.0.3-pve1
novnc-pve: 1.0.0-3
proxmox-widget-toolkit: 1.0-28
pve-cluster: 5.0-38
pve-container: 2.0-40
pve-docs: 5.4-2
pve-edk2-firmware: 1.20190312-1
pve-firewall: 3.0-22
pve-firmware: 2.0-7
pve-ha-manager: 2.0-9
pve-i18n: 1.1-4
pve-libspice-server1: 0.14.1-2
pve-qemu-kvm: 3.0.1-4
pve-xtermjs: 3.12.0-1
pve-zsync: 1.7-4
qemu-server: 5.0-54
smartmontools: 6.5+svn4324-1
spiceterm: 3.0-5
vncterm: 1.5-3
zfsutils-linux: 0.7.13-pve1~bpo2
Code:
### SERVER 1 ###
root@SERVER1:~# zpool status
pool: nvme-pool
state: ONLINE
scan: scrub repaired 0B in 0h40m with 0 errors on Sun Oct 13 01:04:44 2019
config:
NAME STATE READ WRITE CKSUM
nvme-pool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
nvme-SAMSUNG_MZWLL1T6HEHP-00003_XXXXXXXXXX0212 ONLINE 0 0 0
nvme-SAMSUNG_MZWLL1T6HEHP-00003_XXXXXXXXXX0207 ONLINE 0 0 0
errors: No known data errors
pool: rpool
state: ONLINE
scan: scrub repaired 0B in 0h8m with 0 errors on Sun Oct 13 00:32:14 2019
config:
NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
wwn-0xxxxxxxxxxxxxx32d-part3 ONLINE 0 0 0
wwn-0xxxxxxxxxxxxxx2c8-part3 ONLINE 0 0 0
errors: No known data errors
### SERVER 2 ###
root@SERVER2:~# zpool status
pool: hdd-pool
state: ONLINE
scan: scrub repaired 0B in 1h9m with 0 errors on Sun Oct 13 01:33:28 2019
config:
NAME STATE READ WRITE CKSUM
hdd-pool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
scsi-xxxxxxxxxxxxx1544 ONLINE 0 0 0
scsi-xxxxxxxxxxxxxb888 ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
scsi-xxxxxxxxxxxxx3218 ONLINE 0 0 0
scsi-xxxxxxxxxxxxx59b8 ONLINE 0 0 0
mirror-2 ONLINE 0 0 0
scsi-xxxxxxxxxxxxxdc5c ONLINE 0 0 0
scsi-xxxxxxxxxxxxxebdc ONLINE 0 0 0
mirror-3 ONLINE 0 0 0
scsi-xxxxxxxxxxxxxb5a4 ONLINE 0 0 0
scsi-xxxxxxxxxxxxx7e30 ONLINE 0 0 0
logs
mirror-4 ONLINE 0 0 0
nvme-INTEL_SSDPE21K100GA_XXXXXXXXXXXF100EGN ONLINE 0 0 0
nvme-INTEL_SSDPE21K100GA_XXXXXXXXXXXL100EGN ONLINE 0 0 0
errors: No known data errors
pool: nvme-pool
state: ONLINE
scan: scrub repaired 0B in 0h2m with 0 errors on Sun Oct 13 00:26:23 2019
config:
NAME STATE READ WRITE CKSUM
nvme-pool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
nvme-SAMSUNG_MZWLL1T6HEHP-00003_XXXXXXXXXXX311 ONLINE 0 0 0
nvme-SAMSUNG_MZWLL1T6HEHP-00003_XXXXXXXXXXX198 ONLINE 0 0 0
errors: No known data errors
pool: rpool
state: ONLINE
scan: scrub repaired 0B in 0h0m with 0 errors on Sun Oct 13 00:24:41 2019
config:
NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
wwn-0xxxxxxxxxxxxxxf62-part3 ONLINE 0 0 0
wwn-0xxxxxxxxxxxxxxf5e-part3 ONLINE 0 0 0
errors: No known data errors
Code:
### SERVER 1 ###
root@SERVER1:~# zfs get all nvme-pool | grep local
nvme-pool compression lz4 local
nvme-pool atime off local
nvme-pool xattr sa local
nvme-pool dnodesize auto local
### SERVER 2 ###
root@SERVER2:~# zfs get all hdd-pool | grep local
hdd-pool compression lz4 local
hdd-pool atime off local
hdd-pool xattr sa local
hdd-pool dnodesize auto local
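If it helps, I can also post the per-dataset settings of the two guests. Something like this should show whether the CT subvol and the VM zvol differ in block sizing or compression (dataset names taken from the sync logs above; recordsize applies to the CT filesystem, volblocksize to the VM zvol):
Code:
root@SERVER1:~# zfs get recordsize,compression,used nvme-pool/subvol-183-disk-0
root@SERVER1:~# zfs get volblocksize,compression,used nvme-pool/vm-184-disk-0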