pve-zsync: cannot receive new filesystem stream: out of space

Hyien

Member
Jun 18, 2021
95
3
13
35
I'm getting "cannot receive new filesystem stream: out of space" when running pve-zsync. How do I resolve this?

Code:
on src:
# zpool list
NAME        SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
mypool  3.48T  1.09T  2.39T        -         -    37%    31%  1.00x    ONLINE  -

# zfs get all mypool/vm-129-disk-0
NAME                     PROPERTY              VALUE                  SOURCE
mypool/vm-129-disk-0  type                  volume                 -
mypool/vm-129-disk-0  creation              Sat Nov  5  3:43 2022  -
mypool/vm-129-disk-0  used                  516G                   -
mypool/vm-129-disk-0  available             1.06T                  -
mypool/vm-129-disk-0  referenced            147G                   -
mypool/vm-129-disk-0  compressratio         1.00x                  -
mypool/vm-129-disk-0  reservation           none                   default
mypool/vm-129-disk-0  volsize               500G                   local
mypool/vm-129-disk-0  volblocksize          8K                     default
mypool/vm-129-disk-0  checksum              on                     default
mypool/vm-129-disk-0  compression           off                    default
mypool/vm-129-disk-0  readonly              off                    default
mypool/vm-129-disk-0  createtxg             770791                 -
mypool/vm-129-disk-0  copies                1                      default
mypool/vm-129-disk-0  refreservation        516G                   local
mypool/vm-129-disk-0  guid                  <REDACTED>             -
mypool/vm-129-disk-0  primarycache          all                    default
mypool/vm-129-disk-0  secondarycache        all                    default
mypool/vm-129-disk-0  usedbysnapshots       0B                     -
mypool/vm-129-disk-0  usedbydataset         147G                   -
mypool/vm-129-disk-0  usedbychildren        0B                     -
mypool/vm-129-disk-0  usedbyrefreservation  368G                   -
mypool/vm-129-disk-0  logbias               latency                default
mypool/vm-129-disk-0  objsetid              84637                  -
mypool/vm-129-disk-0  dedup                 off                    default
mypool/vm-129-disk-0  mlslabel              none                   default
mypool/vm-129-disk-0  sync                  standard               default
mypool/vm-129-disk-0  refcompressratio      1.00x                  -
mypool/vm-129-disk-0  written               147G                   -
mypool/vm-129-disk-0  logicalused           147G                   -
mypool/vm-129-disk-0  logicalreferenced     147G                   -
mypool/vm-129-disk-0  volmode               default                default
mypool/vm-129-disk-0  snapshot_limit        none                   default
mypool/vm-129-disk-0  snapshot_count        none                   default
mypool/vm-129-disk-0  snapdev               hidden                 default
mypool/vm-129-disk-0  context               none                   default
mypool/vm-129-disk-0  fscontext             none                   default
mypool/vm-129-disk-0  defcontext            none                   default
mypool/vm-129-disk-0  rootcontext           none                   default
mypool/vm-129-disk-0  redundant_metadata    all                    default
mypool/vm-129-disk-0  encryption            off                    default
mypool/vm-129-disk-0  keylocation           none                   default
mypool/vm-129-disk-0  keyformat             none                   default
mypool/vm-129-disk-0  pbkdf2iters           0                      default


on dest:
# zpool list
NAME        SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
mypool  3.48T  1.21T  2.28T        -         -    40%    34%  1.00x    ONLINE  -

NAME                  PROPERTY              VALUE                  SOURCE
mypool/vm-130-disk-0  type                  volume                 -
mypool/vm-130-disk-0  creation              Sat Nov  5  3:43 2022  -
mypool/vm-130-disk-0  used                  516G                   -
mypool/vm-130-disk-0  available             464G                   -
mypool/vm-130-disk-0  referenced            119G                   -
mypool/vm-130-disk-0  compressratio         1.00x                  -
mypool/vm-130-disk-0  reservation           none                   default
mypool/vm-130-disk-0  volsize               500G                   local
mypool/vm-130-disk-0  volblocksize          8K                     default
mypool/vm-130-disk-0  checksum              on                     default
mypool/vm-130-disk-0  compression           off                    default
mypool/vm-130-disk-0  readonly              off                    default
mypool/vm-130-disk-0  createtxg             769328                 -
mypool/vm-130-disk-0  copies                1                      default
mypool/vm-130-disk-0  refreservation        516G                   local
mypool/vm-130-disk-0  guid                  <REDACTED>             -
mypool/vm-130-disk-0  primarycache          all                    default
mypool/vm-130-disk-0  secondarycache        all                    default
mypool/vm-130-disk-0  usedbysnapshots       0B                     -
mypool/vm-130-disk-0  usedbydataset         119G                   -
mypool/vm-130-disk-0  usedbychildren        0B                     -
mypool/vm-130-disk-0  usedbyrefreservation  397G                   -
mypool/vm-130-disk-0  logbias               latency                default
mypool/vm-130-disk-0  objsetid              84914                  -
mypool/vm-130-disk-0  dedup                 off                    default
mypool/vm-130-disk-0  mlslabel              none                   default
mypool/vm-130-disk-0  sync                  standard               default
mypool/vm-130-disk-0  refcompressratio      1.00x                  -
mypool/vm-130-disk-0  written               119G                   -
mypool/vm-130-disk-0  logicalused           118G                   -
mypool/vm-130-disk-0  logicalreferenced     118G                   -
mypool/vm-130-disk-0  volmode               default                default
mypool/vm-130-disk-0  snapshot_limit        none                   default
mypool/vm-130-disk-0  snapshot_count        none                   default
mypool/vm-130-disk-0  snapdev               hidden                 default
mypool/vm-130-disk-0  context               none                   default
mypool/vm-130-disk-0  fscontext             none                   default
mypool/vm-130-disk-0  defcontext            none                   default
mypool/vm-130-disk-0  rootcontext           none                   default
mypool/vm-130-disk-0  redundant_metadata    all                    default
mypool/vm-130-disk-0  encryption            off                    default
mypool/vm-130-disk-0  keylocation           none                   default
mypool/vm-130-disk-0  keyformat             none                   default
mypool/vm-130-disk-0  pbkdf2iters           0                      default
 
Are you using raidz1/2/3 anywhere? With a badly configured volblocksize, the same zvols can require far more space on the target storage than on the source.