Disk speed: VM Freebsd 15 vs Debian Trixie - Proxmox

vesuvienne

Member
Jun 7, 2024
82
3
8
Hi team, just dropping some results of disk speed performance. If someone has the time, knowledge, or any clues (maybe I did something wrong) to help improve my FreeBSD 15 disk speed:
Proxmox - LVM-Thin - VM - CPU:2 MEMORY:2GB
- VM disk mounted with: VirtIO - iothread=1,ssd=1,discard=on
Bash:
=====================================================
 Disk Benchmark (fio) 'FreeBSD 15.0-RELEASE-p5' 11/04/2026
 IO Engine: posixaio
 Disk+type: /dev/da0p2 ufs
 File: fio_test_file | Size: 4G
=====================================================
 SUMMARY (Best & Worst Results)
=====================================================
Test                               Best      Worst
--------------------------------------------------------------
Seq Write (1M, 1 job)         1277MiB/s   726MiB/s
Seq Read  (1M, 1 job)         2973MiB/s  2671MiB/s
Seq Write (1M, p4)            1096MiB/s   667MiB/s
Seq Read  (1M, p4)            2635MiB/s  2294MiB/s
Seq Write (128K)              1215MiB/s   754MiB/s
Seq Read  (128K)              2242MiB/s  1737MiB/s

Rand Read (QD1)               4812 IOPS  4501 IOPS
Rand Read (QD4)              28.0k IOPS 26.7k IOPS
Rand Read (Stress)           45.3k IOPS 44.7k IOPS
Rand Write (QD1)              2443 IOPS  1597 IOPS
Rand Write (QD4)              2020 IOPS  1779 IOPS
Rand Write (Stress)           2621 IOPS  1590 IOPS

Mixed R/W (realistic)         3757 IOPS  1413 IOPS

Sync Write (1 job)                 1998       1448
Sync Write (load)                  2553       2162

---

=====================================================
 Disk Benchmark (fio) 'Debian GNU/Linux 13 (trixie)' 09/04/2026
 IO Engine: io_uring
 Disk+type: /dev/sda2 ext4
 File: fio_test_file | Size: 4G
=====================================================
 SUMMARY (Best & Worst Results)
=====================================================
Test                               Best      Worst
--------------------------------------------------------------
Seq Write (1M, 1 job)         1746MiB/s   845MiB/s
Seq Read  (1M, 1 job)         3424MiB/s  2606MiB/s
Seq Write (1M, p4)            1503MiB/s   934MiB/s
Seq Read  (1M, p4)            3098MiB/s  3062MiB/s
Seq Write (128K)              1276MiB/s   730MiB/s
Seq Read  (128K)              2373MiB/s  1973MiB/s

Rand Read (QD1)              11.8k IOPS 11.5k IOPS
Rand Read (QD4)              87.9k IOPS 86.1k IOPS
Rand Read (Stress)            273k IOPS  200k IOPS
Rand Write (QD1)             27.5k IOPS 27.2k IOPS
Rand Write (QD4)              118k IOPS  106k IOPS
Rand Write (Stress)           138k IOPS  137k IOPS

Mixed R/W (realistic)        17.8k IOPS  6291 IOPS

Sync Write (1 job)                  430        206
Sync Write (load)                   880        631

- My fio test
Bash:
#######################################
# Run one fio job and summarize it.
# Globals:
#   LOG_FILE (appended), TEST_FILE, SIZE, RUNTIME (read)
#   IOENGINE (read, optional; defaults to io_uring)
# Arguments:
#   $1  - job name (names containing "rand" also get a latency summary)
#   $@  - remaining args passed straight through to fio
# Outputs:
#   Full fio output appended to $LOG_FILE; a short throughput
#   (and, for random tests, P99/P99.9 latency) summary to stdout + log.
#######################################
run_fio() {
  local NAME=$1
  shift

  # Capture THIS run's output in its own temp file, then append it to
  # the cumulative log. Grepping only $run_out (instead of the whole
  # $LOG_FILE, as before) guarantees the summary lines come from the
  # current test and cannot match earlier runs or previously-teed
  # summary lines.
  local run_out
  run_out=$(mktemp) || return 1

  echo "" >>"$LOG_FILE"
  echo ">>> $NAME" | tee -a "$LOG_FILE"

  fio --name="$NAME" \
    --filename="$TEST_FILE" \
    --size="$SIZE" \
    --direct=1 \
    --ioengine="${IOENGINE:-io_uring}" \
    --ramp_time=10 \
    --runtime="$RUNTIME" \
    --time_based \
    --randrepeat=0 \
    --norandommap \
    --refill_buffers \
    --group_reporting \
    --clat_percentiles=1 \
    --lat_percentiles=1 \
    --percentile_list=50,90,95,99,99.9,99.99 \
    --log_avg_msec=1000 \
    --numjobs=1 \
    --unlink=1 \
    "$@" >"$run_out"
  cat "$run_out" >>"$LOG_FILE"

  echo "  Throughput:" | tee -a "$LOG_FILE"
  grep -E "IOPS=|BW=" "$run_out" | tail -1 | tee -a "$LOG_FILE"

  # Latency summary only for random-I/O tests (name contains "rand").
  case "$NAME" in
    *rand*)
      echo "  Latency P99/P99.9:" | tee -a "$LOG_FILE"
      # Dots escaped so the pattern matches the literal percentile
      # labels (99.00th / 99.90th) and nothing else.
      grep -A 20 "clat percentiles" "$run_out" \
        | grep -E "99\.00th|99\.90th" | tail -2 | tee -a "$LOG_FILE"
      ;;
  esac

  rm -f -- "$run_out"
}
# Execute the complete benchmark matrix $RUNS times. Each individual
# fio test is followed by a $SLEEP_TIME-second pause so the device and
# caches can settle between measurements.
for (( run = 1; run <= RUNS; run++ )); do
  echo ""
  echo "===== Run $run / $RUNS ====="

  # --- Sequential throughput ---

  # Peak single-job throughput
  run_fio "seq_write_1M_run$run" --rw=write --bs=1M --iodepth=16 --numjobs=1
  sleep "$SLEEP_TIME"
  run_fio "seq_read_1M_run$run" --rw=read --bs=1M --iodepth=16 --numjobs=1
  sleep "$SLEEP_TIME"

  # Parallel throughput (realistic NVMe usage)
  run_fio "seq_write_1M_p4_run$run" --rw=write --bs=1M --iodepth=16 --numjobs=4
  sleep "$SLEEP_TIME"
  run_fio "seq_read_1M_p4_run$run" --rw=read --bs=1M --iodepth=16 --numjobs=4
  sleep "$SLEEP_TIME"

  # Realistic streaming
  run_fio "seq_write_128K_run$run" --rw=write --bs=128K --iodepth=8 --numjobs=2
  sleep "$SLEEP_TIME"
  run_fio "seq_read_128K_run$run" --rw=read --bs=128K --iodepth=8 --numjobs=2
  sleep "$SLEEP_TIME"

  # --- Random read: latency baseline, realistic load, stress ---

  run_fio "rand_read_qd1_run$run" --rw=randread --bs=4k --iodepth=1 --numjobs=1
  sleep "$SLEEP_TIME"
  run_fio "rand_read_qd4_run$run" --rw=randread --bs=4k --iodepth=4 --numjobs=2
  sleep "$SLEEP_TIME"
  run_fio "rand_read_stress_run$run" --rw=randread --bs=4k --iodepth=16 --numjobs=4
  sleep "$SLEEP_TIME"

  # --- Random write: same ladder as random read ---

  run_fio "rand_write_qd1_run$run" --rw=randwrite --bs=4k --iodepth=1 --numjobs=1
  sleep "$SLEEP_TIME"
  run_fio "rand_write_qd4_run$run" --rw=randwrite --bs=4k --iodepth=4 --numjobs=2
  sleep "$SLEEP_TIME"
  run_fio "rand_write_stress_run$run" --rw=randwrite --bs=4k --iodepth=16 --numjobs=4
  sleep "$SLEEP_TIME"

  # --- Mixed 70/30 read/write, variable block size (realistic) ---

  run_fio "rand_rw_realistic_run$run" \
    --rw=randrw \
    --rwmixread=70 \
    --bsrange=4k-64k \
    --iodepth=8 \
    --numjobs=2
  sleep "$SLEEP_TIME"

  # --- Sync writes (database commit behaviour) ---

  # Single-thread commit latency
  run_fio "sync_write_run$run" \
    --rw=write --bs=4k \
    --fsync=1 \
    --iodepth=1 \
    --numjobs=1
  sleep "$SLEEP_TIME"

  # Multi-client commit pressure
  run_fio "sync_write_load_run$run" \
    --rw=write --bs=4k \
    --fsync=1 \
    --iodepth=1 \
    --numjobs=4
  sleep "$SLEEP_TIME"
done
 
Last edited: