mount:
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
proc on /proc type proc (rw,relatime)
udev on /dev type devtmpfs (rw,nosuid,relatime,size=263985620k,nr_inodes=65996405,mode=755)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,noexec,relatime,size=52802496k,mode=755)
/dev/mapper/pve-root on / type ext4 (rw,relatime,errors=remount-ro,stripe=64)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
tmpfs on /run/lock type tmpfs (rw,nosuid,nodev,noexec,relatime,size=5120k)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,mode=755)
cgroup2 on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
none on /sys/fs/bpf type bpf (rw,nosuid,nodev,noexec,relatime,mode=700)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=58608)
mqueue on /dev/mqueue type mqueue (rw,relatime)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,pagesize=2M)
sunrpc on /run/rpc_pipefs type rpc_pipefs (rw,relatime)
fusectl on /sys/fs/fuse/connections type fusectl (rw,relatime)
configfs on /sys/kernel/config type configfs (rw,relatime)
replication on /replication type zfs (rw,xattr,noacl)
replication/subvol-125-disk-0 on /replication/subvol-125-disk-0 type zfs (rw,xattr,posixacl)
replication/basevol-108-disk-0 on /replication/basevol-108-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-103-disk-0 on /replication/subvol-103-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-101-disk-0 on /replication/subvol-101-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-113-disk-0 on /replication/subvol-113-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-105-disk-0 on /replication/subvol-105-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-106-disk-0 on /replication/subvol-106-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-116-disk-0 on /replication/subvol-116-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-104-disk-0 on /replication/subvol-104-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-127-disk-0 on /replication/subvol-127-disk-0 type zfs (rw,xattr,posixacl)
lxcfs on /var/lib/lxcfs type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,relatime)
/dev/fuse on /etc/pve type fuse (rw,nosuid,nodev,relatime,user_id=0,group_id=0,default_permissions,allow_other)
replication/subvol-135-disk-0 on /replication/subvol-135-disk-0 type zfs (rw,xattr,posixacl)
replication/subvol-136-disk-0 on /replication/subvol-136-disk-0 type zfs (rw,xattr,posixacl)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=52802492k,mode=700)
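To pick out just the root filesystem from that wall of mount output, findmnt (part of util-linux, so it should already be installed) gives a targeted view:

findmnt /   # shows source, filesystem type, and mount options for the root mount only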
df -h:
Filesystem                      Size  Used Avail Use% Mounted on
udev                            252G     0  252G   0% /dev
tmpfs                            51G  4.1G   47G   9% /run
/dev/mapper/pve-root             55G   53G     0 100% /
tmpfs                           252G   54M  252G   1% /dev/shm
tmpfs                           5.0M     0  5.0M   0% /run/lock
tmpfs                           252G     0  252G   0% /sys/fs/cgroup
replication                     4.6T  256K  4.6T   1% /replication
replication/subvol-125-disk-0    16G  1.5G   15G  10% /replication/subvol-125-disk-0
replication/basevol-108-disk-0   16G  964M   16G   6% /replication/basevol-108-disk-0
replication/subvol-103-disk-0   8.0G  4.6G  3.5G  57% /replication/subvol-103-disk-0
replication/subvol-101-disk-0   8.0G  1.2G  6.9G  15% /replication/subvol-101-disk-0
replication/subvol-113-disk-0    16G  4.6G   12G  29% /replication/subvol-113-disk-0
replication/subvol-105-disk-0    32G   12G   21G  35% /replication/subvol-105-disk-0
replication/subvol-106-disk-0    16G  3.2G   13G  20% /replication/subvol-106-disk-0
replication/subvol-116-disk-0    16G  3.1G   13G  20% /replication/subvol-116-disk-0
replication/subvol-104-disk-0   512G  101G  412G  20% /replication/subvol-104-disk-0
replication/subvol-127-disk-0    32G  2.0G   31G   7% /replication/subvol-127-disk-0
/dev/fuse                        30M   60K   30M   1% /etc/pve
replication/subvol-135-disk-0    16G  3.0G   14G  19% /replication/subvol-135-disk-0
replication/subvol-136-disk-0    16G  2.3G   14G  15% /replication/subvol-136-disk-0
tmpfs                            51G     0   51G   0% /run/user/0
du -sm /var/lib/vz/*:
1 /var/lib/vz/dump
1 /var/lib/vz/images
6962 /var/lib/vz/template
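If the template directory is what is consuming space on local, container templates can be listed and pruned with pveam (a sketch; the template name below is hypothetical, substitute whatever the list shows):

pveam list local                                                    # list CT templates stored on local
pveam remove local:vztmpl/ubuntu-20.04-standard_20.04-1_amd64.tar.gz   # remove an unneeded one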
/etc/pve/storage.cfg:
dir: local
        path /var/lib/vz
        content vztmpl,backup,iso
        shared 0

lvmthin: local-lvm
        thinpool data
        vgname pve
        content rootdir,images

zfspool: replication
        pool replication
        content rootdir,images
        mountpoint /replication
        sparse 0

dir: backup
        path /backup
        content backup
        prune-backups keep-weekly=100
        shared 1
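Since local is a dir storage at /var/lib/vz on the root filesystem, its free space is simply whatever is left on /. A quick way to see usage per storage as Proxmox reports it:

pvesm status   # lists each configured storage with its type, total, used, and available space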
/var/lib/vz is on that local partition. df -h shows you have 100% use on your root partition... anything that hits 100% usage is usually a problem. Is it /var/lib/vz that could be filling up the disk, or something in /home, /root, /mnt? Please provide the information from my last reply (and if you want to get a head-start on tracking down the usage, use du -sm /* to find out where the space is going, repeating the command for directories taking up lots of space, and report back what you find).

Same problem here... I don't use local for backups or ISOs or anything else, and nothing shows in local from the UI; the output of du -sm /var/lib/vz/* is 1, 1, and 1 for dump, images, and template. Yet local is 94G of 100G full. I use pve-data for backups and everything else, and that has 1.2T out of 1.7T free. Wondering if there was a response to the earlier post.

Output of df -h:
Filesystem      Size  Used Avail Use% Mounted on
tmpfs            51G  4.1G   47G   9% /run
tmpfs           252G   54M  252G   1% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs           252G     0  252G   0% /sys/fs/cgroup
tmpfs            51G     0   51G   0% /run/user/0
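A variant of the suggested command worth trying (a sketch, assuming GNU du): the -x flag stops du from descending into other filesystems mounted underneath each directory, so separately mounted trees such as the subvols under /replication don't inflate the totals for /.

du -xsm /* 2>/dev/null | sort -n | tail   # largest top-level directories, sizes in MB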
These tmpfs reside on local and the sizes are quite large. These are temporary, volatile mounts that persist until reboot. I would try a reboot and see whether these temp mounts unmount, freeing up the space. I had this happen when I was transferring a large virtual disk to the wrong path: when that path's root filesystem became full, it started moving data to local. A reboot cleared the mounts and freed up that space after I deleted the .partial file of the original transfer.
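Another thing worth ruling out when df reports far more usage than du can find: files that were deleted but are still held open by a running process, since their space is only released once the process closes them or is restarted. A quick check, assuming lsof is installed:

lsof +L1   # lists open files with a link count below 1, i.e. deleted but still held open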
I have the same issue, but I've noticed it only happens when the VM is Windows-based. None of my Linux VMs cause this issue. I moved the Windows VM to different hosts and the problem followed it. I noticed there is another kernel update today and I'm currently installing it, so once I get machines migrated and the hosts upgraded I am going to fire the Windows VM up and see if the issue persists. If this kernel upgrade fixes it, I will post back.

Hello,
My local storage is full without any apparent reason. It does not have that much data on it. Can anyone help me with it?
Hi, is anyone else seeing this issue on non-Windows VMs?
It's / that is full, so just looking in /var/lib/vz might not give you the answer. I'd look at things like /var/cache and /var/log next, and if those are okay, start at /* and go from there.

Sorry, my fault, but I already checked "/" too. I'm unable to find the 300+ GB of lost space.
Output:
root@proxmox01:~# du -sm /*
10 /bin
196 /boot
34 /dev
5 /etc
1 /home
620 /lib
1 /lib64
1 /media
2124909 /mnt
1 /opt
du: cannot read directory '/proc/15701/task/15701/net': Invalid argument
du: cannot read directory '/proc/15701/net': Invalid argument
du: cannot access '/proc/396751/task/396751/fd/4': No such file or directory
du: cannot access '/proc/396751/task/396751/fdinfo/4': No such file or directory
du: cannot access '/proc/396751/fd/3': No such file or directory
du: cannot access '/proc/396751/fdinfo/3': No such file or directory
0 /proc
688 /r1-tosh-3tb
1 /root
1 /rpool
2 /run
10 /sbin
1 /srv
0 /sys
1 /tbw_send.sh
1 /tmp
1114 /usr
du: cannot access '/var/lib/lxcfs/cgroup': Input/output error
392857 /var
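Given that output, /var at roughly 384 GB looks like where the missing 300+ GB lives (whether the 2.1 TB under /mnt counts against / depends on whether a separate filesystem is mounted there). Per the advice above, the next step is repeating du one level down; a sketch, assuming GNU du (-d1 limits the depth, -x stays on the current filesystem):

du -xm -d1 /var 2>/dev/null | sort -n | tail   # largest subdirectories of /var, sizes in MB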