LXC performance and barriers

sahostking

Renowned Member
Hi guys,

I've had a massive drop in performance going from OpenVZ (Proxmox 3) to LXC (Proxmox 4) with the exact same number of VPSs.

HW RAID in RAID 10 with 6 x enterprise SATA disks

I am trying to set barrier=0 but I'm not sure whether it's set or not:

fstab contents:

# <file system> <mount point> <type> <options> <dump> <pass>
/dev/pve/root / ext4 errors=remount-ro,barrier=0,noatime 0 1
/dev/pve/swap none swap sw 0 0
proc /proc proc defaults 0 0
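
With only the remount flag and no other options, mount re-reads the options for / from fstab, so a change like this can usually be applied and checked without a reboot. A minimal sketch (note the kernel reports barrier=0 as nobarrier, as in the output below):

mount -o remount /
grep ' / ' /proc/mounts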


cat /proc/mounts

sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,relatime 0 0
udev /dev devtmpfs rw,relatime,size=10240k,nr_inodes=8229823,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666 0 0
tmpfs /run tmpfs rw,nosuid,relatime,size=13173452k,mode=755 0 0
/dev/dm-0 / ext4 rw,noatime,nobarrier,errors=remount-ro,data=ordered 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
tmpfs /sys/fs/cgroup tmpfs rw,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset,clone_children 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event,release_agent=/run/cgmanager/agents/cgm-release-agent.perf_event 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb,release_agent=/run/cgmanager/agents/cgm-release-agent.hugetlb 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids,release_agent=/run/cgmanager/agents/cgm-release-agent.pids 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=21,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
rpc_pipefs /run/rpc_pipefs rpc_pipefs rw,relatime 0 0
tmpfs /run/lxcfs/controllers tmpfs rw,relatime,size=100k,mode=700 0 0
pids /run/lxcfs/controllers/pids cgroup rw,relatime,pids,release_agent=/run/cgmanager/agents/cgm-release-agent.pids 0 0
hugetlb /run/lxcfs/controllers/hugetlb cgroup rw,relatime,hugetlb,release_agent=/run/cgmanager/agents/cgm-release-agent.hugetlb 0 0
perf_event /run/lxcfs/controllers/perf_event cgroup rw,relatime,perf_event,release_agent=/run/cgmanager/agents/cgm-release-agent.perf_event 0 0
net_cls,net_prio /run/lxcfs/controllers/net_cls,net_prio cgroup rw,relatime,net_cls,net_prio 0 0
freezer /run/lxcfs/controllers/freezer cgroup rw,relatime,freezer 0 0
devices /run/lxcfs/controllers/devices cgroup rw,relatime,devices 0 0
memory /run/lxcfs/controllers/memory cgroup rw,relatime,memory 0 0
blkio /run/lxcfs/controllers/blkio cgroup rw,relatime,blkio 0 0
cpu,cpuacct /run/lxcfs/controllers/cpu,cpuacct cgroup rw,relatime,cpu,cpuacct 0 0
cpuset /run/lxcfs/controllers/cpuset cgroup rw,relatime,cpuset,clone_children 0 0
name=systemd /run/lxcfs/controllers/name=systemd cgroup rw,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
lxcfs /var/lib/lxcfs fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
cgmfs /run/cgmanager/fs tmpfs rw,relatime,size=100k,mode=755 0 0


Note: I did use nobarrier before and remounted using mount -a, and I see it was added:
/dev/dm-0 / ext4 rw,noatime,nobarrier,errors=remount-ro,data=ordered 0 0

But does that mean the LXC containers are using it too?
 
If the containers are based on LVM with their own ext4, then no. You need to change it for each container manually.
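
If a container's volume shows up in the host's /proc/mounts, a remount may be enough. A rough sketch only, assuming the usual /dev/pve/vm-<ID>-disk-1 volume naming and a hypothetical container ID 100 (check lvs and /proc/mounts for the real names, and note that a plain remount won't survive a container restart):

grep vm- /proc/mounts
mount -o remount,nobarrier /dev/pve/vm-100-disk-1
grep vm-100 /proc/mounts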

BTW: Consider using ZFS for LXC; it's much, much smoother and superior in features.
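
For reference, the ZFS layout matching RAID 10 on six disks is three striped mirrors. A sketch only, assuming the disks are handed to ZFS directly (it prefers an HBA/JBOD over HW RAID), are named /dev/sda through /dev/sdf, and that the hypothetical pool is called tank:

zpool create -o ashift=12 tank mirror sda sdb mirror sdc sdd mirror sde sdf
pvesm add zfspool local-zfs --pool tank

ZFS manages write ordering and cache flushes itself, so the ext4 barrier question disappears entirely.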
 
