lxc-start -n 143 -F -lDEBUG -o lxc-143.log
lxc-start 143 20240321094324.741 DEBUG conf - ../src/lxc/conf.c:run_buffer:311 - Script exec /usr/share/lxc/hooks/lxc-pve-prestart-hook 143 lxc pre-start produced output: failed to remove directory '/sys/fs/cgroup/lxc/143/ns/system.slice/systemd-machine-id-commit.service': Device or resource busy
Seems like some services are still left over inside the CT, which makes PVE/LXC fail to ensure a clean start again:
failed to remove directory '/sys/fs/cgroup/lxc/143/ns/system.slice/systemd-machine-id-commit.service': Device or resource busy
init 6
systemctl stop pve-container@143
2024-03-21 04:14:21 ERROR: removing local copy of 'local-lvm:vm-143-disk-0' failed - lvremove 'pve/vm-143-disk-0' error: Logical volume pve/vm-143-disk-0 contains a filesystem in use.
INFO: trying to get global lock - waiting...
What distro, which DB and webserver are running there?
1. created a new container, installed a database and webserver
pct config CTID
These are also follow-up symptoms/errors of the original issue that the CT is doing something odd and cannot stop correctly – potentially having processes in the uninterruptible D state (like e.g. can happen from a hung NFS mount).
EDIT: more information
attempted to migrate the CT to a different host - got this error:
2024-03-21 04:14:21 ERROR: removing local copy of 'local-lvm:vm-143-disk-0' failed - lvremove 'pve/vm-143-disk-0' error: Logical volume pve/vm-143-disk-0 contains a filesystem in use.
backup jobs are also kicking off on the host, but not going anywhere - not sure if it's related:
INFO: trying to get global lock - waiting...
Literally an install of Zabbix, followed from their site - https://www.zabbix.com/download?zab...ents=server_frontend_agent&db=mysql&ws=apache
What distro, which DB and webserver are running there?
Well this is probably no good -
What's the full config?
pct config CTID
pct config 143
Configuration file 'nodes/pve/lxc/143.conf' does not exist
pct config 142
arch: amd64
cores: 2
features: nesting=1
hostname: testdvc1
memory: 512
net0: name=eth0,bridge=vmbr0,firewall=1,gw=10.10.12.179,hwaddr=8E:D4:0C:67:CF:3E,ip=10.10.12.10/24,type=veth
ostype: debian
rootfs: local-lvm:vm-142-disk-0,size=8G
swap: 512
unprivileged: 1
And this isn't a cluster with multiple nodes? (that CLI tool only works for CTs that currently are on the same node IIRC).
Well this is probably no good -
pct config 143
Configuration file 'nodes/pve/lxc/143.conf' does not exist
Not sure if that was from me running around the console attempting fixes that others prescribed, but it would appear that file is gone...
cat /var/lib/lxc/143/config
It is indeed in a cluster - I tried moving it as an attempt to resolve the issues. Didn't work as the disk was 'in use'.
And this isn't a cluster with multiple nodes?
run_buffer: 571 Script exited with status 2
lxc_init: 845 Failed to run lxc.hook.pre-start for container "101"
__lxc_start: 2034 Failed to initialize container "101"
TASK ERROR: startup for container '101' failed
root@pve:~# lxc-start -n 101 -F -lDEBUG -o lxc-101.log
lxc-start: 101: ../src/lxc/utils.c: run_buffer: 571 Script exited with status 2
lxc-start: 101: ../src/lxc/start.c: lxc_init: 845 Failed to run lxc.hook.pre-start for container "101"
lxc-start: 101: ../src/lxc/start.c: __lxc_start: 2034 Failed to initialize container "101"
lxc-start: 101: ../src/lxc/tools/lxc_start.c: lxc_start_main: 307 The container failed to start
lxc-start: 101: ../src/lxc/tools/lxc_start.c: lxc_start_main: 312 Additional information can be obtained by setting the --logfile and --logpriority options
root@pve:~#
root@pve:~# pveversion -v
proxmox-ve: 8.3.0 (running kernel: 6.8.12-9-pve)
pve-manager: 8.3.5 (running version: 8.3.5/dac3aa88bac3f300)
proxmox-kernel-helper: 8.1.1
proxmox-kernel-6.8: 6.8.12-9
proxmox-kernel-6.8.12-9-pve-signed: 6.8.12-9
proxmox-kernel-6.8.12-4-pve-signed: 6.8.12-4
ceph-fuse: 17.2.7-pve3
corosync: 3.1.7-pve3
criu: 3.17.1-2+deb12u1
glusterfs-client: 10.3-5
ifupdown2: 3.2.0-1+pmx11
ksm-control-daemon: 1.5-1
libjs-extjs: 7.0.0-5
libknet1: 1.28-pve1
libproxmox-acme-perl: 1.6.0
libproxmox-backup-qemu0: 1.5.1
libproxmox-rs-perl: 0.3.5
libpve-access-control: 8.2.0
libpve-apiclient-perl: 3.3.2
libpve-cluster-api-perl: 8.0.10
libpve-cluster-perl: 8.0.10
libpve-common-perl: 8.2.9
libpve-guest-common-perl: 5.1.6
libpve-http-server-perl: 5.2.0
libpve-network-perl: 0.10.1
libpve-rs-perl: 0.9.2
libpve-storage-perl: 8.3.3
libspice-server1: 0.15.1-1
lvm2: 2.03.16-2
lxc-pve: 6.0.0-1
lxcfs: 6.0.0-pve2
novnc-pve: 1.6.0-2
proxmox-backup-client: 3.3.4-1
proxmox-backup-file-restore: 3.3.4-1
proxmox-firewall: 0.6.0
proxmox-kernel-helper: 8.1.1
proxmox-mail-forward: 0.3.1
proxmox-mini-journalreader: 1.4.0
proxmox-offline-mirror-helper: 0.6.7
proxmox-widget-toolkit: 4.3.7
pve-cluster: 8.0.10
pve-container: 5.2.4
pve-docs: 8.3.1
pve-edk2-firmware: 4.2025.02-2
pve-esxi-import-tools: 0.7.2
pve-firewall: 5.1.0
pve-firmware: 3.15-2
pve-ha-manager: 4.0.6
pve-i18n: 3.4.1
pve-qemu-kvm: 9.2.0-3
pve-xtermjs: 5.3.0-3
qemu-server: 8.3.8
smartmontools: 7.3-pve1
spiceterm: 3.3.0
swtpm: 0.8.0+pve1
vncterm: 1.8.0
zfsutils-linux: 2.2.7-pve2
root@pve:~#
root@pve:~# pct config 101
arch: amd64
cores: 4
features: mount=nfs,nesting=1
hostname: docker-reports
memory: 8192
mp0: /mnt/media,mp=/mnt/media
net0: name=eth0,bridge=vmbr0,hwaddr=BC:24:11:F7:4B:7A,ip=dhcp,type=veth
onboot: 1
ostype: debian
rootfs: cold-data:101/vm-101-disk-0.raw,size=220G
swap: 2048
tags: ip101
lxc.mount.auto: cgroup:rw
lxc.cgroup2.devices.allow: a
lxc.cap.drop:
lxc.cgroup2.devices.allow: c 188:* rwm
lxc.cgroup2.devices.allow: c 189:* rwm
lxc.cgroup2.devices.allow: c 226:0 rwm
lxc.cgroup2.devices.allow: c 226:128 rwm
lxc.cgroup2.devices.allow: c 29:0 rwm
lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir
lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file
lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file 0,0
root@pve:~#
root@pve:~# cat /etc/pve/storage.cfg
dir: local
path /var/lib/vz
content backup,vztmpl,iso
lvmthin: local-lvm
thinpool data
vgname pve
content rootdir,images
dir: cold-data
path /mnt/cold-data
content import,iso,backup,snippets,images,vztmpl,rootdir
shared 0
cifs: tempbckp
path /mnt/pve/tempbckp
server 192.168.1.4
share Backups
content import,iso,snippets,backup,images,vztmpl,rootdir
prune-backups keep-all=1
username autouser
root@pve:~#
We use essential cookies to make this site work, and optional cookies to enhance your experience.