lvdisplay not showing lvm volumes created by ubuntu server

nor500

New Member
Jun 15, 2019
I installed Proxmox 5.4 on a server machine that previously ran Ubuntu Server 16.04. There are two existing LVM logical volumes on the server, which were created under Ubuntu Server 16.04. The problem is that lvdisplay and vgdisplay on Proxmox show nothing for them, and they do not show up in the Proxmox GUI either. I tested these LVs in an Ubuntu live server environment: lvdisplay shows them there and I can also mount them. My plan is to use these two existing ext4-formatted logical volumes (created by Ubuntu Server) in Proxmox and share them with the VMs and containers. What can be the problem? Thanks for the help!
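For readers hitting the same symptom, here is a minimal sketch of checks one might run on the Proxmox host before digging deeper. These are standard LVM and util-linux commands; the volume group name "myvg" is a placeholder, not something from this thread.

Code:
# List block devices and any filesystem/RAID signatures the kernel can see
lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT
blkid

# Rescan for physical volumes, volume groups and logical volumes
pvscan
vgscan
lvscan

# If a volume group is found but its LVs are inactive, activate them
# ("myvg" is a placeholder for the actual volume group name)
vgchange -ay myvg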
 

lhorace

Member
Oct 17, 2015
Hello there,

It might be useful to the audience if you include the output of the following, wrapped in [ code ][/ code ] tags: 1) pveversion -v and 2) journalctl -b.

Regards
 

nor500

New Member
Jun 15, 2019
[ code ]
pveversion -v:

proxmox-ve: 5.4-1 (running kernel: 4.15.18-12-pve)
pve-manager: 5.4-3 (running version: 5.4-3/0a6eaa62)
pve-kernel-4.15: 5.3-3
pve-kernel-4.15.18-12-pve: 4.15.18-35
corosync: 2.4.4-pve1
criu: 2.11.1-1~bpo90
glusterfs-client: 3.8.8-1
ksm-control-daemon: 1.2-2
libjs-extjs: 6.0.1-2
libpve-access-control: 5.1-8
libpve-apiclient-perl: 2.0-5
libpve-common-perl: 5.0-50
libpve-guest-common-perl: 2.0-20
libpve-http-server-perl: 2.0-13
libpve-storage-perl: 5.0-41
libqb0: 1.0.3-1~bpo9
lvm2: 2.02.168-pve6
lxc-pve: 3.1.0-3
lxcfs: 3.0.3-pve1
novnc-pve: 1.0.0-3
proxmox-widget-toolkit: 1.0-25
pve-cluster: 5.0-36
pve-container: 2.0-37
pve-docs: 5.4-2
pve-edk2-firmware: 1.20190312-1
pve-firewall: 3.0-19
pve-firmware: 2.0-6
pve-ha-manager: 2.0-9
pve-i18n: 1.1-4
pve-libspice-server1: 0.14.1-2
pve-qemu-kvm: 2.12.1-3
pve-xtermjs: 3.12.0-1
qemu-server: 5.0-50
smartmontools: 6.5+svn4324-1
spiceterm: 3.0-5
vncterm: 1.5-3
zfsutils-linux: 0.7.13-pve1~bpo2
[/ code ]

The journalctl -b output seemed too long to attach. Which part do you need?
 

lhorace

Member
Oct 17, 2015
When you wrap it with code tags, remove the spacing between the brackets and the word so the forum software can properly format the text, e.g.:

Code:
pveversion -v:

proxmox-ve: 5.4-1 (running kernel: 4.15.18-12-pve)
pve-manager: 5.4-3 (running version: 5.4-3/0a6eaa62)
pve-kernel-4.15: 5.3-3
pve-kernel-4.15.18-12-pve: 4.15.18-35
corosync: 2.4.4-pve1
criu: 2.11.1-1~bpo90
glusterfs-client: 3.8.8-1
ksm-control-daemon: 1.2-2
libjs-extjs: 6.0.1-2
libpve-access-control: 5.1-8
libpve-apiclient-perl: 2.0-5
libpve-common-perl: 5.0-50
libpve-guest-common-perl: 2.0-20
libpve-http-server-perl: 2.0-13
libpve-storage-perl: 5.0-41
libqb0: 1.0.3-1~bpo9
lvm2: 2.02.168-pve6
lxc-pve: 3.1.0-3
lxcfs: 3.0.3-pve1
novnc-pve: 1.0.0-3
proxmox-widget-toolkit: 1.0-25
pve-cluster: 5.0-36
pve-container: 2.0-37
pve-docs: 5.4-2
pve-edk2-firmware: 1.20190312-1
pve-firewall: 3.0-19
pve-firmware: 2.0-6
pve-ha-manager: 2.0-9
pve-i18n: 1.1-4
pve-libspice-server1: 0.14.1-2
pve-qemu-kvm: 2.12.1-3
pve-xtermjs: 3.12.0-1
qemu-server: 5.0-50
smartmontools: 6.5+svn4324-1
spiceterm: 3.0-5
vncterm: 1.5-3
zfsutils-linux: 0.7.13-pve1~bpo2
journalctl -b is a copy of the boot log, similar to dmesg. Alternatively, on a fresh boot, copy the output of dmesg. Hopefully that should truncate the output to a sensible size so you can include it here.
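As an aside for anyone following along, here are a couple of ways to trim the boot log to a manageable size before posting it. The journalctl flags are standard; the grep pattern is just an example filter for storage-related messages.

Code:
# Kernel messages from the current boot only (roughly what dmesg shows)
journalctl -b -k --no-pager > boot-kernel.log

# Or pre-filter for storage-related lines before posting
journalctl -b --no-pager | grep -iE 'lvm|md[0-9]|raid|sd[a-z]' > boot-storage.log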
 

lhorace

Member
Oct 17, 2015
I couldn't find anything suspicious. That said, could you include the output of the following: 'pvscan -v', 'lvm fullreport', 'lvm dumpconfig', and 'lvm lastlog'?
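For anyone collecting the same information, one simple way to capture all four requested outputs in a single file is plain shell redirection around the commands named above (the file name is arbitrary):

Code:
# Capture all four requested outputs into a single file
{
  echo '### pvscan -v';      pvscan -v
  echo '### lvm fullreport'; lvm fullreport
  echo '### lvm dumpconfig'; lvm dumpconfig
  echo '### lvm lastlog';    lvm lastlog
} > lvm-report.txt 2>&1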
 

nor500

New Member
Jun 15, 2019
I am sending it to you:

Code:
pvscan -v :
    Wiping internal VG cache
    Wiping cache of LVM-capable devices
  No matching physical volumes found
#########
lvm fullreport :
-
########
lvm dumpconfig :
config {
    checks=1
    abort_on_errors=0
    profile_dir="/etc/lvm/profile"
}
dmeventd {
    mirror_library="libdevmapper-event-lvm2mirror.so"
    snapshot_library="libdevmapper-event-lvm2snapshot.so"
    thin_library="libdevmapper-event-lvm2thin.so"
}
activation {
    checks=0
    udev_sync=1
    udev_rules=1
    verify_udev_operations=0
    retry_deactivation=1
    missing_stripe_filler="error"
    use_linear_target=1
    reserved_stack=64
    reserved_memory=8192
    process_priority=-18
    raid_region_size=512
    readahead="auto"
    raid_fault_policy="warn"
    mirror_image_fault_policy="remove"
    mirror_log_fault_policy="allocate"
    snapshot_autoextend_threshold=100
    snapshot_autoextend_percent=20
    thin_pool_autoextend_threshold=100
    thin_pool_autoextend_percent=20
    use_mlockall=0
    monitoring=1
    polling_interval=15
    activation_mode="degraded"
}
global {
    umask=63
    test=0
    units="h"
    si_unit_consistency=1
    suffix=1
    activation=1
    proc="/proc"
    etc="/etc"
    locking_type=1
    wait_for_locks=1
    fallback_to_clustered_locking=1
    fallback_to_local_locking=1
    locking_dir="/run/lock/lvm"
    prioritise_write_locks=1
    abort_on_internal_errors=0
    detect_internal_vg_cache_corruption=0
    metadata_read_only=0
    mirror_segtype_default="raid1"
    raid10_segtype_default="raid10"
    sparse_segtype_default="thin"
    use_lvmetad=0
    use_lvmlockd=0
    system_id_source="none"
    use_lvmpolld=1
    notify_dbus=1
}
shell {
    history_size=100
}
backup {
    backup=1
    backup_dir="/etc/lvm/backup"
    archive=1
    archive_dir="/etc/lvm/archive"
    retain_min=10
    retain_days=30
}
log {
    verbose=0
    silent=0
    syslog=1
    overwrite=0
    level=0
    indent=1
    command_names=0
    prefix="  "
    activation=0
    debug_classes=["memory","devices","activation","allocation","lvmetad","metadata","cache","locking","lvmpolld","dbus"]
}
allocation {
    maximise_cling=1
    use_blkid_wiping=1
    wipe_signatures_when_zeroing_new_lvs=1
    mirror_logs_require_separate_pvs=0
    cache_pool_metadata_require_separate_pvs=0
    thin_pool_metadata_require_separate_pvs=0
}
devices {
    dir="/dev"
    scan="/dev"
    obtain_device_list_from_udev=1
    external_device_info_source="none"
    global_filter=["r|/dev/zd.*|","r|/dev/mapper/pve-.*|"]
    cache_dir="/run/lvm"
    cache_file_prefix=""
    write_cache_state=1
    sysfs_scan=1
    multipath_component_detection=1
    md_component_detection=1
    fw_raid_component_detection=0
    md_chunk_alignment=1
    data_alignment_detection=1
    data_alignment=0
    data_alignment_offset_detection=1
    ignore_suspended_devices=0
    ignore_lvm_mirrors=1
    disable_after_error_count=0
    require_restorefile_with_uuid=1
    pv_min_size=2048
    issue_discards=1
    allow_changes_with_duplicate_pvs=0
}
#####
lvm lastlog :
No log report stored.
[/ code ]
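The "No matching physical volumes found" result is the key clue here. With hindsight from the solution below, one way to check whether the physical volume actually lives on a software RAID array that has not been assembled looks roughly like this (mdadm may not be installed yet; /dev/sdb and /dev/sdc are example device names, not from this thread):

Code:
# Does the kernel see any assembled md arrays?
cat /proc/mdstat

# Look for md superblocks on the raw disks (needs the mdadm package;
# /dev/sdb and /dev/sdc are example device names)
mdadm --examine /dev/sdb /dev/sdc

# LVM can only find the PV once the array device (e.g. /dev/md0) exists
pvscan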
 

nor500

New Member
Jun 15, 2019
I solved the problem. My LVM is running on top of an mdadm RAID 6 array, and I finally noticed that the mdadm package is not part of the default Proxmox installation. I installed mdadm and now everything is working. :)
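For readers who land here with the same setup, a rough sketch of the steps this fix implies is given below. The array is assembled with standard mdadm commands, and pvesm add dir is the standard Proxmox CLI for registering a directory storage; the names "myvg", "mylv", "/mnt/data" and "data" are placeholders, not taken from this thread.

Code:
apt update && apt install mdadm

# Assemble the existing array and make the assembly persistent across reboots
mdadm --assemble --scan
mdadm --detail --scan >> /etc/mdadm/mdadm.conf
update-initramfs -u

# LVM should now detect the PV sitting on top of the md device
pvscan
vgchange -ay myvg

# Mount the ext4 logical volume and register it as a directory storage
mkdir -p /mnt/data
mount /dev/myvg/mylv /mnt/data
pvesm add dir data --path /mnt/data

An /etc/fstab entry for the mount would also be needed so the storage comes back after a reboot.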
 
