LVM partitions not "available" at boot

On my Ubuntu Trusty AMD64 KDE desktop install I have 2 LVM2 PVs, each holding one VG, and each VG contains 2 or more LVs. The symptoms:

(a) at boot, all LVs are "NOT available", so they cannot be mounted from fstab and they generate errors during boot;
(b) once KDE is up, the LVs are still unavailable;
(c) the LVs can easily be made available with vgchange -ay, and everything is fine until the next reboot (a stopgap based on this is sketched below).

In addition, none of the LVs report any free-space information.
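
As a stopgap (not a fix), something like the following in /etc/rc.local re-activates the VGs late in boot and retries the fstab mounts. This is only a sketch: it assumes /etc/rc.local is still enabled (the Trusty default) and that the affected fstab entries carry Ubuntu's nobootwait option so the failed early mounts don't block boot:

# /etc/rc.local excerpt -- runs as root near the end of boot
# Stopgap only: activate all VGs, then retry any fstab mounts that failed earlier
vgchange -ay
mount -a

exit 0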

Background: while test-installing another distribution, the LVM metadata was all destroyed. I managed to restore the LVM volumes from the saved metadata, but was left with this problem of LVs not activating at boot. I installed other distributions to test, and other Ubuntu installs show the same problem, which suggests something is wrong in the LVM data/metadata itself. What I have checked so far (the exact commands are sketched below):

- dmesg has no obviously related messages
- I can see nothing wrong with /etc/lvm/lvm.conf
- pvscan/vgscan/lvscan all complete without finding any errors
- all the data sits in a single segment on one HDD
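
For reference, the checks above were run roughly as follows; -vvv just raises LVM's own verbosity, and the debug output goes to stderr:

# Re-scan PVs and VGs with full verbosity, capturing the debug output
sudo pvscan -vvv 2> /tmp/pvscan.log
sudo vgscan -vvv 2> /tmp/vgscan.log

# Reproduce the activation that fails at boot, verbosely
sudo vgchange -ay --verbose

# Look for LVM/udev messages around boot time
grep -i lvm /var/log/syslog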

lvs
  LV                    VG        Attr      LSize   Pool Origin        Data%  Move Log Copy%  Convert
  lvwin-data            virt-test -wi-a----  20.00g                                                  
  lvwin7_kvm            virt-test -wi-a---- 260.00g                                                  
  lvwin7_kvm_copy       virt-test -wi-a---- 260.00g                                                  
  lvwin7a               virt-test -wi-a---- 512.00g                                                  
  lvwin7ub              virt-test -wn-a----  40.00g                                                  
  lvwin7ub-pre-catalyst virt-test -wi-a----  40.00g                                                  
  lxc_images            virt-test -wn-a---- 100.00g                                                  
  programming_data      virt-test -wi-a----  50.00g                                                  
  xen-test-snap         virt-test swi-a-s--  30.00g      xen-test-win7  38.57                        
  xen-test-win7         virt-test owi-a-s-- 150.00g

/etc/lvm/lvm.conf

devices {
    dir = "/dev"
    scan = [ "/dev" ]
    obtain_device_list_from_udev = 1
    preferred_names = [ ]
    filter = [ "a/.*/" ]
    cache_dir = "/run/lvm"
    cache_file_prefix = ""
    write_cache_state = 1
    sysfs_scan = 1
    multipath_component_detection = 1
    md_component_detection = 1
    md_chunk_alignment = 1
    data_alignment_detection = 1
    data_alignment = 0
    data_alignment_offset_detection = 1
    ignore_suspended_devices = 0
    disable_after_error_count = 0
    require_restorefile_with_uuid = 1
    pv_min_size = 2048
    issue_discards = 1
}

allocation {
    maximise_cling = 1
    mirror_logs_require_separate_pvs = 0
    thin_pool_metadata_require_separate_pvs = 0
}

log {
    verbose = 0
    silent = 0
    syslog = 1
    overwrite = 0
    level = 0
    indent = 1
    command_names = 0
    prefix = "  "
}

backup {
    backup = 1
    backup_dir = "/etc/lvm/backup"
    archive = 1
    archive_dir = "/etc/lvm/archive"
    retain_min = 10
    retain_days = 30
}

shell {
    history_size = 100
}


global {
    umask = 077
    test = 0
    units = "h"
    si_unit_consistency = 1
    activation = 1
    proc = "/proc"
    locking_type = 1
    wait_for_locks = 1
    fallback_to_clustered_locking = 1
    fallback_to_local_locking = 1
    locking_dir = "/run/lock/lvm"
    prioritise_write_locks = 1
    abort_on_internal_errors = 0
    metadata_read_only = 0
    mirror_segtype_default = "mirror"
    use_lvmetad = 0
    thin_check_executable = "/usr/sbin/thin_check"
    thin_check_options = [ "-q" ]
}

activation {
    checks = 0
    udev_sync = 1
    udev_rules = 1
    verify_udev_operations = 0
    retry_deactivation = 1
    missing_stripe_filler = "error"
    use_linear_target = 1
    reserved_stack = 64
    reserved_memory = 8192
    process_priority = -18
    mirror_region_size = 512
    readahead = "auto"
    raid_fault_policy = "warn"
    mirror_log_fault_policy = "allocate"
    mirror_image_fault_policy = "remove"
    snapshot_autoextend_threshold = 100
    snapshot_autoextend_percent = 20
    thin_pool_autoextend_threshold = 100
    thin_pool_autoextend_percent = 20
    use_mlockall = 0
    monitoring = 0
    polling_interval = 15
}


####################
# Advanced section #
####################
dmeventd {
    mirror_library = "libdevmapper-event-lvm2mirror.so"
    snapshot_library = "libdevmapper-event-lvm2snapshot.so"
    thin_library = "libdevmapper-event-lvm2thin.so"
}
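
Given that the metadata was destroyed and then restored, one thing I still want to rule out is stale LVM state baked into the initramfs, since it carries its own copy of the LVM tools and configuration. A sketch of how I'd inspect and refresh it; lsinitramfs and update-initramfs are the stock Ubuntu tools, and the initrd path assumes the currently running kernel:

# List LVM-related files inside the current initramfs
lsinitramfs /boot/initrd.img-$(uname -r) | grep -i lvm

# Rebuild the initramfs in case it holds stale LVM state from before the recovery
sudo update-initramfs -u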

Any debugging suggestions are welcome.