DRBD mount point not syncing

I am trying to learn DRBD using CentOS 6.3 on virtual machines. I set up two VMs, node1 and node2. I copied a file to the mount point /data, which is /dev/drbd0 on node1, but it does not show up in /data on node2.

Here is the configuration:

# You can find an example in  /usr/share/doc/drbd.../drbd.conf.example

#include "drbd.d/global_common.conf";
#include "drbd.d/*.res";

global {
    # do not participate in online usage survey
    usage-count no;
}

resource data {

    # write IO is reported as completed if it has reached both local
    # and remote disk
    protocol C;

    net {
        # set up peer authentication
        cram-hmac-alg sha1;
        shared-secret "s3cr3tp@ss";
        # default value 32 - increase as required
        max-buffers 512;
        # highest number of data blocks between two write barriers
        max-epoch-size 512;
        # size of the TCP socket send buffer - can tweak or set to 0 to
        # allow kernel to autotune
        sndbuf-size 0;
    }

    startup {
        # wait for connection timeout - boot process blocked
        # until DRBD resources are connected
        wfc-timeout 30;
        # WFC timeout if peer was outdated
        outdated-wfc-timeout 20;
        # WFC timeout if this node was in a degraded cluster (i.e. only had one
        # node left)
        degr-wfc-timeout 30;
    }

    disk {
        # the next two are for safety - detach on I/O error
        # and set up fencing - resource-only will attempt to
        # reach the other node and fence via the fence-peer
        # handler
         #on-io-error detach;
         #fencing resource-only;
        # no-disk-flushes; # if we had battery-backed RAID
        # no-md-flushes; # if we had battery-backed RAID
        # ramp up the resync rate
        # resync-rate 10M;
    }
    handlers {
        # specify the two fencing handlers
        # see: http://www.drbd.org/users-guide-8.4/s-pacemaker-fencing.html
        fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
    }
    # first node
    on node1 {
        # DRBD device
        device /dev/drbd0;
        # backing store device
        disk /dev/sdb;
        # IP address of node, and port to listen on
        address 192.168.1.101:7789;
        # use internal meta data (don't create a filesystem before
        # you create metadata!)
        meta-disk internal;
    }
    # second node
    on node2 {
        # DRBD device
        device /dev/drbd0;
        # backing store device
        disk /dev/sdb;
        # IP address of node, and port to listen on
        address 192.168.1.102:7789;
        # use internal meta data (don't create a filesystem before
        # you create metadata!)
        meta-disk internal;
    }
}

Here is cat /proc/drbd:

cat: /proc/data: No such file or directory
[root@node1 /]# cat /proc/drbd
version: 8.3.16 (api:88/proto:86-97)
GIT-hash: a798fa7e274428a357657fb52f0ecf40192c1985 build by phil@Build64R6, 2013-09-27 16:00:43
 0: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r-----
    ns:543648 nr:0 dw:265088 dr:280613 al:107 bm:25 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:7848864
    [>...................] sync'ed:  6.5% (7664/8188)M
    finish: 7:47:11 speed: 272 (524) K/sec

I copied a file to /data on node1, but the file does not appear in /data on node2. Can anyone help?

DRBD status on node1:

[root@node1 /]# service drbd status
drbd driver loaded OK; device status:
version: 8.3.16 (api:88/proto:86-97)
GIT-hash: a798fa7e274428a357657fb52f0ecf40192c1985 build by phil@Build64R6, 2013-09-27 16:00:43
m:res   cs          ro                 ds                     p  mounted  fstype
0:data  SyncSource  Primary/Secondary  UpToDate/Inconsistent  C  /data    ext3
...     sync'ed:    8.1%               (7536/8188)M

Answer 1

Prove me wrong, but IIRC you can only have the filesystem mounted on one of the nodes at a time. Let them finish syncing, unmount /data, switch over, mount it on node2, and you should see all the data.
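A minimal sketch of that switchover, assuming the resource is named data (as in the config above) and the initial sync has completed:

# on node1 (current Primary): stop using the filesystem and demote the resource
umount /data
drbdadm secondary data

# on node2: promote the resource and mount the replicated filesystem
drbdadm primary data
mount /dev/drbd0 /data

After this, node2 holds the Primary role and the files written on node1 should be visible under /data on node2.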

Answer 2

DRBD stands for Distributed Replicated Block Device; it is not a filesystem.

If you write a file on the primary node, the filesystem issues write operations. In the layer directly below it, DRBD makes sure those writes are replicated to the secondary node. To the secondary node, those writes appear only as blocks of data. For it to actually see the files, you normally have to unmount the partition on the primary node and mount it on the secondary node.

There is, however, a solution for what you are trying to achieve. For that you need a cluster filesystem. Such a filesystem allows you to mount the partition on both nodes at the same time. With an ordinary filesystem such as ext4, this is not possible.

One example of such a cluster filesystem that runs on top of DRBD is OCFS2. To use this filesystem and mount the partition on both servers at the same time, your DRBD resource needs to be configured in dual-primary mode. This means there is no single primary node: both nodes can write to the resource simultaneously, and the cluster filesystem ensures that the written data stays consistent.
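A rough sketch of the drbd.conf changes that dual-primary mode would require (this assumes the same resource name, data, as above; OCFS2 and proper fencing would still have to be set up on top of it):

resource data {
    net {
        # allow both nodes to hold the Primary role at the same time
        allow-two-primaries;
    }
    startup {
        # optionally promote both nodes to Primary when DRBD starts
        become-primary-on both;
    }
    # ... rest of the resource definition as before ...
}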
