veth 不响应同一桥上的其他 veth ping 请求

veth 不响应同一桥上的其他 veth ping 请求

我创建了两个 veth 对并将它们连接到同一个网桥。但是,它们没有响应 ping 请求。这是 ubuntu 20.04。

# Reproduction script: two veth pairs attached to a bridge, with every
# endpoint left in the default network namespace (this is the setup that
# fails to get ping replies).

# create the interfaces and bridge  
sudo ip link add br0 type bridge
sudo ip link add i0 type veth peer name i0-p
sudo ip link add i1 type veth peer name i1-p

# connect veth peers into bridge as well as physical interface 
sudo ip link set eno2 master br0
sudo ip link set i0-p master br0
sudo ip link set i1-p master br0

# bring up all the interfaces/bridges up. 
sudo ip link set br0 up
sudo ip link set eno2 up
sudo ip link set i0-p up 
sudo ip link set i1-p up 
sudo ip link set i0 up 
sudo ip link set i1 up 

# set the addresses
# NOTE(review): all three IPs are assigned to interfaces in the same host /
# same netns, so the kernel's routing table resolves them as local and sends
# replies over 'lo' instead of out through the veth/bridge — this is why the
# pings below get no response (see the answer at the bottom of the page).
sudo ip a add 10.0.10.4/16 dev br0
sudo ip a add 10.0.10.5/16 dev i0
sudo ip a add 10.0.10.6/16 dev i1
                                                                                   
                                                                                   
                                             +-------------+                       
                                             |             |                       
         +---------+               +---------|             |                       
         |         |               |         |             |                       
         |  i0     |---------------|  i0-p   |             |                       
         |         |               |         |             |                       
         |10.0.10.5/16             |         |             |            +---------+
         |         |               |         |             |            |         |
         +---------+               +---------|             |            |  en02   |
                                             |     br0     |------------|         |
                                             |             |            |         |
                                             |10.0.10.4/16 |            |         |
         +---------+               +---------+             |            +---------+
         |         |               |         |             |                       
         |  i1     |---------------|  i1-p   |             |                       
         |         |               |         |             |                       
         |10.0.10.6/16             |         |             |                       
         |         |               |         |             |                       
         +---------+               +---------+             |                       
                                             |             |                       
                                             +-------------+                       
# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eno1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether a4:bf:01:55:f3:a4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.20.3/16 brd 192.168.255.255 scope global eno1
       valid_lft forever preferred_lft forever
    inet6 fe80::a6bf:1ff:fe55:f3a4/64 scope link
       valid_lft forever preferred_lft forever
3: eno2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master br0 state UP group default qlen 1000
    link/ether a4:bf:01:55:f3:a5 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::a6bf:1ff:fe55:f3a5/64 scope link
       valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:09:12:3c:7c brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
10: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 32:b1:3c:cd:ab:b0 brd ff:ff:ff:ff:ff:ff
    inet 10.0.10.4/16 scope global br0
       valid_lft forever preferred_lft forever
    inet6 fe80::30b1:3cff:fecd:abb0/64 scope link
       valid_lft forever preferred_lft forever
11: i0-p@i0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 32:b1:3c:cd:ab:b0 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::30b1:3cff:fecd:abb0/64 scope link
       valid_lft forever preferred_lft forever
12: i0@i0-p: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether ba:04:42:b1:62:f8 brd ff:ff:ff:ff:ff:ff
    inet 10.0.10.5/16 scope global i0
       valid_lft forever preferred_lft forever
    inet6 fe80::b804:42ff:feb1:62f8/64 scope link
       valid_lft forever preferred_lft forever
13: i1-p@i1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 72:b0:ab:ca:c4:55 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::70b0:abff:feca:c455/64 scope link
       valid_lft forever preferred_lft forever
14: i1@i1-p: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether f6:47:29:14:84:bb brd ff:ff:ff:ff:ff:ff
    inet 10.0.10.6/16 scope global i1
       valid_lft forever preferred_lft forever
    inet6 fe80::f447:29ff:fe14:84bb/64 scope link
       valid_lft forever preferred_lft forever 
# sysctl -p
net.ipv4.ip_forward = 1
# ping 10.0.10.6 -I i0
PING 10.0.10.6 (10.0.10.6) from 10.0.10.5 i0: 56(84) bytes of data.
^C
--- 10.0.10.6 ping statistics ---
14 packets transmitted, 0 received, 100% packet loss, time 13296ms
# tcpdump -i i0 not stp
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on i0, link-type EN10MB (Ethernet), capture size 262144 bytes
06:25:12.616946 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:13.625187 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:14.649177 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:15.673372 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:16.697226 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:17.721230 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:18.745363 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:19.769271 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:20.793229 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:21.817332 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:22.841256 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:23.865261 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:24.889354 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:25.913245 ARP, Request who-has kolla01 tell kolla01, length 28
06:25:26.937169 ARP, Request who-has kolla01 tell kolla01, length 28

有人有什么建议吗?

更新:
应用kab00m的建议:

# tcpdump -ni lo
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on lo, link-type EN10MB (Ethernet), capture size 262144 bytes
15:59:00.976656 IP 10.0.10.2 > 10.0.10.2: ICMP host 10.0.10.3 unreachable, length 92
15:59:00.976667 IP 10.0.10.2 > 10.0.10.2: ICMP host 10.0.10.3 unreachable, length 92
15:59:00.976674 IP 10.0.10.2 > 10.0.10.2: ICMP host 10.0.10.3 unreachable, length 92
15:59:04.080688 IP 10.0.10.2 > 10.0.10.2: ICMP host 10.0.10.3 unreachable, length 92
15:59:04.080698 IP 10.0.10.2 > 10.0.10.2: ICMP host 10.0.10.3 unreachable, length 92
15:59:04.080705 IP 10.0.10.2 > 10.0.10.2: ICMP host 10.0.10.3 unreachable, length 92
15:59:07.152677 IP 10.0.10.2 > 10.0.10.2: ICMP host 10.0.10.3 unreachable, length 92
^C
7 packets captured
14 packets received by filter
0 packets dropped by kernel
# destroy everything 
# (tear down the first, failing setup; deleting a veth end removes its peer too)
sudo ip link delete i0
sudo ip link delete i1
sudo ip link delete br0 

# create namespaces 
# Each veth endpoint will live in its own netns, so traffic between them must
# genuinely traverse the bridge instead of being short-circuited via 'lo'.
# NOTE(review): the commands from here on omit 'sudo' — they assume a root
# shell ('ip netns' requires CAP_NET_ADMIN).
ip netns add net0 
ip netns add net1 

# create veth pairs and bridge 
ip link add i0 type veth peer name i0-p
ip link add i1 type veth peer name i1-p
ip link add br0 type bridge

# add devices to network namespace 
# i0/i1 move into net0/net1; their peers i0-p/i1-p stay in the default netns
ip link set i0 netns net0
ip link set i1 netns net1

# add second peers into bridge 
ip link set eno2 master br0
ip link set i0-p master br0
ip link set i1-p master br0

# add addresses 
# The namespaced ends carry the IPs now, so each namespace has its own
# routing table and replies go back out through the veth/bridge.
ip a add 10.0.10.4/16 dev br0
ip netns exec net0 ip a add 10.0.10.5/16 dev i0 
ip netns exec net1 ip a add 10.0.10.6/16 dev i1

# bring up all devices 
ip netns exec net0 ip link set i0 up
ip netns exec net1 ip link set i1 up
ip link set i0-p up
ip link set i1-p up
ip link set br0 up
ip link set eno2 up
# ip netns exec net0 ping 10.0.10.6 -I i0
PING 10.0.10.6 (10.0.10.6) from 10.0.10.5 i0: 56(84) bytes of data.
64 bytes from 10.0.10.6: icmp_seq=1 ttl=64 time=0.058 ms
64 bytes from 10.0.10.6: icmp_seq=2 ttl=64 time=0.051 ms
64 bytes from 10.0.10.6: icmp_seq=3 ttl=64 time=0.051 ms
^C
--- 10.0.10.6 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2046ms
rtt min/avg/max/mdev = 0.051/0.053/0.058/0.003 ms

答案1

答案取决于您想要实现的目标。

此设置的主要问题在于:回复数据包不会通过 veth 接口发出,因为目标 IP 被分配在同一台主机上。当回复数据包要发出时,内核会根据路由表选择传出接口;而路由表表明目标地址就在本机,因此回复会经由 lo 接口发送。您可以运行 tcpdump -ni lo 来监听这些回复,很可能会在那里看到 host unreachable 之类的消息。

您可以创建像 LXC/Docker 这样的容器,也可以直接使用额外的网络命名空间。参见 man 4 veth 了解有关后者的更多信息。

顺便说一句,net.ipv4.ip_forward 与以太网帧或桥接无关;它的作用只是在内核自身收到并非发给本机的 IP 数据包时,让内核像路由器一样将其转发出去。

相关内容