我在 Debian 上的 docker 容器中以 docker 的桥接网络模式运行 TCP 服务器和 haproxy。我增加了临时端口范围,以便每个 IP 可以连接大约 50k 个客户端。为了超过 50k 个客户端,我执行 haproxy 容器并使用以下命令创建 4 个具有不同 IP 的虚拟网络接口:
ifconfig eth0:1 172.17.0.100
ifconfig eth0:2 172.17.0.101
ifconfig eth0:3 172.17.0.102
ifconfig eth0:4 172.17.0.103
eth0 在 haproxy 容器中已经可用。这样我就可以连接大约 200k 个客户端。这是我的 haproxy 配置:
global
ulimit-n 999999
maxconn 500000
maxpipes 200000
tune.ssl.default-dh-param 2048
nbproc 8
cpu-map 1 0
cpu-map 2 1
cpu-map 3 2
cpu-map 4 3
cpu-map 5 4
cpu-map 6 5
cpu-map 7 6
cpu-map 8 7
defaults
timeout connect 5000
timeout client 50000
timeout server 50000
listen mqtt
bind *:1883
bind *:1884 ssl crt /etc/ssl/myapp.pem
mode tcp
maxconn 500000
balance roundrobin
server broker1 myapp:1883 source 172.17.0.100
server broker2 myapp:1883 source 172.17.0.101
server broker3 myapp:1883 source 172.17.0.102
server broker4 myapp:1883 source 172.17.0.103
我在 docker run 命令中将 myapp 容器链接到了 haproxy 容器。那么,有没有办法在运行 haproxy docker 容器时自动创建这些虚拟网络接口?例如在 Dockerfile 中实现,或者利用 docker 网络功能?
请指教。谢谢
答案1
你可以用 OpenSVC 来管理它(https://www.opensvc.com)
- 安装 opensvc 代理(https://repo.opensvc.com)
- 创建服务(svcmgr -s xxxxxx create)
- 填写服务配置文件(svcmgr -s xxxxxx edit config)
- 启动/停止服务来测试你的应用程序堆栈(svcmgr -s xxxxxx start --local)
- 在服务级别查询状态(svcmgr -s xxxxxx print status)
- 在代理级别查询状态(svcmon)
下面是一个可以满足您需求的 OpenSVC 服务配置文件示例:
[DEFAULT]
id = 68ec6a49-d3ee-42ea-831d-78db92bab972
[ip#0]
type = docker
ipname = 172.17.0.100
ipdev = {env.bridge}
netmask = 255.255.255.0
container_rid = {env.networkcontainer}
mode = bridge
[ip#1]
type = docker
ipname = 172.17.0.101
ipdev = {env.bridge}
netmask = 255.255.255.0
container_rid = {env.networkcontainer}
mode = bridge
[ip#2]
type = docker
ipname = 172.17.0.102
ipdev = {env.bridge}
netmask = 255.255.255.0
container_rid = {env.networkcontainer}
mode = bridge
[ip#3]
type = docker
ipname = 172.17.0.103
ipdev = {env.bridge}
netmask = 255.255.255.0
container_rid = {env.networkcontainer}
mode = bridge
[container#0]
type = docker
run_image = busybox:latest
run_args = -i -t --rm --net=none
-v /etc/localtime:/etc/localtime:ro
run_command = /bin/sh
[container#1]
type = docker
run_image = toke/mosquitto
run_args = --rm --net=container:{svcname}.container.0
-v {env.base_dir}/data/mqtt/config:/mqtt/config:ro
-v {env.base_dir}/data/mqtt/log:/mqtt/log:ro
-v {env.base_dir}/data/mqtt/data:/mqtt/data:ro
-v /etc/localtime:/etc/localtime:ro
disable = true
[container#2]
type = docker
run_image = haproxy:latest
run_args = --rm --net=container:{svcname}.container.0
-v {env.base_dir}/data/haproxy:/usr/local/etc/haproxy:ro
-v /etc/localtime:/etc/localtime:ro
disable = true
[env]
networkcontainer = container#0
bridge = docker0
base_dir = /srv/{svcname}
以下是此设置的一些日志。OpenSVC 服务名为“demovnic”,正在我的“xps13”笔记本电脑上运行:
[root@xps13 tmp]# svcmgr -s demovnic start --local
xps13.demovnic.ip#3 checking 172.17.0.103 availability
xps13.demovnic.ip#2 checking 172.17.0.102 availability
xps13.demovnic.ip#1 checking 172.17.0.101 availability
xps13.demovnic.ip#0 checking 172.17.0.100 availability
xps13.demovnic.container container#2,container#1 disabled
xps13.demovnic.container container#2,container#1 disabled
xps13.demovnic.container#0 docker run -d --name=demovnic.container.0 -i -t --rm --net=none -v /etc/localtime:/etc/localtime:ro busybox:latest /bin/sh
xps13.demovnic.container#0 output:
xps13.demovnic.container#0 5da0f43b5b6eba14f0b04a240403237735a5ae0a97a88f54626e45c24024e245
xps13.demovnic.container#0 wait for up status
xps13.demovnic.container#0 wait for container operational
xps13.demovnic.ip#0 bridge mode
xps13.demovnic.ip#0 /sbin/ip link add name veth0pl30562 mtu 1500 type veth peer name veth0pg30562 mtu 1500
xps13.demovnic.ip#0 /sbin/ip link set veth0pl30562 master docker0
xps13.demovnic.ip#0 /sbin/ip link set veth0pl30562 up
xps13.demovnic.ip#0 /sbin/ip link set veth0pg30562 netns 30562
xps13.demovnic.ip#0 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set veth0pg30562 name eth0
xps13.demovnic.ip#0 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip addr add 172.17.0.100/24 dev eth0
xps13.demovnic.ip#0 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set eth0 up
xps13.demovnic.ip#0 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip route replace default dev eth0
xps13.demovnic.ip#0 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 /opt/opensvc/lib/arp.py eth0 172.17.0.100
xps13.demovnic.ip#1 bridge mode
xps13.demovnic.ip#1 /sbin/ip link add name veth1pl30562 mtu 1500 type veth peer name veth1pg30562 mtu 1500
xps13.demovnic.ip#1 /sbin/ip link set veth1pl30562 master docker0
xps13.demovnic.ip#1 /sbin/ip link set veth1pl30562 up
xps13.demovnic.ip#1 /sbin/ip link set veth1pg30562 netns 30562
xps13.demovnic.ip#1 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set veth1pg30562 name eth1
xps13.demovnic.ip#1 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip addr add 172.17.0.101/24 dev eth1
xps13.demovnic.ip#1 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set eth1 up
xps13.demovnic.ip#1 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip route replace default dev eth1
xps13.demovnic.ip#1 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 /opt/opensvc/lib/arp.py eth1 172.17.0.101
xps13.demovnic.ip#2 bridge mode
xps13.demovnic.ip#2 /sbin/ip link add name veth2pl30562 mtu 1500 type veth peer name veth2pg30562 mtu 1500
xps13.demovnic.ip#2 /sbin/ip link set veth2pl30562 master docker0
xps13.demovnic.ip#2 /sbin/ip link set veth2pl30562 up
xps13.demovnic.ip#2 /sbin/ip link set veth2pg30562 netns 30562
xps13.demovnic.ip#2 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set veth2pg30562 name eth2
xps13.demovnic.ip#2 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip addr add 172.17.0.102/24 dev eth2
xps13.demovnic.ip#2 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set eth2 up
xps13.demovnic.ip#2 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip route replace default dev eth2
xps13.demovnic.ip#2 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 /opt/opensvc/lib/arp.py eth2 172.17.0.102
xps13.demovnic.ip#3 bridge mode
xps13.demovnic.ip#3 /sbin/ip link add name veth3pl30562 mtu 1500 type veth peer name veth3pg30562 mtu 1500
xps13.demovnic.ip#3 /sbin/ip link set veth3pl30562 master docker0
xps13.demovnic.ip#3 /sbin/ip link set veth3pl30562 up
xps13.demovnic.ip#3 /sbin/ip link set veth3pg30562 netns 30562
xps13.demovnic.ip#3 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set veth3pg30562 name eth3
xps13.demovnic.ip#3 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip addr add 172.17.0.103/24 dev eth3
xps13.demovnic.ip#3 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip link set eth3 up
xps13.demovnic.ip#3 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 ip route replace default dev eth3
xps13.demovnic.ip#3 /usr/bin/nsenter --net=/var/run/docker/netns/9860d0aa3267 /opt/opensvc/lib/arp.py eth3 172.17.0.103
xps13.demovnic.container container#2,container#1 disabled
[root@xps13 tmp]# svcmgr -s demovnic print status
demovnic up
`- instances
`- xps13 up idle, started
|- ip#0 ....... up 172.17.0.100@docker0@container#0
|- ip#1 ....... up 172.17.0.101@docker0@container#0
|- ip#2 ....... up 172.17.0.102@docker0@container#0
|- ip#3 ....... up 172.17.0.103@docker0@container#0
|- container#0 ....... up docker container demovnic.container.0@busybox:latest
|- container#1 ..D..P. n/a docker container demovnic.container.1@toke/mosquitto
`- container#2 ..D..P. n/a docker container demovnic.container.2@haproxy:latest
[root@xps13 tmp]# svcmgr -s demovnic docker exec -it demovnic.container.0 ip a | grep -E "eth[0-9]|inet "
inet 127.0.0.1/8 scope host lo
515: eth0@if516: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue qlen 1000
inet 172.17.0.100/24 scope global eth0
517: eth1@if518: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue qlen 1000
inet 172.17.0.101/24 scope global eth1
519: eth2@if520: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue qlen 1000
inet 172.17.0.102/24 scope global eth2
521: eth3@if522: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue qlen 1000
inet 172.17.0.103/24 scope global eth3
[root@xps13 tmp]# svcmgr -s demovnic stop --local
xps13.demovnic.container container#2,container#1 disabled
xps13.demovnic.container container#2,container#1 disabled
xps13.demovnic.ip#3 /usr/bin/nsenter --net=/var/run/docker/netns/6b0d93d7bded ip addr del 172.17.0.103/24 dev eth3
xps13.demovnic.ip#3 checking 172.17.0.103 availability
xps13.demovnic.ip#2 /usr/bin/nsenter --net=/var/run/docker/netns/6b0d93d7bded ip addr del 172.17.0.102/24 dev eth2
xps13.demovnic.ip#2 checking 172.17.0.102 availability
xps13.demovnic.ip#1 /usr/bin/nsenter --net=/var/run/docker/netns/6b0d93d7bded ip addr del 172.17.0.101/24 dev eth1
xps13.demovnic.ip#1 checking 172.17.0.101 availability
xps13.demovnic.ip#0 /usr/bin/nsenter --net=/var/run/docker/netns/6b0d93d7bded ip addr del 172.17.0.100/24 dev eth0
xps13.demovnic.ip#0 checking 172.17.0.100 availability
xps13.demovnic.container#0 docker stop a1714ac9ae3f41170e38fd925c929c1c812787cd38c0ad75cb6bfb505857d551
xps13.demovnic.container#0 output:
xps13.demovnic.container#0 a1714ac9ae3f41170e38fd925c929c1c812787cd38c0ad75cb6bfb505857d551
xps13.demovnic.container#0 wait for down status
xps13.demovnic.container container#2,container#1 disabled
[root@xps13 opensvc]# svcmgr -s demovnic print status
demovnic down
`- instances
`- xps13 down idle
|- ip#0 ....... down 172.17.0.100@docker0@container#0
|- ip#1 ....... down 172.17.0.101@docker0@container#0
|- ip#2 ....... down 172.17.0.102@docker0@container#0
|- ip#3 ....... down 172.17.0.103@docker0@container#0
|- container#0 ....... down docker container demovnic.container.0@busybox:latest
| info: can not find container id
|- container#1 ..D..P. n/a docker container demovnic.container.1@toke/mosquitto
`- container#2 ..D..P. n/a docker container demovnic.container.2@haproxy:latest
您会看到,OpenSVC 代理在 container#0 的网络命名空间中完成了网络配置,然后您的 haproxy 和 mqtt 容器就可以共享(继承)同一个网络命名空间——这要归功于 --net=container:{svcname}.container.0 参数。
示例中禁用了 mqtt 和 haproxy,因为我没有可以提供给这两个容器的配置文件。一旦您准备好配置文件,就可以编辑服务配置文件并删除“disable = true”这一行,或者使用命令行启用它们(svcmgr -s xxxxxx enable --rid container#1,container#2)。