我试图使用 TC 通过特定端口限制传入和传出带宽的延迟,但我无法让它 100% 工作。我不确定我是否做对了。
当我 ping google 时,延迟确实被增加了。我已经使用 iperf 通过端口 5001 测试带宽。带宽限制似乎在应用了以下设置的客户端上起作用(客户端运行 iperf -s,即传入方向),但是如果我用此客户端(传出方向)连接到另一台机器,带宽则被限制为 1-2 Mbit,而不是预期的 5Mbit。
例如,我想要的是将延迟设置为 100ms,带宽传出/传入限制为 5mbit,所有这些规则都应用于端口 5001。我是否走在正确的路径上,或者我是否误解了 tc 的概念?
# Create an IFB device for shaping ingress traffic (redirected here from ens192).
# Note: all commands are lowercase ("ip"/"tc"); the original "Ip"/"Tc" do not exist.
ip link add name ifb0 type ifb 2>/dev/null || :
ip link set dev ifb0 up

# Rate-limit to 5 Mbit on the IFB via an HTB class, queued through SFQ.
tc qdisc add dev ifb0 root handle 1: htb
# "tc class add" — the original was missing the "add" verb.
tc class add dev ifb0 parent 1: classid 1:20 htb rate 5mbit
tc qdisc add dev ifb0 parent 1:20 handle 20: sfq perturb 10
# Classify packets whose transport destination port (u16 at offset 2) is 5001.
# Plain ASCII single quotes are required; curly quotes break the match string.
tc filter add dev ifb0 parent 1: protocol ip prio 1 basic match 'cmp(u16 at 2 layer transport eq 5001)' flowid 1:20

# Egress: fixed delay on the real device. Ingress: redirect port-5001 traffic to ifb0.
tc qdisc add dev ens192 root netem delay 200ms
tc qdisc add dev ens192 ingress
# The original had a duplicated "add add" here.
tc filter add dev ens192 ingress protocol ip basic match 'cmp(u16 at 2 layer transport eq 5001)' action mirred egress redirect dev ifb0
答案1
这是几年前我的 ISP 遇到缓冲区膨胀(bufferbloat)问题时我运行的一个脚本。运行 start
和 stop
需要 root 权限,但运行 status
可以不用 root。
#!/bin/bash
# Traffic shaping script (AQM, fq_codel+tbf)
# Copyright 2018,2019 Mikko Rantalainen <[email protected]>
# License: MIT (X11)
# Usage:
# 21/0.8 Mbps connection (ADSL2): DOWNLINK_RATE=21.7Mbit UPLINK_RATE=0.8Mbit TBF_LATENCY=500ms DOWNLINK_BURST=1500 UPLINK_BURST=1500 bin/traffic-shaping start
# 100/100 Mbps connection: ./traffic-shaping
# 1/1 GBps connection: DOWNLINK_RATE=1Gbit UPLINK_RATE=1Gbit TBF_LATENCY=15ms bin/traffic-shaping start
# Note that using low TBF_LATENCY will require powerful CPU.
#
# See also: https://www.bufferbloat.net/projects/codel/wiki/Best_practices_for_benchmarking_Codel_and_FQ_Codel/
# See also: http://www.jfcarter.net/~jimc/documents/voip-qos-1609.html
# TODO: man 7 tc-hfcs (instead of tbf)
# TODO: try to limit bandwidth using fq_codel only (get rid of tbf) - https://gist.github.com/eqhmcow/939373/8d2e8ad745a7e0a8ddb21abde42538034c2ea65b
#
set -e # abort if a command returns non-zero status (failed)
#set -x # verbose execution
# All settings below use the ${VAR:=default} form, so each one can be
# overridden from the environment without editing this file.
# Note: ip route sometimes outputs multiple lines with prefix "default", use the first one
DEV="${DEV:=$(ip route | grep "^default " | head -n1 | grep -Po "(?<=dev )[^ ]+")}"
# ingress:
DOWNLINK_RATE="${DOWNLINK_RATE:=103000kbit}" # or e.g. "21.5Mbit"
# egress:
UPLINK_RATE="${UPLINK_RATE:=102000kbit}"
CODEL_INTERVAL="${CODEL_INTERVAL:=100ms}" # usually 100ms, high speed links with low latency may need lower values
CODEL_TARGET="${CODEL_TARGET:=5ms}" # unit "us" is also available, usually 5%-10% of CODEL_INTERVAL
CODEL_LIMIT="${CODEL_LIMIT:=1001}" # decrease to reduce latency, too low values will limit throughput
CODEL_FLOWS="${CODEL_FLOWS:=1024}"
# set burst as high as possible without causing dropped packets at the start of the connections
DOWNLINK_BURST="${DOWNLINK_BURST:=8000}"
UPLINK_BURST="${UPLINK_BURST:=55000}"
TBF_LATENCY="${TBF_LATENCY:=10ms}" # set to lower latency to improve control over bandwidth limiting, UPLINK_BURST bytes must be able to be sent in this time
# Name of the IFB device used for ingress shaping, derived from $DEV.
IFB="$DEV.in" # logically this should be $DEV.ingress but max limit might be exceeded (e.g. dev = enp0s29u1u6 -> enp0s29u1u6.ingress is too long
INITCWND="${INITCWND:=15}" # initial congestion window, decrease if packet loss is seen
INITRWND="${INITRWND:=30}" # initial receiving window (advertised from client to servers), can be safely pretty high if you have lots of bandwidth (Windows and OS X have this near 40)
# See also: https://www.cdnplanet.com/blog/tune-tcp-initcwnd-for-optimum-performance/
# See also: https://www.acc.umu.se/~maswan/linux-netperf.txt
# See also: http://intronetworks.cs.luc.edu/1/html/newtcps.html
# See also: https://www.ietf.org/proceedings/84/slides/slides-84-iccrg-1.pdf
configure_shaping()
{
# Install AQM shaping on "$DEV":
#  - egress: tbf (hard rate limit) feeding fq_codel (queue management)
#  - ingress: redirected through the IFB device "$IFB" carrying the same
#    tbf + fq_codel pair (ingress cannot be shaped directly).
# Also tunes TCP initcwnd/initrwnd and congestion control, and disables
# NIC offloads that increase latency.
# EGRESS (outgoing traffic, "uploads"):
# setup bandwidth limiting:
tc qdisc add dev "$DEV" root handle 1: tbf rate "$UPLINK_RATE" burst "$UPLINK_BURST" latency "$TBF_LATENCY"
# setup fq_codel for bandwidth shaping
tc qdisc add dev "$DEV" parent 1: fq_codel quantum 300 limit "$CODEL_LIMIT" target "$CODEL_TARGET" interval "$CODEL_INTERVAL" flows "$CODEL_FLOWS" noecn
# INGRESS (incoming traffic, "downloads"):
# remember which generic ifb devices already existed, so the cleanup at the
# end only removes ones accidentally created by "ip link add ... type ifb"
ip link show ifb0 >&/dev/null && HAD_IFB0=1 || HAD_IFB0=0
ip link show ifb1 >&/dev/null && HAD_IFB1=1 || HAD_IFB1=0
# setup bandwidth limiting (ingress limiting needs IFB or Intermediate Functional Block, see https://wiki.linuxfoundation.org/networking/ifb):
tc qdisc add dev "$DEV" handle ffff: ingress
ip link add name "$IFB" type ifb
tc qdisc add dev "$IFB" root handle 1: tbf rate "$DOWNLINK_RATE" burst "$DOWNLINK_BURST" latency "$TBF_LATENCY"
# setup fq_codel for bandwidth shaping
tc qdisc add dev "$IFB" parent 1: fq_codel quantum 300 limit "$CODEL_LIMIT" target "$CODEL_TARGET" interval "$CODEL_INTERVAL" flows "$CODEL_FLOWS" ecn
ip link set dev "$IFB" up
# connect ingress filtering to actual WAN device
tc filter add dev "$DEV" parent ffff: protocol all prio 10 u32 match u32 0 0 flowid 1:1 action mirred egress redirect dev "$IFB"
# Configure initcwnd and initrwnd
# Note that "ip route" sometimes emit multiple lines with prefix "default" - we'll use first one always
ip route change $(ip route | grep ^default | head -n1) initcwnd "$INITCWND" initrwnd "$INITRWND"
## configure CDG congestion control algorithm
##modprobe tcp_cdg && echo cdg > /proc/sys/net/ipv4/tcp_congestion_control
# cubic seems to be better overall with AQM, let's tune it
echo cubic > /proc/sys/net/ipv4/tcp_congestion_control || true
echo 13 > /sys/module/tcp_cubic/parameters/hystart_low_window
echo 0 > /proc/sys/net/ipv4/tcp_slow_start_after_idle
# TODO: try modprobe tcp_westwood
# Remove any offloading that increases latency (Note that if you don't have enough CPU power, this may reduce max bandwith!)
# Note that due ethtool braindamage, the names used here do not match with ethtool --show-offload, see 'man ethtool' for details!
# ignore possible errors and keep going
ethtool --offload "$DEV" gso off || true
ethtool --offload "$DEV" gro off || true
ethtool --offload "$DEV" tx off || true
ethtool --offload "$DEV" rx off || true
ethtool --offload "$DEV" rxvlan off || true
ethtool --offload "$DEV" txvlan off || true
# cleanup broken ip link add ... type ifb sometimes creating extra ifb links (called "ifb0" and "ifb1")
# BUGFIX: "|| true" added — when the link pre-existed the leading "test" fails,
# and without "|| true" that non-zero status becomes the function's return
# value, aborting the whole script under "set -e".
test "$HAD_IFB0" = "0" && ip link show ifb0 >&/dev/null && ip link del ifb0 || true
test "$HAD_IFB1" = "0" && ip link show ifb1 >&/dev/null && ip link del ifb1 || true
}
remove_shaping()
{
# Best-effort teardown of everything configure_shaping installs; each step
# ends in "|| true" so a missing qdisc/link does not abort under "set -e".
#set -x
# drop the ingress qdisc (and its redirect filter) if one is present
tc qdisc list | grep -q "ingress" && tc qdisc del dev "$DEV" ingress || true
# Note: we need to avoid removing root qdisc in case this kernel defaults to fq_codel, "qdisc list" will output "fq_codel 0:" for root qdisc so we look for something different
tc qdisc list | grep -q "fq_codel [1-9]" && tc qdisc del dev "$DEV" root || true
# remove the IFB device used for ingress shaping, if it exists
ip link show | grep -q "$IFB" && ip link del "$IFB" || true
# configure CDG congestion control algorithm
modprobe tcp_cdg && echo cdg > /proc/sys/net/ipv4/tcp_congestion_control || true
#set +x
}
status()
{
  # Print the current qdisc and link configuration, with hints on how to
  # remove each piece of the shaping setup by hand.
  printf '%s\n' "─── queue discipline configuration: ──────────────────"
  tc qdisc list
  printf '%s\n' " TIP: use e.g. 'sudo tc qdisc del dev $DEV ingress' to remove ingress filtering"
  printf '%s\n' " TIP: use e.g. 'sudo tc qdisc del dev $DEV root' to remove egress filtering"
  printf '%s\n' "─── ip link show: ────────────────────────────────────"
  ip link show
  printf '%s\n' " TIP: use e.g. 'sudo ip link del $IFB' to remove ingress device"
}
color_status()
{
  # Run status() and highlight the interesting parts of its output:
  # the WAN device, the IFB device and any "rate ..." fields.
  # The leading "^" alternative matches every line, so nothing is filtered out.
  local highlight="^|$DEV|$IFB|rate [^ ]+"
  status | grep --color=auto -E "$highlight"
}
# handle parameters
# First positional argument is the action (start/stop/restart/status);
# any remaining arguments are option flags.
ACTION="$1"
shift || true
while [ ! -z "$1" ]
do
case "$1" in
-v|--verbose)
echo "Device: $DEV"
echo "Downlink rate (ingress): $DOWNLINK_RATE"
echo "Uplink rate (egress): $UPLINK_RATE"
set -x
;;
*)
# BUGFIX: the original tested and printed "$2" here, so a single unknown
# flag was silently ignored unless another argument followed it.
echo "Unknown parameter: '$1'" 1>&2
exit 1
;;
esac
shift || true
done
case "$ACTION" in
start)
remove_shaping
configure_shaping
;;
stop)
remove_shaping
;;
status)
color_status
;;
restart)
remove_shaping
configure_shaping
;;
*)
# BUGFIX: the original printed "$1", which after the shift loop holds a
# consumed option (or nothing) — the bad action is in "$ACTION".
echo "Unknown action: $ACTION" 1>&2
echo "Usage: $0 <start|stop|restart|status> [--verbose|-v]" 1>&2
exit 1
;;
esac
使用该脚本的方法是将其保存为例如 traffic-shaping
并运行 chmod a+x traffic-shaping
以启用执行位。然后您可以修改文件中的默认值或使用环境变量来配置脚本。例如,如果您有 100/100 Mbps 连接,您可以运行
DOWNLINK_RATE=95Mbit UPLINK_RATE=95Mbit TBF_LATENCY=25ms ./traffic-shaping start
作为root
。并恢复默认设置运行
./traffic-shaping stop
要显示当前运行状态,运行
./traffic-shaping status
请注意,上述设置不是永久性的,因此您需要在每次启动后重新运行脚本。我是在 Ubuntu low-latency
(PREEMPT) Linux 内核上以 TBF_LATENCY=10ms
使用它的,所以我不知道 generic
内核会带来什么样的延迟;如果使用 generic
内核运行,则可能需要设置 TBF_LATENCY=40ms
。如果您在没有设置任何环境变量的情况下运行 start
,您最终将得到我自己的配置——我在 low-latency
内核上用于 100/100 Mbps FTTH 连接的配置。如果您的连接速度低于 20 Mbps,您可能需要设置 DOWNLINK_BURST=1500 UPLINK_BURST=1500
,以避免在新连接开始时造成短暂的峰值,从而可能导致数据包丢失。对于缓慢的上行链路,如果您在开始连接到快速服务器时遇到数据包丢失/高延迟(例如 Web 浏览器流量),则可能还需要减少 INITCWND
(此脚本默认为 15)。7-10 范围内的值应该适合 1 Mbps 连接。