/* Tom Kelly's Scalable TCP
 *
 * See http://www.deneholme.net/tom/scalable/
 *
 * John Heffner
 */

#include <linux/module.h>
#include <net/tcp.h>

/* These factors derived from the recommended values in the paper:
 * .01 and 7/8.  We use 50 instead of 100 to account for
 * delayed ack.
 */
#define TCP_SCALABLE_AI_CNT	50U
#define TCP_SCALABLE_MD_SCALE	3

struct scalable {
	u32 loss_cwnd;
};

/* Slow start as usual; in congestion avoidance, grow the window by one
 * segment per min(snd_cwnd, TCP_SCALABLE_AI_CNT) acknowledged segments.
 */
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
	else
		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
				  1);
}

/* On loss, remember the current window for a possible undo and cut the
 * window by 1/8 (multiplicative decrease), never going below 2 segments.
 */
static u32 tcp_scalable_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct scalable *ca = inet_csk_ca(sk);

	ca->loss_cwnd = tp->snd_cwnd;

	return max(tp->snd_cwnd - (tp->snd_cwnd >> TCP_SCALABLE_MD_SCALE), 2U);
}

/* Undo a spurious window reduction by restoring the window saved at the
 * last cut.
 */
static u32 tcp_scalable_cwnd_undo(struct sock *sk)
{
	const struct scalable *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh	= tcp_scalable_ssthresh,
	.undo_cwnd	= tcp_scalable_cwnd_undo,
	.cong_avoid	= tcp_scalable_cong_avoid,

	.owner		= THIS_MODULE,
	.name		= "scalable",
};

static int __init tcp_scalable_register(void)
{
	return tcp_register_congestion_control(&tcp_scalable);
}

static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}

module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
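
For reference, here is a minimal userspace sketch (not part of the module) of the arithmetic these constants imply: in congestion avoidance the window grows by roughly one segment per min(cwnd, TCP_SCALABLE_AI_CNT) acknowledged segments, and on loss it is cut by 1/8 via TCP_SCALABLE_MD_SCALE, never dropping below 2. The helper names are hypothetical and the increase function is only a coarse model of tcp_cong_avoid_ai(), which actually accumulates partial credit in snd_cwnd_cnt.

#include <stdio.h>
#include <stdint.h>

#define AI_CNT   50u   /* mirrors TCP_SCALABLE_AI_CNT */
#define MD_SCALE 3u    /* mirrors TCP_SCALABLE_MD_SCALE: cut cwnd by 1/8 */

/* Hypothetical helper: additive increase, ~1 extra segment per
 * min(cwnd, AI_CNT) acked segments (coarse model, ignores leftover credit).
 */
static uint32_t scalable_ai(uint32_t cwnd, uint32_t acked)
{
	uint32_t w = cwnd < AI_CNT ? cwnd : AI_CNT;

	return cwnd + acked / w;
}

/* Hypothetical helper: multiplicative decrease on loss, floor of 2. */
static uint32_t scalable_md(uint32_t cwnd)
{
	uint32_t w = cwnd - (cwnd >> MD_SCALE);

	return w > 2u ? w : 2u;
}

int main(void)
{
	uint32_t cwnd = 100;

	printf("after 50 acks: %u\n", scalable_ai(cwnd, 50)); /* 101 */
	printf("after a loss:  %u\n", scalable_md(cwnd));     /* 88  */
	return 0;
}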