/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>

#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX

static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");

/* TCP Illinois Parameters */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u32	loss_cwnd;	/* cwnd on loss */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtts measurements below threshold */
};

static void rtt_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->end_seq = tp->snd_nxt;
	ca->cnt_rtt = 0;
	ca->sum_rtt = 0;

	/* TODO: age max_rtt? */
}

static void tcp_illinois_init(struct sock *sk)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->alpha = ALPHA_MAX;
	ca->beta = BETA_BASE;
	ca->base_rtt = 0x7fffffff;
	ca->max_rtt = 0;

	ca->acked = 0;
	ca->rtt_low = 0;
	ca->rtt_above = 0;

	rtt_reset(sk);
}

/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
{
	struct illinois *ca = inet_csk_ca(sk);
	s32 rtt_us = sample->rtt_us;

	ca->acked = sample->pkts_acked;

	/* dup ack, no rtt sample */
	if (rtt_us < 0)
		return;

	/* ignore bogus values, this prevents wraparound in alpha math */
	if (rtt_us > RTT_MAX)
		rtt_us = RTT_MAX;

	/* keep track of minimum RTT seen so far */
	if (ca->base_rtt > rtt_us)
		ca->base_rtt = rtt_us;

	/* and max */
	if (ca->max_rtt < rtt_us)
		ca->max_rtt = rtt_us;

	++ca->cnt_rtt;
	ca->sum_rtt += rtt_us;
}

/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}

/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}

/*
 * Compute value of alpha used for additive increase.
 * If small window then use 1.0, equivalent to Reno.
 *
 * For larger windows, adjust based on average delay.
 * A. If average delay is at minimum (we are uncongested),
 *    then use large alpha (10.0) to increase faster.
 * B. If average delay is at maximum (getting congested),
 *    then use small alpha (0.3).
 *
 * The result is a convex window growth curve.
 */
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
	u32 d1 = dm / 100;	/* Low threshold */

	if (da <= d1) {
		/* If never got out of low delay zone, then use max */
		if (!ca->rtt_above)
			return ALPHA_MAX;

		/* Wait for theta (default 5) good RTT's before allowing
		 * alpha to go to ALPHA_MAX.  This prevents one good RTT
		 * from causing sudden window increase.
		 */
		if (++ca->rtt_low < theta)
			return ca->alpha;

		ca->rtt_low = 0;
		ca->rtt_above = 0;
		return ALPHA_MAX;
	}

	ca->rtt_above = 1;

	/*
	 * Based on:
	 *
	 *      (dm - d1) amin amax
	 * k1 = -------------------
	 *         amax - amin
	 *
	 *       (dm - d1) amin
	 * k2 = ---------------- - d1
	 *        amax - amin
	 *
	 *             k1
	 * alpha = ----------
	 *          k2 + da
	 */
	dm -= d1;
	da -= d1;
	return (dm * ALPHA_MAX) /
		(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}
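/*
 * Worked example (editor's sketch, not part of the original source):
 * with ALPHA_SHIFT = 7, ALPHA_SCALE = 128, so ALPHA_MIN = 38 (~0.3)
 * and ALPHA_MAX = 1280 (10.0).  Suppose dm = 1000 usec of maximum
 * queuing delay and da = 100 usec of average delay.  Then d1 = 10,
 * and since da > d1 the adaptive branch runs:
 *
 *   dm' = 990, da' = 90
 *   alpha = (990 * 1280) / (990 + 90 * (1280 - 38) / 38)
 *         = 1267200 / 3931 = 322
 *
 * i.e. alpha/ALPHA_SCALE is ~2.5 segments of increase per RTT,
 * between the 0.3 and 10.0 extremes.
 */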
/*
 * Beta used for multiplicative decrease.
 * For small window sizes returns same value as Reno (0.5).
 *
 * If delay is small (10% of max) then beta = 1/8
 * If delay is up to 80% of max then beta = 1/2
 * In between is a linear function.
 */
static u32 beta(u32 da, u32 dm)
{
	u32 d2, d3;

	d2 = dm / 10;
	if (da <= d2)
		return BETA_MIN;

	d3 = (8 * dm) / 10;
	if (da >= d3 || d3 <= d2)
		return BETA_MAX;

	/*
	 * Based on:
	 *
	 *       bmin d3 - bmax d2
	 * k3 = -------------------
	 *           d3 - d2
	 *
	 *       bmax - bmin
	 * k4 = -------------
	 *         d3 - d2
	 *
	 * b = k3 + k4 da
	 */
	return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
		/ (d3 - d2);
}

/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (tp->snd_cwnd < win_thresh) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
	} else if (ca->cnt_rtt > 0) {
		u32 dm = max_delay(ca);
		u32 da = avg_delay(ca);

		ca->alpha = alpha(ca, da, dm);
		ca->beta = beta(da, dm);
	}

	rtt_reset(sk);
}

/*
 * In case of loss, reset to default values
 */
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
	struct illinois *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
		ca->rtt_low = 0;
		ca->rtt_above = 0;
		rtt_reset(sk);
	}
}

/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC2861 only increase cwnd if fully utilized */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In slow start */
	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);

	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;

		/* This is close approximation of:
		 * tp->snd_cwnd += alpha/tp->snd_cwnd
		 */
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32)tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}

static u32 tcp_illinois_ssthresh(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->loss_cwnd = tp->snd_cwnd;

	/* Multiplicative decrease */
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
}

static u32 tcp_illinois_cwnd_undo(struct sock *sk)
{
	const struct illinois *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
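/*
 * Worked example (editor's sketch, not part of the original source):
 * with BETA_SHIFT = 6, BETA_SCALE = 64, so BETA_MIN = 8 (1/8) and
 * BETA_MAX = 32 (1/2).  Suppose dm = 1000 usec and da = 450 usec.
 * Then d2 = 100, d3 = 800, and the linear region applies:
 *
 *   beta = (8 * 800 - 32 * 100 + (32 - 8) * 450) / (800 - 100)
 *        = 14000 / 700 = 20		(i.e. ~0.31)
 *
 * On loss with snd_cwnd = 100, tcp_illinois_ssthresh() then returns
 * 100 - ((100 * 20) >> 6) = 100 - 31 = 69: a ~31% backoff, milder
 * than Reno's 50%, because the average delay was well below d3.
 */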
/* Extract info for Tcp socket info provided via netlink. */
static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
				union tcp_cc_info *info)
{
	const struct illinois *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt = ca->cnt_rtt;
		info->vegas.tcpv_minrtt = ca->base_rtt;
		info->vegas.tcpv_rtt = 0;

		if (info->vegas.tcpv_rttcnt > 0) {
			u64 t = ca->sum_rtt;

			do_div(t, info->vegas.tcpv_rttcnt);
			info->vegas.tcpv_rtt = t;
		}
		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}

static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.undo_cwnd	= tcp_illinois_cwnd_undo,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};

static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}

static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}

module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");
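/*
 * Usage sketch (editor's note, not part of the original source):
 * once built, the module can be selected system-wide or per socket.
 *
 *   # modprobe tcp_illinois
 *   # sysctl -w net.ipv4.tcp_congestion_control=illinois
 *
 * or from an application, via the TCP_CONGESTION socket option:
 *
 *   setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "illinois",
 *              strlen("illinois"));
 */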