This is a note to let you know that I've just added the patch titled

    net: add recursion limit to GRO

to the 4.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     net-add-recursion-limit-to-gro.patch
and it can be found in the queue-4.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.
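
For a quick sense of what the patch does before reading the diff: every
nested gro_receive invocation now goes through a wrapper that increments
a per-skb counter and aborts GRO once the counter reaches a limit of 15,
so a packet made of stacked encapsulation headers can no longer recurse
without bound. The stand-alone, user-space C sketch below models that
guard; GRO_RECURSION_LIMIT is taken from the patch, but struct gro_cb,
recursion_inc_test() and the recursive "handler" here are illustrative
stand-ins, not kernel code:

#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

/* Simplified stand-in for the per-packet control block (the GRO CB). */
struct gro_cb {
	unsigned int recursion_counter;
	unsigned int flush;
};

/* Mirrors gro_recursion_inc_test(): true once too many nested
 * gro_receive handlers have run on this packet. */
static int recursion_inc_test(struct gro_cb *cb)
{
	return ++cb->recursion_counter == GRO_RECURSION_LIMIT;
}

/* Stands in for a handler that keeps finding another inner header,
 * as a packet built entirely of VLAN headers makes vlan_gro_receive()
 * do. Without the guard, this recursion would be unbounded. */
static void nested_receive(struct gro_cb *cb, int depth)
{
	if (recursion_inc_test(cb)) {
		cb->flush = 1;	/* abort GRO; packet takes the normal path */
		return;
	}
	printf("dissecting nested header at depth %d\n", depth);
	nested_receive(cb, depth + 1);
}

int main(void)
{
	struct gro_cb cb = { 0, 0 };

	nested_receive(&cb, 0);
	printf("flush=%u after %u increments\n", cb.flush, cb.recursion_counter);
	return 0;
}

Built with "gcc -o gro_demo gro_demo.c", this dissects depths 0 through
13 and then reports flush=1 after 15 increments: the fifteenth nested
call is refused, which in the kernel corresponds to GRO being aborted
and the skb being processed normally.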
>From foo@baz Thu Nov 10 16:42:45 CET 2016
From: Sabrina Dubroca <sd@xxxxxxxxxxxxxxx>
Date: Thu, 20 Oct 2016 15:58:02 +0200
Subject: net: add recursion limit to GRO

From: Sabrina Dubroca <sd@xxxxxxxxxxxxxxx>

[ Upstream commit fcd91dd449867c6bfe56a81cabba76b829fd05cd ]

Currently, GRO can do unlimited recursion through the gro_receive
handlers. This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem. Thus, the kernel is vulnerable to a stack overflow, if we
receive a packet composed entirely of VLAN headers.

This patch adds a recursion counter to the GRO layer to prevent stack
overflow. When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally. This recursion
counter is put in the GRO CB, but could be turned into a percpu counter
if we run out of space in the CB.

Thanks to Vladimír Beneš <vbenes@xxxxxxxxxx> for the initial bug report.

Fixes: CVE-2016-7039
Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd@xxxxxxxxxxxxxxx>
Reviewed-by: Jiri Benc <jbenc@xxxxxxxxxx>
Acked-by: Hannes Frederic Sowa <hannes@xxxxxxxxxxxxxxxxxxx>
Acked-by: Tom Herbert <tom@xxxxxxxxxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/net/geneve.c      |    2 +-
 drivers/net/vxlan.c       |    2 +-
 include/linux/netdevice.h |   40 +++++++++++++++++++++++++++++++++++++++-
 net/8021q/vlan.c          |    2 +-
 net/core/dev.c            |    1 +
 net/ethernet/eth.c        |    2 +-
 net/ipv4/af_inet.c        |    2 +-
 net/ipv4/fou.c            |    4 ++--
 net/ipv4/gre_offload.c    |    2 +-
 net/ipv4/udp_offload.c    |    4 ++--
 net/ipv6/ip6_offload.c    |    2 +-
 11 files changed, 51 insertions(+), 12 deletions(-)

--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_recei
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receiv
 		}
 	}
 
-	pp = eth_gro_receive(head, skb);
+	pp = call_gro_receive(eth_gro_receive, head, skb);
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 	u8	is_ipv6:1;
 
-	/* 7 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8	recursion_counter:4;
+
+	/* 3 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2014,6 +2017,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here */
@@ -2059,6 +2081,22 @@ struct udp_offload {
 	struct udp_offload_callbacks callbacks;
 };
 
+typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
+					      struct sk_buff *,
+					      struct udp_offload *);
+static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb,
+						struct udp_offload *uoff)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb, uoff);
+}
+
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
 	u64     rx_packets;
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive
 	skb_gro_pull(skb, sizeof(*vhdr));
 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4240,6 +4240,7 @@ static enum gro_result dev_gro_receive(s
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->recursion_counter = 0;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 		/* Setup for GRO checksum validation */
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1372,7 +1372,7 @@ static struct sk_buff **inet_gro_receive
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -201,7 +201,7 @@ static struct sk_buff **fou_gro_receive(
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -360,7 +360,7 @@ static struct sk_buff **gue_gro_receive(
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -339,8 +339,8 @@ unflush:
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-						     uo_priv->offload);
+	pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
+				  head, skb, uo_priv->offload);
 
 out_unlock:
 	rcu_read_unlock();
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive
 	skb_gro_postpull_rcsum(skb, iph, nlen);
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();


Patches currently in stable-queue which might be from sd@xxxxxxxxxxxxxxx are

queue-4.4/net-add-recursion-limit-to-gro.patch
queue-4.4/ipv6-correctly-add-local-routes-when-lo-goes-up.patch
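
A closing note for anyone carrying out-of-tree tunnel or offload drivers
on top of 4.4: after this patch, a gro_receive callback that still calls
another gro_receive handler directly bypasses the new guard and keeps
the unbounded recursion. The sketch below shows the expected one-line
conversion; "foo_gro_receive" and its surroundings are hypothetical and
only call_gro_receive() (added by this patch) and the existing
gro_find_receive_by_type() helper are real kernel interfaces:

#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Hypothetical driver callback, for illustration only. */
static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff **pp = NULL;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(htons(ETH_P_IP));
	if (!ptype)
		goto out_unlock;

	/* Was: pp = ptype->callbacks.gro_receive(head, skb);
	 * The wrapper bumps NAPI_GRO_CB(skb)->recursion_counter and, at
	 * GRO_RECURSION_LIMIT, sets the flush bit and returns NULL so
	 * the skb falls back to the normal receive path. */
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
	return pp;
}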