Fixes sporadic IPv6 packet loss when flow offloading is enabled.

IPv6 route GC and flowtable GC are not synchronized. When dst_cache
becomes stale and a packet passes through the flow before flowtable GC
tears it down, the packet can be dropped. So it is necessary to check
the dst on every packet in the packet path.

Signed-off-by: Ritaro Takenaka <ritarot634@xxxxxxxxx>
---
 net/netfilter/nf_flow_table_core.c | 23 +----------------------
 net/netfilter/nf_flow_table_ip.c   | 17 +++++++++++++++++
 2 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 3db256da919b..1d99afaf22c1 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -438,32 +438,11 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
 	return err;
 }
 
-static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
-{
-	struct dst_entry *dst;
-
-	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
-	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
-		dst = tuple->dst_cache;
-		if (!dst_check(dst, tuple->dst_cookie))
-			return true;
-	}
-
-	return false;
-}
-
-static bool nf_flow_has_stale_dst(struct flow_offload *flow)
-{
-	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
-	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
-}
-
 static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
 				    struct flow_offload *flow, void *data)
 {
 	if (nf_flow_has_expired(flow) ||
-	    nf_ct_is_dying(flow->ct) ||
-	    nf_flow_has_stale_dst(flow))
+	    nf_ct_is_dying(flow->ct))
 		set_bit(NF_FLOW_TEARDOWN, &flow->flags);
 
 	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 32c0eb1b4821..402742dd054c 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -367,6 +367,14 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
 		return NF_ACCEPT;
 
+	if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
+	    tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
+		if (!dst_check(tuplehash->tuple.dst_cache, 0)) {
+			flow_offload_teardown(flow);
+			return NF_ACCEPT;
+		}
+	}
+
 	if (skb_try_make_writable(skb, thoff + hdrsize))
 		return NF_DROP;
 
@@ -624,6 +632,15 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
 		return NF_ACCEPT;
 
+	if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
+	    tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
+		if (!dst_check(tuplehash->tuple.dst_cache,
+			       tuplehash->tuple.dst_cookie)) {
+			flow_offload_teardown(flow);
+			return NF_ACCEPT;
+		}
+	}
+
 	if (skb_try_make_writable(skb, thoff + hdrsize))
 		return NF_DROP;
 
-- 
2.34.1
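
Note on the shape of the change: both hooks now open-code the same
xmit_type/dst_check test. Below is a minimal sketch of how that test
could be shared, assuming a hypothetical helper name (nf_flow_dst_check)
that is not part of this patch, and passing tuple->dst_cookie for both
address families (the IPv4 hunk above passes 0 instead):

/* Illustrative only: a possible shared helper for the per-packet dst
 * check. The helper name and its placement are assumptions; the patch
 * open-codes this logic in both nf_flow_offload_ip_hook() and
 * nf_flow_offload_ipv6_hook().
 */
static bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	/* dst_check() returns NULL once the cached route is obsolete,
	 * e.g. after IPv6 route GC has invalidated it.
	 */
	return dst_check(tuple->dst_cache, tuple->dst_cookie) != NULL;
}

A caller in the packet path would then tear the flow down and let the
packet fall back to the classic forwarding path when the cached dst is
stale, mirroring the hunks above:

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}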