Cache tracking has improved over time by incrementally adding/deleting
objects when evaluating the commands that are going to be sent to the
kernel. nft_cache_is_complete() already checks that the cache contains
the objects required to handle this batch of commands by comparing the
cache flags.

Infer from the current generation ID whether another transaction has
invalidated the existing cache; this allows skipping the unnecessary
cache flush-then-refill cycles that slow down incremental updates.

Signed-off-by: Pablo Neira Ayuso <pablo@xxxxxxxxxxxxx>
---
v2: no changes

nft_slew.c, provided by Neels (on Cc), shows better numbers now after
this series.

 src/cache.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/src/cache.c b/src/cache.c
index e88cbae2ad95..4b797ec79ae5 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -1184,9 +1184,21 @@ static bool nft_cache_needs_refresh(struct nft_cache *cache, unsigned int flags)
 	       (flags & NFT_CACHE_REFRESH);
 }
 
-static bool nft_cache_is_updated(struct nft_cache *cache, uint16_t genid)
+static bool nft_cache_is_updated(struct nft_cache *cache, unsigned int flags,
+				 uint16_t genid)
 {
-	return genid && genid == cache->genid;
+	if (!genid)
+		return false;
+
+	if (genid == cache->genid)
+		return true;
+
+	if (genid == cache->genid + 1) {
+		cache->genid++;
+		return true;
+	}
+
+	return false;
 }
 
 bool nft_cache_needs_update(struct nft_cache *cache)
@@ -1211,7 +1223,7 @@ replay:
 	genid = mnl_genid_get(&ctx);
 	if (!nft_cache_needs_refresh(cache, flags) &&
 	    nft_cache_is_complete(cache, flags) &&
-	    nft_cache_is_updated(cache, genid))
+	    nft_cache_is_updated(cache, flags, genid))
 		return 0;
 
 	if (cache->genid)
-- 
2.30.2