Target lookup is relatively costly due to the filesystem access. Avoid
this overhead in huge rulesets which contain many chain jumps by
caching the failed lookups into a hashtable for later.

A sample ruleset involving 50k user-defined chains and 130k rules of
which 90k contain a chain jump restores significantly faster on my
testing VM:

variant    old      new
---------------------------
legacy     1m12s    37s
nft        1m35s    53s

Signed-off-by: Phil Sutter <phil@xxxxxx>
---
 libxtables/xtables.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/libxtables/xtables.c b/libxtables/xtables.c
index 094cbd87ec1ed..3cb9a87c9406d 100644
--- a/libxtables/xtables.c
+++ b/libxtables/xtables.c
@@ -49,6 +49,7 @@
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <libiptc/libxtc.h>
+#include <libiptc/linux_list.h>
 
 #ifndef NO_SHARED_LIBS
 #include <dlfcn.h>
@@ -255,6 +256,72 @@ static void dlreg_free(void)
 }
 #endif
 
+struct notarget {
+	struct hlist_node node;
+	char name[];
+};
+
+#define NOTARGET_HSIZE	512
+static struct hlist_head notargets[NOTARGET_HSIZE];
+
+static void notargets_hlist_init(void)
+{
+	int i;
+
+	for (i = 0; i < NOTARGET_HSIZE; i++)
+		INIT_HLIST_HEAD(&notargets[i]);
+}
+
+static void notargets_hlist_free(void)
+{
+	struct hlist_node *pos, *n;
+	struct notarget *cur;
+	int i;
+
+	for (i = 0; i < NOTARGET_HSIZE; i++) {
+		hlist_for_each_entry_safe(cur, pos, n, &notargets[i], node) {
+			hlist_del(&cur->node);
+			free(cur);
+		}
+	}
+}
+
+static uint32_t djb_hash(const char *key)
+{
+	uint32_t i, hash = 5381;
+
+	for (i = 0; i < strlen(key); i++)
+		hash = ((hash << 5) + hash) + key[i];
+
+	return hash;
+}
+
+static struct notarget *notargets_hlist_lookup(const char *name)
+{
+	uint32_t key = djb_hash(name) % NOTARGET_HSIZE;
+	struct hlist_node *node;
+	struct notarget *cur;
+
+	hlist_for_each_entry(cur, node, &notargets[key], node) {
+		if (!strcmp(name, cur->name))
+			return cur;
+	}
+	return NULL;
+}
+
+static void notargets_hlist_insert(const char *name)
+{
+	struct notarget *cur;
+
+	if (!name)
+		return;
+
+	cur = xtables_malloc(sizeof(*cur) + strlen(name) + 1);
+	cur->name[0] = '\0';
+	strcat(cur->name, name);
+	hlist_add_head(&cur->node, &notargets[djb_hash(name) % NOTARGET_HSIZE]);
+}
+
 void xtables_init(void)
 {
 	/* xtables cannot be used with setuid in a safe way. */
@@ -284,6 +351,8 @@ void xtables_init(void)
 		return;
 	}
 	xtables_libdir = XTABLES_LIBDIR;
+
+	notargets_hlist_init();
 }
 
 void xtables_fini(void)
@@ -291,6 +360,7 @@
 #ifndef NO_SHARED_LIBS
 	dlreg_free();
 #endif
+	notargets_hlist_free();
 }
 
 void xtables_set_nfproto(uint8_t nfproto)
@@ -820,8 +890,14 @@ xtables_find_target(const char *name, enum xtables_tryload tryload)
 	struct xtables_target *prev = NULL;
 	struct xtables_target **dptr;
 	struct xtables_target *ptr;
+	struct notarget *notgt;
 	bool found = false;
 
+	/* known non-target? */
+	notgt = notargets_hlist_lookup(name);
+	if (notgt && tryload != XTF_LOAD_MUST_SUCCEED)
+		return NULL;
+
 	/* Standard target? */
 	if (strcmp(name, "") == 0
 	    || strcmp(name, XTC_LABEL_ACCEPT) == 0
@@ -894,6 +970,8 @@ xtables_find_target(const char *name, enum xtables_tryload tryload)
 
 	if (ptr)
 		ptr->used = 1;
+	else
+		notargets_hlist_insert(name);
 
 	return ptr;
 }
-- 
2.34.1
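
For readers outside the xtables tree, below is a minimal, self-contained
sketch of the negative-cache idea the patch implements: names for which
target lookup failed are remembered in a small hash table (same djb2 hash
and bucket count as the patch), so repeated jumps to the same user-defined
chain skip the expensive filesystem probe. It is an illustration only, not
the patch code: it uses a plain singly-linked chain instead of the kernel
hlist macros, and the nocache_* names are hypothetical.

/* Sketch of a negative-name cache; hypothetical nocache_* helpers. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NOCACHE_HSIZE 512

struct nocache_entry {
	struct nocache_entry *next;
	char name[];
};

static struct nocache_entry *nocache[NOCACHE_HSIZE];

/* djb2: hash = hash * 33 + c, seeded with 5381, as used in the patch */
static uint32_t djb_hash(const char *key)
{
	uint32_t hash = 5381;

	while (*key)
		hash = ((hash << 5) + hash) + (unsigned char)*key++;

	return hash;
}

/* Remember that a lookup for 'name' failed. */
static void nocache_insert(const char *name)
{
	uint32_t key = djb_hash(name) % NOCACHE_HSIZE;
	struct nocache_entry *e = malloc(sizeof(*e) + strlen(name) + 1);

	if (!e)
		return;
	strcpy(e->name, name);
	e->next = nocache[key];
	nocache[key] = e;
}

/* Return 1 if 'name' is already known not to be a target. */
static int nocache_lookup(const char *name)
{
	uint32_t key = djb_hash(name) % NOCACHE_HSIZE;
	struct nocache_entry *e;

	for (e = nocache[key]; e; e = e->next)
		if (!strcmp(e->name, name))
			return 1;
	return 0;
}

int main(void)
{
	/* First miss pays the expensive lookup, later ones hit the cache. */
	if (!nocache_lookup("mychain"))
		nocache_insert("mychain");
	printf("cached: %d\n", nocache_lookup("mychain"));	/* prints 1 */
	return 0;
}

With 50k user-defined chains hashed into 512 buckets, chains stay around a
hundred entries long, which is still far cheaper than the dlopen()/stat()
attempt a failed extension lookup would otherwise repeat for every jump.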