On 2023-12-21 13:37:03 [+0100], Oliver Hartkopp wrote:
> --- a/net/can/gw.c
> +++ b/net/can/gw.c
> @@ -128,11 +128,13 @@ struct cgw_job {
> 	struct hlist_node list;
> 	struct rcu_head rcu;
> 	u32 handled_frames;
> 	u32 dropped_frames;
> 	u32 deleted_frames;
> -	struct cf_mod mod;
> +	struct cf_mod *mod;
> +	struct cf_mod mod1;
> +	struct cf_mod mod2;

I wouldn't put mod1/2 here. That cf_mod struct has 736 bytes in my
allmod build, so it gains some weight. It also introduces a run-time
problem, more on that later.
Also, *mod requires a __rcu annotation.

> @@ -504,11 +506,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
> 	/* clone the given skb, which has not been done in can_rcv()
> 	 *
> 	 * When there is at least one modification function activated,
> 	 * we need to copy the skb as we want to modify skb->data.
> 	 */
> -	if (gwj->mod.modfunc[0])
> +	if (gwj->mod->modfunc[0])

Almost. You need

	struct cf_mod *mod;
	…
	rcu_read_lock();
	mod = rcu_dereference(gwj->mod);
	if (mod->modfunc[0])
	…
	rcu_read_unlock();
	/* no longer touching mod, but I am leaving can_can_gw_rcv()
	 * anyway */

> 		nskb = skb_copy(skb, GFP_ATOMIC);
> 	else
> 		nskb = skb_clone(skb, GFP_ATOMIC);
>
> 	if (!nskb) {
> @@ -1085,21 +1087,25 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
> 	if (mod.uid) {
> 		ASSERT_RTNL();
>
> 		/* check for updating an existing job with identical uid */
> 		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
> -			if (gwj->mod.uid != mod.uid)
> +			if (gwj->mod->uid != mod.uid)
> 				continue;
>
> 			/* interfaces & filters must be identical */
> 			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
> 				return -EINVAL;
>
> 			/* update modifications with disabled softirq & quit */
> -			local_bh_disable();
> -			memcpy(&gwj->mod, &mod, sizeof(mod));
> -			local_bh_enable();
> +			if (gwj->mod == &gwj->mod1) {
> +				memcpy(&gwj->mod2, &mod, sizeof(mod));
> +				rcu_replace_pointer(gwj->mod, &gwj->mod2, true);
> +			} else {
> +				memcpy(&gwj->mod1, &mod, sizeof(mod));
> +				rcu_replace_pointer(gwj->mod, &gwj->mod1, true);
> +			}

Why are you afraid of doing

	mod = kmalloc(sizeof(*mod), GFP_KERNEL);

before invoking cgw_parse_attr()? Let me try to sell this to you:
- Your stack usage shrinks by 736 bytes (as per the previous estimation).
- If this is a new job, you attach the pointer to the struct cgw_job so
  it is not lost.
- If this is an update, you avoid the memcpy() and use
  rcu_replace_pointer() as it is intended.

Your construct has the problem that it does not wait until the RCU
grace period completes. Meaning on the first update mod1 is in use and
you switch over to mod2. On the second update mod2 is in use and you
switch back to mod1. There is no guarantee that all current readers of
mod (still pointing to mod1) are gone by the time you switch from mod2
back to mod1. This of course requires two updates (one after the
other).

If you allocate the struct on entry (assuming the salesman in me was
successful) you could use

	old_mod = rcu_replace_pointer(gwj->mod, new_mod, lockdep_rtnl_is_held());

to grab the about-to-be-replaced cf_mod and, once the replacement is
done, kfree_rcu() it at the end, or use

	kfree_rcu_mightsleep(old_mod);

to save an RCU head.
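
To make the sales pitch concrete, a rough and untested sketch of the
update path in cgw_create_job() as I imagine it, assuming gwj->mod
carries the __rcu annotation and cgw_parse_attr() keeps its current
signature (error unwinding abbreviated, labels like out_free made up):

	struct cf_mod *mod;
	int err;

	/* parse into a kmalloc()ed cf_mod instead of the on-stack copy */
	mod = kmalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod)
		return -ENOMEM;

	err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		goto out_free;

	if (mod->uid) {
		ASSERT_RTNL();

		/* check for updating an existing job with identical uid */
		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
			struct cf_mod *old_mod;

			old_mod = rcu_dereference_protected(gwj->mod,
							    lockdep_rtnl_is_held());
			if (old_mod->uid != mod->uid)
				continue;

			/* interfaces & filters must be identical */
			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
				err = -EINVAL;
				goto out_free;
			}

			/* publish the new cf_mod, free the old one only
			 * after a grace period; no rcu_head in cf_mod needed
			 */
			old_mod = rcu_replace_pointer(gwj->mod, mod,
						      lockdep_rtnl_is_held());
			kfree_rcu_mightsleep(old_mod);
			return 0;
		}
	}

	/* new-job path continues here and attaches mod to the freshly
	 * allocated cgw_job instead of copying it
	 */
	…

out_free:
	kfree(mod);
	return err;

That way an update can never hand readers back a cf_mod they might
still be looking at, because the previous one is only freed after the
grace period.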
> 				return 0;
> 			}
> 		}
>
> 	/* ifindex == 0 is not allowed for job creation */
> @@ -1219,16 +1226,16 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
>
> 		if (gwj->limit_hops != limhops)
> 			continue;
>
> 		/* we have a match when uid is enabled and identical */
> -		if (gwj->mod.uid || mod.uid) {
> -			if (gwj->mod.uid != mod.uid)
> +		if (gwj->mod->uid || mod.uid) {
> +			if (gwj->mod->uid != mod.uid)
> 				continue;

Also here you must use rcu_dereference() for gwj->mod. Since all
add/remove jobs happen under the RTNL lock you could use

	rcu_dereference_protected(, lockdep_rtnl_is_held());
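
For instance (again untested, and with a hypothetical local job_mod so
it does not clash with the on-stack mod parsed from the netlink
message):

	struct cf_mod *job_mod;

	/* cgw_remove_job() runs under RTNL, so no rcu_read_lock() is
	 * needed; lockdep_rtnl_is_held() documents and checks that.
	 */
	job_mod = rcu_dereference_protected(gwj->mod,
					    lockdep_rtnl_is_held());

	/* we have a match when uid is enabled and identical */
	if (job_mod->uid || mod.uid) {
		if (job_mod->uid != mod.uid)
			continue;
		…
	}

Sebastian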