(better CC netdev) Paul Menage wrote: > cls_cgroup: read classid atomically in classifier > > Avoid reading the unsynchronized value cs->classid multiple times, > since it could change concurrently from non-zero to zero; this would > result in the classifier returning a positive result with a bogus > (zero) classid. > This patch looks nice. It also narrows the RCU read-side critical section. Reviewed-by: Li Zefan <lizf@xxxxxxxxxxxxxx> > Signed-off-by: Paul Menage <menage@xxxxxxxxxx> > > --- > > net/sched/cls_cgroup.c | 22 +++++++++++----------- > 1 files changed, 11 insertions(+), 11 deletions(-) > > diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c > index 1ab4542..4ece6e0 100644 > --- a/net/sched/cls_cgroup.c > +++ b/net/sched/cls_cgroup.c > @@ -98,8 +98,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, > struct tcf_result *res) > { > struct cls_cgroup_head *head = tp->root; > - struct cgroup_cls_state *cs; > - int ret = 0; > + u32 classid; > > /* > * Due to the nature of the classifier it is required to ignore all > @@ -115,17 +114,18 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, > return -1; > > rcu_read_lock(); > - cs = task_cls_state(current); > - if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) { > - res->classid = cs->classid; > - res->class = 0; > - ret = tcf_exts_exec(skb, &head->exts, res); > - } else > - ret = -1; > - > + classid = task_cls_state(current)->classid; > rcu_read_unlock(); > > - return ret; > + if (!classid) > + return -1; > + > + if (!tcf_em_tree_match(skb, &head->ematches, NULL)) > + return -1; > + > + res->classid = classid; > + res->class = 0; > + return tcf_exts_exec(skb, &head->exts, res); > } > > static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle) > > > -- To unsubscribe from this list: send the line "unsubscribe linux-net" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html