The percpu_xxx functions duplicate the this_cpu_xxx functions, so replace
them to allow further code cleanup. In preempt-safe contexts the
__this_cpu_xxx variants also perform slightly better, since they avoid a
redundant preempt_disable()/preempt_enable() pair.

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxx>
---
 net/netfilter/xt_TEE.c |   12 ++++++------
 net/socket.c           |    4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 5f054a0..678084c 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -90,7 +90,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_tee_tginfo *info = par->targinfo;
 	struct iphdr *iph;
 
-	if (percpu_read(tee_active))
+	if (__this_cpu_read(tee_active))
 		return XT_CONTINUE;
 	/*
 	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
@@ -127,9 +127,9 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 	ip_send_check(iph);
 
 	if (tee_tg_route4(skb, info)) {
-		percpu_write(tee_active, true);
+		__this_cpu_write(tee_active, true);
 		ip_local_out(skb);
-		percpu_write(tee_active, false);
+		__this_cpu_write(tee_active, false);
 	} else {
 		kfree_skb(skb);
 	}
@@ -170,7 +170,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
 
-	if (percpu_read(tee_active))
+	if (__this_cpu_read(tee_active))
 		return XT_CONTINUE;
 	skb = pskb_copy(skb, GFP_ATOMIC);
 	if (skb == NULL)
@@ -188,9 +188,9 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 		--iph->hop_limit;
 	}
 	if (tee_tg_route6(skb, info)) {
-		percpu_write(tee_active, true);
+		__this_cpu_write(tee_active, true);
 		ip6_local_out(skb);
-		percpu_write(tee_active, false);
+		__this_cpu_write(tee_active, false);
 	} else {
 		kfree_skb(skb);
 	}
diff --git a/net/socket.c b/net/socket.c
index ffe92ca..4b62ca9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -479,7 +479,7 @@ static struct socket *sock_alloc(void)
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
 
-	percpu_add(sockets_in_use, 1);
+	this_cpu_add(sockets_in_use, 1);
 	return sock;
 }
 
@@ -522,7 +522,7 @@ void sock_release(struct socket *sock)
 	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
-	percpu_sub(sockets_in_use, 1);
+	this_cpu_sub(sockets_in_use, 1);
 	if (!sock->file) {
 		iput(SOCK_INODE(sock));
 		return;
--
1.6.3.3
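
Note (illustration only, not part of the patch): the distinction the changelog
relies on is that this_cpu_*() is safe to call from any context, while the
cheaper __this_cpu_*() variants assume the caller already runs with preemption
disabled, which the patch assumes for the xt_TEE hook path guarded by
tee_active. A minimal sketch with hypothetical names (demo_counter,
demo_update) showing the usage difference:

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-cpu counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_update(void)
{
	/* Preemption-safe: usable from any context. */
	this_cpu_add(demo_counter, 1);

	/*
	 * Cheaper variant: no implicit protection against migration,
	 * so it is only correct when preemption is already disabled.
	 */
	preempt_disable();
	__this_cpu_add(demo_counter, 1);
	preempt_enable();
}

The net/socket.c hunks keep the plain this_cpu_add()/this_cpu_sub() forms,
presumably because sock_alloc() and sock_release() can run with preemption
enabled.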