Replace places where __get_cpu_var() is used for an address calculation
with this_cpu_ptr(). The remaining __this_cpu_ptr() uses in mm/slub.c and
mm/vmstat.c are converted to the equivalent raw_cpu_ptr() and this_cpu_ptr()
calls.

Cc: akpm@xxxxxxxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>
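For reference, the conversion is mechanical: __get_cpu_var() evaluates to
this cpu's instance of a per-cpu variable as an lvalue, so code that only
wants the address takes '&' of the result, while this_cpu_ptr() computes
that address directly. A minimal sketch of the before/after pattern (the
pcp_example/example_counts names are invented for illustration and do not
appear in the patch):

	#include <linux/percpu.h>

	/* Hypothetical per-cpu structure, for illustration only. */
	struct pcp_example {
		int nr;
	};
	static DEFINE_PER_CPU(struct pcp_example, example_counts);

	static void example_use(void)
	{
		struct pcp_example *p;

		preempt_disable();	/* keep "this cpu" stable */

		/* Old: evaluate the per-cpu lvalue, then take its address. */
		p = &__get_cpu_var(example_counts);

		/* New: compute the address of this cpu's instance directly. */
		p = this_cpu_ptr(&example_counts);

		p->nr++;

		preempt_enable();
	}

Both assignments yield the same pointer; the patch below simply switches
each address-calculation site to the direct form.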
Index: linux/lib/radix-tree.c
===================================================================
--- linux.orig/lib/radix-tree.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/lib/radix-tree.c	2013-12-18 21:50:02.530389966 -0600
@@ -221,7 +221,7 @@
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
 			rtp->nodes[rtp->nr - 1] = NULL;
@@ -277,14 +277,14 @@
 	int ret = -ENOMEM;
 
 	preempt_disable();
-	rtp = &__get_cpu_var(radix_tree_preloads);
+	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
 		preempt_enable();
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < ARRAY_SIZE(rtp->nodes))
 			rtp->nodes[rtp->nr++] = node;
 		else
Index: linux/mm/memcontrol.c
===================================================================
--- linux.orig/mm/memcontrol.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/mm/memcontrol.c	2013-12-18 21:50:02.534389891 -0600
@@ -2432,7 +2432,7 @@
  */
 static void drain_local_stock(struct work_struct *dummy)
 {
-	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
Index: linux/mm/memory-failure.c
===================================================================
--- linux.orig/mm/memory-failure.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/mm/memory-failure.c	2013-12-18 21:50:02.534389891 -0600
@@ -1286,7 +1286,7 @@
 	unsigned long proc_flags;
 	int gotten;
 
-	mf_cpu = &__get_cpu_var(memory_failure_cpu);
+	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
 	for (;;) {
 		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
Index: linux/mm/page-writeback.c
===================================================================
--- linux.orig/mm/page-writeback.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/mm/page-writeback.c	2013-12-18 21:50:02.538389816 -0600
@@ -1628,7 +1628,7 @@
 	 * 1000+ tasks, all of them start dirtying pages at exactly the same
	 * time, hence all honoured too large initial task->nr_dirtied_pause.
 	 */
-	p = &__get_cpu_var(bdp_ratelimits);
+	p = this_cpu_ptr(&bdp_ratelimits);
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
 	else if (unlikely(*p >= ratelimit_pages)) {
@@ -1640,7 +1640,7 @@
 	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
 	 * the dirty throttling and livelock other long-run dirtiers.
 	 */
-	p = &__get_cpu_var(dirty_throttle_leaks);
+	p = this_cpu_ptr(&dirty_throttle_leaks);
 	if (*p > 0 && current->nr_dirtied < ratelimit) {
 		unsigned long nr_pages_dirtied;
 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
Index: linux/mm/swap.c
===================================================================
--- linux.orig/mm/swap.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/mm/swap.c	2013-12-18 21:50:02.538389816 -0600
@@ -409,7 +409,7 @@
 
 		page_cache_get(page);
 		local_irq_save(flags);
-		pvec = &__get_cpu_var(lru_rotate_pvecs);
+		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page))
 			pagevec_move_tail(pvec);
 		local_irq_restore(flags);
Index: linux/mm/vmalloc.c
===================================================================
--- linux.orig/mm/vmalloc.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/mm/vmalloc.c	2013-12-18 21:50:02.538389816 -0600
@@ -1488,7 +1488,7 @@
 	if (!addr)
 		return;
 	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
 		if (llist_add((struct llist_node *)addr, &p->list))
 			schedule_work(&p->wq);
 	} else
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/mm/slub.c	2013-12-18 21:50:02.542389740 -0600
@@ -2176,7 +2176,7 @@
 
 	page = new_slab(s, flags, node);
 	if (page) {
-		c = __this_cpu_ptr(s->cpu_slab);
+		c = raw_cpu_ptr(s->cpu_slab);
 		if (c->page)
 			flush_slab(s, c);
 
@@ -2396,7 +2396,7 @@
 	 * and the retrieval of the tid.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
@@ -2651,7 +2651,7 @@
 	 * during the cmpxchg then the free will succedd.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 	tid = c->tid;
 	preempt_enable();
 
Index: linux/mm/vmstat.c
===================================================================
--- linux.orig/mm/vmstat.c	2013-12-18 21:50:02.550389590 -0600
+++ linux/mm/vmstat.c	2013-12-18 21:50:13.586182025 -0600
@@ -489,7 +489,7 @@
 			continue;
 
 		if (__this_cpu_read(p->pcp.count))
-			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
+			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 #endif
 	}
 	fold_diff(global_diff);
@@ -1218,7 +1218,7 @@
 static void vmstat_update(struct work_struct *w)
 {
 	refresh_cpu_vm_stats();
-	schedule_delayed_work(&__get_cpu_var(vmstat_work),
+	schedule_delayed_work(this_cpu_ptr(&vmstat_work),
 		round_jiffies_relative(sysctl_stat_interval));
 }
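A note on the mm/slub.c hunks: they convert __this_cpu_ptr() rather than
__get_cpu_var(). Both new spellings compute the same address; they differ
only in debug checking. A sketch of the distinction (a fragment, assuming
a struct kmem_cache *s as used in the patch; the behaviour described
applies under CONFIG_DEBUG_PREEMPT):

	struct kmem_cache_cpu *c;

	/*
	 * Checked form: with CONFIG_DEBUG_PREEMPT this can warn when the
	 * caller is preemptible, catching "this cpu" lookups whose result
	 * could be stale by the time it is used.
	 */
	c = this_cpu_ptr(s->cpu_slab);

	/*
	 * Raw form: the same address computation with no preemption-state
	 * check, suitable for the slab-refill path above, which already
	 * runs with interrupts disabled.
	 */
	c = raw_cpu_ptr(s->cpu_slab);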