rds_page_remainder_alloc() obtains the current CPU with get_cpu() while
disabling preemption. The CPU number is then used to access the per-CPU
data structure via per_cpu().

This can be optimized by relying on local_bh_disable() to provide a
stable CPU number and prevent migration, and then using this_cpu_ptr()
to retrieve the data structure.

Cc: Allison Henderson <allison.henderson@xxxxxxxxxx>
Cc: linux-rdma@xxxxxxxxxxxxxxx
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
 net/rds/page.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/net/rds/page.c b/net/rds/page.c
index e0dd4f62ea47a..58a8548a915a9 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -86,8 +86,8 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 		goto out;
 	}
 
-	rem = &per_cpu(rds_page_remainders, get_cpu());
 	local_bh_disable();
+	rem = this_cpu_ptr(&rds_page_remainders);
 
 	while (1) {
 		/* avoid a tiny region getting stuck by tossing it */
@@ -116,12 +116,11 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 
 		/* alloc if there is nothing for us to use */
 		local_bh_enable();
-		put_cpu();
 
 		page = alloc_page(gfp);
 
-		rem = &per_cpu(rds_page_remainders, get_cpu());
 		local_bh_disable();
+		rem = this_cpu_ptr(&rds_page_remainders);
 
 		if (!page) {
 			ret = -ENOMEM;
@@ -140,7 +139,6 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 	}
 
 	local_bh_enable();
-	put_cpu();
 out:
 	rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
 		 ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
-- 
2.47.2
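
For anyone less familiar with the two per-CPU idioms involved, below is a
minimal, standalone sketch of the access pattern before and after a change
like this one. The struct, variable and function names are made up for
illustration; they are not the ones used in net/rds/page.c.

#include <linux/bottom_half.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU data, for illustration only. */
struct example_pcpu_data {
	unsigned long counter;
};

static DEFINE_PER_CPU(struct example_pcpu_data, example_pcpu_data);

/* Old idiom: get_cpu() disables preemption and returns the CPU number,
 * which is then passed to per_cpu(); put_cpu() re-enables preemption.
 */
static void old_pattern(void)
{
	struct example_pcpu_data *p;

	p = &per_cpu(example_pcpu_data, get_cpu());
	p->counter++;
	put_cpu();
}

/* New idiom: local_bh_disable() already keeps the task on the current
 * CPU, so this_cpu_ptr() can be used directly and the extra
 * get_cpu()/put_cpu() pair becomes unnecessary.
 */
static void new_pattern(void)
{
	struct example_pcpu_data *p;

	local_bh_disable();
	p = this_cpu_ptr(&example_pcpu_data);
	p->counter++;
	local_bh_enable();
}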