On Thu, Apr 28, 2011 at 12:14 PM, KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx> wrote: > Adapt new API. Almost change is trivial, most important change are to > remove following like =operator. > > Âcpumask_t cpu_mask = *mm_cpumask(mm); Could you note that you are changing this to: cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); > Âcpus_allowed = current->cpus_allowed; > And this to: cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); > Because cpumask_var_t is =operator unsafe. These usage might prevent > kernel core improvement. > > No functional change. > > Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx> > Cc: "David S. Miller" <davem@xxxxxxxxxxxxx> > Cc: sparclinux@xxxxxxxxxxxxxxx > --- > Âarch/sparc/include/asm/smp_32.h Â|  12 ++++---- > Âarch/sparc/kernel/cpumap.c    |  Â4 +- > Âarch/sparc/kernel/ds.c      |  14 ++++---- > Âarch/sparc/kernel/irq_64.c    |  Â6 ++-- > Âarch/sparc/kernel/leon_smp.c   |  20 ++++++------ > Âarch/sparc/kernel/mdesc.c    Â|  Â2 +- > Âarch/sparc/kernel/of_device_64.c |  Â3 +- > Âarch/sparc/kernel/pci_msi.c   Â|  Â3 +- > Âarch/sparc/kernel/smp_32.c    |  51 +++++++++++++++++++-------------- > Âarch/sparc/kernel/smp_64.c    |  58 +++++++++++++++++++------------------- > Âarch/sparc/kernel/sun4d_smp.c  Â|  12 ++++---- > Âarch/sparc/kernel/sun4m_smp.c  Â|  12 ++++---- > Âarch/sparc/kernel/sysfs.c    Â|  Â3 +- > Âarch/sparc/kernel/us2e_cpufreq.c |  Â4 +- > Âarch/sparc/kernel/us3_cpufreq.c Â|  Â4 +- > Âarch/sparc/mm/init_64.c     Â|  14 ++++---- > Â16 files changed, 116 insertions(+), 106 deletions(-) > > diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h > index d82d7f4..dd7a6be 100644 > --- a/arch/sparc/include/asm/smp_32.h > +++ b/arch/sparc/include/asm/smp_32.h > @@ -61,17 +61,17 @@ BTFIXUPDEF_BLACKBOX(load_current) > > Â#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4) > > -static inline void xc0(smpfunc_t 
func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); } > +static inline void xc0(smpfunc_t func) { smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0); } > Âstatic inline void xc1(smpfunc_t func, unsigned long arg1) > -{ smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); } > +{ smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); } > Âstatic inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) > -{ smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); } > +{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); } > Âstatic inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, >              unsigned long arg3) > -{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); } > +{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); } > Âstatic inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, >              unsigned long arg3, unsigned long arg4) > -{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); } > +{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4); } > > Âstatic inline int smp_call_function(void (*func)(void *info), void *info, int wait) > Â{ > @@ -82,7 +82,7 @@ static inline int smp_call_function(void (*func)(void *info), void *info, int wa > Âstatic inline int smp_call_function_single(int cpuid, void (*func) (void *info), >                      void *info, int wait) > Â{ > -    smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid), > +    smp_cross_call((smpfunc_t)func, *cpumask_of(cpuid), >            (unsigned long) info, 0, 0, 0); >    Âreturn 0; > Â} > diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c > index 8de64c8..d91fd78 100644 > --- a/arch/sparc/kernel/cpumap.c > +++ b/arch/sparc/kernel/cpumap.c > @@ -202,7 +202,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void) >    Ânew_tree->total_nodes = n; >    Âmemcpy(&new_tree->level, tmp_level, sizeof(tmp_level)); > > -    prev_cpu = cpu = first_cpu(cpu_online_map); > +    
prev_cpu = cpu = cpumask_first(cpu_online_mask); > >    Â/* Initialize all levels in the tree with the first CPU */ >    Âfor (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) { > @@ -381,7 +381,7 @@ static int simple_map_to_cpu(unsigned int index) >    Â} > >    Â/* Impossible, since num_online_cpus() <= num_possible_cpus() */ > -    return first_cpu(cpu_online_map); > +    return cpumask_first(cpu_online_mask); > Â} > > Âstatic int _map_to_cpu(unsigned int index) > diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c > index 3add4de..dd1342c 100644 > --- a/arch/sparc/kernel/ds.c > +++ b/arch/sparc/kernel/ds.c > @@ -497,7 +497,7 @@ static void dr_cpu_init_response(struct ds_data *resp, u64 req_num, >    Âtag->num_records = ncpus; > >    Âi = 0; > -    for_each_cpu_mask(cpu, *mask) { > +    for_each_cpu(cpu, mask) { >        Âent[i].cpu = cpu; >        Âent[i].result = DR_CPU_RES_OK; >        Âent[i].stat = default_stat; > @@ -534,7 +534,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp, >    Âint resp_len, ncpus, cpu; >    Âunsigned long flags; > > -    ncpus = cpus_weight(*mask); > +    ncpus = cpumask_weight(mask); >    Âresp_len = dr_cpu_size_response(ncpus); >    Âresp = kzalloc(resp_len, GFP_KERNEL); >    Âif (!resp) > @@ -547,7 +547,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp, >    Âmdesc_populate_present_mask(mask); >    Âmdesc_fill_in_cpu_data(mask); > > -    for_each_cpu_mask(cpu, *mask) { > +    for_each_cpu(cpu, mask) { >        Âint err; > >        Âprintk(KERN_INFO "ds-%llu: Starting cpu %d...\n", > @@ -593,7 +593,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp, >    Âint resp_len, ncpus, cpu; >    Âunsigned long flags; > > -    ncpus = cpus_weight(*mask); > +    ncpus = cpumask_weight(mask); >    Âresp_len = dr_cpu_size_response(ncpus); >    Âresp = kzalloc(resp_len, GFP_KERNEL); >    Âif (!resp) > @@ -603,7 +603,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp, >               resp_len, 
ncpus, mask, >               DR_CPU_STAT_UNCONFIGURED); > > -    for_each_cpu_mask(cpu, *mask) { > +    for_each_cpu(cpu, mask) { >        Âint err; > >        Âprintk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n", > @@ -649,13 +649,13 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp, > >    Âpurge_dups(cpu_list, tag->num_records); > > -    cpus_clear(mask); > +    cpumask_clear(&mask); >    Âfor (i = 0; i < tag->num_records; i++) { >        Âif (cpu_list[i] == CPU_SENTINEL) >            Âcontinue; > >        Âif (cpu_list[i] < nr_cpu_ids) > -            cpu_set(cpu_list[i], mask); > +            cpumask_set_cpu(cpu_list[i], &mask); >    Â} > >    Âif (tag->type == DR_CPU_CONFIGURE) > diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c > index b1d275c..4e78862 100644 > --- a/arch/sparc/kernel/irq_64.c > +++ b/arch/sparc/kernel/irq_64.c > @@ -224,13 +224,13 @@ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity) >    Âint cpuid; > >    Âcpumask_copy(&mask, affinity); > -    if (cpus_equal(mask, cpu_online_map)) { > +    if (cpumask_equal(&mask, cpu_online_mask)) { >        Âcpuid = map_to_cpu(irq); >    Â} else { >        Âcpumask_t tmp; > > -        cpus_and(tmp, cpu_online_map, mask); > -        cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp); > +        cpumask_and(&tmp, cpu_online_mask, &mask); > +        cpuid = cpumask_empty(&tmp) ? 
map_to_cpu(irq) : cpumask_first(&tmp); >    Â} > >    Âreturn cpuid; > diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c > index 8f5de4a..3c5014b 100644 > --- a/arch/sparc/kernel/leon_smp.c > +++ b/arch/sparc/kernel/leon_smp.c > @@ -104,11 +104,11 @@ void __cpuinit leon_callin(void) >    Âatomic_inc(&init_mm.mm_count); >    Âcurrent->active_mm = &init_mm; > > -    while (!cpu_isset(cpuid, smp_commenced_mask)) > +    while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) >        Âmb(); > >    Âlocal_irq_enable(); > -    cpu_set(cpuid, cpu_online_map); > +    set_cpu_online(cpuid, true); > Â} > > Â/* > @@ -262,21 +262,21 @@ void __init leon_smp_done(void) >    Âlocal_flush_cache_all(); > >    Â/* Free unneeded trap tables */ > -    if (!cpu_isset(1, cpu_present_map)) { > +    if (!cpu_present(1)) { >        ÂClearPageReserved(virt_to_page(&trapbase_cpu1)); >        Âinit_page_count(virt_to_page(&trapbase_cpu1)); >        Âfree_page((unsigned long)&trapbase_cpu1); >        Âtotalram_pages++; >        Ânum_physpages++; >    Â} > -    if (!cpu_isset(2, cpu_present_map)) { > +    if (!cpu_present(2)) { >        ÂClearPageReserved(virt_to_page(&trapbase_cpu2)); >        Âinit_page_count(virt_to_page(&trapbase_cpu2)); >        Âfree_page((unsigned long)&trapbase_cpu2); >        Âtotalram_pages++; >        Ânum_physpages++; >    Â} > -    if (!cpu_isset(3, cpu_present_map)) { > +    if (!cpu_present(3)) { >        ÂClearPageReserved(virt_to_page(&trapbase_cpu3)); >        Âinit_page_count(virt_to_page(&trapbase_cpu3)); >        Âfree_page((unsigned long)&trapbase_cpu3); > @@ -337,10 +337,10 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, >        Â{ >            Âregister int i; > > -            cpu_clear(smp_processor_id(), mask); > -            cpus_and(mask, cpu_online_map, mask); > +            cpumask_clear_cpu(smp_processor_id(), &mask); > +            cpumask_and(&mask, cpu_online_mask, &mask); >            
Âfor (i = 0; i <= high; i++) { > -                if (cpu_isset(i, mask)) { > +                if (cpumask_test_cpu(i, &mask)) { >                    Âccall_info.processors_in[i] = 0; >                    Âccall_info.processors_out[i] = 0; >                    Âset_cpu_int(i, LEON3_IRQ_CROSS_CALL); > @@ -354,7 +354,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, > >            Âi = 0; >            Âdo { > -                if (!cpu_isset(i, mask)) > +                if (!cpumask_test_cpu(i, &mask)) >                    Âcontinue; > >                Âwhile (!ccall_info.processors_in[i]) > @@ -363,7 +363,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, > >            Âi = 0; >            Âdo { > -                if (!cpu_isset(i, mask)) > +                if (!cpumask_test_cpu(i, &mask)) >                    Âcontinue; > >                Âwhile (!ccall_info.processors_out[i]) > diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c > index 56db064..42f28c7 100644 > --- a/arch/sparc/kernel/mdesc.c > +++ b/arch/sparc/kernel/mdesc.c > @@ -768,7 +768,7 @@ static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handl >                cpuid, NR_CPUS); >            Âcontinue; >        Â} > -        if (!cpu_isset(cpuid, *mask)) > +        if (!cpumask_test_cpu(cpuid, mask)) >            Âcontinue; > Â#endif > > diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c > index 5c14968..3bb2eac 100644 > --- a/arch/sparc/kernel/of_device_64.c > +++ b/arch/sparc/kernel/of_device_64.c > @@ -622,8 +622,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, > Âout: >    Ânid = of_node_to_nid(dp); >    Âif (nid != -1) { > -        cpumask_t numa_mask = *cpumask_of_node(nid); > +        cpumask_t numa_mask; > please, could you remove the extra blank line? 
> +        cpumask_copy(&numa_mask, cpumask_of_node(nid)); >        Âirq_set_affinity(irq, &numa_mask); >    Â} > > diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c > index 30982e9..580651a 100644 > --- a/arch/sparc/kernel/pci_msi.c > +++ b/arch/sparc/kernel/pci_msi.c > @@ -284,8 +284,9 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm, > >    Ânid = pbm->numa_node; >    Âif (nid != -1) { > -        cpumask_t numa_mask = *cpumask_of_node(nid); > +        cpumask_t numa_mask; > please, could you remove the extra blank line? > +        cpumask_copy(&numa_mask, cpumask_of_node(nid)); >        Âirq_set_affinity(irq, &numa_mask); >    Â} >    Âerr = request_irq(irq, sparc64_msiq_interrupt, 0, > diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c > index f95690c..e6b74b5 100644 > --- a/arch/sparc/kernel/smp_32.c > +++ b/arch/sparc/kernel/smp_32.c > @@ -149,9 +149,10 @@ void smp_flush_tlb_all(void) > Âvoid smp_flush_cache_mm(struct mm_struct *mm) > Â{ >    Âif(mm->context != NO_CONTEXT) { > -        cpumask_t cpu_mask = *mm_cpumask(mm); > -        cpu_clear(smp_processor_id(), cpu_mask); > -        if (!cpus_empty(cpu_mask)) > +        cpumask_t cpu_mask; > +        cpumask_copy(&cpu_mask, mm_cpumask(mm)); > +        cpumask_clear_cpu(smp_processor_id(), &cpu_mask); > +        if (!cpumask_empty(&cpu_mask)) >            Âxc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm); >        Âlocal_flush_cache_mm(mm); >    Â} > @@ -160,9 +161,10 @@ void smp_flush_cache_mm(struct mm_struct *mm) > Âvoid smp_flush_tlb_mm(struct mm_struct *mm) > Â{ >    Âif(mm->context != NO_CONTEXT) { > -        cpumask_t cpu_mask = *mm_cpumask(mm); > -        cpu_clear(smp_processor_id(), cpu_mask); > -        if (!cpus_empty(cpu_mask)) { > +        cpumask_t cpu_mask; > +        cpumask_copy(&cpu_mask, mm_cpumask(mm)); > +        cpumask_clear_cpu(smp_processor_id(), &cpu_mask); > +        if (!cpumask_empty(&cpu_mask)) { >        
    Âxc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm); >            Âif(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) >                Âcpumask_copy(mm_cpumask(mm), > @@ -178,9 +180,10 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start, >    Âstruct mm_struct *mm = vma->vm_mm; > >    Âif (mm->context != NO_CONTEXT) { > -        cpumask_t cpu_mask = *mm_cpumask(mm); > -        cpu_clear(smp_processor_id(), cpu_mask); > -        if (!cpus_empty(cpu_mask)) > +        cpumask_t cpu_mask; > +        cpumask_copy(&cpu_mask, mm_cpumask(mm)); > +        cpumask_clear_cpu(smp_processor_id(), &cpu_mask); > +        if (!cpumask_empty(&cpu_mask)) >            Âxc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end); >        Âlocal_flush_cache_range(vma, start, end); >    Â} > @@ -192,9 +195,10 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, >    Âstruct mm_struct *mm = vma->vm_mm; > >    Âif (mm->context != NO_CONTEXT) { > -        cpumask_t cpu_mask = *mm_cpumask(mm); > -        cpu_clear(smp_processor_id(), cpu_mask); > -        if (!cpus_empty(cpu_mask)) > +        cpumask_t cpu_mask; > +        cpumask_copy(&cpu_mask, mm_cpumask(mm)); > +        cpumask_clear_cpu(smp_processor_id(), &cpu_mask); > +        if (!cpumask_empty(&cpu_mask)) >            Âxc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end); >        Âlocal_flush_tlb_range(vma, start, end); >    Â} > @@ -205,9 +209,10 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page) >    Âstruct mm_struct *mm = vma->vm_mm; > >    Âif(mm->context != NO_CONTEXT) { > -        cpumask_t cpu_mask = *mm_cpumask(mm); > -        cpu_clear(smp_processor_id(), cpu_mask); > -        if (!cpus_empty(cpu_mask)) > +        cpumask_t cpu_mask; > +        cpumask_copy(&cpu_mask, mm_cpumask(mm)); > +        cpumask_clear_cpu(smp_processor_id(), &cpu_mask); > +   
     if (!cpumask_empty(&cpu_mask)) >            Âxc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page); >        Âlocal_flush_cache_page(vma, page); >    Â} > @@ -218,9 +223,10 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) >    Âstruct mm_struct *mm = vma->vm_mm; > >    Âif(mm->context != NO_CONTEXT) { > -        cpumask_t cpu_mask = *mm_cpumask(mm); > -        cpu_clear(smp_processor_id(), cpu_mask); > -        if (!cpus_empty(cpu_mask)) > +        cpumask_t cpu_mask; > +        cpumask_copy(&cpu_mask, mm_cpumask(mm)); > +        cpumask_clear_cpu(smp_processor_id(), &cpu_mask); > +        if (!cpumask_empty(&cpu_mask)) >            Âxc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page); >        Âlocal_flush_tlb_page(vma, page); >    Â} > @@ -247,9 +253,10 @@ void smp_flush_page_to_ram(unsigned long page) > > Âvoid smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) > Â{ > -    cpumask_t cpu_mask = *mm_cpumask(mm); > -    cpu_clear(smp_processor_id(), cpu_mask); > -    if (!cpus_empty(cpu_mask)) > +    cpumask_t cpu_mask; > +    cpumask_copy(&cpu_mask, mm_cpumask(mm)); > +    cpumask_clear_cpu(smp_processor_id(), &cpu_mask); > +    if (!cpumask_empty(&cpu_mask)) >        Âxc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr); >    Âlocal_flush_sig_insns(mm, insn_addr); > Â} > @@ -403,7 +410,7 @@ int __cpuinit __cpu_up(unsigned int cpu) >    Â}; > >    Âif (!ret) { > -        cpu_set(cpu, smp_commenced_mask); > +        cpumask_set_cpu(cpu, &smp_commenced_mask); >        Âwhile (!cpu_online(cpu)) >            Âmb(); >    Â} > diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c > index 9478da7..99cb172 100644 > --- a/arch/sparc/kernel/smp_64.c > +++ b/arch/sparc/kernel/smp_64.c > @@ -121,11 +121,11 @@ void __cpuinit smp_callin(void) >    Â/* inform the notifiers about the new cpu */ >    Ânotify_cpu_starting(cpuid); > > -   
 while (!cpu_isset(cpuid, smp_commenced_mask)) > +    while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) >        Ârmb(); > >    Âipi_call_lock_irq(); > -    cpu_set(cpuid, cpu_online_map); > +    set_cpu_online(cpuid, true); >    Âipi_call_unlock_irq(); > >    Â/* idle thread is expected to have preempt disabled */ > @@ -785,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask > > Â/* Send cross call to all processors mentioned in MASK_P > Â* except self. ÂReally, there are only two cases currently, > - * "&cpu_online_map" and "&mm->cpu_vm_mask". > + * "cpu_online_mask" and "mm_cpumask(mm)". > Â*/ > Âstatic void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) > Â{ > @@ -797,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d > Â/* Send cross call to all processors except self. */ > Âstatic void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2) > Â{ > -    smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map); > +    smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask); > Â} > > Âextern unsigned long xcall_sync_tick; > @@ -805,7 +805,7 @@ extern unsigned long xcall_sync_tick; > Âstatic void smp_start_sync_tick_client(int cpu) > Â{ >    Âxcall_deliver((u64) &xcall_sync_tick, 0, 0, > -           &cpumask_of_cpu(cpu)); > +           cpumask_of(cpu)); > Â} > > Âextern unsigned long xcall_call_function; > @@ -820,7 +820,7 @@ extern unsigned long xcall_call_function_single; > Âvoid arch_send_call_function_single_ipi(int cpu) > Â{ >    Âxcall_deliver((u64) &xcall_call_function_single, 0, 0, > -           &cpumask_of_cpu(cpu)); > +           cpumask_of(cpu)); > Â} > > Âvoid __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) > @@ -918,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) >        Â} >        Âif (data0) { >            Âxcall_deliver(data0, __pa(pg_addr), 
> -                   (u64) pg_addr, &cpumask_of_cpu(cpu)); > +                   (u64) pg_addr, cpumask_of(cpu)); > Â#ifdef CONFIG_DEBUG_DCFLUSH >            Âatomic_inc(&dcpage_flushes_xcall); > Â#endif > @@ -954,7 +954,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) >    Â} >    Âif (data0) { >        Âxcall_deliver(data0, __pa(pg_addr), > -               (u64) pg_addr, &cpu_online_map); > +               (u64) pg_addr, cpu_online_mask); > Â#ifdef CONFIG_DEBUG_DCFLUSH >        Âatomic_inc(&dcpage_flushes_xcall); > Â#endif > @@ -1197,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void) >    Âfor_each_present_cpu(i) { >        Âunsigned int j; > > -        cpus_clear(cpu_core_map[i]); > +        cpumask_clear(&cpu_core_map[i]); >        Âif (cpu_data(i).core_id == 0) { > -            cpu_set(i, cpu_core_map[i]); > +            cpumask_set_cpu(i, &cpu_core_map[i]); >            Âcontinue; >        Â} > >        Âfor_each_present_cpu(j) { >            Âif (cpu_data(i).core_id == >              Âcpu_data(j).core_id) > -                cpu_set(j, cpu_core_map[i]); > +                cpumask_set_cpu(j, &cpu_core_map[i]); >        Â} >    Â} > >    Âfor_each_present_cpu(i) { >        Âunsigned int j; > > -        cpus_clear(per_cpu(cpu_sibling_map, i)); > +        cpumask_clear(&per_cpu(cpu_sibling_map, i)); >        Âif (cpu_data(i).proc_id == -1) { > -            cpu_set(i, per_cpu(cpu_sibling_map, i)); > +            cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); >            Âcontinue; >        Â} > >        Âfor_each_present_cpu(j) { >            Âif (cpu_data(i).proc_id == >              Âcpu_data(j).proc_id) > -                cpu_set(j, per_cpu(cpu_sibling_map, i)); > +                cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); >        Â} >    Â} > Â} > @@ -1232,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu) >    Âint ret = smp_boot_one_cpu(cpu); > >    Âif (!ret) { > -        cpu_set(cpu, smp_commenced_mask); 
> -        while (!cpu_isset(cpu, cpu_online_map)) > +        cpumask_set_cpu(cpu, &smp_commenced_mask); > +        while (!cpu_online(cpu)) >            Âmb(); > -        if (!cpu_isset(cpu, cpu_online_map)) { > +        if (!cpu_online(cpu)) { >            Âret = -ENODEV; >        Â} else { >            Â/* On SUN4V, writes to %tick and %stick are > @@ -1269,7 +1269,7 @@ void cpu_play_dead(void) >                Âtb->nonresum_mondo_pa, 0); >    Â} > > -    cpu_clear(cpu, smp_commenced_mask); > +    cpumask_clear_cpu(cpu, &smp_commenced_mask); >    Âmembar_safe("#Sync"); > >    Âlocal_irq_disable(); > @@ -1290,13 +1290,13 @@ int __cpu_disable(void) >    Âcpuinfo_sparc *c; >    Âint i; > > -    for_each_cpu_mask(i, cpu_core_map[cpu]) > -        cpu_clear(cpu, cpu_core_map[i]); > -    cpus_clear(cpu_core_map[cpu]); > +    for_each_cpu(i, &cpu_core_map[cpu]) > +        cpumask_clear_cpu(cpu, &cpu_core_map[i]); > +    cpumask_clear(&cpu_core_map[cpu]); > > -    for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) > -        cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); > -    cpus_clear(per_cpu(cpu_sibling_map, cpu)); > +    for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) > +        cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); > +    cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); > >    Âc = &cpu_data(cpu); > > @@ -1313,7 +1313,7 @@ int __cpu_disable(void) >    Âlocal_irq_disable(); > >    Âipi_call_lock(); > -    cpu_clear(cpu, cpu_online_map); > +    set_cpu_online(cpu, false); >    Âipi_call_unlock(); > >    Âcpu_map_rebuild(); > @@ -1327,11 +1327,11 @@ void __cpu_die(unsigned int cpu) > >    Âfor (i = 0; i < 100; i++) { >        Âsmp_rmb(); > -        if (!cpu_isset(cpu, smp_commenced_mask)) > +        if (!cpumask_test_cpu(cpu, &smp_commenced_mask)) >            Âbreak; >        Âmsleep(100); >    Â} > -    if (cpu_isset(cpu, smp_commenced_mask)) { > +    if (cpumask_test_cpu(cpu, &smp_commenced_mask)) { >        Âprintk(KERN_ERR "CPU %u didn't die...\n", 
cpu); >    Â} else { > Â#if defined(CONFIG_SUN_LDOMS) > @@ -1341,7 +1341,7 @@ void __cpu_die(unsigned int cpu) >        Âdo { >            Âhv_err = sun4v_cpu_stop(cpu); >            Âif (hv_err == HV_EOK) { > -                cpu_clear(cpu, cpu_present_map); > +                set_cpu_present(cpu, false); >                Âbreak; >            Â} >        Â} while (--limit > 0); > @@ -1362,7 +1362,7 @@ void __init smp_cpus_done(unsigned int max_cpus) > Âvoid smp_send_reschedule(int cpu) > Â{ >    Âxcall_deliver((u64) &xcall_receive_signal, 0, 0, > -           &cpumask_of_cpu(cpu)); > +           cpumask_of(cpu)); > Â} > > Âvoid __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) > diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c > index 475d50b..3d4185c 100644 > --- a/arch/sparc/kernel/sun4d_smp.c > +++ b/arch/sparc/kernel/sun4d_smp.c > @@ -105,7 +105,7 @@ void __cpuinit smp4d_callin(void) > >    Âlocal_irq_enable();   /* We don't allow PIL 14 yet */ > > -    while (!cpu_isset(cpuid, smp_commenced_mask)) > +    while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) >        Âbarrier(); > >    Âspin_lock_irqsave(&sun4d_imsk_lock, flags); > @@ -239,10 +239,10 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, >        Â{ >            Âregister int i; > > -            cpu_clear(smp_processor_id(), mask); > -            cpus_and(mask, cpu_online_map, mask); > +            cpumask_clear_cpu(smp_processor_id(), &mask); > +            cpumask_and(&mask, cpu_online_mask, &mask); >            Âfor (i = 0; i <= high; i++) { > -                if (cpu_isset(i, mask)) { > +                if (cpumask_test_cpu(i, &mask)) { >                    Âccall_info.processors_in[i] = 0; >                    Âccall_info.processors_out[i] = 0; >                    Âsun4d_send_ipi(i, IRQ_CROSS_CALL); > @@ -255,7 +255,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, > >            
Âi = 0; >            Âdo { > -                if (!cpu_isset(i, mask)) > +                if (!cpumask_test_cpu(i, &mask)) >                    Âcontinue; >                Âwhile (!ccall_info.processors_in[i]) >                    Âbarrier(); > @@ -263,7 +263,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, > >            Âi = 0; >            Âdo { > -                if (!cpu_isset(i, mask)) > +                if (!cpumask_test_cpu(i, &mask)) >                    Âcontinue; >                Âwhile (!ccall_info.processors_out[i]) >                    Âbarrier(); > diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c > index 5cc7dc5..68e6011 100644 > --- a/arch/sparc/kernel/sun4m_smp.c > +++ b/arch/sparc/kernel/sun4m_smp.c > @@ -70,7 +70,7 @@ void __cpuinit smp4m_callin(void) >    Âatomic_inc(&init_mm.mm_count); >    Âcurrent->active_mm = &init_mm; > > -    while (!cpu_isset(cpuid, smp_commenced_mask)) > +    while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) >        Âmb(); > >    Âlocal_irq_enable(); > @@ -199,10 +199,10 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, >        Â{ >            Âregister int i; > > -            cpu_clear(smp_processor_id(), mask); > -            cpus_and(mask, cpu_online_map, mask); > +            cpumask_clear_cpu(smp_processor_id(), &mask); > +            cpumask_and(&mask, cpu_online_mask, &mask); >            Âfor (i = 0; i < ncpus; i++) { > -                if (cpu_isset(i, mask)) { > +                if (cpumask_test_cpu(i, &mask)) { >                    Âccall_info.processors_in[i] = 0; >                    Âccall_info.processors_out[i] = 0; >                    Âset_cpu_int(i, IRQ_CROSS_CALL); > @@ -218,7 +218,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, > >            Âi = 0; >            Âdo { > -                if (!cpu_isset(i, mask)) > +                if (!cpumask_test_cpu(i, &mask)) 
>                    Âcontinue; >                Âwhile (!ccall_info.processors_in[i]) >                    Âbarrier(); > @@ -226,7 +226,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, > >            Âi = 0; >            Âdo { > -                if (!cpu_isset(i, mask)) > +                if (!cpumask_test_cpu(i, &mask)) >                    Âcontinue; >                Âwhile (!ccall_info.processors_out[i]) >                    Âbarrier(); > diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c > index 1eb8b00..7408201 100644 > --- a/arch/sparc/kernel/sysfs.c > +++ b/arch/sparc/kernel/sysfs.c > @@ -103,9 +103,10 @@ static unsigned long run_on_cpu(unsigned long cpu, >                Âunsigned long (*func)(unsigned long), >                Âunsigned long arg) > Â{ > -    cpumask_t old_affinity = current->cpus_allowed; > +    cpumask_t old_affinity; >    Âunsigned long ret; > > +    cpumask_copy(&old_affinity, tsk_cpus_allowed(current)); >    Â/* should return -EINVAL to userspace */ >    Âif (set_cpus_allowed_ptr(current, cpumask_of(cpu))) >        Âreturn 0; > diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c > index 8f982b7..531d54f 100644 > --- a/arch/sparc/kernel/us2e_cpufreq.c > +++ b/arch/sparc/kernel/us2e_cpufreq.c > @@ -237,7 +237,7 @@ static unsigned int us2e_freq_get(unsigned int cpu) >    Âif (!cpu_online(cpu)) >        Âreturn 0; > > -    cpus_allowed = current->cpus_allowed; > +    cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); >    Âset_cpus_allowed_ptr(current, cpumask_of(cpu)); > >    Âclock_tick = sparc64_get_clock_tick(cpu) / 1000; > @@ -258,7 +258,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) >    Âif (!cpu_online(cpu)) >        Âreturn; > > -    cpus_allowed = current->cpus_allowed; > +    cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); >    Âset_cpus_allowed_ptr(current, cpumask_of(cpu)); > >    Ânew_freq = 
clock_tick = sparc64_get_clock_tick(cpu) / 1000; > diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c > index f35d1e7..9a8ceb7 100644 > --- a/arch/sparc/kernel/us3_cpufreq.c > +++ b/arch/sparc/kernel/us3_cpufreq.c > @@ -85,7 +85,7 @@ static unsigned int us3_freq_get(unsigned int cpu) >    Âif (!cpu_online(cpu)) >        Âreturn 0; > > -    cpus_allowed = current->cpus_allowed; > +    cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); >    Âset_cpus_allowed_ptr(current, cpumask_of(cpu)); > >    Âreg = read_safari_cfg(); > @@ -105,7 +105,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) >    Âif (!cpu_online(cpu)) >        Âreturn; > > -    cpus_allowed = current->cpus_allowed; > +    cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); >    Âset_cpus_allowed_ptr(current, cpumask_of(cpu)); > >    Ânew_freq = sparc64_get_clock_tick(cpu) / 1000; > diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c > index 2f6ae1d..e10cd03 100644 > --- a/arch/sparc/mm/init_64.c > +++ b/arch/sparc/mm/init_64.c > @@ -862,7 +862,7 @@ static void init_node_masks_nonnuma(void) >    Âfor (i = 0; i < NR_CPUS; i++) >        Ânuma_cpu_lookup_table[i] = 0; > > -    numa_cpumask_lookup_table[0] = CPU_MASK_ALL; > +    cpumask_setall(&numa_cpumask_lookup_table[0]); > Â} > > Â#ifdef CONFIG_NEED_MULTIPLE_NODES > @@ -1080,7 +1080,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, > Â{ >    Âu64 arc; > > -    cpus_clear(*mask); > +    cpumask_clear(mask); > >    Âmdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { >        Âu64 target = mdesc_arc_target(md, arc); > @@ -1091,7 +1091,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, >            Âcontinue; >        Âid = mdesc_get_property(md, target, "id", NULL); >        Âif (*id < nr_cpu_ids) > -            cpu_set(*id, *mask); > +            cpumask_set_cpu(*id, mask); >    Â} > Â} > > @@ -1153,13 +1153,13 @@ static 
int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
>
>     numa_parse_mdesc_group_cpus(md, grp, &mask);
>
> -    for_each_cpu_mask(cpu, mask)
> +    for_each_cpu(cpu, &mask)
>         numa_cpu_lookup_table[cpu] = index;
> -    numa_cpumask_lookup_table[index] = mask;
> +    cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
>
>     if (numa_debug) {
>         printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
> -        for_each_cpu_mask(cpu, mask)
> +        for_each_cpu(cpu, &mask)
>             printk("%d ", cpu);
>         printk("]\n");
>     }
> @@ -1218,7 +1218,7 @@ static int __init numa_parse_jbus(void)
>     index = 0;
>     for_each_present_cpu(cpu) {
>         numa_cpu_lookup_table[cpu] = index;
> -        numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
> +        cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
>         node_masks[index].mask = ~((1UL << 36UL) - 1UL);
>         node_masks[index].val = cpu << 36UL;
>
> --
> 1.7.3.1
>
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@xxxxxxxxxxxxxxx
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/