On Thu, Dec 08, 2016 at 07:19:51PM +1100, Alexey Kardashevskiy wrote:
> This makes mm_iommu_lookup() able to work in realmode by replacing
> list_for_each_entry_rcu() (which can do debug stuff which can fail in
> real mode) with list_for_each_entry_lockless().
> 
> This adds realmode version of mm_iommu_ua_to_hpa() which adds
> explicit vmalloc'd-to-linear address conversion.
> Unlike mm_iommu_ua_to_hpa(), mm_iommu_ua_to_hpa_rm() can fail.
> 
> This changes mm_iommu_preregistered() to receive @mm as in real mode
> @current does not always have a correct pointer.
> 
> This adds realmode version of mm_iommu_lookup() which receives @mm
> (for the same reason as for mm_iommu_preregistered()) and uses
> lockless version of list_for_each_entry_rcu().
> 
> Signed-off-by: Alexey Kardashevskiy <aik@xxxxxxxxx>

Reviewed-by: David Gibson <david@xxxxxxxxxxxxxxxxxxxxx>

> ---
>  arch/powerpc/include/asm/mmu_context.h |  4 ++++
>  arch/powerpc/mm/mmu_context_iommu.c    | 39 ++++++++++++++++++++++++++++++++++
>  2 files changed, 43 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
> index b9e3f0aca261..c70c8272523d 100644
> --- a/arch/powerpc/include/asm/mmu_context.h
> +++ b/arch/powerpc/include/asm/mmu_context.h
> @@ -29,10 +29,14 @@ extern void mm_iommu_init(struct mm_struct *mm);
>  extern void mm_iommu_cleanup(struct mm_struct *mm);
>  extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
>  		unsigned long ua, unsigned long size);
> +extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
> +		struct mm_struct *mm, unsigned long ua, unsigned long size);
>  extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
>  		unsigned long ua, unsigned long entries);
>  extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
>  		unsigned long ua, unsigned long *hpa);
> +extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
> +		unsigned long ua, unsigned long *hpa);
>  extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
>  extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
>  #endif
> diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
> index 104bad029ce9..631d32f5937b 100644
> --- a/arch/powerpc/mm/mmu_context_iommu.c
> +++ b/arch/powerpc/mm/mmu_context_iommu.c
> @@ -314,6 +314,25 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
>  }
>  EXPORT_SYMBOL_GPL(mm_iommu_lookup);
>  
> +struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
> +		unsigned long ua, unsigned long size)
> +{
> +	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
> +
> +	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
> +			next) {
> +		if ((mem->ua <= ua) &&
> +				(ua + size <= mem->ua +
> +				(mem->entries << PAGE_SHIFT))) {
> +			ret = mem;
> +			break;
> +		}
> +	}
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
> +
>  struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
>  		unsigned long ua, unsigned long entries)
>  {
> @@ -345,6 +364,26 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
>  }
>  EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
>  
> +long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
> +		unsigned long ua, unsigned long *hpa)
> +{
> +	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
> +	void *va = &mem->hpas[entry];
> +	unsigned long *pa;
> +
> +	if (entry >= mem->entries)
> +		return -EFAULT;
> +
> +	pa = (void *) vmalloc_to_phys(va);
> +	if (!pa)
> +		return -EFAULT;
> +
> +	*hpa = *pa | (ua & ~PAGE_MASK);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
> +
>  long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
>  {
>  	if (atomic64_inc_not_zero(&mem->mapped))

-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/~dgibson
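
For reference, a minimal sketch of how a real-mode caller might chain the
two new helpers; the wrapper name below is hypothetical and not part of
this patch, and the actual callers appear elsewhere in the series:

/*
 * Hypothetical example (not from this patch): translate a preregistered
 * userspace address to a host physical address while in real mode,
 * using the two helpers added above.
 */
static long example_ua_to_hpa_rm(struct mm_struct *mm, unsigned long ua,
		unsigned long *hpa)
{
	struct mm_iommu_table_group_mem_t *mem;

	/* Lockless lookup of the preregistered region covering one page */
	mem = mm_iommu_lookup_rm(mm, ua, 1UL << PAGE_SHIFT);
	if (!mem)
		return -EFAULT;

	/* Unlike mm_iommu_ua_to_hpa(), the real-mode variant can fail */
	return mm_iommu_ua_to_hpa_rm(mem, ua, hpa);
}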