On Thu, Jan 10, 2019 at 02:09:36PM -0700, Khalid Aziz wrote:
> From: Juerg Haefliger <juerg.haefliger@xxxxxxxxxxxxx>
> 
> v6: * guard against lookup_xpfo() returning NULL
> 
> CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Signed-off-by: Juerg Haefliger <juerg.haefliger@xxxxxxxxxxxxx>
> Signed-off-by: Tycho Andersen <tycho@xxxxxxxxxx>
> Signed-off-by: Khalid Aziz <khalid.aziz@xxxxxxxxxx>

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>

> ---
>  include/linux/xpfo.h |  4 ++++
>  kernel/dma/swiotlb.c |  3 ++-
>  mm/xpfo.c            | 15 +++++++++++++++
>  3 files changed, 21 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/xpfo.h b/include/linux/xpfo.h
> index a39259ce0174..e38b823f44e3 100644
> --- a/include/linux/xpfo.h
> +++ b/include/linux/xpfo.h
> @@ -35,6 +35,8 @@ void xpfo_kunmap(void *kaddr, struct page *page);
>  void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp);
>  void xpfo_free_pages(struct page *page, int order);
>  
> +bool xpfo_page_is_unmapped(struct page *page);
> +
>  #else /* !CONFIG_XPFO */
>  
>  static inline void xpfo_kmap(void *kaddr, struct page *page) { }
> @@ -42,6 +44,8 @@ static inline void xpfo_kunmap(void *kaddr, struct page *page) { }
>  static inline void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp) { }
>  static inline void xpfo_free_pages(struct page *page, int order) { }
>  
> +static inline bool xpfo_page_is_unmapped(struct page *page) { return false; }
> +
>  #endif /* CONFIG_XPFO */
>  
>  #endif /* _LINUX_XPFO_H */
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 045930e32c0e..820a54b57491 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -396,8 +396,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
>  {
>  	unsigned long pfn = PFN_DOWN(orig_addr);
>  	unsigned char *vaddr = phys_to_virt(tlb_addr);
> +	struct page *page = pfn_to_page(pfn);
>  
> -	if (PageHighMem(pfn_to_page(pfn))) {
> +	if (PageHighMem(page) || xpfo_page_is_unmapped(page)) {
>  		/* The buffer does not have a mapping. Map it in and copy */
>  		unsigned int offset = orig_addr & ~PAGE_MASK;
>  		char *buffer;
> diff --git a/mm/xpfo.c b/mm/xpfo.c
> index bff24afcaa2e..cdbcbac582d5 100644
> --- a/mm/xpfo.c
> +++ b/mm/xpfo.c
> @@ -220,3 +220,18 @@ void xpfo_kunmap(void *kaddr, struct page *page)
>  	spin_unlock(&xpfo->maplock);
>  }
>  EXPORT_SYMBOL(xpfo_kunmap);
> +
> +bool xpfo_page_is_unmapped(struct page *page)
> +{
> +	struct xpfo *xpfo;
> +
> +	if (!static_branch_unlikely(&xpfo_inited))
> +		return false;
> +
> +	xpfo = lookup_xpfo(page);
> +	if (unlikely(!xpfo) || !xpfo->inited)
> +		return false;
> +
> +	return test_bit(XPFO_PAGE_UNMAPPED, &xpfo->flags);
> +}
> +EXPORT_SYMBOL(xpfo_page_is_unmapped);
> -- 
> 2.17.1
> 
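
For context on why swiotlb_bounce() needs the new check: once XPFO drops a user
page from the kernel's linear mapping, phys_to_virt()/page_address() for that
page no longer point at a live mapping, so the bounce copy has to go through a
temporary kernel mapping, exactly as the existing PageHighMem() path already
does. Below is a minimal sketch of that pattern; copy_to_bounce_buffer() is a
made-up helper for illustration, while xpfo_page_is_unmapped(), PageHighMem(),
kmap_atomic()/kunmap_atomic() and page_address() are the real APIs the patch
relies on.

/*
 * Illustrative only: copy_to_bounce_buffer() is not part of the patch.
 * It shows the "no usable linear-map address" pattern the swiotlb hunk
 * folds into its existing highmem branch.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/xpfo.h>

static void copy_to_bounce_buffer(struct page *page, unsigned int offset,
				  void *bounce, size_t len)
{
	if (PageHighMem(page) || xpfo_page_is_unmapped(page)) {
		/* No usable linear-map address: map the page in temporarily. */
		char *buffer = kmap_atomic(page);

		memcpy(bounce, buffer + offset, len);
		kunmap_atomic(buffer);
	} else {
		/* Page is present in the linear map: copy directly. */
		memcpy(bounce, (char *)page_address(page) + offset, len);
	}
}

Note that the patch does not add a separate branch for the XPFO case; it simply
extends the existing highmem condition, which is why the swiotlb diff only
touches the if () test and adds a local struct page pointer.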