This checks that the TCE table page size is not bigger than the size of a page we just pinned and are going to put its physical address into the table. Otherwise the hardware gets unwanted access to physical memory between the end of the actual page and the end of the aligned up TCE page. Signed-off-by: Alexey Kardashevskiy <aik@xxxxxxxxx> --- arch/powerpc/kernel/iommu.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index a10642a..b378f78 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -38,6 +38,7 @@ #include <linux/pci.h> #include <linux/iommu.h> #include <linux/sched.h> +#include <linux/hugetlb.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/iommu.h> @@ -1059,16 +1060,37 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry, tce, entry << tbl->it_page_shift, ret); */ return -EFAULT; } + + /* + * Check that the TCE table granularity is not bigger than the size of + * a page we just found. Otherwise the hardware can get access to + * a bigger memory chunk than it should. 
+ */ + if (PageHuge(page)) { + struct page *head = compound_head(page); + long shift = PAGE_SHIFT + compound_order(head); + + if (shift < tbl->it_page_shift) { + ret = -EINVAL; + goto put_page_exit; + } + + } + hwaddr = (unsigned long) page_address(page) + offset; ret = iommu_tce_build(tbl, entry, hwaddr, direction); if (ret) - put_page(page); + goto put_page_exit; - if (ret < 0) - pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n", + return 0; + +put_page_exit: + pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n", __func__, entry << tbl->it_page_shift, tce, ret); + put_page(page); + return ret; } EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode); -- 2.0.0 -- To unsubscribe from this list: send the line "unsubscribe linux-api" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html