Instead of relying on apply_to_page_range() being available to modules, move its use into core kernel code and export its application. Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx> --- drivers/xen/gntdev-common.h | 2 ++ drivers/xen/gntdev.c | 30 +----------------------------- drivers/xen/grant-table.c | 37 +++++++++++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 29 deletions(-) --- a/drivers/xen/gntdev-common.h +++ b/drivers/xen/gntdev-common.h @@ -86,4 +86,6 @@ bool gntdev_test_page_count(unsigned int int gntdev_map_grant_pages(struct gntdev_grant_map *map); +int gnttab_use_ptemod(struct vm_area_struct *vma, struct gntdev_grant_map *map); + #endif --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -262,32 +262,6 @@ void gntdev_put_map(struct gntdev_priv * /* ------------------------------------------------------------------ */ -static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) -{ - struct gntdev_grant_map *map = data; - unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; - int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; - u64 pte_maddr; - - BUG_ON(pgnr >= map->count); - pte_maddr = arbitrary_virt_to_machine(pte).maddr; - - /* - * Set the PTE as special to force get_user_pages_fast() fall - * back to the slow path. If this is not supported as part of - * the grant map, it will be done afterwards.
- */ - if (xen_feature(XENFEAT_gnttab_map_avail_bits)) - flags |= (1 << _GNTMAP_guest_avail0); - - gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, - map->grants[pgnr].ref, - map->grants[pgnr].domid); - gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags, - INVALID_GRANT_HANDLE); - return 0; -} - int gntdev_map_grant_pages(struct gntdev_grant_map *map) { int i, err = 0; @@ -1028,9 +1002,7 @@ static int gntdev_mmap(struct file *flip mmu_interval_read_begin(&map->notifier); map->pages_vm_start = vma->vm_start; - err = apply_to_page_range(vma->vm_mm, vma->vm_start, - vma->vm_end - vma->vm_start, - find_grant_ptes, map); + err = gnttab_use_ptemod(vma, map); if (err) { pr_warn("find_grant_ptes() failure.\n"); goto out_put_map; --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1591,6 +1591,43 @@ int gnttab_init(void) } EXPORT_SYMBOL_GPL(gnttab_init); +#include <xen/gntdev.h> +#include "gntdev-common.h" + +static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) +{ + struct gntdev_grant_map *map = data; + unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; + int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; + u64 pte_maddr; + + BUG_ON(pgnr >= map->count); + pte_maddr = arbitrary_virt_to_machine(pte).maddr; + + /* + * Set the PTE as special to force get_user_pages_fast() fall + * back to the slow path. If this is not supported as part of + * the grant map, it will be done afterwards.
+ */ + if (xen_feature(XENFEAT_gnttab_map_avail_bits)) + flags |= (1 << _GNTMAP_guest_avail0); + + gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, + map->grants[pgnr].ref, + map->grants[pgnr].domid); + gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags, + INVALID_GRANT_HANDLE); + return 0; +} + +int gnttab_use_ptemod(struct vm_area_struct *vma, struct gntdev_grant_map *map) +{ + return apply_to_page_range(vma->vm_mm, vma->vm_start, + vma->vm_end - vma->vm_start, + find_grant_ptes, map); +} +EXPORT_SYMBOL_GPL(gnttab_use_ptemod); + static int __gnttab_init(void) { if (!xen_domain()) _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx