As with one of the earlier patches in the series, we're forced to cast for
copy_[to|from]_user. Again, because of GEN's x86 exclusivity, this should be
safe.

Signed-off-by: Ben Widawsky <benjamin.widawsky at intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9415c07..96527db 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -266,8 +266,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
 }
 
 static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
-			  const char *cpu_vaddr,
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+			  const char __user *cpu_vaddr,
 			  int length)
 {
 	int ret, cpu_offset = 0;
@@ -542,12 +542,13 @@ fast_user_write(struct io_mapping *mapping,
 		char __user *user_data,
 		int length)
 {
-	char *vaddr_atomic;
+	void __iomem *vaddr_atomic;
 	unsigned long unwritten;
 
 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
-	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
-						      user_data, length);
+	unwritten = __copy_from_user_inatomic_nocache(
+		(void __force*)vaddr_atomic + page_offset,
+		user_data, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	return unwritten;
 }
-- 
1.7.10
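
For anyone reading along who hasn't dealt with sparse's address-space
checking, here is a minimal, self-contained sketch of the pattern the second
hunk relies on. The names (demo_copy, demo_write) and the annotation macros
are illustrative stand-ins, not the i915 code; in the kernel the annotations
come from the compiler headers and the real copy helper is
__copy_from_user_inatomic_nocache().

/* Sparse-style annotations; they compile away in a normal gcc build. */
#ifdef __CHECKER__
#define __user   __attribute__((noderef, address_space(1)))
#define __iomem  __attribute__((noderef, address_space(2)))
#define __force  __attribute__((force))
#else
#define __user
#define __iomem
#define __force
#endif

/* Stand-in for a copy routine that expects a plain kernel pointer. */
static unsigned long demo_copy(void *dst, const void __user *src,
			       unsigned long len)
{
	(void)dst; (void)src; (void)len;	/* no real copy in this sketch */
	return 0;
}

static unsigned long demo_write(void __iomem *vaddr, unsigned long offset,
				const char __user *user_data,
				unsigned long len)
{
	/*
	 * vaddr carries the __iomem address space, but demo_copy() wants a
	 * plain pointer.  The __force cast tells sparse the address-space
	 * change is deliberate; the commit message's argument is that this
	 * is safe because GEN only exists on x86, where the WC mapping can
	 * be treated as ordinary memory for this copy.
	 */
	return demo_copy((void __force *)vaddr + offset, user_data, len);
}

Running sparse over a file like this with and without the __force cast shows
the difference: without it, sparse complains about mixing address spaces;
with it, the warning goes away while a plain gcc build is unaffected.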