Access buffers in reads{w,l,q}/writes{w,l,q} via get_unaligned() and
put_unaligned() to protect against potential unaligned access traps.

Signed-off-by: Julian Vetter <julian@xxxxxxxxxxxxxxxx>
---
 include/asm-generic/io.h | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index a5cbbf3e26ec..920fecd512fc 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -12,6 +12,7 @@
 #include <linux/sizes.h>
 #include <linux/types.h>
 #include <linux/instruction_pointer.h>
+#include <linux/unaligned.h>
 
 #ifdef CONFIG_GENERIC_IOMAP
 #include <asm-generic/iomap.h>
@@ -422,7 +423,8 @@ static inline void readsw(const volatile void __iomem *addr, void *buffer,
 
 		do {
 			u16 x = __raw_readw(addr);
-			*buf++ = x;
+			put_unaligned(x, buf);
+			buf++;
 		} while (--count);
 	}
 }
@@ -438,7 +440,8 @@ static inline void readsl(const volatile void __iomem *addr, void *buffer,
 
 		do {
 			u32 x = __raw_readl(addr);
-			*buf++ = x;
+			put_unaligned(x, buf);
+			buf++;
 		} while (--count);
 	}
 }
@@ -455,7 +458,8 @@ static inline void readsq(const volatile void __iomem *addr, void *buffer,
 
 		do {
 			u64 x = __raw_readq(addr);
-			*buf++ = x;
+			put_unaligned(x, buf);
+			buf++;
 		} while (--count);
 	}
 }
@@ -486,7 +490,9 @@ static inline void writesw(volatile void __iomem *addr, const void *buffer,
 		const u16 *buf = buffer;
 
 		do {
-			__raw_writew(*buf++, addr);
+			u16 val = get_unaligned(buf);
+			__raw_writew(val, addr);
+			buf++;
 		} while (--count);
 	}
 }
@@ -501,7 +507,9 @@ static inline void writesl(volatile void __iomem *addr, const void *buffer,
 		const u32 *buf = buffer;
 
 		do {
-			__raw_writel(*buf++, addr);
+			u32 val = get_unaligned(buf);
+			__raw_writel(val, addr);
+			buf++;
 		} while (--count);
 	}
 }
@@ -517,7 +525,9 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
 		const u64 *buf = buffer;
 
 		do {
-			__raw_writeq(*buf++, addr);
+			u64 val = get_unaligned(buf);
+			__raw_writeq(val, addr);
+			buf++;
 		} while (--count);
 	}
 }
-- 
2.34.1
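
(Illustrative note, not part of the patch above.) The helpers the diff switches
to, get_unaligned() and put_unaligned() from <linux/unaligned.h>, avoid
alignment traps by performing the access in a way that carries no alignment
assumption (e.g. a byte-wise copy) instead of a plain pointer dereference.
Below is a minimal userspace sketch of that idea; put_unaligned_u16() and
get_unaligned_u16() are hypothetical memcpy-based stand-ins, not the actual
kernel implementation.

/*
 * Hypothetical userspace stand-ins for the kernel's put_unaligned()/
 * get_unaligned(); for illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline void put_unaligned_u16(uint16_t val, void *p)
{
	memcpy(p, &val, sizeof(val));	/* byte-wise store, no alignment assumption */
}

static inline uint16_t get_unaligned_u16(const void *p)
{
	uint16_t val;

	memcpy(&val, p, sizeof(val));	/* byte-wise load, no alignment assumption */
	return val;
}

int main(void)
{
	unsigned char raw[8] = { 0 };
	void *misaligned = raw + 1;	/* deliberately not 2-byte aligned */

	/* A plain "*(uint16_t *)misaligned = 0xabcd" may trap on strict-alignment CPUs. */
	put_unaligned_u16(0xabcd, misaligned);
	printf("read back: 0x%04x\n", get_unaligned_u16(misaligned));
	return 0;
}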