Hi Catalin,

On Wed, Dec 01, 2021 at 07:37:49PM +0000, Catalin Marinas wrote:
> With MTE, even if the pte allows an access, a mismatched tag somewhere
> within a page can still cause a fault. Select ARCH_HAS_SUBPAGE_FAULTS if
> MTE is enabled and implement the probe_subpage_*() functions. Note that
> get_user() is sufficient for the writeable checks since the same tag
> mismatch fault would be triggered by a read.
>
> Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
> ---
>  arch/arm64/Kconfig               |  1 +
>  arch/arm64/include/asm/uaccess.h | 59 ++++++++++++++++++++++++++++++++
>  2 files changed, 60 insertions(+)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index c4207cf9bb17..dff89fd0d817 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -1777,6 +1777,7 @@ config ARM64_MTE
>  	depends on AS_HAS_LSE_ATOMICS
>  	# Required for tag checking in the uaccess routines
>  	depends on ARM64_PAN
> +	select ARCH_HAS_SUBPAGE_FAULTS
>  	select ARCH_USES_HIGH_VMA_FLAGS
>  	help
>  	  Memory Tagging (part of the ARMv8.5 Extensions) provides
>
> diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
> index 6e2e0b7031ab..bcbd24b97917 100644
> --- a/arch/arm64/include/asm/uaccess.h
> +++ b/arch/arm64/include/asm/uaccess.h
> @@ -445,4 +445,63 @@ static inline int __copy_from_user_flushcache(void *dst, const void __user *src,
>  }
>  #endif
>
> +#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
> +
> +/*
> + * Return 0 on success, the number of bytes not accessed otherwise.
> + */
> +static inline size_t __mte_probe_user_range(const char __user *uaddr,
> +					    size_t size, bool skip_first)
> +{
> +	const char __user *end = uaddr + size;
> +	int err = 0;
> +	char val;
> +
> +	uaddr = PTR_ALIGN_DOWN(uaddr, MTE_GRANULE_SIZE);
> +	if (skip_first)
> +		uaddr += MTE_GRANULE_SIZE;

Do we need the skipping for a functional reason, or is that an
optimization?

From the comments in probe_subpage_writeable() and
probe_subpage_safe_writeable() I wasn't sure if the skipping was
because we *don't need to* check the first granule, or because we
*must not* check the first granule.

> +	while (uaddr < end) {
> +		/*
> +		 * A read is sufficient for MTE, the caller should have probed
> +		 * for the pte write permission if required.
> +		 */
> +		__raw_get_user(val, uaddr, err);
> +		if (err)
> +			return end - uaddr;
> +		uaddr += MTE_GRANULE_SIZE;
> +	}

I think we may need to account for the residue from PTR_ALIGN_DOWN(),
or we can report more bytes not copied than the `size` passed in to
begin with, which I think might confuse some callers.

Consider MTE_GRANULE_SIZE is 16, uaddr is 31, and size is 1 (so end is
32). We align uaddr down to 16, and if we fail the first access we
return (32 - 16), i.e. 16.
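Something like the below (a completely untested sketch on top of this
patch, using min_t() from <linux/minmax.h> to clamp the residue) would
avoid ever reporting more than `size`:

	static inline size_t __mte_probe_user_range(const char __user *uaddr,
						    size_t size, bool skip_first)
	{
		const char __user *end = uaddr + size;
		int err = 0;
		char val;

		uaddr = PTR_ALIGN_DOWN(uaddr, MTE_GRANULE_SIZE);
		if (skip_first)
			uaddr += MTE_GRANULE_SIZE;
		while (uaddr < end) {
			__raw_get_user(val, uaddr, err);
			if (err)
				/*
				 * uaddr may have been aligned below the
				 * original start, so clamp to `size`.
				 */
				return min_t(size_t, size, end - uaddr);
			uaddr += MTE_GRANULE_SIZE;
		}
		(void)val;

		return 0;
	}

With that, the example above would return 1 rather than 16.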
> +	(void)val;
> +
> +	return 0;
> +}
> +
> +static inline size_t probe_subpage_writeable(const void __user *uaddr,
> +					     size_t size)
> +{
> +	if (!system_supports_mte())
> +		return 0;
> +	/* first put_user() done in the caller */
> +	return __mte_probe_user_range(uaddr, size, true);
> +}
> +
> +static inline size_t probe_subpage_safe_writeable(const void __user *uaddr,
> +						  size_t size)
> +{
> +	if (!system_supports_mte())
> +		return 0;
> +	/* the caller used GUP, don't skip the first granule */
> +	return __mte_probe_user_range(uaddr, size, false);
> +}
> +
> +static inline size_t probe_subpage_readable(const void __user *uaddr,
> +					    size_t size)
> +{
> +	if (!system_supports_mte())
> +		return 0;
> +	/* first get_user() done in the caller */
> +	return __mte_probe_user_range(uaddr, size, true);
> +}
> +
> +#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
> +
>  #endif /* __ASM_UACCESS_H */
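FWIW, re the skip_first question above, my (possibly wrong) mental
model of the intended usage is something like the hypothetical caller
below, where the first granule has already been exercised by a regular
uaccess, so re-probing it would merely be redundant rather than
harmful:

	/* Hypothetical example, not from this series: */
	static size_t probe_for_write(char __user *uaddr, size_t size)
	{
		/* faults in (and tag-checks) the first granule */
		if (put_user(0, uaddr))
			return size;
		/* check the remaining granules for tag mismatches */
		return probe_subpage_writeable(uaddr, size);
	}

If that's right, a comment to that effect would be helpful.

Thanks,
Mark.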