Re: [RFC PATCH v1 15/28] riscv/mm: Implement map_shadow_stack() syscall

On Wed, Jan 24, 2024 at 10:21:40PM -0800, debug@xxxxxxxxxxxx wrote:
> From: Deepak Gupta <debug@xxxxxxxxxxxx>
> 
> As discussed extensively in the changelog for the addition of this
> syscall on x86 ("x86/shstk: Introduce map_shadow_stack syscall") the
> existing mmap() and madvise() syscalls do not map entirely well onto the
> security requirements for guarded control stacks since they lead to
> windows where memory is allocated but not yet protected or stacks which
> are not properly and safely initialised. Instead a new syscall
> map_shadow_stack() has been defined which allocates and initialises a
> shadow stack page.
> 
> This patch implements this syscall for riscv. riscv doesn't require the
> token to be set up by the kernel because user mode can do that by itself.
> However, to provide compatibility and portability with other architectures,
> user mode can specify the token set flag.
> 
> Signed-off-by: Deepak Gupta <debug@xxxxxxxxxxxx>
> ---
>  arch/riscv/kernel/Makefile      |   2 +
>  arch/riscv/kernel/usercfi.c     | 150 ++++++++++++++++++++++++++++++++
>  include/uapi/asm-generic/mman.h |   1 +
>  3 files changed, 153 insertions(+)
>  create mode 100644 arch/riscv/kernel/usercfi.c
> 
> diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
> index fee22a3d1b53..8c668269e886 100644
> --- a/arch/riscv/kernel/Makefile
> +++ b/arch/riscv/kernel/Makefile
> @@ -102,3 +102,5 @@ obj-$(CONFIG_COMPAT)		+= compat_vdso/
>  
>  obj-$(CONFIG_64BIT)		+= pi/
>  obj-$(CONFIG_ACPI)		+= acpi.o
> +
> +obj-$(CONFIG_RISCV_USER_CFI) += usercfi.o
> diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c
> new file mode 100644
> index 000000000000..35ede2cbc05b
> --- /dev/null
> +++ b/arch/riscv/kernel/usercfi.c
> @@ -0,0 +1,150 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2023 Rivos, Inc.
Nit: Should be updated to 2024
> + * Deepak Gupta <debug@xxxxxxxxxxxx>
> + */
> +
> +#include <linux/sched.h>
> +#include <linux/bitops.h>
> +#include <linux/types.h>
> +#include <linux/mm.h>
> +#include <linux/mman.h>
> +#include <linux/uaccess.h>
> +#include <linux/sizes.h>
> +#include <linux/user.h>
> +#include <linux/syscalls.h>
> +#include <linux/prctl.h>
> +#include <asm/csr.h>
> +#include <asm/usercfi.h>
> +
> +#define SHSTK_ENTRY_SIZE sizeof(void *)
> +
> +/*
> + * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen
> + * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to
> + * shadow stack. To keep it simple, we plan to use `ssamoswap` to perform writes on shadow
> + * stack.
> + */
> +static noinline unsigned long amo_user_shstk(unsigned long *addr, unsigned long val)
> +{
> +	/*
> +	 * In case ssamoswap faults, return -1.
> +	 * Never expect -1 on shadow stack. Expect return addresses and zero
> +	 */
> +	unsigned long swap = -1;
> +
> +	__enable_user_access();
> +	asm_volatile_goto(
> +				".option push\n"
> +				".option arch, +zicfiss\n"
> +#ifdef CONFIG_64BIT
> +				"1: ssamoswap.d %0, %2, %1\n"
> +#else
> +				"1: ssamoswap.w %0, %2, %1\n"

An SSAMOSWAP macro that conditionally defines this would be cleaner.
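
For example, something along these lines (untested sketch, SSAMOSWAP is
just a placeholder name, not something defined anywhere yet):

/*
 * Pick the XLEN-sized ssamoswap spelling once so the inline asm body
 * stays the same for rv32 and rv64.
 */
#ifdef CONFIG_64BIT
#define SSAMOSWAP	"ssamoswap.d"
#else
#define SSAMOSWAP	"ssamoswap.w"
#endif

and then the instruction line in amo_user_shstk() becomes:

	"1: " SSAMOSWAP " %0, %2, %1\n"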

> +#endif
> +				_ASM_EXTABLE(1b, %l[fault])
> +				RISCV_ACQUIRE_BARRIER
> +				".option pop\n"
> +				: "=r" (swap), "+A" (*addr)

I just ran into this on one of my patches: not every compiler supports
output operands in asm goto blocks. You need to guard this with the
kconfig option CC_HAS_ASM_GOTO_TIED_OUTPUT. Unfortunately, that means
this code needs two versions, or you can choose to gate CFI behind this
option; it is supported by recent versions of GCC/Clang.
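
If you go the gating route, that is just an extra dependency on the CFI
Kconfig symbol, roughly like below (the real RISCV_USER_CFI entry in this
series will look different, this only illustrates the added line):

config RISCV_USER_CFI
	...
	# amo_user_shstk() uses a tied output operand ("+A" (*addr)) in an
	# asm goto block, which needs toolchain support.
	depends on CC_HAS_ASM_GOTO_TIED_OUTPUT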

For readability it is also nice to use labels for the asm operands, so
that `"=r" (swap)` becomes `[swap] "=r" (swap)` and %0 can then be
written as %[swap].
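
i.e. roughly (untested, only the 64-bit spelling shown):

	asm_volatile_goto(
				".option push\n"
				".option arch, +zicfiss\n"
				/* rd = old value, rs2 = new value, rs1 = address */
				"1: ssamoswap.d %[swap], %[val], %[addr]\n"
				_ASM_EXTABLE(1b, %l[fault])
				RISCV_ACQUIRE_BARRIER
				".option pop\n"
				: [swap] "=r" (swap), [addr] "+A" (*addr)
				: [val] "r" (val)
				: "memory"
				: fault
			);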

- Charlie

> +				: "r" (val)
> +				: "memory"
> +				: fault
> +			);
> +	__disable_user_access();
> +	return swap;
> +fault:
> +	__disable_user_access();
> +	return -1;
> +}
> +
> +/*
> + * Create a restore token on the shadow stack.  A token is always XLEN wide
> + * and aligned to XLEN.
> + */
> +static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
> +{
> +	unsigned long addr;
> +
> +	/* Token must be aligned */
> +	if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE))
> +		return -EINVAL;
> +
> +	/* On RISC-V we're constructing token to be function of address itself */
> +	addr = ssp - SHSTK_ENTRY_SIZE;
> +
> +	if (amo_user_shstk((unsigned long __user *)addr, (unsigned long) ssp) == -1)
> +		return -EFAULT;
> +
> +	if (token_addr)
> +		*token_addr = addr;
> +
> +	return 0;
> +}
> +
> +static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
> +				unsigned long token_offset,
> +				bool set_tok)
> +{
> +	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
> +	struct mm_struct *mm = current->mm;
> +	unsigned long populate, tok_loc = 0;
> +
> +	if (addr)
> +		flags |= MAP_FIXED_NOREPLACE;
> +
> +	mmap_write_lock(mm);
> +	addr = do_mmap(NULL, addr, size, PROT_SHADOWSTACK, flags,
> +				VM_SHADOW_STACK, 0, &populate, NULL);
> +	mmap_write_unlock(mm);
> +
> +	if (!set_tok || IS_ERR_VALUE(addr))
> +		goto out;
> +
> +	if (create_rstor_token(addr + token_offset, &tok_loc)) {
> +		vm_munmap(addr, size);
> +		return -EINVAL;
> +	}
> +
> +	addr = tok_loc;
> +
> +out:
> +	return addr;
> +}
> +
> +SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
> +{
> +	bool set_tok = flags & SHADOW_STACK_SET_TOKEN;
> +	unsigned long aligned_size = 0;
> +
> +	if (!cpu_supports_shadow_stack())
> +		return -EOPNOTSUPP;
> +
> +	/* Anything other than set token should result in invalid param */
> +	if (flags & ~SHADOW_STACK_SET_TOKEN)
> +		return -EINVAL;
> +
> +	/*
> +	 * Unlike other architectures, on RISC-V the shadow stack pointer is held in CSR_SSP, a CSR
> +	 * that is accessible in all modes. CSR accesses use a 12-bit index encoded in the
> +	 * instruction itself, which gives CSR writes a static property: a write to CSR_SSP cannot
> +	 * be unintentional from the programmer's perspective. As long as the areas which write to
> +	 * CSR_SSP are properly guarded, shadow stack pivoting is not possible. Since CSR_SSP is
> +	 * writeable by user mode, user mode can set up a shadow stack token itself after the
> +	 * allocation. However, to provide portability with other architectures (because
> +	 * `map_shadow_stack` is an arch-agnostic syscall), RISC-V follows the convention of a
> +	 * token flag in flags and, if it is provided, sets up a token at the base.
> +	 */
> +
> +	/* If there isn't space for a token */
> +	if (set_tok && size < SHSTK_ENTRY_SIZE)
> +		return -ENOSPC;
> +
> +	if (addr && (addr % PAGE_SIZE))
> +		return -EINVAL;
> +
> +	aligned_size = PAGE_ALIGN(size);
> +	if (aligned_size < size)
> +		return -EOVERFLOW;
> +
> +	return allocate_shadow_stack(addr, aligned_size, size, set_tok);
> +}
> diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
> index 57e8195d0b53..0c0ac6214de6 100644
> --- a/include/uapi/asm-generic/mman.h
> +++ b/include/uapi/asm-generic/mman.h
> @@ -19,4 +19,5 @@
>  #define MCL_FUTURE	2		/* lock all future mappings */
>  #define MCL_ONFAULT	4		/* lock all pages that are faulted in */
>  
> +#define SHADOW_STACK_SET_TOKEN (1ULL << 0)     /* Set up a restore token in the shadow stack */
>  #endif /* __ASM_GENERIC_MMAN_H */
> -- 
> 2.43.0
> 



