The 07/16/2023 22:51, Mark Brown wrote: > +SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags) > +{ > + unsigned long aligned_size; > + unsigned long __user *cap_ptr; > + unsigned long cap_val; > + int ret; > + > + if (!system_supports_gcs()) > + return -EOPNOTSUPP; > + > + if (flags) > + return -EINVAL; > + > + /* > + * An overflow would result in attempting to write the restore token > + * to the wrong location. Not catastrophic, but just return the right > + * error code and block it. > + */ > + aligned_size = PAGE_ALIGN(size); > + if (aligned_size < size) > + return -EOVERFLOW; > + > + addr = alloc_gcs(addr, aligned_size, 0, false); > + if (IS_ERR_VALUE(addr)) > + return addr; > + > + /* > + * Put a cap token at the end of the allocated region so it > + * can be switched to. > + */ > + cap_ptr = (unsigned long __user *)(addr + aligned_size - > + (2 * sizeof(unsigned long))); > + cap_val = GCS_CAP(cap_ptr); > + > + ret = copy_to_user_gcs(cap_ptr, &cap_val, 1); With uint64_t *p = map_shadow_stack(0, N*8, 0); I'd expect p[N-1] to be the end token and p[N-2] to be the cap token, not p[PAGE_ALIGN(N*8)/8-2]. If we allow a misaligned size here (and in munmap), then I think it's better not to page-align and to place the tokens relative to the requested size instead. Alternatively, rejecting size % 8 != 0 || size < 16 with an error would also work. > + if (ret != 0) { > + vm_munmap(addr, size); > + return -EFAULT; > + } > + > + return addr; > +}