Currently, as a result of templating from the x86 code, gcs_alloc_thread_stack()
returns a pointer as an unsigned long; however, on arm64 we don't actually use
this pointer value as anything other than a pass/fail flag. Simplify the
interface to just return an int, with 0 on success and a negative error code
on failure.

Acked-by: Deepak Gupta <debug@xxxxxxxxxxxx>
Signed-off-by: Mark Brown <broonie@xxxxxxxxxx>
---
 arch/arm64/include/asm/gcs.h | 8 ++++----
 arch/arm64/kernel/process.c  | 8 ++++----
 arch/arm64/mm/gcs.c          | 8 ++++----
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/include/asm/gcs.h b/arch/arm64/include/asm/gcs.h
index f50660603ecf5dc09a92740062df3a089b02b219..d8923b5f03b776252aca76ce316ef57399d71fa9 100644
--- a/arch/arm64/include/asm/gcs.h
+++ b/arch/arm64/include/asm/gcs.h
@@ -64,8 +64,8 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
 void gcs_set_el0_mode(struct task_struct *task);
 void gcs_free(struct task_struct *task);
 void gcs_preserve_current_state(void);
-unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
-				     const struct kernel_clone_args *args);
+int gcs_alloc_thread_stack(struct task_struct *tsk,
+			   const struct kernel_clone_args *args);
 
 static inline int gcs_check_locked(struct task_struct *task,
 				   unsigned long new_val)
@@ -91,8 +91,8 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
 static inline void gcs_set_el0_mode(struct task_struct *task) { }
 static inline void gcs_free(struct task_struct *task) { }
 static inline void gcs_preserve_current_state(void) { }
-static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
-						   const struct kernel_clone_args *args)
+static inline int gcs_alloc_thread_stack(struct task_struct *tsk,
+					 const struct kernel_clone_args *args)
 {
 	return -ENOTSUPP;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fdd095480c3ffb8c13fd4e7c9abc79e88143e08b..8ebd11c29792524dfeeade9cc7826b007329aa6a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -297,7 +297,7 @@ static void flush_gcs(void)
 static int copy_thread_gcs(struct task_struct *p,
 			   const struct kernel_clone_args *args)
 {
-	unsigned long gcs;
+	int ret;
 
 	if (!system_supports_gcs())
 		return 0;
@@ -305,9 +305,9 @@ static int copy_thread_gcs(struct task_struct *p,
 	p->thread.gcs_base = 0;
 	p->thread.gcs_size = 0;
 
-	gcs = gcs_alloc_thread_stack(p, args);
-	if (IS_ERR_VALUE(gcs))
-		return PTR_ERR((void *)gcs);
+	ret = gcs_alloc_thread_stack(p, args);
+	if (ret != 0)
+		return ret;
 
 	p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
 	p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
diff --git a/arch/arm64/mm/gcs.c b/arch/arm64/mm/gcs.c
index 5c46ec527b1cdaa8f52cff445d70ba0f8509d086..1f633a482558b59aac5427963d42b37fce08c8a6 100644
--- a/arch/arm64/mm/gcs.c
+++ b/arch/arm64/mm/gcs.c
@@ -38,8 +38,8 @@ static unsigned long gcs_size(unsigned long size)
 	return max(PAGE_SIZE, size);
 }
 
-unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
-				     const struct kernel_clone_args *args)
+int gcs_alloc_thread_stack(struct task_struct *tsk,
+			   const struct kernel_clone_args *args)
 {
 	unsigned long addr, size;
 
@@ -59,13 +59,13 @@ unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
 	size = gcs_size(size);
 	addr = alloc_gcs(0, size);
 	if (IS_ERR_VALUE(addr))
-		return addr;
+		return PTR_ERR((void *)addr);
 
 	tsk->thread.gcs_base = addr;
 	tsk->thread.gcs_size = size;
 	tsk->thread.gcspr_el0 = addr + size - sizeof(u64);
 
-	return addr;
+	return 0;
 }
 
 SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)

-- 
2.39.2
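
For illustration only, a minimal standalone sketch of the convention the patch
adopts (0 on success, a negative error code on failure). The names here
(demo_alloc_stack(), demo_copy_thread()) are hypothetical stand-ins, not the
in-kernel GCS API:

#include <errno.h>
#include <stdio.h>

/* Stand-in for gcs_alloc_thread_stack(): report success or failure
 * directly instead of encoding the allocated address in the return
 * value. */
static int demo_alloc_stack(int should_fail)
{
	if (should_fail)
		return -ENOMEM;
	return 0;
}

/* Stand-in caller mirroring the copy_thread_gcs() change: a plain
 * integer check replaces the IS_ERR_VALUE()/PTR_ERR() handling. */
static int demo_copy_thread(int should_fail)
{
	int ret;

	ret = demo_alloc_stack(should_fail);
	if (ret != 0)
		return ret;

	return 0;
}

int main(void)
{
	printf("success path: %d\n", demo_copy_thread(0));
	printf("failure path: %d\n", demo_copy_thread(1));
	return 0;
}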