On Thu, Sep 1, 2022 at 2:08 AM Jisheng Zhang <jszhang@xxxxxxxxxx> wrote:
>
> Implement the lazy preempt for riscv.
>
> Signed-off-by: Jisheng Zhang <jszhang@xxxxxxxxxx>
> ---
>  arch/riscv/Kconfig                   | 1 +
>  arch/riscv/include/asm/thread_info.h | 7 +++++--
>  arch/riscv/kernel/asm-offsets.c      | 1 +
>  arch/riscv/kernel/entry.S            | 9 +++++++--
>  4 files changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 7a8134fd7ec9..9f2f1936b1b5 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -105,6 +105,7 @@ config RISCV
>          select HAVE_PERF_REGS
>          select HAVE_PERF_USER_STACK_DUMP
>          select HAVE_POSIX_CPU_TIMERS_TASK_WORK
> +        select HAVE_PREEMPT_LAZY
>          select HAVE_REGS_AND_STACK_ACCESS_API
>          select HAVE_FUNCTION_ARG_ACCESS_API
>          select HAVE_STACKPROTECTOR
> diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
> index 78933ac04995..471915b179a2 100644
> --- a/arch/riscv/include/asm/thread_info.h
> +++ b/arch/riscv/include/asm/thread_info.h
> @@ -56,6 +56,7 @@
>  struct thread_info {
>          unsigned long   flags;          /* low level flags */
>          int             preempt_count;  /* 0=>preemptible, <0=>BUG */
> +        int             preempt_lazy_count;  /* 0=>preemptible, <0=>BUG */
>          /*
>           * These stack pointers are overwritten on every system call or
>           * exception. SP is also saved to the stack it can be recovered when
> @@ -90,7 +91,7 @@ struct thread_info {
>  #define TIF_NOTIFY_RESUME       1  /* callback before returning to user */
>  #define TIF_SIGPENDING          2  /* signal pending */
>  #define TIF_NEED_RESCHED        3  /* rescheduling necessary */
> -#define TIF_RESTORE_SIGMASK     4  /* restore signal mask in do_signal() */
> +#define TIF_NEED_RESCHED_LAZY   4  /* lazy rescheduling */
>  #define TIF_MEMDIE              5  /* is terminating due to OOM killer */
>  #define TIF_SYSCALL_TRACEPOINT  6  /* syscall tracepoint instrumentation */
>  #define TIF_SYSCALL_AUDIT       7  /* syscall auditing */
> @@ -98,6 +99,7 @@ struct thread_info {
>  #define TIF_NOTIFY_SIGNAL       9  /* signal notifications exist */
>  #define TIF_UPROBE              10 /* uprobe breakpoint or singlestep */
>  #define TIF_32BIT               11 /* compat-mode 32bit process */
> +#define TIF_RESTORE_SIGMASK     12 /* restore signal mask in do_signal() */
>
>  #define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
>  #define _TIF_NOTIFY_RESUME      (1 << TIF_NOTIFY_RESUME)
> @@ -108,10 +110,11 @@ struct thread_info {
>  #define _TIF_SECCOMP            (1 << TIF_SECCOMP)
>  #define _TIF_NOTIFY_SIGNAL      (1 << TIF_NOTIFY_SIGNAL)
>  #define _TIF_UPROBE             (1 << TIF_UPROBE)
> +#define _TIF_NEED_RESCHED_LAZY  (1 << TIF_NEED_RESCHED_LAZY)
>
>  #define _TIF_WORK_MASK \
>          (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
> -         _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
> +         _TIF_NEED_RESCHED_LAZY | _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
>
>  #define _TIF_SYSCALL_WORK \
>          (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
> diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
> index df9444397908..e38e33822f72 100644
> --- a/arch/riscv/kernel/asm-offsets.c
> +++ b/arch/riscv/kernel/asm-offsets.c
> @@ -35,6 +35,7 @@ void asm_offsets(void)
>          OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
>          OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
>          OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
> +        OFFSET(TASK_TI_PREEMPT_LAZY_COUNT, task_struct, thread_info.preempt_lazy_count);
>          OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
>          OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
>
> diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> index b9eda3fcbd6d..595100a4c2c7 100644
> --- a/arch/riscv/kernel/entry.S
> +++ b/arch/riscv/kernel/entry.S
> @@ -361,9 +361,14 @@ restore_all:
>  resume_kernel:
>          REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
>          bnez s0, restore_all
> -        REG_L s0, TASK_TI_FLAGS(tp)
> -        andi s0, s0, _TIF_NEED_RESCHED
> +        REG_L s1, TASK_TI_FLAGS(tp)
> +        andi s0, s1, _TIF_NEED_RESCHED
> +        bnez s0, 1f
> +        REG_L s0, TASK_TI_PREEMPT_LAZY_COUNT(tp)
> +        bnez s0, restore_all
> +        andi s0, s1, _TIF_NEED_RESCHED_LAZY

Can you tell me who increments/decrements PREEMPT_LAZY_COUNT, and who sets
NEED_RESCHED_LAZY? (My guess at the generic side is sketched at the end of
this mail.)

>          beqz s0, restore_all
> +1:
>          call preempt_schedule_irq
>          j restore_all
>  #endif
> --
> 2.34.1
>
>
> --
> kvm-riscv mailing list
> kvm-riscv@xxxxxxxxxxxxxxxxxxx
> http://lists.infradead.org/mailman/listinfo/kvm-riscv
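My understanding, based on the lazy-preempt support carried in the PREEMPT_RT
tree (so this is an assumption about generic code that is not part of this
patch), is that the arch only provides the thread_info field, the TIF bit and
the entry.S check, while common code does the counting and the flag setting,
roughly along these lines:

/*
 * Sketch of the generic side as carried in the RT patchset, from memory --
 * helper names and details may not match exactly:
 */

/* include/linux/preempt.h: sections that must not be lazily preempted
 * bump the per-thread lazy count */
#define preempt_lazy_disable() \
do { \
        current_thread_info()->preempt_lazy_count++; \
        barrier(); \
} while (0)

#define preempt_lazy_enable() \
do { \
        current_thread_info()->preempt_lazy_count--; \
        barrier(); \
        preempt_check_resched(); \
} while (0)

/* kernel/sched/core.c: for SCHED_OTHER tasks the scheduler requests a
 * "lazy" reschedule instead of forcing TIF_NEED_RESCHED immediately */
void resched_curr_lazy(struct rq *rq)
{
        if (!sched_feat(PREEMPT_LAZY)) {
                resched_curr(rq);
                return;
        }

        if (test_tsk_need_resched_lazy(rq->curr))
                return;

        set_tsk_need_resched_lazy(rq->curr);
}

Please correct me if the plan for riscv is different, i.e. if something on
the riscv side is expected to manage the count or set the flag itself.

--
Best Regards
 Guo Ren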