[PATCH V10 18/19] locking/qspinlock: Move pv_ops into x86 directory

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>

The pv_ops belongs to the x86 custom paravirt infrastructure, so move it
into the x86 directory and clean up cna_configure_spin_lock_slowpath()
with standard code. This is preparation for riscv to support the CNA
qspinlock.

Signed-off-by: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
Signed-off-by: Guo Ren <guoren@xxxxxxxxxx>
---
 arch/x86/include/asm/qspinlock.h |  3 ++-
 arch/x86/kernel/alternative.c    |  6 +++++-
 kernel/locking/qspinlock_cna.h   | 14 ++++++--------
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index f48a2a250e57..100adad70bf5 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -28,7 +28,8 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo
 }
 
 #ifdef CONFIG_NUMA_AWARE_SPINLOCKS
-extern void cna_configure_spin_lock_slowpath(void);
+extern bool cna_configure_spin_lock_slowpath(void);
+extern void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 #endif
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c36df5aa3ab1..68b7392016c3 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1538,7 +1538,11 @@ void __init alternative_instructions(void)
 	paravirt_set_cap();
 
 #if defined(CONFIG_NUMA_AWARE_SPINLOCKS)
-	cna_configure_spin_lock_slowpath();
+	if (pv_ops.lock.queued_spin_lock_slowpath == native_queued_spin_lock_slowpath) {
+		if (cna_configure_spin_lock_slowpath())
+			pv_ops.lock.queued_spin_lock_slowpath =
+							__cna_queued_spin_lock_slowpath;
+	}
 #endif
 
 	/*
diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
index 17d56c739e57..5e297dc687d9 100644
--- a/kernel/locking/qspinlock_cna.h
+++ b/kernel/locking/qspinlock_cna.h
@@ -406,20 +406,18 @@ void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  * multiple NUMA nodes in native environment, unless the user has
  * overridden this default behavior by setting the numa_spinlock flag.
  */
-void __init cna_configure_spin_lock_slowpath(void)
+bool __init cna_configure_spin_lock_slowpath(void)
 {
 
 	if (numa_spinlock_flag < 0)
-		return;
+		return false;
 
-	if (numa_spinlock_flag == 0 && (nr_node_ids < 2 ||
-		    pv_ops.lock.queued_spin_lock_slowpath !=
-			native_queued_spin_lock_slowpath))
-		return;
+	if (numa_spinlock_flag == 0 && nr_node_ids < 2)
+		return false;
 
 	cna_init_nodes();
 
-	pv_ops.lock.queued_spin_lock_slowpath = __cna_queued_spin_lock_slowpath;
-
 	pr_info("Enabling CNA spinlock\n");
+
+	return true;
 }
-- 
2.36.1




[Index of Archives]     [Kernel Newbies]     [Security]     [Netfilter]     [Bugtraq]     [Linux FS]     [Yosemite Forum]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Samba]     [Video 4 Linux]     [Device Mapper]     [Linux Resources]

  Powered by Linux