[PATCH 12/18] arm64: implement spinlocks

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



We put this off, as it wasn't necessary without SMP. Now it
is. We only need to do this for arm64, as it has already been
done for arm.

Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
 config/config-arm64.mak  |  1 +
 lib/arm64/asm/spinlock.h |  8 ++------
 lib/arm64/spinlock.c     | 43 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+), 6 deletions(-)
 create mode 100644 lib/arm64/spinlock.c

diff --git a/config/config-arm64.mak b/config/config-arm64.mak
index 5f8550eb511e8..d61b703c8140e 100644
--- a/config/config-arm64.mak
+++ b/config/config-arm64.mak
@@ -9,6 +9,7 @@ kernel_offset = 0x80000
 
 cstart.o = $(TEST_DIR)/cstart64.o
 cflatobjs += lib/arm64/processor.o
+cflatobjs += lib/arm64/spinlock.o
 
 # arm64 specific tests
 tests =
diff --git a/lib/arm64/asm/spinlock.h b/lib/arm64/asm/spinlock.h
index 36b7b44fa4edf..43b2634b46459 100644
--- a/lib/arm64/asm/spinlock.h
+++ b/lib/arm64/asm/spinlock.h
@@ -5,11 +5,7 @@ struct spinlock {
 	int v;
 };
 
-static inline void spin_lock(struct spinlock *lock __unused)
-{
-}
-static inline void spin_unlock(struct spinlock *lock __unused)
-{
-}
+extern void spin_lock(struct spinlock *lock);
+extern void spin_unlock(struct spinlock *lock);
 
 #endif /* _ASMARM64_SPINLOCK_H_ */
diff --git a/lib/arm64/spinlock.c b/lib/arm64/spinlock.c
new file mode 100644
index 0000000000000..68b68b75ba60d
--- /dev/null
+++ b/lib/arm64/spinlock.c
@@ -0,0 +1,43 @@
+/*
+ * spinlocks
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include <asm/spinlock.h>
+#include <asm/barrier.h>
+#include <asm/mmu.h>
+
+void spin_lock(struct spinlock *lock) /* acquire the lock; spins with ldaxr/stxr when the MMU is on */
+{
+	u32 val, fail;
+
+	smp_mb(); /* full barrier: order prior accesses before the acquire attempt */
+
+	if (!mmu_enabled()) { /* exclusives presumably need cacheable mappings, unavailable with MMU off -- TODO confirm */
+		lock->v = 1; /* plain, non-atomic store; assumes no contention in this mode */
+		return;
+	}
+
+	do {
+		asm volatile(
+		"1:	ldaxr	%w0, [%2]\n" /* load-acquire exclusive of lock->v */
+		"	cbnz	%w0, 1b\n" /* spin while the lock is held (nonzero) */
+		"	mov	%0, #1\n" /* value to claim the lock with */
+		"	stxr	%w1, %w0, [%2]\n" /* try to store; %w1 set nonzero if the exclusive failed */
+		: "=&r" (val), "=&r" (fail)
+		: "r" (&lock->v)
+		: "cc" );
+	} while (fail); /* retry whole sequence if the exclusive store lost the race */
+	smp_mb(); /* full barrier: keep critical-section accesses after the acquire */
+}
+
+void spin_unlock(struct spinlock *lock) /* release the lock; store-release when the MMU is on */
+{
+	if (mmu_enabled())
+		asm volatile("stlrh wzr, [%0]" :: "r" (&lock->v)); /* NOTE(review): stlrh writes only the low halfword of the 32-bit v -- OK while v is only ever 0/1 on little-endian, but a word-sized stlr would be clearer; confirm */
+	else
+		lock->v = 0; /* MMU off: plain store, mirrors the non-atomic path in spin_lock() */
+	smp_mb(); /* full barrier after the release */
+}
-- 
1.9.3

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm




[Index of Archives]     [Linux KVM]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux