[RFC PATCH 1/3] percpu: Define __pcpu_typeof()

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Define __pcpu_typeof() to use __typeof_unqual__() as the typeof operator
when available, to return the unqualified type of the expression.

If an arch defines __percpu variables in their own named address
space, then __pcpu_typeof() returns the unqualified type of the
expression without the named address space qualifier when
CONFIG_CC_HAS_TYPEOF_UNQUAL is defined.

Signed-off-by: Uros Bizjak <ubizjak@xxxxxxxxx>
Cc: Dennis Zhou <dennis@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Nadav Amit <nadav.amit@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Luc Van Oostenryck <luc.vanoostenryck@xxxxxxxxx>
---
 arch/x86/include/asm/percpu.h | 38 ++++++++++++++++++++++------------
 include/linux/part_stat.h     |  2 +-
 include/linux/percpu-defs.h   | 39 ++++++++++++++++++++++++++++-------
 init/Kconfig                  |  3 +++
 kernel/locking/percpu-rwsem.c |  2 +-
 5 files changed, 62 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index c55a79d5feae..4d31203eb0d2 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -73,10 +73,14 @@
 	unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off);	\
 									\
 	tcp_ptr__ += (__force unsigned long)(_ptr);			\
-	(typeof(*(_ptr)) __kernel __force *)tcp_ptr__;			\
+	(__pcpu_typeof(*(_ptr)) __kernel __force *)tcp_ptr__;		\
 })
 #else
-#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
+#define arch_raw_cpu_ptr(_ptr)						\
+({									\
+	BUILD_BUG();							\
+	(__pcpu_typeof(*(_ptr)) __kernel __force *)0;			\
+})
 #endif
 
 #define PER_CPU_VAR(var)	%__percpu_seg:(var)__percpu_rel
@@ -172,7 +176,7 @@ do {									\
 	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
 	    : [var] "m" (__my_cpu_var(_var)));				\
 									\
-	(typeof(_var))(unsigned long) pfo_val__;			\
+	(__pcpu_typeof(_var))(unsigned long) pfo_val__;			\
 })
 
 #define __raw_cpu_write(size, qual, _var, _val)				\
@@ -180,7 +184,7 @@ do {									\
 	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
 									\
 	if (0) {		                                        \
-		typeof(_var) pto_tmp__;					\
+		__pcpu_typeof(_var) pto_tmp__;				\
 		pto_tmp__ = (_val);					\
 		(void)pto_tmp__;					\
 	}								\
@@ -193,7 +197,11 @@ do {									\
  * The generic per-CPU infrastrucutre is not suitable for
  * reading const-qualified variables.
  */
-#define __raw_cpu_read_const(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })
+#define __raw_cpu_read_const(pcp)					\
+({									\
+	BUILD_BUG();							\
+	(__pcpu_typeof(pcp))0;						\
+})
 
 #endif /* CONFIG_USE_X86_SEG_SUPPORT */
 
@@ -205,7 +213,7 @@ do {									\
 	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
 	    : [var] "i" (&(_var)));					\
 									\
-	(typeof(_var))(unsigned long) pfo_val__;			\
+	(__pcpu_typeof(_var))(unsigned long) pfo_val__;			\
 })
 
 #define percpu_unary_op(size, qual, op, _var)				\
@@ -219,7 +227,7 @@ do {									\
 	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
 									\
 	if (0) {		                                        \
-		typeof(_var) pto_tmp__;					\
+		__pcpu_typeof(_var) pto_tmp__;				\
 		pto_tmp__ = (_val);					\
 		(void)pto_tmp__;					\
 	}								\
@@ -239,7 +247,7 @@ do {									\
 				(int)(val) : 0;				\
 									\
 	if (0) {							\
-		typeof(var) pao_tmp__;					\
+		__pcpu_typeof(var) pao_tmp__;				\
 		pao_tmp__ = (val);					\
 		(void)pao_tmp__;					\
 	}								\
@@ -263,7 +271,7 @@ do {									\
 		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
 		    [var] "+m" (__my_cpu_var(_var))			\
 		  : : "memory");					\
-	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
+	(__pcpu_typeof(_var))(unsigned long) (paro_tmp__ + _val);	\
 })
 
 /*
@@ -272,7 +280,7 @@ do {									\
  */
 #define raw_percpu_xchg_op(_var, _nval)					\
 ({									\
-	typeof(_var) pxo_old__ = raw_cpu_read(_var);			\
+	__pcpu_typeof(_var) pxo_old__ = raw_cpu_read(_var);		\
 									\
 	raw_cpu_write(_var, _nval);					\
 									\
@@ -286,7 +294,7 @@ do {									\
  */
 #define this_percpu_xchg_op(_var, _nval)				\
 ({									\
-	typeof(_var) pxo_old__ = this_cpu_read(_var);			\
+	__pcpu_typeof(_var) pxo_old__ = this_cpu_read(_var);		\
 									\
 	do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval));	\
 									\
@@ -309,7 +317,7 @@ do {									\
 		  : [nval] __pcpu_reg_##size(, pco_new__)		\
 		  : "memory");						\
 									\
-	(typeof(_var))(unsigned long) pco_old__;			\
+	(__pcpu_typeof(_var))(unsigned long) pco_old__;			\
 })
 
 #define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)		\
@@ -568,7 +576,11 @@ do {									\
 #else /* !CONFIG_X86_64: */
 
 /* There is no generic 64-bit read stable operation for 32-bit targets. */
-#define this_cpu_read_stable_8(pcp)			({ BUILD_BUG(); (typeof(pcp))0; })
+#define this_cpu_read_stable_8(pcp)					\
+({									\
+	BUILD_BUG();							\
+	(__pcpu_typeof(pcp))0;						\
+})
 
 #define raw_cpu_read_long(pcp)				raw_cpu_read_4(pcp)
 
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index ac8c44dd8237..3807bc29ba05 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -33,7 +33,7 @@ struct disk_stats {
 
 #define part_stat_read(part, field)					\
 ({									\
-	typeof((part)->bd_stats->field) res = 0;			\
+	__pcpu_typeof((part)->bd_stats->field) res = 0;			\
 	unsigned int _cpu;						\
 	for_each_possible_cpu(_cpu)					\
 		res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8efce7414fad..842d10912fdd 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -220,6 +220,21 @@ do {									\
 	(void)__vpp_verify;						\
 } while (0)
 
+/*
+ * Define __percpu_typeof() to use __typeof_unqual__() as typeof
+ * operator when available, to return unqualified type of the exp.
+ *
+ * If an arch defines __percpu variables in their own named address
+ * space, then __pcpu_typeof() returns unqualified type of the
+ * expression without named address space qualifier when
+ * CONFIG_CC_HAS_TYPEOF_UNQUAL is defined.
+ */
+#ifdef CONFIG_CC_HAS_TYPEOF_UNQUAL
+#define __pcpu_typeof(exp) __typeof_unqual__(exp)
+#else
+#define __pcpu_typeof(exp) __typeof__(exp)
+#endif
+
 #ifdef CONFIG_SMP
 
 /*
@@ -228,7 +243,10 @@ do {									\
  * pointer value.  The weird cast keeps both GCC and sparse happy.
  */
 #define SHIFT_PERCPU_PTR(__p, __offset)					\
-	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+	uintptr_t ptr__ = (__force uintptr_t)(__p);			\
+									\
+	RELOC_HIDE((__pcpu_typeof(*(__p)) __kernel __force *)(ptr__),	\
+		   (__offset))
 
 #define per_cpu_ptr(ptr, cpu)						\
 ({									\
@@ -254,13 +272,20 @@ do {									\
 
 #else	/* CONFIG_SMP */
 
-#define VERIFY_PERCPU_PTR(__p)						\
+#define PERCPU_PTR(__p)							\
 ({									\
-	__verify_pcpu_ptr(__p);						\
-	(typeof(*(__p)) __kernel __force *)(__p);			\
+	uintptr_t ptr__ = (__force uintptr_t)(__p);			\
+									\
+	(__pcpu_typeof(*(__p)) __kernel __force *)(ptr__);		\
+})
+
+#define per_cpu_ptr(ptr, cpu)						\
+({									\
+	__verify_pcpu_ptr(ptr);						\
+	(void)(cpu);							\
+	PERCPU_PTR(ptr);						\
 })
 
-#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
 #define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
 #define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
 
@@ -315,7 +340,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
 
 #define __pcpu_size_call_return(stem, variable)				\
 ({									\
-	typeof(variable) pscr_ret__;					\
+	__pcpu_typeof(variable) pscr_ret__;				\
 	__verify_pcpu_ptr(&(variable));					\
 	switch(sizeof(variable)) {					\
 	case 1: pscr_ret__ = stem##1(variable); break;			\
@@ -330,7 +355,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
 
 #define __pcpu_size_call_return2(stem, variable, ...)			\
 ({									\
-	typeof(variable) pscr2_ret__;					\
+	__pcpu_typeof(variable) pscr2_ret__;				\
 	__verify_pcpu_ptr(&(variable));					\
 	switch(sizeof(variable)) {					\
 	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
diff --git a/init/Kconfig b/init/Kconfig
index 37260d17267e..a9a04d0683b6 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -873,6 +873,9 @@ config ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 config CC_HAS_INT128
 	def_bool !$(cc-option,$(m64-flag) -D__SIZEOF_INT128__=0) && 64BIT
 
+config CC_HAS_TYPEOF_UNQUAL
+	def_bool $(success,echo 'int foo (int a) { __typeof_unqual__(a) b = a; return b; }' | $(CC) -x c - -S -o /dev/null)
+
 config CC_IMPLICIT_FALLTHROUGH
 	string
 	default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 6083883c4fe0..ac9b2f4bcd92 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(__percpu_down_read);
 
 #define per_cpu_sum(var)						\
 ({									\
-	typeof(var) __sum = 0;						\
+	__pcpu_typeof(var) __sum = 0;					\
 	int cpu;							\
 	compiletime_assert_atomic_type(__sum);				\
 	for_each_possible_cpu(cpu)					\
-- 
2.45.2





[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux