> -#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n)	\
> +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \
> ({									\
> +	__label__ no_zacas, zabha, end;					\
> +									\
> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {			\
> +		asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0,		\
> +				     RISCV_ISA_EXT_ZACAS, 1)		\
> +			 : : : : no_zacas);				\
> +		asm goto(ALTERNATIVE("nop", "j %[zabha]", 0,		\
> +				     RISCV_ISA_EXT_ZABHA, 1)		\
> +			 : : : : zabha);				\
> +	}								\
> +									\
> +no_zacas:;								\
> 	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
> 	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
> 	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
> @@ -133,6 +145,19 @@
> 		: "memory");						\
> 									\
> 	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
> +	goto end;							\
> +									\
> +zabha:									\
> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {			\
> +		__asm__ __volatile__ (					\
> +			prepend						\
> +			"	amocas" cas_sfx " %0, %z2, %1\n"	\
> +			append						\
> +			: "+&r" (r), "+A" (*(p))			\
> +			: "rJ" (n)					\
> +			: "memory");					\
> +	}								\
> +end:;									\
> })

I admit that I found this all quite difficult to read; IIUC, this is
missing an IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) check.  How about adding
such a check under the zabha: label, in place of the second
IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) check, and moving the corresponding
asm goto statement there, perhaps as follows?  (on top of this patch)

Also, this patch introduces the first occurrence of RISCV_ISA_EXT_ZABHA;
perhaps it would be worth moving the hwcap/cpufeature changes from
patch #6 here?

  Andrea

diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index b9a3fdcec919..3c913afec150 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -110,15 +110,12 @@
 	__label__ no_zacas, zabha, end;					\
 									\
 	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {			\
-		asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0,		\
-				     RISCV_ISA_EXT_ZACAS, 1)		\
-			 : : : : no_zacas);				\
 		asm goto(ALTERNATIVE("nop", "j %[zabha]", 0,		\
 				     RISCV_ISA_EXT_ZABHA, 1)		\
 			 : : : : zabha);				\
 	}								\
 									\
 no_zacas:;								\
 	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
 	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
 	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
@@ -148,16 +145,20 @@ no_zacas:;						\
 	goto end;							\
 									\
 zabha:									\
-	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {			\
-		__asm__ __volatile__ (					\
-			prepend						\
-			"	amocas" cas_sfx " %0, %z2, %1\n"	\
-			append						\
-			: "+&r" (r), "+A" (*(p))			\
-			: "rJ" (n)					\
-			: "memory");					\
+	if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS)) {			\
+		asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0,		\
+				     RISCV_ISA_EXT_ZACAS, 1)		\
+			 : : : : no_zacas);				\
 	}								\
-end:;									\
+									\
+	__asm__ __volatile__ (						\
+		prepend							\
+		"	amocas" cas_sfx " %0, %z2, %1\n"		\
+		append							\
+		: "+&r" (r), "+A" (*(p))				\
+		: "rJ" (n)						\
+		: "memory");						\
+end:;									\
 })
 
 #define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n)	\
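
FWIW, since the jump threading above takes a couple of reads, here is a
minimal, compilable sketch of the control flow the suggested version
would encode.  It is only an illustration: plain booleans stand in for
the ALTERNATIVE()-patched asm goto branches, the AMO sequences are
reduced to puts() placeholders, and the has_zacas/has_zabha names are
made up for the example.

  /*
   * Compile-and-run sketch of the intended dispatch; runtime booleans
   * model the boot-time ALTERNATIVE() patching.
   */
  #include <stdbool.h>
  #include <stdio.h>

  static bool has_zacas;	/* CPU implements Zacas */
  static bool has_zabha;	/* CPU implements Zabha */

  static void cmpxchg_masked_sketch(void)
  {
  	/* ALTERNATIVE("nop", "j %[zabha]", ..., RISCV_ISA_EXT_ZABHA, 1) */
  	if (has_zabha)
  		goto zabha;

  no_zacas:
  	/* masked lr.w/sc.w loop on the aligned 32-bit word */
  	puts("lr.w/sc.w masked fallback");
  	goto end;

  zabha:
  	/*
  	 * The added check: ALTERNATIVE("j %[no_zacas]", "nop", ...,
  	 * RISCV_ISA_EXT_ZACAS, 1) falls back when Zacas is absent.
  	 */
  	if (!has_zacas)
  		goto no_zacas;

  	/* "amocas" cas_sfx " %0, %z2, %1" on the sub-word itself */
  	puts("amocas.b/h fast path");
  end:
  	return;
  }

  int main(void)
  {
  	has_zabha = true;		/* say the CPU has Zabha ... */
  	has_zacas = false;		/* ... but not Zacas */
  	cmpxchg_masked_sketch();	/* takes the lr/sc fallback */
  	return 0;
  }

The hop back to no_zacas matters because Zabha only provides
amocas.b/amocas.h when Zacas is also implemented, so detecting Zabha
alone is not sufficient to use the byte/halfword CAS.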