From 1232618599c1863755a18894bc13336443b937e3 Mon Sep 17 00:00:00 2001 From: gaorui Date: Wed, 6 Aug 2025 17:18:16 +0800 Subject: [PATCH 1/7] asm-generic: ticket-lock: Optimize arch_spin_value_unlocked() mainline inclusion from mainline-v6.6-rc2 commit c6f4a90022524d06f6d9de323b1757031dcf0c26 category: feature bugzilla: https://github.com/RVCK-Project/rvck/issues/90 -------------------------------- The arch_spin_value_unlocked() of ticket-lock would cause the compiler to generate inefficient asm code in riscv architecture because of unnecessary memory access to the contended value. Before the patch: void lockref_get(struct lockref *lockref) { 78: fd010113 add sp,sp,-48 7c: 02813023 sd s0,32(sp) 80: 02113423 sd ra,40(sp) 84: 03010413 add s0,sp,48 0000000000000088 <.LBB296>: CMPXCHG_LOOP( 88: 00053783 ld a5,0(a0) After the patch: void lockref_get(struct lockref *lockref) { CMPXCHG_LOOP( 78: 00053783 ld a5,0(a0) After the patch, the lockref_get() could get in a fast path instead of the function's prologue. This is because ticket lock complex logic would limit compiler optimization for the spinlock fast path, and qspinlock won't. The caller of arch_spin_value_unlocked() could benefit from this change. Currently, the only caller is lockref. Signed-off-by: Guo Ren Signed-off-by: Guo Ren Signed-off-by: Ingo Molnar Acked-by: Waiman Long Acked-by: Will Deacon Link: https://lore.kernel.org/r/20230908154339.3250567-1-guoren@kernel.org Signed-off-by: Gao Rui --- include/asm-generic/spinlock.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index fdfebcb050f40..90803a826ba03 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -68,11 +68,18 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) smp_store_release(ptr, (u16)val + 1); } +static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + u32 val = lock.counter; + + return ((val >> 16) == (val & 0xffff)); +} + static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) { - u32 val = atomic_read(lock); + arch_spinlock_t val = READ_ONCE(*lock); - return ((val >> 16) != (val & 0xffff)); + return !arch_spin_value_unlocked(val); } static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) @@ -82,11 +89,6 @@ static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) return (s16)((val >> 16) - (val & 0xffff)) > 1; } -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - return !arch_spin_is_locked(&lock); -} - #include #endif /* __ASM_GENERIC_SPINLOCK_H */ From 1256cad8b30691ed7a8e017119a1d53531b18678 Mon Sep 17 00:00:00 2001 From: gaorui Date: Thu, 7 Aug 2025 22:27:10 +0800 Subject: [PATCH 2/7] riscv: Improve zacas fully-ordered cmpxchg() mainline inclusion from mainline-v6.12 commit 6116e22ef33a8239f3d53bb25377e9ed733c4176 category: feature bugzilla: https://github.com/RVCK-Project/rvck/issues/90 -------------------------------- The current fully-ordered cmpxchgXX() implementation results in: amocas.X.rl a5,a4,(s1) fence rw,rw This provides enough sync but we can actually use the following better mapping instead: amocas.X.aqrl a5,a4,(s1) Suggested-by: Andrea Parri Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-7-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Gao Rui --- arch/riscv/include/asm/cmpxchg.h | 92 
++++++++++++++++++++++---------- 1 file changed, 64 insertions(+), 28 deletions(-) diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 1f4cd12e4664e..052418aba11a1 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -107,8 +107,10 @@ * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ - -#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, o, n) \ ({ \ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ @@ -117,9 +119,9 @@ r = o; \ \ __asm__ __volatile__ ( \ - prepend \ + cas_prepend \ " amocas" cas_sfx " %0, %z2, %1\n" \ - append \ + cas_append \ : "+&r" (r), "+A" (*(p)) \ : "rJ" (n) \ : "memory"); \ @@ -134,7 +136,7 @@ ulong __rc; \ \ __asm__ __volatile__ ( \ - prepend \ + sc_prepend \ "0: lr.w %0, %2\n" \ " and %1, %0, %z5\n" \ " bne %1, %z3, 1f\n" \ @@ -142,7 +144,7 @@ " or %1, %1, %z4\n" \ " sc.w" sc_sfx " %1, %1, %2\n" \ " bnez %1, 0b\n" \ - append \ + sc_append \ "1:\n" \ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ : "rJ" ((long)__oldx), "rJ" (__newx), \ @@ -153,16 +155,19 @@ } \ }) -#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ +#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, co, o, n) \ ({ \ if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ r = o; \ \ __asm__ __volatile__ ( \ - prepend \ - " amocas" sc_cas_sfx " %0, %z2, %1\n" \ - append \ + cas_prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + cas_append \ : "+&r" (r), "+A" (*(p)) \ : "rJ" (n) \ : "memory"); \ @@ -170,12 +175,12 @@ register unsigned int __rc; \ \ __asm__ __volatile__ ( \ - prepend \ + sc_prepend \ "0: lr" lr_sfx " %0, %2\n" \ " bne %0, %z3, 1f\n" \ - " sc" sc_cas_sfx " %1, %z4, %2\n" \ + " sc" sc_sfx " %1, %z4, %2\n" \ " bnez %1, 0b\n" \ - append \ + sc_append \ "1:\n" \ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ : "rJ" (co o), "rJ" (n) \ @@ -183,7 +188,9 @@ } \ }) -#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ +#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ __typeof__(*(__ptr)) __old = (old); \ @@ -192,22 +199,28 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ - __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ - prepend, append, \ - __ret, __ptr, __old, __new); \ + __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ break; \ case 2: \ - __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ - prepend, append, \ - __ret, __ptr, __old, __new); \ + __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ break; \ case 4: \ - __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ - __ret, __ptr, (long), __old, __new); \ + __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, (long), __old, __new); \ break; \ case 8: \ - __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ - __ret, __ptr, /**/, __old, __new); \ + __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + 
__ret, __ptr, /**/, __old, __new); \ break; \ default: \ BUILD_BUG(); \ @@ -215,17 +228,40 @@ (__typeof__(*(__ptr)))__ret; \ }) +/* + * These macros are here to improve the readability of the arch_cmpxchg_XXX() + * macros. + */ +#define SC_SFX(x) x +#define CAS_SFX(x) x +#define SC_PREPEND(x) x +#define SC_APPEND(x) x +#define CAS_PREPEND(x) x +#define CAS_APPEND(x) x + #define arch_cmpxchg_relaxed(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", "", "") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(""), \ + CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_acquire(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER)) #define arch_cmpxchg_release(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \ + CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND("")) #define arch_cmpxchg(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(".rl"), CAS_SFX(".aqrl"), \ + SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_local(ptr, o, n) \ arch_cmpxchg_relaxed((ptr), (o), (n)) From 8b5cf44cd495e6b1d7aa8f32e64e58aca8bded9d Mon Sep 17 00:00:00 2001 From: gaorui Date: Fri, 8 Aug 2025 09:31:03 +0800 Subject: [PATCH 3/7] riscv: Implement arch_cmpxchg128() using Zacas mainline inclusion from mainline-v6.12 commit f7bd2be7663c7de1dde27dadd352b2c3f4e19106 category: feature bugzilla: https://github.com/RVCK-Project/rvck/issues/90 -------------------------------- Now that Zacas is supported in the kernel, let's use the double word atomic version of amocas to improve the SLUB allocator. Note that we have to select fixed registers, otherwise gcc fails to pick even registers and then produces a reserved encoding which fails to assemble. 
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-8-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Gao Rui --- arch/riscv/Kconfig | 1 + arch/riscv/include/asm/cmpxchg.h | 38 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index e7eae8c5bea1e..924e7bb426586 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -97,6 +97,7 @@ config RISCV select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO select HARDIRQS_SW_RESEND select HAS_IOPORT if MMU + select HAVE_ALIGNED_STRUCT_PAGE select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 052418aba11a1..f95929f538b2d 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -296,6 +296,44 @@ arch_cmpxchg_release((ptr), (o), (n)); \ }) +#if defined(CONFIG_64BIT) && defined(CONFIG_RISCV_ISA_ZACAS) + +#define system_has_cmpxchg128() riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS) + +union __u128_halves { + u128 full; + struct { + u64 low, high; + }; +}; + +#define __arch_cmpxchg128(p, o, n, cas_sfx) \ +({ \ + __typeof__(*(p)) __o = (o); \ + union __u128_halves __hn = { .full = (n) }; \ + union __u128_halves __ho = { .full = (__o) }; \ + register unsigned long t1 asm ("t1") = __hn.low; \ + register unsigned long t2 asm ("t2") = __hn.high; \ + register unsigned long t3 asm ("t3") = __ho.low; \ + register unsigned long t4 asm ("t4") = __ho.high; \ + \ + __asm__ __volatile__ ( \ + " amocas.q" cas_sfx " %0, %z3, %2" \ + : "+&r" (t3), "+&r" (t4), "+A" (*(p)) \ + : "rJ" (t1), "rJ" (t2) \ + : "memory"); \ + \ + ((u128)t4 << 64) | t3; \ +}) + +#define arch_cmpxchg128(ptr, o, n) \ + __arch_cmpxchg128((ptr), (o), (n), ".aqrl") + +#define arch_cmpxchg128_local(ptr, o, n) \ + __arch_cmpxchg128((ptr), (o), (n), "") + +#endif /* CONFIG_64BIT && CONFIG_RISCV_ISA_ZACAS */ + #ifdef CONFIG_RISCV_ISA_ZAWRS /* * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to From adf07c020937430dee4edc40b98f2263ddab8946 Mon Sep 17 00:00:00 2001 From: gaorui Date: Fri, 8 Aug 2025 09:46:41 +0800 Subject: [PATCH 4/7] riscv: Implement xchg8/16() using Zabha mainline inclusion from mainline-v6.12 commit 97ddab7fbea8fceb044108b64ba2ee2c96ff8dab category: feature bugzilla: https://github.com/RVCK-Project/rvck/issues/90 -------------------------------- This adds runtime support for Zabha in xchg8/16() operations. 
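As a quick illustration (not from the patch itself): with Zabha available, a byte-sized xchg() such as the sketch below now compiles to a single amoswap.b instead of the masked lr.w/sc.w loop; demo_flag and demo_test_and_set() are made-up names.

#include <linux/types.h>
#include <linux/atomic.h>

static u8 demo_flag;

static inline bool demo_test_and_set(void)
{
	/* xchg() returns the previous byte; non-zero means it was already set */
	return xchg(&demo_flag, 1) != 0;
}
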
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-9-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Gao Rui --- arch/riscv/include/asm/cmpxchg.h | 65 ++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index f95929f538b2d..4cadc56220fea 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -14,29 +14,41 @@ #include #include -#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ -({ \ - u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ - ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ - ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ - << __s; \ - ulong __newx = (ulong)(n) << __s; \ - ulong __retx; \ - ulong __rc; \ - \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr.w %0, %2\n" \ - " and %1, %0, %z4\n" \ - " or %1, %1, %z3\n" \ - " sc.w" sc_sfx " %1, %1, %2\n" \ - " bnez %1, 0b\n" \ - append \ - : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ - : "rJ" (__newx), "rJ" (~__mask) \ - : "memory"); \ - \ - r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ +#define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \ + swap_append, r, p, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA)) { \ + __asm__ __volatile__ ( \ + prepend \ + " amoswap" swap_sfx " %0, %z2, %1\n" \ + swap_append \ + : "=&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z4\n" \ + " or %1, %1, %z3\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + sc_append \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" (__newx), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) #define __arch_xchg(sfx, prepend, append, r, p, n) \ @@ -59,8 +71,13 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ + __arch_xchg_masked(sc_sfx, ".b" swap_sfx, \ + prepend, sc_append, swap_append, \ + __ret, __ptr, __new); \ + break; \ case 2: \ - __arch_xchg_masked(sc_sfx, prepend, sc_append, \ + __arch_xchg_masked(sc_sfx, ".h" swap_sfx, \ + prepend, sc_append, swap_append, \ __ret, __ptr, __new); \ break; \ case 4: \ From 6891dec7555dbfa140406188a39e8480e4393a70 Mon Sep 17 00:00:00 2001 From: gaorui Date: Fri, 8 Aug 2025 09:53:06 +0800 Subject: [PATCH 5/7] asm-generic: ticket-lock: Reuse arch_spinlock_t of qspinlock mainline inclusion from mainline-v6.12 commit cbe82e140bb76e1aa9f808cc841654a25b70d4e5 category: feature bugzilla: https://github.com/RVCK-Project/rvck/issues/90 -------------------------------- The arch_spinlock_t of qspinlock has contained the atomic_t val, which satisfies the ticket-lock requirement. Thus, unify the arch_spinlock_t into qspinlock_types.h. This is the preparation for the next combo spinlock. 
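For reference (illustrative only, not part of the patch): the ticket code only ever treats that shared atomic_t as two 16-bit halves, so a throwaway helper like the one below captures the whole layout contract; the names are invented for this sketch.

#include <linux/types.h>

struct demo_ticket_view {
	u16 owner;	/* low half: ticket currently being served            */
	u16 next;	/* high half: ticket handed out to the next contender */
};

static inline struct demo_ticket_view demo_split(u32 val)
{
	/* the lock is free exactly when owner == next */
	return (struct demo_ticket_view){
		.owner	= (u16)val,
		.next	= (u16)(val >> 16),
	};
}
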
Reviewed-by: Leonardo Bras Suggested-by: Arnd Bergmann Link: https://lore.kernel.org/linux-riscv/CAK8P3a2rnz9mQqhN6-e0CGUUv9rntRELFdxt_weiD7FxH7fkfQ@mail.gmail.com/ Signed-off-by: Guo Ren Signed-off-by: Guo Ren Acked-by: Peter Zijlstra (Intel) Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-10-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Gao Rui --- include/asm-generic/spinlock.h | 14 +++++++------- include/asm-generic/spinlock_types.h | 12 ++---------- 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index 90803a826ba03..4773334ee6388 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -32,7 +32,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { - u32 val = atomic_fetch_add(1<<16, lock); + u32 val = atomic_fetch_add(1<<16, &lock->val); u16 ticket = val >> 16; if (ticket == (u16)val) @@ -46,31 +46,31 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) * have no outstanding writes due to the atomic_fetch_add() the extra * orderings are free. */ - atomic_cond_read_acquire(lock, ticket == (u16)VAL); + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); smp_mb(); } static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) { - u32 old = atomic_read(lock); + u32 old = atomic_read(&lock->val); if ((old >> 16) != (old & 0xffff)) return false; - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ } static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); - u32 val = atomic_read(lock); + u32 val = atomic_read(&lock->val); smp_store_release(ptr, (u16)val + 1); } static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - u32 val = lock.counter; + u32 val = lock.val.counter; return ((val >> 16) == (val & 0xffff)); } @@ -84,7 +84,7 @@ static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) { - u32 val = atomic_read(lock); + u32 val = atomic_read(&lock->val); return (s16)((val >> 16) - (val & 0xffff)) > 1; } diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h index 8962bb730945d..f534aa5de3947 100644 --- a/include/asm-generic/spinlock_types.h +++ b/include/asm-generic/spinlock_types.h @@ -3,15 +3,7 @@ #ifndef __ASM_GENERIC_SPINLOCK_TYPES_H #define __ASM_GENERIC_SPINLOCK_TYPES_H -#include -typedef atomic_t arch_spinlock_t; - -/* - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the - * include. - */ -#include - -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) +#include +#include #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ From 533892b8d09320a9ef37d6fd35d6cbe105fd3acd Mon Sep 17 00:00:00 2001 From: gaorui Date: Fri, 8 Aug 2025 09:55:42 +0800 Subject: [PATCH 6/7] asm-generic: ticket-lock: Add separate ticket-lock.h mainline inclusion from mainline-v6.12 commit 22c33321e260c8b4c1877b2cc0c4e26a0c74c23f category: feature bugzilla: https://github.com/RVCK-Project/rvck/issues/90 -------------------------------- Add a separate ticket-lock.h to include multiple spinlock versions and select one at compile time or runtime. 
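By way of illustration (not part of the patch): after the split, an architecture that wants to keep the generic ticket lock only changes which header it pulls in. The sketch below is a hypothetical <asm/spinlock.h>; the include paths are an assumption based on the mainline header names.

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_DEMO_SPINLOCK_H
#define __ASM_DEMO_SPINLOCK_H

#include <asm-generic/ticket_spinlock.h>	/* the ticket implementation moved here */
#include <asm/qrwlock.h>			/* generic queued rwlock, unchanged */

#endif /* __ASM_DEMO_SPINLOCK_H */
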
Reviewed-by: Leonardo Bras Suggested-by: Arnd Bergmann Link: https://lore.kernel.org/linux-riscv/CAK8P3a2rnz9mQqhN6-e0CGUUv9rntRELFdxt_weiD7FxH7fkfQ@mail.gmail.com/ Signed-off-by: Guo Ren Signed-off-by: Guo Ren Acked-by: Peter Zijlstra (Intel) Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-11-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Gao Rui --- include/asm-generic/spinlock.h | 87 +--------------------------------- 1 file changed, 1 insertion(+), 86 deletions(-) diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index 4773334ee6388..970590baf61b6 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -1,94 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * 'Generic' ticket-lock implementation. - * - * It relies on atomic_fetch_add() having well defined forward progress - * guarantees under contention. If your architecture cannot provide this, stick - * to a test-and-set lock. - * - * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a - * sub-word of the value. This is generally true for anything LL/SC although - * you'd be hard pressed to find anything useful in architecture specifications - * about this. If your architecture cannot do this you might be better off with - * a test-and-set. - * - * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence - * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with - * a full fence after the spin to upgrade the otherwise-RCpc - * atomic_cond_read_acquire(). - * - * The implementation uses smp_cond_load_acquire() to spin, so if the - * architecture has WFE like instructions to sleep instead of poll for word - * modifications be sure to implement that (see ARM64 for example). - * - */ - #ifndef __ASM_GENERIC_SPINLOCK_H #define __ASM_GENERIC_SPINLOCK_H -#include -#include - -static __always_inline void arch_spin_lock(arch_spinlock_t *lock) -{ - u32 val = atomic_fetch_add(1<<16, &lock->val); - u16 ticket = val >> 16; - - if (ticket == (u16)val) - return; - - /* - * atomic_cond_read_acquire() is RCpc, but rather than defining a - * custom cond_read_rcsc() here we just emit a full fence. We only - * need the prior reads before subsequent writes ordering from - * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we - * have no outstanding writes due to the atomic_fetch_add() the extra - * orderings are free. 
- */ - atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); - smp_mb(); -} - -static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) -{ - u32 old = atomic_read(&lock->val); - - if ((old >> 16) != (old & 0xffff)) - return false; - - return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ -} - -static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); - u32 val = atomic_read(&lock->val); - - smp_store_release(ptr, (u16)val + 1); -} - -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - u32 val = lock.val.counter; - - return ((val >> 16) == (val & 0xffff)); -} - -static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - arch_spinlock_t val = READ_ONCE(*lock); - - return !arch_spin_value_unlocked(val); -} - -static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) -{ - u32 val = atomic_read(&lock->val); - - return (s16)((val >> 16) - (val & 0xffff)) > 1; -} - +#include #include #endif /* __ASM_GENERIC_SPINLOCK_H */ From 1ec44f9a1101bc8fc636793a3af1881b773ec7b1 Mon Sep 17 00:00:00 2001 From: gaorui Date: Fri, 8 Aug 2025 10:06:55 +0800 Subject: [PATCH 7/7] riscv: Add qspinlock support mainline inclusion from mainline-v6.12 commit ab83647fadae2f1f723119dc066b39a461d6d288 category: feature bugzilla: https://github.com/RVCK-Project/rvck/issues/90 -------------------------------- In order to produce a generic kernel, a user can select CONFIG_COMBO_SPINLOCKS which will fallback at runtime to the ticket spinlock implementation if Zabha or Ziccrse are not present. Note that we can't use alternatives here because the discovery of extensions is done too late and we need to start with the qspinlock implementation because the ticket spinlock implementation would pollute the spinlock value, so let's use static keys. This is largely based on Guo's work and Leonardo reviews at [1]. 
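Spelled out for readability (this is not an addition to the patch): each SPINLOCK_BASE_DECLARE() in the hunk below expands to a small static-key dispatcher; for arch_spin_lock() the expansion is equivalent to:

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	/*
	 * qspinlock_key defaults to true and is flipped off at boot by
	 * riscv_spinlock_init() when neither Zabha nor Ziccrse is available.
	 */
	if (static_branch_unlikely(&qspinlock_key))
		return queued_spin_lock(lock);
	return ticket_spin_lock(lock);
}
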
Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1] Signed-off-by: Guo Ren Signed-off-by: Alexandre Ghiti Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-14-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Gao Rui --- .../locking/queued-spinlocks/arch-support.txt | 2 +- arch/riscv/Kconfig | 34 ++++++ arch/riscv/include/asm/Kbuild | 4 +- arch/riscv/include/asm/spinlock.h | 47 ++++++++ arch/riscv/kernel/setup.c | 37 ++++++ include/asm-generic/qspinlock.h | 2 + include/asm-generic/ticket_spinlock.h | 105 ++++++++++++++++++ 7 files changed, 229 insertions(+), 2 deletions(-) create mode 100644 arch/riscv/include/asm/spinlock.h create mode 100644 include/asm-generic/ticket_spinlock.h diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt index 2d3961bfef5d6..963ee9c757520 100644 --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt @@ -21,7 +21,7 @@ | openrisc: | ok | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | TODO | | sh: | TODO | | sparc: | ok | diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 924e7bb426586..186ba8c0d9777 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -67,6 +67,7 @@ config RISCV select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select BUILDTIME_TABLE_SORT if MMU select CLINT_TIMER if !MMU @@ -469,6 +470,39 @@ config NODES_SHIFT Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. +choice + prompt "RISC-V spinlock type" + default RISCV_COMBO_SPINLOCKS + +config RISCV_TICKET_SPINLOCKS + bool "Using ticket spinlock" + +config RISCV_QUEUED_SPINLOCKS + bool "Using queued spinlock" + depends on SMP && MMU && NONPORTABLE + select ARCH_USE_QUEUED_SPINLOCKS + help + The queued spinlock implementation requires the forward progress + guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or + LR/SC with Ziccrse provide such guarantee. + + Select this if and only if Zabha or Ziccrse is available on your + platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms + without one of those extensions. + + If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks + when supported and otherwise ticket spinlocks. + +config RISCV_COMBO_SPINLOCKS + bool "Using combo spinlock" + depends on SMP && MMU + select ARCH_USE_QUEUED_SPINLOCKS + help + Embed both queued spinlock and ticket lock so that the spinlock + implementation can be chosen at runtime. 
+ +endchoice + config RISCV_ALTERNATIVE bool depends on !XIP_KERNEL diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 504f8b7e72d41..ad72f2bd4cc93 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -2,10 +2,12 @@ generic-y += early_ioremap.h generic-y += flat.h generic-y += kvm_para.h +generic-y += mcs_spinlock.h generic-y += parport.h -generic-y += spinlock.h generic-y += spinlock_types.h +generic-y += ticket_spinlock.h generic-y += qrwlock.h generic-y += qrwlock_types.h +generic-y += qspinlock.h generic-y += user.h generic-y += vmlinux.lds.h diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h new file mode 100644 index 0000000000000..e5121b89aceac --- /dev/null +++ b/arch/riscv/include/asm/spinlock.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_RISCV_SPINLOCK_H +#define __ASM_RISCV_SPINLOCK_H + +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS +#define _Q_PENDING_LOOPS (1 << 9) + +#define __no_arch_spinlock_redefine +#include +#include +#include + +/* + * TODO: Use an alternative instead of a static key when we are able to parse + * the extensions string earlier in the boot process. + */ +DECLARE_STATIC_KEY_TRUE(qspinlock_key); + +#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \ +static __always_inline type arch_spin_##op(type_lock lock) \ +{ \ + if (static_branch_unlikely(&qspinlock_key)) \ + return queued_spin_##op(lock); \ + return ticket_spin_##op(lock); \ +} + +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t) + +#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS) + +#include + +#else + +#include + +#endif + +#include + +#endif /* __ASM_RISCV_SPINLOCK_H */ diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 5020c7ef66fbe..5d2c16a763a4b 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -298,6 +298,42 @@ static void __init parse_dtb(void) #endif } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) +DEFINE_STATIC_KEY_TRUE(qspinlock_key); +EXPORT_SYMBOL(qspinlock_key); +#endif + +static void __init riscv_spinlock_init(void) +{ + char *using_ext = NULL; + + if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) { + pr_info("Ticket spinlock: enabled\n"); + return; + } + + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && + riscv_isa_extension_available(NULL, ZABHA) && + riscv_isa_extension_available(NULL, ZACAS)) { + using_ext = "using Zabha"; + } else if (riscv_isa_extension_available(NULL, ZICCRSE)) { + using_ext = "using Ziccrse"; + } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) + else { + static_branch_disable(&qspinlock_key); + pr_info("Ticket spinlock: enabled\n"); + return; + } +#endif + + if (!using_ext) + pr_err("Queued spinlock without Zabha or Ziccrse"); + else + pr_info("Queued spinlock %s: enabled\n", using_ext); +} + extern void __init init_rt_signal_env(void); void __init setup_arch(char **cmdline_p) @@ -351,6 +387,7 @@ void __init setup_arch(char **cmdline_p) riscv_set_dma_cache_alignment(); riscv_user_isa_enable(); + riscv_spinlock_init(); } static int __init topology_init(void) diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 0655aa5b57b29..bf47cca2c375d 100644 --- 
a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) } #endif +#ifndef __no_arch_spinlock_redefine /* * Remapping spinlock architecture specific functions to the corresponding * queued spinlock functions. @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) +#endif #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h new file mode 100644 index 0000000000000..325779970d8a0 --- /dev/null +++ b/include/asm-generic/ticket_spinlock.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * 'Generic' ticket-lock implementation. + * + * It relies on atomic_fetch_add() having well defined forward progress + * guarantees under contention. If your architecture cannot provide this, stick + * to a test-and-set lock. + * + * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a + * sub-word of the value. This is generally true for anything LL/SC although + * you'd be hard pressed to find anything useful in architecture specifications + * about this. If your architecture cannot do this you might be better off with + * a test-and-set. + * + * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence + * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with + * a full fence after the spin to upgrade the otherwise-RCpc + * atomic_cond_read_acquire(). + * + * The implementation uses smp_cond_load_acquire() to spin, so if the + * architecture has WFE like instructions to sleep instead of poll for word + * modifications be sure to implement that (see ARM64 for example). + * + */ + +#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H +#define __ASM_GENERIC_TICKET_SPINLOCK_H + +#include +#include + +static __always_inline void ticket_spin_lock(arch_spinlock_t *lock) +{ + u32 val = atomic_fetch_add(1<<16, &lock->val); + u16 ticket = val >> 16; + + if (ticket == (u16)val) + return; + + /* + * atomic_cond_read_acquire() is RCpc, but rather than defining a + * custom cond_read_rcsc() here we just emit a full fence. We only + * need the prior reads before subsequent writes ordering from + * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we + * have no outstanding writes due to the atomic_fetch_add() the extra + * orderings are free. 
+ */ + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); + smp_mb(); +} + +static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock) +{ + u32 old = atomic_read(&lock->val); + + if ((old >> 16) != (old & 0xffff)) + return false; + + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ +} + +static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock) +{ + u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + u32 val = atomic_read(&lock->val); + + smp_store_release(ptr, (u16)val + 1); +} + +static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock) +{ + u32 val = lock.val.counter; + + return ((val >> 16) == (val & 0xffff)); +} + +static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock) +{ + arch_spinlock_t val = READ_ONCE(*lock); + + return !ticket_spin_value_unlocked(val); +} + +static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) +{ + u32 val = atomic_read(&lock->val); + + return (s16)((val >> 16) - (val & 0xffff)) > 1; +} + +#ifndef __no_arch_spinlock_redefine +/* + * Remapping spinlock architecture specific functions to the corresponding + * ticket spinlock functions. + */ +#define arch_spin_is_locked(l) ticket_spin_is_locked(l) +#define arch_spin_is_contended(l) ticket_spin_is_contended(l) +#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l) +#define arch_spin_lock(l) ticket_spin_lock(l) +#define arch_spin_trylock(l) ticket_spin_trylock(l) +#define arch_spin_unlock(l) ticket_spin_unlock(l) +#endif + +#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */