Changes from all commits
27 commits
8fe84e6
RISC-V: KVM: Allow Zicond extension for Guest/VM
avpatel Sep 15, 2023
9e7e2b2
RISC-V: KVM: Allow Zbc extension for Guest/VM
avpatel Nov 27, 2023
d6f214b
RISC-V: KVM: Allow scalar crypto extensions for Guest/VM
avpatel Nov 27, 2023
68d5782
RISC-V: KVM: Allow vector crypto extensions for Guest/VM
avpatel Nov 27, 2023
25482d2
RISC-V: KVM: Allow Zfh[min] extensions for Guest/VM
avpatel Nov 27, 2023
fc2be86
RISC-V: KVM: Allow Zihintntl extension for Guest/VM
avpatel Nov 27, 2023
c49a8b2
RISC-V: KVM: Allow Zvfh[min] extensions for Guest/VM
avpatel Nov 27, 2023
955667a
RISC-V: KVM: Allow Zfa extension for Guest/VM
avpatel Nov 27, 2023
d0397c7
RISC-V: KVM: Forward SEED CSR access to user space
avpatel Feb 13, 2024
4139a21
RISC-V: KVM: Allow Ztso extension for Guest/VM
avpatel Feb 13, 2024
23b6744
RISC-V: KVM: Allow Zacas extension for Guest/VM
avpatel Feb 13, 2024
b3a2247
RISC-V: KVM: Allow Zimop extension for Guest/VM
clementleger Jun 19, 2024
87c3d34
RISC-V: KVM: Allow Zca, Zcf, Zcd and Zcb extensions for Guest/VM
clementleger Jun 19, 2024
aa32b41
RISC-V: KVM: Allow Zcmop extension for Guest/VM
clementleger Jun 19, 2024
f4ba233
KVM: riscv: Support guest wrs.nto
Apr 26, 2024
dd34ccd
RISC-V: KVM: Allow Smnpm and Ssnpm extensions for guests
SiFiveHolland Oct 16, 2024
112acb1
RISC-V: KVM: Add Svade and Svadu Extensions Support for Guest/VM
yong-xuan Jul 26, 2024
9526aad
RISC-V: KVM: Allow Svvptc extension for Guest/VM
zcxGGmu Dec 2, 2024
ac0b5e7
RISC-V: KVM: Allow Zabha extension for Guest/VM
zcxGGmu Dec 2, 2024
06079e9
RISC-V: KVM: Allow Ziccrse extension for Guest/VM
zcxGGmu Dec 2, 2024
d88b85c
asm-generic: ticket-lock: Optimize arch_spin_value_unlocked()
guoren83 Sep 8, 2023
587fba9
riscv: Improve zacas fully-ordered cmpxchg()
Nov 3, 2024
8cef857
riscv: Implement arch_cmpxchg128() using Zacas
Nov 3, 2024
324ac09
riscv: Implement xchg8/16() using Zabha
Nov 3, 2024
66800c7
asm-generic: ticket-lock: Reuse arch_spinlock_t of qspinlock
guoren83 Nov 3, 2024
89154a5
asm-generic: ticket-lock: Add separate ticket-lock.h
guoren83 Nov 3, 2024
4286623
riscv: Add qspinlock support
Nov 3, 2024
@@ -21,7 +21,7 @@
| openrisc: | ok |
| parisc: | TODO |
| powerpc: | ok |
| riscv: | TODO |
| riscv: | ok |
| s390: | TODO |
| sh: | TODO |
| sparc: | ok |
35 changes: 35 additions & 0 deletions arch/riscv/Kconfig
@@ -67,6 +67,7 @@ config RISCV
select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
select BUILDTIME_TABLE_SORT if MMU
select CLINT_TIMER if !MMU
@@ -98,6 +99,7 @@ config RISCV
select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
select HARDIRQS_SW_RESEND
select HAS_IOPORT if MMU
select HAVE_ALIGNED_STRUCT_PAGE
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
@@ -461,6 +463,39 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.

choice
prompt "RISC-V spinlock type"
default RISCV_COMBO_SPINLOCKS

config RISCV_TICKET_SPINLOCKS
bool "Using ticket spinlock"

config RISCV_QUEUED_SPINLOCKS
bool "Using queued spinlock"
depends on SMP && MMU && NONPORTABLE
select ARCH_USE_QUEUED_SPINLOCKS
help
The queued spinlock implementation requires the forward progress
guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
LR/SC with Ziccrse provide such guarantee.

Select this if and only if Zabha or Ziccrse is available on your
platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms
without one of those extensions.

If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks
when supported and otherwise ticket spinlocks.

config RISCV_COMBO_SPINLOCKS
bool "Using combo spinlock"
depends on SMP && MMU
select ARCH_USE_QUEUED_SPINLOCKS
help
Embed both queued spinlock and ticket lock so that the spinlock
implementation can be chosen at runtime.

endchoice

config RISCV_ALTERNATIVE
bool
depends on !XIP_KERNEL
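
The spinlock Kconfig choice above captures the design: queued spinlocks depend on the forward-progress guarantee that Zabha (CAS) or Ziccrse (LR/SC) provides, and RISCV_COMBO_SPINLOCKS defers the queued-vs-ticket decision to runtime. The standalone C sketch below illustrates only that runtime-dispatch idea; the type and function names (combo_lock_t, combo_spin_lock, use_queued) and both lock backends are invented for this illustration, and the series itself routes the choice through the asm-generic ticket/queued spinlock headers rather than anything like this.

#include <stdatomic.h>
#include <stdbool.h>

/* Ticket lock: fairness comes from an atomically taken "next" ticket. */
typedef struct { atomic_uint next, owner; } ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	unsigned int t = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != t)
		;	/* spin until our ticket is served */
}

static void ticket_unlock(ticket_lock_t *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

/* Stand-in for a CAS-based lock; a real qspinlock queues waiters in
 * per-CPU MCS nodes instead of spinning on a single flag. */
typedef struct { atomic_bool locked; } cas_lock_t;

static void cas_lock(cas_lock_t *l)
{
	bool expected = false;

	while (!atomic_compare_exchange_weak_explicit(&l->locked, &expected, true,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = false;
}

static void cas_unlock(cas_lock_t *l)
{
	atomic_store_explicit(&l->locked, false, memory_order_release);
}

/* Both backends share one zero-initialized representation, and the
 * decision is made once, e.g. from boot-time feature detection. */
typedef union { ticket_lock_t t; cas_lock_t q; } combo_lock_t;
static bool use_queued;

static void combo_spin_lock(combo_lock_t *l)
{
	if (use_queued)
		cas_lock(&l->q);
	else
		ticket_lock(&l->t);
}

static void combo_spin_unlock(combo_lock_t *l)
{
	if (use_queued)
		cas_unlock(&l->q);
	else
		ticket_unlock(&l->t);
}

int main(void)
{
	static combo_lock_t lock;	/* zero-initialized: unlocked in either mode */

	use_queued = true;		/* pretend Zabha/Ziccrse was detected */
	combo_spin_lock(&lock);
	combo_spin_unlock(&lock);
	return 0;
}
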
4 changes: 3 additions & 1 deletion arch/riscv/include/asm/Kbuild
@@ -2,10 +2,12 @@
generic-y += early_ioremap.h
generic-y += flat.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += spinlock.h
generic-y += spinlock_types.h
generic-y += ticket_spinlock.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
generic-y += qspinlock.h
generic-y += user.h
generic-y += vmlinux.lds.h
195 changes: 143 additions & 52 deletions arch/riscv/include/asm/cmpxchg.h
@@ -14,29 +14,41 @@
#include <asm/insn-def.h>
#include <asm/cpufeature-macros.h>

#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \
({ \
u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
<< __s; \
ulong __newx = (ulong)(n) << __s; \
ulong __retx; \
ulong __rc; \
\
__asm__ __volatile__ ( \
prepend \
"0: lr.w %0, %2\n" \
" and %1, %0, %z4\n" \
" or %1, %1, %z3\n" \
" sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
append \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
: "rJ" (__newx), "rJ" (~__mask) \
: "memory"); \
\
r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
#define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \
swap_append, r, p, n) \
({ \
if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \
riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA)) { \
__asm__ __volatile__ ( \
prepend \
" amoswap" swap_sfx " %0, %z2, %1\n" \
swap_append \
: "=&r" (r), "+A" (*(p)) \
: "rJ" (n) \
: "memory"); \
} else { \
u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
<< __s; \
ulong __newx = (ulong)(n) << __s; \
ulong __retx; \
ulong __rc; \
\
__asm__ __volatile__ ( \
prepend \
"0: lr.w %0, %2\n" \
" and %1, %0, %z4\n" \
" or %1, %1, %z3\n" \
" sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
sc_append \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
: "rJ" (__newx), "rJ" (~__mask) \
: "memory"); \
\
r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
} \
})
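/*
 * __arch_xchg_masked(): with Zabha the byte/halfword exchange is a single
 * amoswap.b/amoswap.h. Without it, the pointer is rounded down to its
 * containing aligned 32-bit word, the new value and a field mask are shifted
 * into position, and an lr.w/sc.w loop rewrites only that byte or halfword.
 */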

#define __arch_xchg(sfx, prepend, append, r, p, n) \
@@ -59,8 +71,13 @@
\
switch (sizeof(*__ptr)) { \
case 1: \
__arch_xchg_masked(sc_sfx, ".b" swap_sfx, \
prepend, sc_append, swap_append, \
__ret, __ptr, __new); \
break; \
case 2: \
__arch_xchg_masked(sc_sfx, prepend, sc_append, \
__arch_xchg_masked(sc_sfx, ".h" swap_sfx, \
prepend, sc_append, swap_append, \
__ret, __ptr, __new); \
break; \
case 4: \
@@ -107,8 +124,10 @@
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/

#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \
#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
r, p, o, n) \
({ \
if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \
IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \
@@ -117,9 +136,9 @@
r = o; \
\
__asm__ __volatile__ ( \
prepend \
cas_prepend \
" amocas" cas_sfx " %0, %z2, %1\n" \
append \
cas_append \
: "+&r" (r), "+A" (*(p)) \
: "rJ" (n) \
: "memory"); \
@@ -134,15 +153,15 @@
ulong __rc; \
\
__asm__ __volatile__ ( \
prepend \
sc_prepend \
"0: lr.w %0, %2\n" \
" and %1, %0, %z5\n" \
" bne %1, %z3, 1f\n" \
" and %1, %0, %z6\n" \
" or %1, %1, %z4\n" \
" sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
append \
sc_append \
"1:\n" \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
: "rJ" ((long)__oldx), "rJ" (__newx), \
@@ -153,37 +172,42 @@
} \
})

#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \
#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
r, p, co, o, n) \
({ \
if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \
riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \
r = o; \
\
__asm__ __volatile__ ( \
prepend \
" amocas" sc_cas_sfx " %0, %z2, %1\n" \
append \
cas_prepend \
" amocas" cas_sfx " %0, %z2, %1\n" \
cas_append \
: "+&r" (r), "+A" (*(p)) \
: "rJ" (n) \
: "memory"); \
} else { \
register unsigned int __rc; \
\
__asm__ __volatile__ ( \
prepend \
sc_prepend \
"0: lr" lr_sfx " %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc" sc_cas_sfx " %1, %z4, %2\n" \
" sc" sc_sfx " %1, %z4, %2\n" \
" bnez %1, 0b\n" \
append \
sc_append \
"1:\n" \
: "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
: "rJ" (co o), "rJ" (n) \
: "memory"); \
} \
})

#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \
#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(__ptr)) __old = (old); \
@@ -192,40 +216,69 @@
\
switch (sizeof(*__ptr)) { \
case 1: \
__arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \
prepend, append, \
__ret, __ptr, __old, __new); \
__arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
__ret, __ptr, __old, __new); \
break; \
case 2: \
__arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \
prepend, append, \
__ret, __ptr, __old, __new); \
__arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
__ret, __ptr, __old, __new); \
break; \
case 4: \
__arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \
__ret, __ptr, (long), __old, __new); \
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
__ret, __ptr, (long), __old, __new); \
break; \
case 8: \
__arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \
__ret, __ptr, /**/, __old, __new); \
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
__ret, __ptr, /**/, __old, __new); \
break; \
default: \
BUILD_BUG(); \
} \
(__typeof__(*(__ptr)))__ret; \
})

/*
* These macros are here to improve the readability of the arch_cmpxchg_XXX()
* macros.
*/
#define SC_SFX(x) x
#define CAS_SFX(x) x
#define SC_PREPEND(x) x
#define SC_APPEND(x) x
#define CAS_PREPEND(x) x
#define CAS_APPEND(x) x

#define arch_cmpxchg_relaxed(ptr, o, n) \
_arch_cmpxchg((ptr), (o), (n), "", "", "")
_arch_cmpxchg((ptr), (o), (n), \
SC_SFX(""), CAS_SFX(""), \
SC_PREPEND(""), SC_APPEND(""), \
CAS_PREPEND(""), CAS_APPEND(""))

#define arch_cmpxchg_acquire(ptr, o, n) \
_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
_arch_cmpxchg((ptr), (o), (n), \
SC_SFX(""), CAS_SFX(""), \
SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \
CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER))

#define arch_cmpxchg_release(ptr, o, n) \
_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
_arch_cmpxchg((ptr), (o), (n), \
SC_SFX(""), CAS_SFX(""), \
SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \
CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND(""))

#define arch_cmpxchg(ptr, o, n) \
_arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n")
_arch_cmpxchg((ptr), (o), (n), \
SC_SFX(".rl"), CAS_SFX(".aqrl"), \
SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \
CAS_PREPEND(""), CAS_APPEND(""))

#define arch_cmpxchg_local(ptr, o, n) \
arch_cmpxchg_relaxed((ptr), (o), (n))
@@ -260,6 +313,44 @@
arch_cmpxchg_release((ptr), (o), (n)); \
})

#if defined(CONFIG_64BIT) && defined(CONFIG_RISCV_ISA_ZACAS)

#define system_has_cmpxchg128() riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)

union __u128_halves {
u128 full;
struct {
u64 low, high;
};
};

#define __arch_cmpxchg128(p, o, n, cas_sfx) \
({ \
__typeof__(*(p)) __o = (o); \
union __u128_halves __hn = { .full = (n) }; \
union __u128_halves __ho = { .full = (__o) }; \
register unsigned long t1 asm ("t1") = __hn.low; \
register unsigned long t2 asm ("t2") = __hn.high; \
register unsigned long t3 asm ("t3") = __ho.low; \
register unsigned long t4 asm ("t4") = __ho.high; \
\
__asm__ __volatile__ ( \
" amocas.q" cas_sfx " %0, %z3, %2" \
: "+&r" (t3), "+&r" (t4), "+A" (*(p)) \
: "rJ" (t1), "rJ" (t2) \
: "memory"); \
\
((u128)t4 << 64) | t3; \
})
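/*
 * amocas.q consumes its 128-bit operands as even/odd register pairs, which
 * is why the halves are pinned to t1/t2 and t3/t4 explicitly rather than
 * being left to the compiler's register allocation.
 */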

#define arch_cmpxchg128(ptr, o, n) \
__arch_cmpxchg128((ptr), (o), (n), ".aqrl")

#define arch_cmpxchg128_local(ptr, o, n) \
__arch_cmpxchg128((ptr), (o), (n), "")

#endif /* CONFIG_64BIT && CONFIG_RISCV_ISA_ZACAS */

#ifdef CONFIG_RISCV_ISA_ZAWRS
/*
* Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to
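
The byte and halfword helpers in this file fall back to a 32-bit LR/SC loop that shifts a mask and the new value into place so only the narrow field changes. The standalone sketch below shows the same narrow-on-top-of-wide technique with GCC/Clang __atomic builtins instead of lr.w/sc.w; the function name cmpxchg8_emulated, the little-endian shift, and the demo values are assumptions of this illustration, not part of the patch. With Zabha/Zacas present, the kernel code above instead issues a single amocas.b and skips the loop entirely.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Emulate a byte-wide compare-and-swap using a 32-bit compare-and-swap,
 * mirroring the mask-and-shift approach of __arch_cmpxchg_masked(). */
static uint8_t cmpxchg8_emulated(uint8_t *p, uint8_t old, uint8_t new_val)
{
	/* Round the pointer down to its containing aligned 32-bit word. */
	uint32_t *word = (uint32_t *)((uintptr_t)p & ~(uintptr_t)0x3);
	unsigned int shift = ((uintptr_t)p & 0x3) * 8;	/* little-endian layout */
	uint32_t mask = 0xffu << shift;
	uint32_t cur = __atomic_load_n(word, __ATOMIC_RELAXED);

	for (;;) {
		uint8_t seen = (uint8_t)((cur & mask) >> shift);

		if (seen != old)
			return seen;	/* comparison failed, nothing stored */

		uint32_t next = (cur & ~mask) | ((uint32_t)new_val << shift);

		/* On failure, cur is refreshed with the word's current value. */
		if (__atomic_compare_exchange_n(word, &cur, next, true,
						__ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
			return old;	/* success */
	}
}

int main(void)
{
	union { uint32_t w; uint8_t b[4]; } buf = { .b = { 0x11, 0x22, 0x33, 0x44 } };
	uint8_t prev = cmpxchg8_emulated(&buf.b[2], 0x33, 0x99);

	printf("prev=0x%02x now=0x%02x\n", prev, buf.b[2]);	/* prev=0x33 now=0x99 */
	return 0;
}
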
1 change: 1 addition & 0 deletions arch/riscv/include/asm/kvm_host.h
@@ -69,6 +69,7 @@ struct kvm_vcpu_stat {
struct kvm_vcpu_stat_generic generic;
u64 ecall_exit_stat;
u64 wfi_exit_stat;
u64 wrs_exit_stat;
u64 mmio_exit_user;
u64 mmio_exit_kernel;
u64 csr_exit_user;