Commit fab957c1 authored by Palmer Dabbelt

RISC-V: Atomic and Locking Code

This contains all the code that directly interfaces with the RISC-V
memory model.  While this code conforms to the current RISC-V ISA
specifications (user 2.2 and priv 1.10), the memory model is somewhat
underspecified in those documents.  There is a working group that hopes
to produce a formal memory model by the end of the year, but my
understanding is that the basic definitions we're relying on here won't
change significantly.
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
parent 76d2a049
/*
* Based on arch/arm/include/asm/barrier.h
*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2013 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H
#ifndef __ASSEMBLY__
#define nop() __asm__ __volatile__ ("nop")
#define RISCV_FENCE(p, s) \
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
#define mb() RISCV_FENCE(iorw,iorw)
#define rmb() RISCV_FENCE(ir,ir)
#define wmb() RISCV_FENCE(ow,ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
#define smp_mb() RISCV_FENCE(rw,rw)
#define smp_rmb() RISCV_FENCE(r,r)
#define smp_wmb() RISCV_FENCE(w,w)
/*
* These fences exist to enforce ordering around the relaxed AMOs. The
* documentation defines that
* "
* atomic_fetch_add();
* is equivalent to:
* smp_mb__before_atomic();
* atomic_fetch_add_relaxed();
* smp_mb__after_atomic();
* "
* So we emit full fences on both sides.
*/
#define __smp_mb__before_atomic() smp_mb()
#define __smp_mb__after_atomic() smp_mb()
/*
* These barriers prevent accesses performed outside a spinlock from being
* moved inside a spinlock.  Since the aq/rl bits on our spinlock operations
* only enforce release consistency, we need full fences here.
*/
#define smp_mb__before_spinlock() smp_mb()
#define smp_mb__after_spinlock() smp_mb()
#include <asm-generic/barrier.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_BARRIER_H */
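
As a rough illustration (not part of this patch), the smp_wmb()/smp_rmb() pair defined above is meant to be used in the usual producer/consumer pattern; the data and ready variables and the two functions below are hypothetical, purely for exposition:

/* Illustrative sketch only: pairing smp_wmb() with smp_rmb(). */
static int data;
static int ready;

static void producer(void)
{
        WRITE_ONCE(data, 42);
        smp_wmb();              /* expands to "fence w,w" on RISC-V */
        WRITE_ONCE(ready, 1);
}

static int consumer(void)
{
        if (!READ_ONCE(ready))
                return -1;
        smp_rmb();              /* expands to "fence r,r" on RISC-V */
        return READ_ONCE(data);
}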
/*
* Copyright (C) 2012 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>
#ifndef smp_mb__before_clear_bit
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
#endif /* smp_mb__before_clear_bit */
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#if (BITS_PER_LONG == 64)
#define __AMO(op) "amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op) "amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif
#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
({ \
unsigned long __res, __mask; \
__mask = BIT_MASK(nr); \
__asm__ __volatile__ ( \
__AMO(op) #ord " %0, %2, %1" \
: "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \
: "r" (mod(__mask)) \
: "memory"); \
((__res & __mask) != 0); \
})
#define __op_bit_ord(op, mod, nr, addr, ord) \
__asm__ __volatile__ ( \
__AMO(op) #ord " zero, %1, %0" \
: "+A" (addr[BIT_WORD(nr)]) \
: "r" (mod(BIT_MASK(nr))) \
: "memory");
#define __test_and_op_bit(op, mod, nr, addr) \
__test_and_op_bit_ord(op, mod, nr, addr, )
#define __op_bit(op, mod, nr, addr) \
__op_bit_ord(op, mod, nr, addr, )
/* Bitmask modifiers */
#define __NOP(x) (x)
#define __NOT(x) (~(x))
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation may be reordered on architectures other than x86.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation may be reordered on architectures other than x86.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Note: there are no guarantees that this function will not be reordered
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* Note: there are no guarantees that this function will not be reordered
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() may be reordered on architectures other than x86.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
static inline int test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
static inline void clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
}
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This operation is like clear_bit_unlock, however it is not atomic.
* It does provide release barrier semantics so it can be used to unlock
* a bit lock, however it would only be used if no other CPU can modify
* any bits in the memory until the lock is released (a good example is
* if the bit lock itself protects access to the other bits in the word).
*
* On RISC-V systems there seems to be no benefit to taking advantage of the
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
static inline void __clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
clear_bit_unlock(nr, addr);
}
#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* _ASM_RISCV_BITOPS_H */
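
As a hedged usage sketch (not part of this patch), the acquire/release bit operations above are exactly what a one-bit lock needs; MY_LOCK_BIT, my_flags, and the two helpers are hypothetical names:

#define MY_LOCK_BIT     0
static unsigned long my_flags;

static void my_bit_lock(void)
{
        /* amoor.{w,d}.aq: setting the bit is an acquire operation */
        while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
                cpu_relax();
}

static void my_bit_unlock(void)
{
        /* amoand.{w,d}.rl: clearing the bit is a release operation */
        clear_bit_unlock(MY_LOCK_BIT, &my_flags);
}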
/*
* Copyright (C) 2015 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_CACHEFLUSH_H
#define _ASM_RISCV_CACHEFLUSH_H
#include <asm-generic/cacheflush.h>
#undef flush_icache_range
#undef flush_icache_user_range
static inline void local_flush_icache_all(void)
{
asm volatile ("fence.i" ::: "memory");
}
#ifndef CONFIG_SMP
#define flush_icache_range(start, end) local_flush_icache_all()
#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
#else /* CONFIG_SMP */
#define flush_icache_range(start, end) sbi_remote_fence_i(0)
#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
#endif /* CONFIG_SMP */
#endif /* _ASM_RISCV_CACHEFLUSH_H */
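
A minimal sketch of how these macros are typically used (not part of this patch): after new instructions are stored to memory, the instruction stream must be synchronized before that code runs; sync_patched_code(), patch_addr, and patch_len are hypothetical names:

static void sync_patched_code(void *patch_addr, size_t patch_len)
{
        unsigned long start = (unsigned long)patch_addr;

        /*
         * On !SMP this is a local "fence.i"; on SMP it asks the SBI to
         * execute fence.i on the remote harts as well.
         */
        flush_icache_range(start, start + patch_len);
}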
/*
* Copyright (C) 2014 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_CMPXCHG_H
#define _ASM_RISCV_CMPXCHG_H
#include <linux/bug.h>
#include <asm/barrier.h>
#define __xchg(new, ptr, size, asm_or) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"amoswap.w" #asm_or " %0, %2, %1" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
"amoswap.d" #asm_or " %0, %2, %1" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr)), .aqrl))
#define xchg32(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
xchg((ptr), (x)); \
})
#define xchg64(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
xchg((ptr), (x)); \
})
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __cmpxchg(ptr, old, new, size, lrb, scb) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"0:" \
"lr.w" #scb " %0, %2\n" \
"bne %0, %z3, 1f\n" \
"sc.w" #lrb " %1, %z4, %2\n" \
"bnez %1, 0b\n" \
"1:" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
"0:" \
"lr.d" #scb " %0, %2\n" \
"bne %0, %z3, 1f\n" \
"sc.d" #lrb " %1, %z4, %2\n" \
"bnez %1, 0b\n" \
"1:" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define cmpxchg(ptr, o, n) \
(__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), .aqrl, .aqrl))
#define cmpxchg_local(ptr, o, n) \
(__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), , ))
#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg_local((ptr), (o), (n)); \
})
#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#endif /* _ASM_RISCV_CMPXCHG_H */
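
For reference (not part of this patch), cmpxchg() is normally used in a read/compute/retry loop; the saturating counter below is a hypothetical example, and the .aqrl suffixes used above make the successful exchange fully ordered:

static unsigned int saturating_inc(unsigned int *counter, unsigned int max)
{
        unsigned int old, new;

        do {
                old = READ_ONCE(*counter);
                if (old == max)
                        return old;     /* already saturated, no store */
                new = old + 1;
        } while (cmpxchg(counter, old, new) != old);

        return new;
}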
/*
* Copyright (C) 2015 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H
#include <linux/kernel.h>
#include <asm/current.h>
/*
* Simple spin lock operations. These provide no fairness guarantees.
*/
/* FIXME: Replace this with a ticket lock, like MIPS. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__ (
"amoswap.w.rl x0, x0, %0"
: "=A" (lock->lock)
:: "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp = 1, busy;
__asm__ __volatile__ (
"amoswap.w.aq %0, %2, %1"
: "=r" (busy), "+A" (lock->lock)
: "r" (tmp)
: "memory");
return !busy;
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
while (1) {
if (arch_spin_is_locked(lock))
continue;
if (arch_spin_trylock(lock))
break;
}
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_rmb();
do {
cpu_relax();
} while (arch_spin_is_locked(lock));
smp_acquire__after_ctrl_dep();
}
/***********************************************************/
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return lock->lock >= 0;
}
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
static inline void arch_read_lock(arch_rwlock_t *lock)
{
int tmp;
__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bltz %1, 1b\n"
" addi %1, %1, 1\n"
" sc.w.aq %1, %1, %0\n"
" bnez %1, 1b\n"
: "+A" (lock->lock), "=&r" (tmp)
:: "memory");
}
static inline void arch_write_lock(arch_rwlock_t *lock)
{
int tmp;
__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bnez %1, 1b\n"
" li %1, -1\n"
" sc.w.aq %1, %1, %0\n"
" bnez %1, 1b\n"
: "+A" (lock->lock), "=&r" (tmp)
:: "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int busy;
__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bltz %1, 1f\n"
" addi %1, %1, 1\n"
" sc.w.aq %1, %1, %0\n"
" bnez %1, 1b\n"
"1:\n"
: "+A" (lock->lock), "=&r" (busy)
:: "memory");
return !busy;
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
int busy;
__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bnez %1, 1f\n"
" li %1, -1\n"
" sc.w.aq %1, %1, %0\n"
" bnez %1, 1b\n"
"1:\n"
: "+A" (lock->lock), "=&r" (busy)
:: "memory");
return !busy;
}
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
"amoadd.w.rl x0, %1, %0"
: "+A" (lock->lock)
: "r" (-1)
: "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__ (
"amoswap.w.rl x0, x0, %0"
: "=A" (lock->lock)
:: "memory");
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#endif /* _ASM_RISCV_SPINLOCK_H */
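
To make the rwlock encoding explicit (commentary, not part of the patch): lock == 0 means unlocked, lock > 0 counts active readers, and lock == -1 marks a writer. The LR/SC loop in arch_read_lock() is roughly equivalent to the following C sketch, which substitutes cmpxchg() for the explicit lr.w/sc.w.aq pair:

static void read_lock_sketch(arch_rwlock_t *lock)
{
        int seen;

        for (;;) {
                seen = (int)READ_ONCE(lock->lock);      /* lr.w */
                if (seen < 0)
                        continue;       /* writer holds the lock, retry */
                /* sc.w.aq: publish seen + 1 only if the word is unchanged */
                if (cmpxchg(&lock->lock, seen, seen + 1) == seen)
                        break;
        }
}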
/*
* Copyright (C) 2015 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_SPINLOCK_TYPES_H
#define _ASM_RISCV_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }