Commit 43381d19 authored by Philippe Gerum

atomic: ipipe: keep atomic when pipelining IRQs

Because interrupt masking is virtualized for regular kernel code when
the pipeline is enabled, atomic helpers relying on the common interrupt
disabling helpers such as local_irq_save/restore pairs would no longer
be atomic, leading to data corruption.

This commit restores true atomicity for the atomic helpers that would
otherwise be affected by interrupt virtualization.
parent e68fafcc
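
For context, the sketch below illustrates the failure mode and the fix pattern used throughout the diff that follows. It is illustrative only: the helper bodies mirror the generic atomic ops touched by this commit, the function names are made up for the example, and the assumption is an I-pipe kernel where local_irq_save()/restore() merely virtualize masking for regular kernel code while hard_local_irq_save()/restore() really mask interrupts in the CPU.

/*
 * Illustrative only, not part of the commit: under the pipeline,
 * local_irq_save() just marks IRQs as logically disabled for the
 * regular kernel, so a high-priority (out-of-band) interrupt can
 * still preempt the read-modify-write sequence below.
 */
static inline void atomic_add_virtual(int i, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);          /* virtual masking only */
        v->counter = v->counter + i;    /* can be preempted mid-update */
        local_irq_restore(flags);
}

/*
 * The pattern applied by this commit: hard_local_irq_save() disables
 * IRQs in the CPU itself, so no pipeline stage can preempt the update.
 */
static inline void atomic_add_hard(int i, atomic_t *v)
{
        unsigned long flags;

        flags = hard_local_irq_save();
        v->counter = v->counter + i;
        hard_local_irq_restore(flags);
}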
@@ -80,9 +80,9 @@ static inline void atomic_##op(int i, atomic_t *v) \
 { \
         unsigned long flags; \
         \
-        raw_local_irq_save(flags); \
+        flags = hard_local_irq_save(); \
         v->counter = v->counter c_op i; \
-        raw_local_irq_restore(flags); \
+        hard_local_irq_restore(flags); \
 }

 #define ATOMIC_OP_RETURN(op, c_op) \
@@ -91,9 +91,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
         unsigned long flags; \
         int ret; \
         \
-        raw_local_irq_save(flags); \
+        flags = hard_local_irq_save(); \
         ret = (v->counter = v->counter c_op i); \
-        raw_local_irq_restore(flags); \
+        hard_local_irq_restore(flags); \
         \
         return ret; \
 }
@@ -104,10 +104,10 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
         unsigned long flags; \
         int ret; \
         \
-        raw_local_irq_save(flags); \
+        flags = hard_local_irq_save(); \
         ret = v->counter; \
         v->counter = v->counter c_op i; \
-        raw_local_irq_restore(flags); \
+        hard_local_irq_restore(flags); \
         \
         return ret; \
 }
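
For illustration, instantiating the patched ATOMIC_OP() macro above, e.g. through the usual ATOMIC_OP(add, +) definition that accompanies these macros in the generic header (the instantiation itself is outside the hunks shown here), expands to roughly the following; the generic cmpxchg helpers below then receive the same conversion:

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        flags = hard_local_irq_save();  /* hard-disable IRQs across the RMW */
        v->counter = v->counter + i;
        hard_local_irq_restore(flags);
}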
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/irqflags.h>
+#include <asm-generic/ipipe.h>

 extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
         __noreturn;
@@ -23,7 +24,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
         if (size == 8 && sizeof(unsigned long) != 8)
                 wrong_size_cmpxchg(ptr);

-        raw_local_irq_save(flags);
+        flags = hard_local_irq_save();
         switch (size) {
         case 1: prev = *(u8 *)ptr;
                 if (prev == old)
@@ -44,7 +45,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
         default:
                 wrong_size_cmpxchg(ptr);
         }
-        raw_local_irq_restore(flags);
+        hard_local_irq_restore(flags);
         return prev;
 }
@@ -57,11 +58,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
         u64 prev;
         unsigned long flags;

-        raw_local_irq_save(flags);
+        flags = hard_local_irq_save();
         prev = *(u64 *)ptr;
         if (prev == old)
                 *(u64 *)ptr = new;
-        raw_local_irq_restore(flags);
+        hard_local_irq_restore(flags);

         return prev;
 }
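
Again purely for illustration: once __cmpxchg_local_generic() runs with interrupts hard-disabled, a compare-and-swap retry loop built on top of it stays atomic even against out-of-band interrupts. The cmpxchg_local() wrapper and the counter_add() helper below are assumptions made for this sketch, not part of the commit.

#include <linux/atomic.h>

/* Sketch of a caller: retry until the compare-and-swap succeeds.  The
 * compare-and-swap itself cannot be torn, because the generic helper
 * above hard-disables IRQs around its read/compare/write sequence.
 */
static void counter_add(unsigned long *counter, unsigned long delta)
{
        unsigned long old, new;

        do {
                old = READ_ONCE(*counter);
                new = old + delta;
        } while (cmpxchg_local(counter, old, new) != old);
}

The remaining hunks extend the same idea to the spinlock-protected 64-bit atomics in the generic library, switching the lock to the pipeline-aware spinlock type, the intent being that the existing lock/irqsave sequences keep excluding interrupts at the CPU level.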
@@ -29,15 +29,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-        raw_spinlock_t lock;
+        ipipe_spinlock_t lock;
         char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
         [0 ... (NR_LOCKS - 1)] = {
-                .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+                .lock = IPIPE_SPIN_LOCK_UNLOCKED,
         },
 };

-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline ipipe_spinlock_t *lock_addr(const atomic64_t *v)
 {
         unsigned long addr = (unsigned long) v;
@@ -49,7 +49,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 long long atomic64_read(const atomic64_t *v)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        ipipe_spinlock_t *lock = lock_addr(v);
         long long val;

         raw_spin_lock_irqsave(lock, flags);
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(atomic64_read);
 void atomic64_set(atomic64_t *v, long long i)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        ipipe_spinlock_t *lock = lock_addr(v);

         raw_spin_lock_irqsave(lock, flags);
         v->counter = i;
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(atomic64_set);
 void atomic64_##op(long long a, atomic64_t *v) \
 { \
         unsigned long flags; \
-        raw_spinlock_t *lock = lock_addr(v); \
+        ipipe_spinlock_t *lock = lock_addr(v); \
         \
         raw_spin_lock_irqsave(lock, flags); \
         v->counter c_op a; \
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atomic64_##op);
 long long atomic64_##op##_return(long long a, atomic64_t *v) \
 { \
         unsigned long flags; \
-        raw_spinlock_t *lock = lock_addr(v); \
+        ipipe_spinlock_t *lock = lock_addr(v); \
         long long val; \
         \
         raw_spin_lock_irqsave(lock, flags); \
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(atomic64_##op##_return);
 long long atomic64_fetch_##op(long long a, atomic64_t *v) \
 { \
         unsigned long flags; \
-        raw_spinlock_t *lock = lock_addr(v); \
+        ipipe_spinlock_t *lock = lock_addr(v); \
         long long val; \
         \
         raw_spin_lock_irqsave(lock, flags); \
@@ -137,7 +137,7 @@ ATOMIC64_OPS(xor, ^=)
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        ipipe_spinlock_t *lock = lock_addr(v);
         long long val;

         raw_spin_lock_irqsave(lock, flags);
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        ipipe_spinlock_t *lock = lock_addr(v);
         long long val;

         raw_spin_lock_irqsave(lock, flags);
@@ -167,7 +167,7 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        ipipe_spinlock_t *lock = lock_addr(v);
         long long val;

         raw_spin_lock_irqsave(lock, flags);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(atomic64_xchg);
 long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        ipipe_spinlock_t *lock = lock_addr(v);
         long long val;

         raw_spin_lock_irqsave(lock, flags);