Commit 0199c4e6 authored by Thomas Gleixner

locking: Convert __raw_spin* functions to arch_spin*

Name space cleanup. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent edc35bd7
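
The conversion below is purely mechanical: each arch-level __raw_spin* hook (and the _raw_*_relax helpers) moves to the arch_spin*/arch_*_relax namespace, matching the arch_spinlock_t type the hooks already take. For orientation, here is a minimal userspace sketch of the renamed interface, modeled loosely on the test-and-set variants (alpha, cris) in the diff; it uses C11 atomics in place of the per-arch assembly and is illustrative only, not any architecture's actual code:

#include <stdatomic.h>

typedef struct {
	atomic_flag lock;
} arch_spinlock_t;

#define ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_FLAG_INIT }

/* Returns 1 on success, 0 if the lock is already held. */
static inline int arch_spin_trylock(arch_spinlock_t *l)
{
	return !atomic_flag_test_and_set_explicit(&l->lock,
						  memory_order_acquire);
}

static inline void arch_spin_lock(arch_spinlock_t *l)
{
	while (!arch_spin_trylock(l))
		;	/* in-kernel code would call cpu_relax() here */
}

static inline void arch_spin_unlock(arch_spinlock_t *l)
{
	atomic_flag_clear_explicit(&l->lock, memory_order_release);
}

/* As in the diff, the _flags hook defaults to the plain lock. */
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

Only the names change; the function bodies are untouched, which is what lets the commit claim no functional change.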
@@ -12,18 +12,18 @@
* We make no fairness assumptions. They have a cost.
*/
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_is_locked(x) ((x)->lock != 0)
#define __raw_spin_unlock_wait(x) \
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
static inline void __raw_spin_unlock(arch_spinlock_t * lock)
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
static inline void __raw_spin_lock(arch_spinlock_t * lock)
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t * lock)
: "m"(lock->lock) : "memory");
}
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
@@ -169,8 +169,8 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* _ALPHA_SPINLOCK_H */
@@ -17,13 +17,13 @@
* Locked value: 1
*/
#define __raw_spin_is_locked(x) ((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
smp_mb();
}
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
}
}
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
@@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
@@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}
static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}
static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
while (arch_spin_is_locked(lock))
cpu_relax();
}
@@ -92,9 +92,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
__raw_write_unlock_asm(&rw->lock);
}
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
static inline int __raw_spin_is_locked(arch_spinlock_t *x)
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
: "memory");
}
static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
while (arch_spin_is_locked(lock))
cpu_relax();
}
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
__raw_spin_lock(lock);
arch_spin_lock(lock);
}
/*
@@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(raw_rwlock_t *x)
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
__raw_spin_lock(&rw->slock);
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
__raw_spin_unlock(&rw->slock);
arch_spin_unlock(&rw->slock);
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
__raw_spin_lock(&rw->slock);
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
__raw_spin_unlock(&rw->slock);
arch_spin_unlock(&rw->slock);
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
__raw_spin_lock(&rw->slock);
arch_spin_lock(&rw->slock);
rw->lock++;
__raw_spin_unlock(&rw->slock);
arch_spin_unlock(&rw->slock);
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
__raw_spin_lock(&rw->slock);
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
__raw_spin_unlock(&rw->slock);
arch_spin_unlock(&rw->slock);
}
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
int ret = 0;
__raw_spin_lock(&rw->slock);
arch_spin_lock(&rw->slock);
if (rw->lock != 0) {
rw->lock--;
ret = 1;
}
__raw_spin_unlock(&rw->slock);
arch_spin_unlock(&rw->slock);
return ret;
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
int ret = 0;
__raw_spin_lock(&rw->slock);
arch_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
rw->lock = 0;
ret = 1;
}
__raw_spin_unlock(&rw->slock);
arch_spin_unlock(&rw->slock);
return 1;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_ARCH_SPINLOCK_H */
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
* @addr: Address to start counting from
*
* Similarly to clear_bit_unlock, the implementation uses a store
* with release semantics. See also __raw_spin_unlock().
* with release semantics. See also arch_spin_unlock().
*/
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
@@ -17,7 +17,7 @@
#include <asm/intrinsics.h>
#include <asm/system.h>
#define __raw_spin_lock_init(x) ((x)->lock = 0)
#define arch_spin_lock_init(x) ((x)->lock = 0)
/*
* Ticket locks are conceptually two parts, one indicating the current head of
@@ -103,39 +103,39 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended
#define arch_spin_is_contended arch_spin_is_contended
static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
__raw_spin_lock(lock);
arch_spin_lock(lock);
}
static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_IA64_SPINLOCK_H */
@@ -24,19 +24,19 @@
* We make no fairness assumptions. They have a cost.
*/
#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while (arch_spin_is_locked(x))
/**
* __raw_spin_trylock - Try spin lock and return a result
* arch_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
*
* __raw_spin_trylock() tries to get the lock and returns a result.
* arch_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
int oldval;
unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
* }
*/
__asm__ __volatile__ (
"# __raw_spin_trylock \n\t"
"# arch_spin_trylock \n\t"
"ldi %1, #0; \n\t"
"mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
return (oldval > 0);
}
static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp0, tmp1;
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
* }
*/
__asm__ __volatile__ (
"# __raw_spin_lock \n\t"
"# arch_spin_lock \n\t"
".fillinsn \n"
"1: \n\t"
"mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
);
}
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
mb();
lock->slock = 1;
@@ -319,8 +319,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_M32R_SPINLOCK_H */
@@ -34,33 +34,33 @@
* becomes equal to the initial value of the tail.
*/
static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return ((counters >> 14) ^ counters) & 0x1fff;
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
while (__raw_spin_is_locked(x)) { cpu_relax(); }
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
while (arch_spin_is_locked(x)) { cpu_relax(); }
static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return (((counters >> 14) - counters) & 0x1fff) > 1;
}
#define __raw_spin_is_contended __raw_spin_is_contended
#define arch_spin_is_contended arch_spin_is_contended
static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
" .set push # __raw_spin_lock \n"
" .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
[my_ticket] "=&r" (my_ticket));
} else {
__asm__ __volatile__ (
" .set push # __raw_spin_lock \n"
" .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
smp_llsc_mb();
}
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
int tmp;
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
" # __raw_spin_unlock \n"
" # arch_spin_unlock \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addiu %[ticket], %[ticket], 1 \n"
" ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
[ticket] "=&r" (tmp));
} else {
__asm__ __volatile__ (
" .set push # __raw_spin_unlock \n"
" .set push # arch_spin_unlock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
}
}
static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
" .set push # __raw_spin_trylock \n"
" .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
[now_serving] "=&r" (tmp3));
} else {
__asm__ __volatile__ (
" .set push # __raw_spin_trylock \n"
" .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -483,8 +483,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_SPINLOCK_H */
@@ -34,12 +34,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
#define _atomic_spin_lock_irqsave(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
__raw_spin_lock(s); \
arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
__raw_spin_unlock(s); \
arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
@@ -5,17 +5,17 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
static inline int __raw_spin_is_locked(arch_spinlock_t *x)
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while (arch_spin_is_locked(x))
static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
mb();
}
static inline void __raw_spin_unlock(arch_spinlock_t *x)
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *x)
mb();
}
static inline int __raw_spin_trylock(arch_spinlock_t *x)
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
@@ -73,9 +73,9 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
arch_spin_lock_flags(&rw->lock, flags);
rw->counter++;
__raw_spin_unlock(&rw->lock);
arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
@@ -85,9 +85,9 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
arch_spin_lock_flags(&rw->lock, flags);
rw->counter--;
__raw_spin_unlock(&rw->lock);
arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
@@ -98,9 +98,9 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
unsigned long flags;
retry:
local_irq_save(flags);
if (__raw_spin_trylock(&rw->lock)) {
if (arch_spin_trylock(&rw->lock)) {
rw->counter++;
__raw_spin_unlock(&rw->lock);
arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
return 0;
/* Wait until we have a realistic chance at the lock */
while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
@@ -124,10 +124,10 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
unsigned long flags;
retry:
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
arch_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
__raw_spin_unlock(&rw->lock);
arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
@@ -144,7 +144,7 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
rw->counter = 0;
__raw_spin_unlock(&rw->lock);
arch_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
@@ -155,13 +155,13 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
int result = 0;
local_irq_save(flags);
if (__raw_spin_trylock(&rw->lock)) {
if (arch_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
__raw_spin_unlock(&rw->lock);
arch_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
@@ -190,8 +190,8 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
@@ -28,7 +28,7 @@
#include <asm/asm-compat.h>
#include <asm/synch.h>
#define __raw_spin_is_locked(x) ((x)->slock != 0)
#define arch_spin_is_locked(x) ((x)->slock != 0)
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
return tmp;
}
static inline int __raw_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
return arch_spin_trylock(lock) == 0;
return __arch_spin_trylock(lock) == 0;
}
/*
@@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock);
#define SHARED_PROCESSOR 0
#endif
static inline void __raw_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
if (likely(arch_spin_trylock(lock) == 0))
if (likely(__arch_spin_trylock(lock) == 0))
break;
do {
HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
}
static inline
void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{