Commit 445c8951 authored by Thomas Gleixner

locking: Convert raw_spinlock to arch_spinlock

The raw_spin* namespace was taken by lockdep for the architecture-specific
implementations. raw_spin_* would be the ideal namespace for the
spinlocks which are not converted to sleeping locks in preempt-rt.

Linus suggested converting the raw_ locks to arch_ locks and cleaning up
the namespace instead of using an artificial name like core_spin,
atomic_spin or whatever.

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent 6b6b4792
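
To make the layering concrete, here is a minimal illustrative sketch of where the
renamed type sits; the field names and the wrapper layout below are simplified
assumptions for illustration, not the exact kernel definitions:

/* Lowest layer: the architecture's lock word, renamed by this patch
 * from raw_spinlock_t to arch_spinlock_t; the real layout differs
 * per architecture. */
typedef struct {
	volatile unsigned int lock;
} arch_spinlock_t;

/* The layer above it (sketched, simplified): freeing the raw_spin*
 * namespace lets the lock that stays spinning on preempt-rt be called
 * raw_spinlock_t instead of an artificial core_spin/atomic_spin name. */
typedef struct {
	arch_spinlock_t raw_lock;
	/* lockdep/debug fields omitted */
} raw_spinlock_t;
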
@@ -17,13 +17,13 @@
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void __raw_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void __raw_spin_lock(arch_spinlock_t * lock)
{
long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
: "m"(lock->lock) : "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
......
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
......
@@ -23,7 +23,7 @@
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
......
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
......
@@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
......
@@ -15,7 +15,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
......
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
@@ -22,24 +22,24 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
: "memory");
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
__raw_spin_lock(lock);
}
......
@@ -38,7 +38,7 @@
#define TICKET_BITS 15
#define TICKET_MASK ((1 << TICKET_BITS) - 1)
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket, serve;
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
}
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->lock);
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return 0;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket;
@@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
}
}
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
__raw_spin_lock(lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}
......
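
For context, the ia64 hunk above is a ticket lock: each acquirer atomically takes
the next ticket and spins until the "now serving" counter reaches it, which gives
FIFO ordering. A minimal generic sketch of the idea, using GCC atomic builtins
rather than the ia64 fetchadd instruction or its exact bit layout, might look like:

/* Generic ticket-lock sketch; illustrative only, not the ia64 encoding. */
typedef struct {
	volatile unsigned int next_ticket;	/* next number to hand out */
	volatile unsigned int now_serving;	/* ticket currently allowed in */
} ticket_lock_t;

static inline void ticket_lock(ticket_lock_t *lock)
{
	/* atomically take a ticket and advance the dispenser */
	unsigned int me = __sync_fetch_and_add(&lock->next_ticket, 1);

	while (lock->now_serving != me)
		;	/* the kernel would call cpu_relax() here */
}

static inline void ticket_unlock(ticket_lock_t *lock)
{
	__sync_synchronize();		/* order the critical section */
	lock->now_serving++;		/* admit the next waiter */
}
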
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
......
@@ -36,7 +36,7 @@
* __raw_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
int oldval;
unsigned long tmp1, tmp2;
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (oldval > 0);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp0, tmp1;
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
mb();
lock->slock = 1;
......
@@ -7,7 +7,7 @@
typedef struct {
volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
......
@@ -34,7 +34,7 @@
* becomes equal to the the initial value of the tail.
*/
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
@@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
#define __raw_spin_unlock_wait(x) \
while (__raw_spin_is_locked(x)) { cpu_relax(); }
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
@@ -53,7 +53,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
}
#define __raw_spin_is_contended __raw_spin_is_contended
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_llsc_mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
int tmp;
@@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
}
}
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
......
@@ -12,7 +12,7 @@ typedef struct {
* bits 15..28: ticket
*/
unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
......
@@ -27,18 +27,18 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
local_irq_save(f); \
__raw_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
__raw_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
......
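
The parisc atomic.h hunk above is the classic hashed-lock scheme for emulating
atomics on hardware that lacks them: the target address is hashed onto a small
array of locks so that unrelated atomic operations rarely contend on the same
lock. A user-space sketch of the same idea, with pthread mutexes standing in for
the arch spinlocks and illustrative sizes, could look like this:

#include <pthread.h>

#define HASH_SIZE	4	/* illustrative, mirrors ATOMIC_HASH_SIZE */
#define HASH_SHIFT	6	/* assume 64-byte cache lines */

static pthread_mutex_t atomic_hash[HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* hash an address onto one of the locks, like ATOMIC_HASH() above */
#define ATOMIC_HASH_LOCK(addr) \
	(&atomic_hash[((unsigned long)(addr) >> HASH_SHIFT) & (HASH_SIZE - 1)])

/* emulate an atomic add-and-return under the covering lock */
static int emulated_atomic_add_return(int i, int *v)
{
	pthread_mutex_t *m = ATOMIC_HASH_LOCK(v);
	int ret;

	pthread_mutex_lock(m);
	ret = (*v += i);
	pthread_mutex_unlock(m);
	return ret;
}
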
@@ -5,7 +5,7 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
@@ -15,7 +15,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void __raw_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int __raw_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
......
@@ -9,10 +9,10 @@ typedef struct {
volatile unsigned int lock[4];
# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
-} raw_spinlock_t;
+} arch_spinlock_t;
typedef struct {
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
volatile int counter;
} raw_rwlock_t;
......
@@ -12,7 +12,7 @@
#include <asm/atomic.h>
#ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
};
#endif
......
@@ -58,7 +58,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
......
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -73,7 +73,7 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
return tmp;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
return arch_spin_trylock(lock) == 0;
@@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
@@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
#define SHARED_PROCESSOR 0
#endif
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
@@ -120,7 +120,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
}
static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long flags_dis;
@@ -140,7 +140,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
SYNC_IO;
__asm__ __volatile__("# __raw_spin_unlock\n\t"
@@ -149,7 +149,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
}
#ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
......
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
......
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
return 1;
}
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static u64 timebase = 0;
void __cpuinit rtas_give_timebase(void)
......
@@ -25,7 +25,7 @@
#include <asm/smp.h>
#include <asm/firmware.h>
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
}
#endif
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (lock->slock) {
HMT_low();
......
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
}
#ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static unsigned long timebase;
static void __devinit pas_give_timebase(void)
......
@@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock,
do { while (__raw_spin_is_locked(lock)) \
_raw_spin_relax(lock); } while (0)
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void _raw_spin_lock_wait(arch_spinlock_t *);
+extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int _raw_spin_trylock_retry(arch_spinlock_t *);
+extern void _raw_spin_relax(arch_spinlock_t *lock);
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void __raw_spin_lock(arch_spinlock_t *lp)
{
int old;
@@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp)
_raw_spin_lock_wait(lp);
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
int old;
@@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
_raw_spin_lock_wait_flags(lp, flags);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int __raw_spin_trylock(arch_spinlock_t *lp)
{
int old;
@@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp)
return _raw_spin_trylock_retry(lp);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void __raw_spin_unlock(arch_spinlock_t *lp)
{
_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
......
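
In the s390 code above, the lock word holds the owner's CPU tag (0 means free) and
is taken with compare-and-swap; the out-of-line slow paths retry and can yield to
the owning CPU. A stripped-down sketch of that scheme, using GCC builtins in place
of the s390 cs instruction and omitting the retry/yield logic, is roughly:

typedef struct {
	volatile unsigned int owner_cpu;	/* 0 = free, otherwise owner tag */
} cas_lock_t;

/* s390 uses ~smp_processor_id() as the tag so that CPU 0 is non-zero;
 * here the caller simply passes any non-zero tag. */
static inline void cas_lock(cas_lock_t *lp, unsigned int my_tag)
{
	/* swing the word from 0 (free) to our tag; spin until it works */
	while (__sync_val_compare_and_swap(&lp->owner_cpu, 0, my_tag) != 0)
		;	/* the real code counts retries and may yield to the owner */
}

static inline void cas_unlock(cas_lock_t *lp)
{
	/* only the owner unlocks, so re-reading owner_cpu here is safe */
	__sync_val_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
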
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
......
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
_raw_yield();
}
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void _raw_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
}
EXPORT_SYMBOL(_raw_spin_lock_wait);
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
}
EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int _raw_spin_trylock_retry(arch_spinlock_t *lp)
{
unsigned int cpu = ~smp_processor_id();
int count;
@@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp)
}
EXPORT_SYMBOL(_raw_spin_trylock_retry);
-void _raw_spin_relax(raw_spinlock_t *lock)
+void _raw_spin_relax(arch_spinlock_t *lock)
{
unsigned int cpu = lock->owner_cpu;
if (cpu != 0)
......
@@ -34,7 +34,7 @@
*
* We make no fairness assumptions. They have a cost.
*/
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
unsigned long oldval;
@@ -54,7 +54,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -67,7 +67,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, oldval;
......
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
......
@@ -15,7 +15,7 @@
#define __raw_spin_unlock_wait(lock) \
do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "g2", "memory", "cc");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
......
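
The sparc32 lock above is a plain test-and-set byte lock built on ldstub: trylock
atomically sets the byte and succeeds if the old value was zero, while the lock
loop spins on ordinary loads between attempts. A generic sketch of the same idea
with GCC builtins instead of the sparc assembly:

typedef struct {
	volatile unsigned char lock;	/* 0 = free, non-zero = held */
} tas_lock_t;

static inline int tas_trylock(tas_lock_t *lp)
{
	/* like ldstub: set the byte, report whether it was previously clear */
	return __sync_lock_test_and_set(&lp->lock, 1) == 0;
}

static inline void tas_lock(tas_lock_t *lp)
{
	while (!tas_trylock(lp))
		while (lp->lock)
			;	/* spin on plain reads to limit bus traffic */
}

static inline void tas_unlock(tas_lock_t *lp)
{
	__sync_lock_release(&lp->lock);	/* store 0 with release semantics */
}
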
@@ -27,7 +27,7 @@
do { rmb(); \
} while((lp)->lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0UL);