Commit fd0dca86 authored by Philippe Gerum

locking: ipipe: add pipeline-aware locks

parent 0640b5d5
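
Note on the mechanism: the generic entry points below (write_lock()/read_lock(), the raw_spin_*() family) are rerouted through PICK_RWOP()/PICK_SPINOP()/PICK_SPINOP_RET() helpers, while the original implementations are kept under __real_* names. Those helpers come from the new <linux/ipipe_lock.h> pulled in by this commit, which is not part of this excerpt; the sketch below is only an illustration of the compile-time dispatch they perform, with the I-pipe-side names assumed rather than quoted from that header.

/*
 * Illustrative sketch only -- not the actual <linux/ipipe_lock.h> contents.
 * Dispatch on the static type of the lock argument, so a single call site
 * serves both regular kernel locks and I-pipe "hard" locks.
 */
#define TYPE_EQUAL(lock, type) \
	__builtin_types_compatible_p(typeof(lock), type *)

#define PICK_SPINOP(op, lock)						\
do {									\
	if (TYPE_EQUAL(lock, __ipipe_spinlock_t))			\
		__ipipe_spin##op(ipipe_spinlock(lock));	  /* hard lock: masks the CPU for real */ \
	else if (TYPE_EQUAL(lock, raw_spinlock_t))			\
		__real_raw_spin##op(raw_spinlock(lock)); /* regular kernel lock */ \
	else								\
		__bad_lock_type();	/* neither type: fail the build */ \
} while (0)

The real helpers also accept a plain spinlock_t, which is why the spin_*() wrappers further down can pass the lock straight through instead of taking &lock->rlock or going via spinlock_check().
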
@@ -61,8 +61,8 @@ do { \
 #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
 #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
-#define write_lock(lock) _raw_write_lock(lock)
-#define read_lock(lock) _raw_read_lock(lock)
+#define write_lock(lock) PICK_RWOP(_write_lock, lock)
+#define read_lock(lock) PICK_RWOP(_read_lock, lock)
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -96,8 +96,8 @@ do { \
 #define read_lock_bh(lock) _raw_read_lock_bh(lock)
 #define write_lock_irq(lock) _raw_write_lock_irq(lock)
 #define write_lock_bh(lock) _raw_write_lock_bh(lock)
-#define read_unlock(lock) _raw_read_unlock(lock)
-#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock(lock) PICK_RWOP(_read_unlock, lock)
+#define write_unlock(lock) PICK_RWOP(_write_unlock, lock)
 #define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
 #define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
......
@@ -141,7 +141,9 @@ static inline int __raw_write_trylock(rwlock_t *lock)
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \
+	defined(CONFIG_DEBUG_LOCK_ALLOC) || \
+	defined(CONFIG_IPIPE)
 static inline void __raw_read_lock(rwlock_t *lock)
 {
......
@@ -90,10 +90,12 @@
 # include <linux/spinlock_up.h>
 #endif
+#include <linux/ipipe_lock.h>
 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 				 struct lock_class_key *key);
-# define raw_spin_lock_init(lock) \
+# define __real_raw_spin_lock_init(lock) \
 do { \
 	static struct lock_class_key __key; \
 	\
@@ -101,11 +103,14 @@ do { \
 } while (0)
 #else
-# define raw_spin_lock_init(lock) \
+# define __real_raw_spin_lock_init(lock) \
 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
+#define raw_spin_lock_init(lock) PICK_SPINOP(_lock_init, lock)
-#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
+#define __real_raw_spin_is_locked(lock) \
+	arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) PICK_SPINOP_RET(_is_locked, lock, int)
 #ifdef CONFIG_GENERIC_LOCKBREAK
 #define raw_spin_is_contended(lock) ((lock)->break_lock)
@@ -191,9 +196,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  * various methods are defined as nops in the case they are not
  * required.
  */
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define __real_raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock) PICK_SPINOP_RET(_trylock, lock, int)
-#define raw_spin_lock(lock) _raw_spin_lock(lock)
+#define __real_raw_spin_lock(lock) _raw_spin_lock(lock)
+#define raw_spin_lock(lock) PICK_SPINOP(_lock, lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define raw_spin_lock_nested(lock, subclass) \
@@ -217,7 +224,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define raw_spin_lock_irqsave(lock, flags) \
+#define __real_raw_spin_lock_irqsave(lock, flags) \
 do { \
 	typecheck(unsigned long, flags); \
 	flags = _raw_spin_lock_irqsave(lock); \
@@ -239,7 +246,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #else
-#define raw_spin_lock_irqsave(lock, flags) \
+#define __real_raw_spin_lock_irqsave(lock, flags) \
 do { \
 	typecheck(unsigned long, flags); \
 	_raw_spin_lock_irqsave(lock, flags); \
@@ -250,34 +257,46 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #endif
-#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_irqsave(lock, flags) \
+	PICK_SPINLOCK_IRQSAVE(lock, flags)
+#define __real_raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_irq(lock) PICK_SPINOP(_lock_irq, lock)
 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
-#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
-#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
+#define __real_raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock(lock) PICK_SPINOP(_unlock, lock)
+#define __real_raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
+#define raw_spin_unlock_irq(lock) PICK_SPINOP(_unlock_irq, lock)
-#define raw_spin_unlock_irqrestore(lock, flags) \
+#define __real_raw_spin_unlock_irqrestore(lock, flags) \
 do { \
 	typecheck(unsigned long, flags); \
 	_raw_spin_unlock_irqrestore(lock, flags); \
 } while (0)
+#define raw_spin_unlock_irqrestore(lock, flags) \
+	PICK_SPINUNLOCK_IRQRESTORE(lock, flags)
 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
 #define raw_spin_trylock_bh(lock) \
 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
-#define raw_spin_trylock_irq(lock) \
+#define __real_raw_spin_trylock_irq(lock) \
 ({ \
 	local_irq_disable(); \
-	raw_spin_trylock(lock) ? \
+	__real_raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_enable(); 0; }); \
 })
+#define raw_spin_trylock_irq(lock) PICK_SPINTRYLOCK_IRQ(lock)
-#define raw_spin_trylock_irqsave(lock, flags) \
+#define __real_raw_spin_trylock_irqsave(lock, flags) \
 ({ \
 	local_irq_save(flags); \
 	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_restore(flags); 0; }); \
 })
+#define raw_spin_trylock_irqsave(lock, flags) \
+	PICK_SPINTRYLOCK_IRQSAVE(lock, flags)
 /**
  * raw_spin_can_lock - would raw_spin_trylock() succeed?
@@ -308,24 +327,17 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 #define spin_lock_init(_lock) \
 do { \
 	spinlock_check(_lock); \
-	raw_spin_lock_init(&(_lock)->rlock); \
+	raw_spin_lock_init(_lock); \
 } while (0)
-static __always_inline void spin_lock(spinlock_t *lock)
-{
-	raw_spin_lock(&lock->rlock);
-}
+#define spin_lock(lock) raw_spin_lock(lock)
 static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
 	raw_spin_lock_bh(&lock->rlock);
 }
-static __always_inline int spin_trylock(spinlock_t *lock)
-{
-	return raw_spin_trylock(&lock->rlock);
-}
+#define spin_trylock(lock) raw_spin_trylock(lock)
 #define spin_lock_nested(lock, subclass) \
 do { \
@@ -337,14 +349,11 @@ do { \
 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
 } while (0)
-static __always_inline void spin_lock_irq(spinlock_t *lock)
-{
-	raw_spin_lock_irq(&lock->rlock);
-}
+#define spin_lock_irq(lock) raw_spin_lock_irq(lock)
 #define spin_lock_irqsave(lock, flags) \
 do { \
-	raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+	raw_spin_lock_irqsave(lock, flags); \
 } while (0)
 #define spin_lock_irqsave_nested(lock, flags, subclass) \
@@ -352,39 +361,28 @@ do { \
 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
-static __always_inline void spin_unlock(spinlock_t *lock)
-{
-	raw_spin_unlock(&lock->rlock);
-}
+#define spin_unlock(lock) raw_spin_unlock(lock)
 static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
 	raw_spin_unlock_bh(&lock->rlock);
 }
-static __always_inline void spin_unlock_irq(spinlock_t *lock)
-{
-	raw_spin_unlock_irq(&lock->rlock);
-}
+#define spin_unlock_irq(lock) raw_spin_unlock_irq(lock)
-static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-{
-	raw_spin_unlock_irqrestore(&lock->rlock, flags);
-}
+#define spin_unlock_irqrestore(lock, flags) \
+	raw_spin_unlock_irqrestore(lock, flags)
 static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
 	return raw_spin_trylock_bh(&lock->rlock);
 }
-static __always_inline int spin_trylock_irq(spinlock_t *lock)
-{
-	return raw_spin_trylock_irq(&lock->rlock);
-}
+#define spin_trylock_irq(lock) raw_spin_trylock_irq(lock)
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
-	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+	raw_spin_trylock_irqsave(lock, flags); \
 })
 static __always_inline int spin_is_locked(spinlock_t *lock)
......
@@ -99,7 +99,9 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \
+	defined(CONFIG_DEBUG_LOCK_ALLOC) || \
+	defined(CONFIG_IPIPE)
 static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 {
@@ -113,7 +115,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 	 * do_raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_IPIPE)
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 #else
 	do_raw_spin_lock_flags(lock, &flags);
......
@@ -56,16 +56,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	lock->slock = 1;
 }
-/*
- * Read-write spinlocks. No debug version.
- */
-#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock) ((void)(lock), 0)
 /* for sched/core.c and kernel_lock.c: */
@@ -75,6 +65,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 # define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
+#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
 #define arch_spin_is_contended(lock) (((void)(lock), 0))
 #define arch_read_can_lock(lock) (((void)(lock), 1))
......
@@ -2887,6 +2887,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 __visible void trace_hardirqs_on_caller(unsigned long ip)
 {
+	if (!ipipe_root_p)
+		return;
 	time_hardirqs_on(CALLER_ADDR0, ip);
 	if (unlikely(!debug_locks || current->lockdep_recursion))
@@ -2907,7 +2910,7 @@ __visible void trace_hardirqs_on_caller(unsigned long ip)
 	 * already enabled, yet we find the hardware thinks they are in fact
 	 * enabled.. someone messed up their IRQ state tracing.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
 		return;
 	/*
@@ -2940,7 +2943,12 @@ EXPORT_SYMBOL(trace_hardirqs_on);
  */
 __visible void trace_hardirqs_off_caller(unsigned long ip)
 {
-	struct task_struct *curr = current;
+	struct task_struct *curr;
+	if (!ipipe_root_p)
+		return;
+	curr = current;
 	time_hardirqs_off(CALLER_ADDR0, ip);
@@ -2951,7 +2959,7 @@ __visible void trace_hardirqs_off_caller(unsigned long ip)
 	 * So we're supposed to get called after you mask local IRQs, but for
 	 * some reason the hardware doesn't quite think you did a proper job.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
 		return;
 	if (curr->hardirqs_enabled) {
@@ -2987,7 +2995,7 @@ void trace_softirqs_on(unsigned long ip)
 	 * We fancy IRQs being disabled here, see softirq.c, avoids
 	 * funny state and nesting things.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
 		return;
 	if (curr->softirqs_enabled) {
@@ -3026,7 +3034,7 @@ void trace_softirqs_off(unsigned long ip)
 	/*
 	 * We fancy IRQs being disabled here, see softirq.c
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
 		return;
 	if (curr->softirqs_enabled) {
......
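
Note on the lockdep hunks above: lockdep only tracks the root (Linux) domain's virtualized IRQ state, so the tracing hooks bail out early when a head domain is current (ipipe_root_p is false), and the "IRQs must be off here" sanity checks accept either the virtual flag or the real CPU flag being masked. A hedged sketch of that distinction, assuming the usual I-pipe primitives (ipipe_root_p and hard_irqs_disabled(), neither defined in this diff); the helper name is hypothetical:

/*
 * Sketch only: under the pipeline, irqs_disabled() reflects the root
 * domain's virtual interrupt flag, while hard_irqs_disabled() reads the
 * real CPU flag, which head-domain code may mask on its own.
 */
static inline bool lockdep_irqs_off(void)
{
	return irqs_disabled() || hard_irqs_disabled();
}
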
@@ -27,7 +27,9 @@
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \
+	defined(CONFIG_DEBUG_LOCK_ALLOC) || \
+	defined(CONFIG_IPIPE)
 /*
  * The __lock_function inlines are taken from
  * include/linux/spinlock_api_smp.h
......
@@ -15,6 +15,7 @@
 #include <linux/wait.h>
 #include <linux/vt_kern.h>
 #include <linux/console.h>
+#include <linux/ipipe_trace.h>
 void __attribute__((weak)) bust_spinlocks(int yes)
@@ -26,6 +27,7 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 		unblank_screen();
 #endif
 		console_unblank();
+		ipipe_trace_panic_dump();
 		if (--oops_in_progress == 0)
 			wake_up_klogd();
 	}
......
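
Usage sketch of what the series is after (assumption: IPIPE_DEFINE_SPINLOCK() and the hard-lock semantics come from the new <linux/ipipe_lock.h>, which this excerpt does not show): the same generic call site works for both lock flavours, with the dispatch resolved at compile time by the PICK_* helpers.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(soft_lock);		/* ordinary Linux spinlock */
static IPIPE_DEFINE_SPINLOCK(hard_lock);	/* pipeline-aware hard lock (assumed API) */

static void example(void)
{
	unsigned long flags;

	/* Resolves to the __real_* path: virtual IRQ masking only. */
	spin_lock_irqsave(&soft_lock, flags);
	spin_unlock_irqrestore(&soft_lock, flags);

	/* Same macros, dispatched to the I-pipe variant, which disables
	 * interrupts in the CPU for real so head-domain code stays safe. */
	spin_lock_irqsave(&hard_lock, flags);
	spin_unlock_irqrestore(&hard_lock, flags);
}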