Commit cce4d9da authored by Philippe Gerum

ipipe: use raw_spinlock* API with IRQ pipeline locks

Mixing PREEMPT_RT_FULL and IPIPE requires freeing the regular
spinlock* API from I-pipe virtualization code, which is useless for
sleeping locks and conflicts with the spinlock_rt definitions.

Since I-pipe locks are basically raw spinlocks with hard IRQ
management, switch all ipipe_spinlock_t users to the raw_spinlock*
API.
parent 0ce3878f
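For orientation, the caller-side pattern this commit converts everything to could be sketched as below (example_lock and the surrounding function are hypothetical, not taken from the patch): pipeline locks are still declared through IPIPE_DEFINE_SPINLOCK(), but they are now acquired and released only through the raw_spin_lock* entry points, which the PICK_* helpers in ipipe_lock.h route to the hard-IRQ implementation whenever the argument is an __ipipe_spinlock_t.

/* Hedged sketch, not from the patch: a hypothetical pipeline lock and the
 * locking style all ipipe_spinlock_t users are switched to. */
IPIPE_DEFINE_SPINLOCK(example_lock);

static void example_touch_pipeline_state(void)
{
        unsigned long flags;

        /* was: spin_lock_irqsave(&example_lock, flags); */
        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... manipulate I-pipe state with hard IRQs off ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
}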
@@ -26,22 +26,87 @@ typedef struct {
        arch_spinlock_t arch_lock;
} __ipipe_spinlock_t;

#define ipipe_spinlock(lock)    ((__ipipe_spinlock_t *)(lock))
#define ipipe_spinlock_p(lock) \
        __builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t *) || \
        __builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t [])

#define std_spinlock_raw(lock)  ((raw_spinlock_t *)(lock))
#define std_spinlock_raw_p(lock) \
        __builtin_types_compatible_p(typeof(lock), raw_spinlock_t *) || \
        __builtin_types_compatible_p(typeof(lock), raw_spinlock_t [])

#ifdef CONFIG_PREEMPT_RT_FULL

#define PICK_SPINLOCK_IRQSAVE(lock, flags) \
        do { \
                if (ipipe_spinlock_p(lock)) \
                        (flags) = __ipipe_spin_lock_irqsave(ipipe_spinlock(lock)); \
                else if (std_spinlock_raw_p(lock)) \
                        __real_raw_spin_lock_irqsave(std_spinlock_raw(lock), flags); \
                else __bad_lock_type(); \
        } while (0)

#define PICK_SPINTRYLOCK_IRQSAVE(lock, flags) \
        ({ \
                int __ret__; \
                if (ipipe_spinlock_p(lock)) \
                        __ret__ = __ipipe_spin_trylock_irqsave(ipipe_spinlock(lock), &(flags)); \
                else if (std_spinlock_raw_p(lock)) \
                        __ret__ = __real_raw_spin_trylock_irqsave(std_spinlock_raw(lock), flags); \
                else __bad_lock_type(); \
                __ret__; \
        })

#define PICK_SPINTRYLOCK_IRQ(lock) \
        ({ \
                int __ret__; \
                if (ipipe_spinlock_p(lock)) \
                        __ret__ = __ipipe_spin_trylock_irq(ipipe_spinlock(lock)); \
                else if (std_spinlock_raw_p(lock)) \
                        __ret__ = __real_raw_spin_trylock_irq(std_spinlock_raw(lock)); \
                else __bad_lock_type(); \
                __ret__; \
        })

#define PICK_SPINUNLOCK_IRQRESTORE(lock, flags) \
        do { \
                if (ipipe_spinlock_p(lock)) \
                        __ipipe_spin_unlock_irqrestore(ipipe_spinlock(lock), flags); \
                else if (std_spinlock_raw_p(lock)) { \
                        __ipipe_spin_unlock_debug(flags); \
                        __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \
                } else __bad_lock_type(); \
        } while (0)

#define PICK_SPINOP(op, lock) \
        ({ \
                if (ipipe_spinlock_p(lock)) \
                        arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
                else if (std_spinlock_raw_p(lock)) \
                        __real_raw_spin##op(std_spinlock_raw(lock)); \
                else __bad_lock_type(); \
                (void)0; \
        })

#define PICK_SPINOP_RET(op, lock, type) \
        ({ \
                type __ret__; \
                if (ipipe_spinlock_p(lock)) \
                        __ret__ = arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
                else if (std_spinlock_raw_p(lock)) \
                        __ret__ = __real_raw_spin##op(std_spinlock_raw(lock)); \
                else { __ret__ = -1; __bad_lock_type(); } \
                __ret__; \
        })

#else /* !CONFIG_PREEMPT_RT_FULL */

#define std_spinlock(lock)      ((spinlock_t *)(lock))
#define std_spinlock_p(lock) \
        __builtin_types_compatible_p(typeof(lock), spinlock_t *) || \
        __builtin_types_compatible_p(typeof(lock), spinlock_t [])

#define ipipe_spinlock(lock)    ((__ipipe_spinlock_t *)(lock))
#define std_spinlock_raw(lock)  ((raw_spinlock_t *)(lock))
#define std_spinlock(lock)      ((spinlock_t *)(lock))

#define PICK_SPINLOCK_IRQSAVE(lock, flags) \
        do { \
                if (ipipe_spinlock_p(lock)) \
@@ -117,6 +182,8 @@ typedef struct {
                __ret__; \
        })

#endif /* !CONFIG_PREEMPT_RT_FULL */

#define arch_spin_lock_init(lock) \
        do { \
                IPIPE_DEFINE_SPINLOCK(__lock__); \
......
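The PICK_* helpers above dispatch on the lock's C type with GCC's __builtin_types_compatible_p(), which evaluates to a constant at compile time. The standalone user-space sketch below (all names hypothetical, unrelated to the kernel sources) demonstrates the same technique; built with optimization enabled (for example gcc -O2), the constant-folded dead branches disappear, so the deliberately undefined __bad_lock_type() only causes a link error when a call site passes an unsupported lock type.

/*
 * Standalone sketch of the compile-time dispatch used by the PICK_*
 * macros above.  All names here are hypothetical.  Build with
 * optimization (e.g. "gcc -O2 pick.c") so the dead branches, including
 * the undefined __bad_lock_type() reference, are eliminated.
 */
#include <stdio.h>

struct hard_lock { int dummy; };        /* stands in for __ipipe_spinlock_t */
struct soft_lock { int dummy; };        /* stands in for raw_spinlock_t */

static void hard_lock_acquire(struct hard_lock *l) { (void)l; puts("hard path"); }
static void soft_lock_acquire(struct soft_lock *l) { (void)l; puts("soft path"); }

extern void __bad_lock_type(void);      /* intentionally left undefined */

#define lock_is(lock, type) \
        (__builtin_types_compatible_p(typeof(lock), type *) || \
         __builtin_types_compatible_p(typeof(lock), type []))

#define pick_lock(lock) \
        do { \
                if (lock_is(lock, struct hard_lock)) \
                        hard_lock_acquire((struct hard_lock *)(lock)); \
                else if (lock_is(lock, struct soft_lock)) \
                        soft_lock_acquire((struct soft_lock *)(lock)); \
                else \
                        __bad_lock_type(); \
        } while (0)

int main(void)
{
        struct hard_lock h;
        struct soft_lock s;

        pick_lock(&h);  /* prints "hard path" */
        pick_lock(&s);  /* prints "soft path" */
        return 0;
}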
@@ -822,7 +822,7 @@ unsigned int ipipe_alloc_virq(void)
        unsigned long flags, irq = 0;
        int ipos;

        spin_lock_irqsave(&__ipipe_lock, flags);
        raw_spin_lock_irqsave(&__ipipe_lock, flags);

        if (__ipipe_virtual_irq_map != ~0) {
                ipos = ffz(__ipipe_virtual_irq_map);
@@ -830,7 +830,7 @@ unsigned int ipipe_alloc_virq(void)
                irq = ipos + IPIPE_VIRQ_BASE;
        }

        spin_unlock_irqrestore(&__ipipe_lock, flags);
        raw_spin_unlock_irqrestore(&__ipipe_lock, flags);

        return irq;
}
@@ -860,7 +860,7 @@ int ipipe_request_irq(struct ipipe_domain *ipd,
            (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq)))
                return -EINVAL;

        spin_lock_irqsave(&__ipipe_lock, flags);
        raw_spin_lock_irqsave(&__ipipe_lock, flags);

        if (ipd->irqs[irq].handler) {
                ret = -EBUSY;
@@ -878,7 +878,7 @@ int ipipe_request_irq(struct ipipe_domain *ipd,
        if (irq < IPIPE_NR_ROOT_IRQS)
                __ipipe_enable_irqdesc(ipd, irq);
out:
        spin_unlock_irqrestore(&__ipipe_lock, flags);
        raw_spin_unlock_irqrestore(&__ipipe_lock, flags);

        return ret;
}
@@ -893,7 +893,7 @@ void ipipe_free_irq(struct ipipe_domain *ipd,
        ipipe_root_only();
#endif /* CONFIG_IPIPE_LEGACY */

        spin_lock_irqsave(&__ipipe_lock, flags);
        raw_spin_lock_irqsave(&__ipipe_lock, flags);

        if (ipd->irqs[irq].handler == NULL)
                goto out;
@@ -906,7 +906,7 @@ void ipipe_free_irq(struct ipipe_domain *ipd,
        if (irq < IPIPE_NR_ROOT_IRQS)
                __ipipe_disable_irqdesc(ipd, irq);
out:
        spin_unlock_irqrestore(&__ipipe_lock, flags);
        raw_spin_unlock_irqrestore(&__ipipe_lock, flags);
}
EXPORT_SYMBOL_GPL(ipipe_free_irq);
@@ -1549,7 +1549,7 @@ void __ipipe_do_critical_sync(unsigned int irq, void *cookie)
         * another CPU. Enter a spinning wait until he releases the
         * global lock.
         */
        spin_lock(&__ipipe_cpu_barrier);
        raw_spin_lock(&__ipipe_cpu_barrier);

        /* Got it. Now get out. */
@@ -1559,7 +1559,7 @@ void __ipipe_do_critical_sync(unsigned int irq, void *cookie)
        cpumask_set_cpu(cpu, &__ipipe_cpu_pass_map);

        spin_unlock(&__ipipe_cpu_barrier);
        raw_spin_unlock(&__ipipe_cpu_barrier);

        cpumask_clear_cpu(cpu, &__ipipe_cpu_sync_map);
}
@@ -1592,7 +1592,7 @@ unsigned long ipipe_critical_enter(void (*syncfn)(void))
        }
restart:
        online = *cpu_online_mask;
        spin_lock(&__ipipe_cpu_barrier);
        raw_spin_lock(&__ipipe_cpu_barrier);

        __ipipe_cpu_sync = syncfn;
@@ -1618,7 +1618,7 @@ restart:
         */
        __ipipe_cpu_sync = NULL;

        spin_unlock(&__ipipe_cpu_barrier);
        raw_spin_unlock(&__ipipe_cpu_barrier);

        /*
         * Ensure all CPUs consumed the IPI to avoid
         * running __ipipe_cpu_sync prematurely. This
@@ -1648,7 +1648,7 @@ void ipipe_critical_exit(unsigned long flags)
#ifdef CONFIG_SMP
        if (atomic_dec_and_test(&__ipipe_critical_count)) {
                spin_unlock(&__ipipe_cpu_barrier);
                raw_spin_unlock(&__ipipe_cpu_barrier);

                while (!cpumask_empty(&__ipipe_cpu_sync_map))
                        cpu_relax();

                cpumask_clear_cpu(ipipe_processor_id(), &__ipipe_cpu_lock_map);
......
@@ -124,7 +124,7 @@ void ipipe_timer_register(struct ipipe_timer *timer)
        if (timer->cpumask == NULL)
                timer->cpumask = cpumask_of(smp_processor_id());

        spin_lock_irqsave(&lock, flags);
        raw_spin_lock_irqsave(&lock, flags);

        list_for_each_entry(t, &timers, link) {
                if (t->rating <= timer->rating) {
@@ -134,7 +134,7 @@ void ipipe_timer_register(struct ipipe_timer *timer)
        }
        list_add_tail(&timer->link, &timers);
done:
        spin_unlock_irqrestore(&lock, flags);
        raw_spin_unlock_irqrestore(&lock, flags);
}

static void ipipe_timer_request_sync(void)
@@ -239,7 +239,7 @@ int ipipe_select_timers(const struct cpumask *mask)
        } else
                hrclock_freq = __ipipe_hrclock_freq;

        spin_lock_irqsave(&lock, flags);
        raw_spin_lock_irqsave(&lock, flags);

        /* First, choose timers for the CPUs handled by ipipe */
        for_each_cpu(cpu, mask) {
@@ -279,7 +279,7 @@ found:
                }
        }

        spin_unlock_irqrestore(&lock, flags);
        raw_spin_unlock_irqrestore(&lock, flags);

        flags = ipipe_critical_enter(ipipe_timer_request_sync);
        ipipe_timer_request_sync();
@@ -288,7 +288,7 @@ found:
        return 0;

err_remove_all:
        spin_unlock_irqrestore(&lock, flags);
        raw_spin_unlock_irqrestore(&lock, flags);

        for_each_cpu(cpu, mask) {
                per_cpu(ipipe_percpu.hrtimer_irq, cpu) = -1;
......
@@ -202,7 +202,7 @@ __ipipe_trace_end(int cpu, struct ipipe_trace_path *tp, int pos)
        if (length > per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)].length) {
                /* we need protection here against other cpus trying
                   to start a proc dump */
                spin_lock(&global_path_lock);
                raw_spin_lock(&global_path_lock);

                /* active path holds new worst case */
                tp->length = length;
@@ -211,7 +211,7 @@ __ipipe_trace_end(int cpu, struct ipipe_trace_path *tp, int pos)
                /* find next unused trace path */
                active = __ipipe_get_free_trace_path(active, cpu);

                spin_unlock(&global_path_lock);
                raw_spin_unlock(&global_path_lock);

                tp = &per_cpu(trace_path, cpu)[active];
@@ -234,7 +234,7 @@ __ipipe_trace_freeze(int cpu, struct ipipe_trace_path *tp, int pos)
        /* we need protection here against other cpus trying
         * to set their frozen path or to start a proc dump */
        spin_lock(&global_path_lock);
        raw_spin_lock(&global_path_lock);

        per_cpu(frozen_path, cpu) = active;
@@ -248,7 +248,7 @@ __ipipe_trace_freeze(int cpu, struct ipipe_trace_path *tp, int pos)
                tp->end = -1;
        }

        spin_unlock(&global_path_lock);
        raw_spin_unlock(&global_path_lock);

        tp = &per_cpu(trace_path, cpu)[active];
@@ -403,7 +403,7 @@ static unsigned long __ipipe_global_path_lock(void)
        int cpu;
        struct ipipe_trace_path *tp;

        spin_lock_irqsave(&global_path_lock, flags);
        raw_spin_lock_irqsave(&global_path_lock, flags);

        cpu = ipipe_processor_id();
restart:
......
@@ -1847,7 +1847,7 @@ void __ipipe_flush_printk (unsigned virq, void *cookie)
                goto start;

        do {
                spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
                raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
start:
                lmax = __ipipe_printk_fill;
                while (out < lmax) {
@@ -1856,13 +1856,13 @@ void __ipipe_flush_printk (unsigned virq, void *cookie)
                        p += len;
                        out += len;
                }

                spin_lock_irqsave(&__ipipe_printk_lock, flags);
                raw_spin_lock_irqsave(&__ipipe_printk_lock, flags);
        }
        while (__ipipe_printk_fill != lmax);

        __ipipe_printk_fill = 0;

        spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
        raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
}

/**
@@ -1916,7 +1916,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
                goto out;
        }

        spin_lock_irqsave(&__ipipe_printk_lock, flags);
        raw_spin_lock_irqsave(&__ipipe_printk_lock, flags);

        oldcount = __ipipe_printk_fill;
        fbytes = __LOG_BUF_LEN - oldcount;
@@ -1927,7 +1927,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
        } else
                r = 0;

        spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
        raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);

        if (oldcount == 0)
                ipipe_raise_irq(__ipipe_printk_virq);
......
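As a closing illustration of why these locks must remain hard/raw even on PREEMPT_RT: the printk() hunks above stage the message under __ipipe_printk_lock and raise a virtual IRQ so the root domain can flush it later, and that path may run with hard IRQs off in any domain, where a sleeping spinlock_rt lock would be illegal. A heavily simplified sketch of the producer side, with hypothetical names standing in for the real buffer and virq, might look like this:

/*
 * Hedged sketch (hypothetical names, needs <linux/string.h> for memcpy):
 * stage the text under a hard/raw lock, then signal the root domain once
 * through a virtual IRQ so it can flush from a safe context.
 */
IPIPE_DEFINE_SPINLOCK(demo_printk_lock);
static char demo_printk_buf[4096];
static int demo_printk_fill;
static unsigned int demo_printk_virq;   /* obtained from ipipe_alloc_virq() */

static void demo_defer_printk(const char *msg, int len)
{
        unsigned long flags;
        int was_empty;

        raw_spin_lock_irqsave(&demo_printk_lock, flags);
        was_empty = (demo_printk_fill == 0);
        if (len > (int)sizeof(demo_printk_buf) - demo_printk_fill)
                len = (int)sizeof(demo_printk_buf) - demo_printk_fill;
        memcpy(demo_printk_buf + demo_printk_fill, msg, len);
        demo_printk_fill += len;
        raw_spin_unlock_irqrestore(&demo_printk_lock, flags);

        /* Kick the root-domain flush handler only on the first pending message. */
        if (was_empty)
                ipipe_raise_irq(demo_printk_virq);
}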