Commit b5f1f372 authored by Philippe Gerum

powerpc/ipipe: add support for Book3E

parent cce4d9da
......@@ -11,6 +11,8 @@
#ifndef _ASM_POWERPC_EXCEPTION_64E_H
#define _ASM_POWERPC_EXCEPTION_64E_H
#include <asm/irq_softstate.h>
/*
* SPRGs usage and other considerations...
*
......
......@@ -35,6 +35,8 @@
* implementations as possible.
*/
#include <asm/irq_softstate.h>
#define EX_R9 0
#define EX_R10 8
#define EX_R11 16
......@@ -332,20 +334,6 @@ do_kvm_##n: \
GET_CTR(r10, area); \
std r10,_CTR(r1);
#ifdef CONFIG_IPIPE
/* Do NOT alter Rc(eq) in this code; our caller uses it. */
#define COPY_SOFTISTATE(mreg) \
ld mreg,PACAROOTPCPU(r13); \
ld mreg,0(mreg); \
nor mreg,mreg,mreg; \
clrldi mreg,mreg,63; \
std mreg,SOFTE(r1)
#else /* !CONFIG_IPIPE */
#define COPY_SOFTISTATE(mreg) \
lbz mreg,PACASOFTIRQEN(r13); \
std mreg,SOFTE(r1)
#endif /* !CONFIG_IPIPE */
#define EXCEPTION_PROLOG_COMMON_3(n) \
std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
......@@ -353,7 +341,7 @@ do_kvm_##n: \
mflr r9; /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
std r9,_LINK(r1); \
COPY_SOFTISTATE(r10); \
EXC_SAVE_SOFTISTATE(r10); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
std r11,_XER(r1); \
li r9,(n)+1; \
......@@ -532,46 +520,6 @@ label##_relon_hv: \
* runlatch, etc...
*/
.macro HARD_ENABLE_INTS tmp=r10
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
ld \tmp,PACAKMSR(r13)
ori \tmp,\tmp,MSR_EE
mtmsrd \tmp,1
#endif /* CONFIG_PPC_BOOK3E */
.endm
.macro HARD_DISABLE_INTS tmp=r10
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
ld \tmp,PACAKMSR(r13) /* Get kernel MSR without EE */
mtmsrd \tmp,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
.endm
.macro HARD_DISABLE_INTS_RI
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
/*
* For performance reasons we clear RI the same time that we
* clear EE. We only need to clear RI just before we restore r13
* below, but batching it with EE saves us one expensive mtmsrd call.
* We have to be careful to restore RI if we branch anywhere from
* here (eg syscall_exit_work).
*
* CAUTION: using r9-r11 the way they are is assumed by the
* caller.
*/
ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
li r9,MSR_RI
andc r11,r10,r9
mtmsrd r11,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
.endm
/*
* This addition reconciles our actual IRQ state with the various software
* flags that track it. This may call C code.
......@@ -585,7 +533,6 @@ label##_relon_hv: \
mfmsr r11; \
ori r11,r11,MSR_EE; \
mtmsrd r11,1;
#define RECONCILE_IRQ_STATE(__rA, __rB) HARD_DISABLE_INTS __rA
#else /* !CONFIG_IPIPE */
#define ADD_RECONCILE RECONCILE_IRQ_STATE(r10,r11)
#endif /* !CONFIG_IPIPE */
......
......@@ -108,7 +108,9 @@ struct ipipe_ipi_struct {
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
void __ipipe_register_ipi(unsigned int irq);
void __ipipe_register_mux_ipi(unsigned int irq);
void __ipipe_finish_ipi_demux(unsigned int irq);
#else
#define __ipipe_hook_critical_ipi(ipd) do { } while(0)
#endif /* CONFIG_SMP */
......
......@@ -35,7 +35,8 @@
* The first virtual interrupt is reserved for the timer (see
* __ipipe_early_core_setup).
*/
#define IPIPE_TIMER_VIRQ IPIPE_VIRQ_BASE
#define IPIPE_TIMER_VIRQ (IPIPE_VIRQ_BASE + 0)
#define IPIPE_DOORBELL_VIRQ (IPIPE_VIRQ_BASE + 1)
#ifdef CONFIG_SMP
/*
......@@ -44,21 +45,19 @@
* implemented by piggybacking the debugger break IPI 0x3,
* which is demultiplexed in __ipipe_ipi_demux().
*/
#define IPIPE_CRITICAL_IPI (IPIPE_VIRQ_BASE + 2)
#define IPIPE_HRTIMER_IPI (IPIPE_VIRQ_BASE + 3)
#define IPIPE_RESCHEDULE_IPI (IPIPE_VIRQ_BASE + 4)
#define IPIPE_BASE_IPI_OFFSET IPIPE_CRITICAL_IPI
/* these are bit numbers in practice */
#define IPIPE_MSG_CRITICAL_IPI 0
#define IPIPE_MSG_HRTIMER_IPI (IPIPE_MSG_CRITICAL_IPI + 1)
#define IPIPE_MSG_RESCHEDULE_IPI (IPIPE_MSG_CRITICAL_IPI + 2)
#define IPIPE_MSG_IPI_MASK ((1UL << IPIPE_MSG_CRITICAL_IPI) | \
(1UL << IPIPE_MSG_HRTIMER_IPI) | \
(1UL << IPIPE_MSG_RESCHEDULE_IPI))
#define IPIPE_CRITICAL_IPI (IPIPE_VIRQ_BASE + 1)
#define IPIPE_HRTIMER_IPI (IPIPE_CRITICAL_IPI + 1)
#define IPIPE_RESCHEDULE_IPI (IPIPE_CRITICAL_IPI + 2)
#define IPIPE_BASE_IPI_OFFSET IPIPE_CRITICAL_IPI
#define ipipe_processor_id() raw_smp_processor_id()
#else /* !CONFIG_SMP */
......
......@@ -74,6 +74,9 @@ static inline void hard_local_irq_enable_notrace(void)
{
__asm__ __volatile__("wrteei 1": : :"memory");
}
#define hard_local_irq_restore_notrace(x) mtmsr(x)
#else /* !CONFIG_PPC_BOOK3E */
static inline void hard_local_irq_disable_notrace(void)
{
......@@ -84,6 +87,9 @@ static inline void hard_local_irq_enable_notrace(void)
{
__mtmsrd(mfmsr() | MSR_EE, 1);
}
#define hard_local_irq_restore_notrace(x) __mtmsrd(x, 1)
#endif /* !CONFIG_PPC_BOOK3E */
static inline unsigned long hard_local_irq_save_notrace(void)
......@@ -93,8 +99,6 @@ static inline unsigned long hard_local_irq_save_notrace(void)
return msr;
}
#define hard_local_irq_restore_notrace(x) __mtmsrd(x, 1)
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_IPIPE
......
#ifndef _ASM_POWERPC_IRQ_SOFTSTATE_H
#define _ASM_POWERPC_IRQ_SOFTSTATE_H
#ifdef __ASSEMBLY__
.macro HARD_ENABLE_INTS tmp=r10
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
ld \tmp,PACAKMSR(r13)
ori \tmp,\tmp,MSR_EE
mtmsrd \tmp,1
#endif /* CONFIG_PPC_BOOK3E */
.endm
.macro HARD_DISABLE_INTS tmp=r10
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
ld \tmp,PACAKMSR(r13) /* Get kernel MSR without EE */
mtmsrd \tmp,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
.endm
.macro HARD_DISABLE_INTS_RI
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
/*
* For performance reasons we clear RI the same time that we
* clear EE. We only need to clear RI just before we restore r13
* below, but batching it with EE saves us one expensive mtmsrd call.
* We have to be careful to restore RI if we branch anywhere from
* here (eg syscall_exit_work).
*
* CAUTION: using r9-r11 the way they are is assumed by the
* caller.
*/
ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
li r9,MSR_RI
andc r11,r10,r9
mtmsrd r11,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
.endm
#ifdef CONFIG_IPIPE
/* Do NOT alter Rc(eq) in this code; our caller uses it. */
#define __COPY_SOFTISTATE(mreg) \
ld mreg,PACAROOTPCPU(r13); \
ld mreg,0(mreg); \
nor mreg,mreg,mreg; \
clrldi mreg,mreg,63; \
/* Do NOT alter Rc(eq) in this code; our caller uses it. */
#define COPY_SOFTISTATE(mreg) \
__COPY_SOFTISTATE(mreg); \
std mreg,SOFTE(r1)
#ifdef CONFIG_PPC_BOOK3E
#define SPECIAL_SAVE_SOFTISTATE(mreg) \
__COPY_SOFTISTATE(mreg); \
SPECIAL_EXC_STORE(mreg, SOFTE)
#endif
#define EXC_SAVE_SOFTISTATE(mreg) \
COPY_SOFTISTATE(mreg)
#define RECONCILE_IRQ_STATE(__rA, __rB) HARD_DISABLE_INTS __rA
#else /* !CONFIG_IPIPE */
#define COPY_SOFTISTATE(mreg) \
lbz mreg,PACASOFTIRQEN(r13); \
std mreg,SOFTE(r1)
#ifdef CONFIG_PPC_BOOK3E
#define SPECIAL_SAVE_SOFTISTATE(mreg) \
lbz mreg,PACASOFTIRQEN(r13); \
SPECIAL_EXC_STORE(mreg, SOFTE)
#endif
#define EXC_SAVE_SOFTISTATE(mreg) \
COPY_SOFTISTATE(mreg)
/*
* This is used by assembly code to soft-disable interrupts first and
* reconcile irq state.
*
* NB: This may call C code, so the caller must be prepared for volatiles to
* be clobbered.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \
cmpwi cr0,__rA,0; \
li __rA,0; \
ori __rB,__rB,PACA_IRQ_HARD_DIS; \
stb __rB,PACAIRQHAPPENED(r13); \
beq 44f; \
stb __rA,PACASOFTIRQEN(r13); \
TRACE_DISABLE_INTS; \
44:
#else
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACAIRQHAPPENED(r13); \
li __rB,0; \
ori __rA,__rA,PACA_IRQ_HARD_DIS; \
stb __rB,PACASOFTIRQEN(r13); \
stb __rA,PACAIRQHAPPENED(r13)
#endif /* !CONFIG_TRACE_IRQFLAGS */
#endif /* !CONFIG_IPIPE */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_IRQ_SOFTSTATE_H */
......@@ -38,40 +38,10 @@
#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
/*
* This is used by assembly code to soft-disable interrupts first and
* reconcile irq state.
*
* NB: This may call C code, so the caller must be prepared for volatiles to
* be clobbered.
*/
#ifndef CONFIG_IPIPE
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \
cmpwi cr0,__rA,0; \
li __rA,0; \
ori __rB,__rB,PACA_IRQ_HARD_DIS; \
stb __rB,PACAIRQHAPPENED(r13); \
beq 44f; \
stb __rA,PACASOFTIRQEN(r13); \
TRACE_DISABLE_INTS; \
44:
#endif /* !CONFIG_IPIPE */
#else
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS
#ifndef CONFIG_IPIPE
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACAIRQHAPPENED(r13); \
li __rB,0; \
ori __rA,__rA,PACA_IRQ_HARD_DIS; \
stb __rB,PACASOFTIRQEN(r13); \
stb __rA,PACAIRQHAPPENED(r13)
#endif /* !CONFIG_IPIPE */
#endif
#endif
......
......@@ -16,6 +16,7 @@
#define _ASM_POWERPC_QE_IC_H
#include <linux/irq.h>
#include <linux/ipipe.h>
struct device_node;
struct qe_ic;
......
......@@ -65,7 +65,9 @@
ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
special_reg_save:
#ifndef CONFIG_IPIPE
lbz r9,PACAIRQHAPPENED(r13)
#endif
RECONCILE_IRQ_STATE(r3,r4)
/*
......@@ -132,15 +134,15 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS5,r10
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#ifndef CONFIG_IPIPE
SPECIAL_EXC_STORE(r9,IRQHAPPENED)
#endif
mfspr r10,SPRN_DEAR
SPECIAL_EXC_STORE(r10,DEAR)
mfspr r10,SPRN_ESR
SPECIAL_EXC_STORE(r10,ESR)
lbz r10,PACASOFTIRQEN(r13)
SPECIAL_EXC_STORE(r10,SOFTE)
SPECIAL_SAVE_SOFTISTATE(r10)
ld r10,_NIP(r1)
SPECIAL_EXC_STORE(r10,CSRR0)
ld r10,_MSR(r1)
......@@ -206,8 +208,15 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
lbz r6,PACASOFTIRQEN(r13)
#ifdef CONFIG_IPIPE
ld r6,PACAROOTPCPU(r13)
cmpwi cr0,r6,0
bne 1f
TRACE_ENABLE_INTS
1:
#else
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
/* Interrupts had better not already be enabled... */
twnei r6,0
......@@ -226,6 +235,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
*/
SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
stb r10,PACAIRQHAPPENED(r13)
#endif
SPECIAL_EXC_LOAD(r10,DEAR)
mtspr SPRN_DEAR,r10
......@@ -350,10 +360,16 @@ ret_from_mc_except:
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)
#ifdef CONFIG_IPIPE
#define PROLOG_ADDITION_MASKABLE_GEN(n)
#define MASKABLE_EXCEPTION_EXIT b __ipipe_ret_from_except_lite
#else
#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
cmpwi cr0,r10,0; /* yes -> go out of line */ \
beq masked_interrupt_book3e_##n
#define MASKABLE_EXCEPTION_EXIT b ret_from_except_lite
#endif
#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
......@@ -397,8 +413,8 @@ exc_##n##_common: \
mfspr r8,SPRN_XER; /* save XER in stackframe */ \
ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \
lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \
lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \
ld r12,exception_marker@toc(r2); \
EXC_SAVE_SOFTISTATE(r11); \
li r0,0; \
std r3,GPR10(r1); /* save r10 to stackframe */ \
std r4,GPR11(r1); /* save r11 to stackframe */ \
......@@ -410,7 +426,6 @@ exc_##n##_common: \
std r9,0(r1); /* store stack frame back link */ \
std r10,_CCR(r1); /* store orig CR in stackframe */ \
std r9,GPR1(r1); /* store stack frame back link */ \
std r11,SOFTE(r1); /* and save it to stackframe */ \
std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
std r3,_TRAP(r1); /* set trap number */ \
std r0,RESULT(r1); /* clear regs->result */
......@@ -499,7 +514,7 @@ exc_##n##_bad_stack: \
CHECK_NAPPING(); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b ret_from_except_lite;
MASKABLE_EXCEPTION_EXIT;
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
......@@ -545,6 +560,16 @@ interrupt_base_book3e: /* fake trap */
.globl interrupt_end_book3e
interrupt_end_book3e:
#ifdef CONFIG_IPIPE
#define BOOKE_EXTIRQ_HANDLER __ipipe_grab_irq
#define BOOKE_TIMER_HANDLER __ipipe_grab_timer
#define BOOKE_DBELL_HANDLER __ipipe_grab_doorbell
#else
#define BOOKE_EXTIRQ_HANDLER do_IRQ
#define BOOKE_TIMER_HANDLER timer_interrupt
#define BOOKE_DBELL_HANDLER doorbell_exception
#endif
/* Critical Input Interrupt */
START_EXCEPTION(critical_input);
CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
......@@ -591,8 +616,8 @@ interrupt_end_book3e:
/* External Input Interrupt */
MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
external_input, do_IRQ, ACK_NONE)
external_input, BOOKE_EXTIRQ_HANDLER, ACK_NONE)
/* Alignment */
START_EXCEPTION(alignment);
NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
......@@ -676,7 +701,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Decrementer Interrupt */
MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
decrementer, timer_interrupt, ACK_DEC)
decrementer, BOOKE_TIMER_HANDLER, ACK_DEC)
/* Fixed Interval Timer Interrupt */
MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
......@@ -855,7 +880,7 @@ kernel_dbg_exc:
/* Doorbell interrupt */
MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
doorbell, doorbell_exception, ACK_NONE)
doorbell, BOOKE_DBELL_HANDLER, ACK_NONE)
/* Doorbell critical Interrupt */
START_EXCEPTION(doorbell_crit);
......@@ -928,6 +953,7 @@ kernel_dbg_exc:
bl .unknown_exception
b .ret_from_except
#ifndef CONFIG_IPIPE
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
* accordingly and if the interrupt is level sensitive, we hard disable
......@@ -1000,6 +1026,7 @@ _GLOBAL(__replay_interrupt)
beq exc_0x280_common
blr
#endif /* !CONFIG_IPIPE */
/*
* This is called from 0x300 and 0x400 handlers after the prologs with
......
......@@ -27,15 +27,20 @@ _GLOBAL(\name)
mflr r0
std r0,16(r1)
#ifndef CONFIG_IPIPE
/* Hard disable interrupts */
wrteei 0
/* Now check if an interrupt came in while we were soft disabled
* since we may otherwise lose it (doorbells etc...).
* since we may otherwise lose it (doorbells etc...). There is no
* need to do that if pipelining IRQs, since our caller already
* cleared the stall bit, then synchronized the interrupt log,
* disabling hw IRQs before getting here.
*/
lbz r3,PACAIRQHAPPENED(r13)
cmpwi cr0,r3,0
bnelr
#endif
/* Now we are going to mark ourselves as soft and hard enabled in
* order to be able to take interrupts while asleep. We inform lockdep
......@@ -46,8 +51,10 @@ _GLOBAL(\name)
bl trace_hardirqs_on
addi r1,r1,128
#endif
#ifndef CONFIG_IPIPE
li r0,1
stb r0,PACASOFTIRQEN(r13)
#endif
/* Interrupts will make use return to LR, so get something we want
* in there
......
......@@ -44,23 +44,33 @@
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/debug.h>
#include <asm/dbell.h>
static void __ipipe_do_IRQ(unsigned int irq, void *cookie);
static void __ipipe_do_timer(unsigned int irq, void *cookie);
#ifdef CONFIG_PPC_DOORBELL
static void __ipipe_do_doorbell(unsigned int irq, void *cookie);
#endif
#define DECREMENTER_MAX 0x7fffffff
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct ipipe_ipi_struct, ipipe_ipi_message);
unsigned int __ipipe_ipi_irq = NR_IRQS + 1; /* dummy value */
#ifdef CONFIG_DEBUGGER
cpumask_t __ipipe_dbrk_pending; /* pending debugger break IPIs */
#endif
static unsigned int mux_ipi;
void __ipipe_register_mux_ipi(unsigned int irq)
{
mux_ipi = irq;
}
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
{
unsigned int ipi = IPIPE_CRITICAL_IPI;
......@@ -71,23 +81,15 @@ void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
ipd->irqs[ipi].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK;
}
void __ipipe_register_ipi(unsigned int irq)
{
__ipipe_ipi_irq = irq;
}
static void __ipipe_ipi_demux(int irq, struct pt_regs *regs)
static void do_ipi_demux(int irq, struct pt_regs *regs)
{
struct irq_desc *desc = irq_to_desc(irq);
int ipi, cpu = ipipe_processor_id();
desc->ipipe_ack(irq, desc);
kstat_incr_irq_this_cpu(irq);
while (per_cpu(ipipe_ipi_message, cpu).value & IPIPE_MSG_IPI_MASK) {
for (ipi = IPIPE_MSG_CRITICAL_IPI; ipi <= IPIPE_MSG_RESCHEDULE_IPI; ++ipi) {
if (test_and_clear_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value)) {
int cpu __maybe_unused = ipipe_processor_id(), ipi;
while (this_cpu_ptr(&ipipe_ipi_message)->value & IPIPE_MSG_IPI_MASK) {
for (ipi = IPIPE_MSG_CRITICAL_IPI;
ipi <= IPIPE_MSG_RESCHEDULE_IPI; ++ipi) {
if (test_and_clear_bit(ipi,
&this_cpu_ptr(&ipipe_ipi_message)->value)) {
mb();
__ipipe_handle_irq(ipi + IPIPE_BASE_IPI_OFFSET, NULL);
}
......@@ -105,7 +107,7 @@ static void __ipipe_ipi_demux(int irq, struct pt_regs *regs)
}
#endif /* CONFIG_DEBUGGER */
ipipe_end_irq(irq);
__ipipe_finish_ipi_demux(irq);
}
void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask)
......@@ -127,22 +129,20 @@ void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask)
flags = hard_local_irq_save();
ipi -= IPIPE_BASE_IPI_OFFSET;
for_each_online_cpu(cpu) {
if (cpumask_test_cpu(cpu, &cpumask))
set_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value);
}
mb();
if (unlikely(cpumask_empty(&cpumask)))
goto out;
me = ipipe_processor_id();
ipi -= IPIPE_BASE_IPI_OFFSET;
for_each_cpu(cpu, &cpumask) {
if (cpu != me)
if (cpu == me)
continue;
set_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value);
if (smp_ops->message_pass)
smp_ops->message_pass(cpu, PPC_MSG_IPIPE_DEMUX);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
else
smp_muxed_ipi_message_pass(cpu, PPC_MSG_IPIPE_DEMUX);
#endif
}
out:
hard_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ipipe_send_ipi);
......@@ -185,7 +185,7 @@ unsigned long ipipe_test_root(void)
}
EXPORT_SYMBOL_GPL(ipipe_test_root);
#endif /* CONFIG_SMP */
#endif /* !CONFIG_SMP */
void __ipipe_early_core_setup(void)
{
......@@ -197,6 +197,13 @@ void __ipipe_early_core_setup(void)
*/
virq = ipipe_alloc_virq();
BUG_ON(virq != IPIPE_TIMER_VIRQ);
/*
* Although not all CPUs define the doorbell event, we always
* allocate the corresponding VIRQ, so that we can keep fixed
* values for all VIRQ numbers.
*/
virq = ipipe_alloc_virq();
BUG_ON(virq != IPIPE_DOORBELL_VIRQ);
#ifdef CONFIG_SMP
virq = ipipe_alloc_virq();
BUG_ON(virq != IPIPE_CRITICAL_IPI);
......@@ -238,6 +245,13 @@ void __ipipe_enable_pipeline(void)
__ipipe_do_timer, NULL,
NULL);
#ifdef CONFIG_PPC_DOORBELL
ipipe_request_irq(ipipe_root_domain,
IPIPE_DOORBELL_VIRQ,
__ipipe_do_doorbell, NULL,
NULL);
#endif
ipipe_critical_exit(flags);
}
......@@ -298,10 +312,13 @@ int __ipipe_grab_irq(struct pt_regs *regs)
if (likely(irq != NO_IRQ)) {
ipipe_trace_irq_entry(irq);
#ifdef CONFIG_SMP
/* Check for cascaded I-pipe IPIs */
if (irq == __ipipe_ipi_irq)
__ipipe_ipi_demux(irq, regs);
else
if (irq == mux_ipi) {
struct irq_desc *desc = irq_to_desc(irq);
desc->ipipe_ack(irq, desc);
kstat_incr_irq_this_cpu(irq);
do_ipi_demux(irq, regs);
ipipe_end_irq(irq);
} else
#endif /* CONFIG_SMP */
__ipipe_handle_irq(irq, regs);
}
......@@ -328,6 +345,23 @@ static void __ipipe_do_timer(unsigned int irq, void *cookie)
timer_interrupt(raw_cpu_ptr(&ipipe_percpu.tick_regs));
}
#ifdef CONFIG_PPC_DOORBELL
int __ipipe_grab_doorbell(struct pt_regs *regs)
{
#ifdef CONFIG_SMP
do_ipi_demux(IPIPE_DOORBELL_VIRQ, regs);
#endif
return __ipipe_exit_irq(regs);
}
static void __ipipe_do_doorbell(unsigned int irq, void *cookie)
{
doorbell_exception(raw_cpu_ptr(&ipipe_percpu.tick_regs));
}
#endif
int __ipipe_grab_timer(struct pt_regs *regs)
{
struct pt_regs *tick_regs;
......
......@@ -192,8 +192,7 @@ int smp_request_message_ipi(int virq, int msg)
}
#ifdef CONFIG_IPIPE
if (msg == PPC_MSG_DEBUGGER_BREAK)
/* Piggyback the debugger IPI for the I-pipe. */
__ipipe_register_ipi(virq);
__ipipe_register_mux_ipi(virq);
#endif
err = request_irq(virq, smp_ipi_action[msg],
IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
......@@ -262,6 +261,24 @@ irqreturn_t smp_ipi_demux(void)
return IRQ_HANDLED;
}
#ifdef CONFIG_IPIPE