Commit e401d452 authored by Philippe Gerum, committed by Dmitriy Cherkasov

arm64/ipipe: multiplex IPIs

SGI8-15 can be reserved for the exclusive use of the firmware, which
leaves SGI0-7 to the kernel. The ARM64 kernel currently uses six SGIs
for its regular IPIs (NR_IPI), and the pipeline needs to define three
more for conveying out-of-band events (i.e. the reschedule, hrtimer
and critical IPIs). Therefore we have to multiplex nine inter-processor
events over the eight available SGIs (SGI0-7).

This patch changes the IPI management in order to multiplex all
regular (in-band) IPIs over SGI0, reserving SGI1-3 for out-of-band
events.
parent 720914e8
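
To make the scheme concrete before the diff, here is a minimal user-space sketch of the send side, assuming six in-band message types (NR_IPI) as stated above; cross_call(), raise_sgi(), NR_CPUS and the per-CPU pending words are illustrative stand-ins, not the kernel's API. An in-band message number is recorded in the target CPU's pending word and signalled through SGI0, while the three out-of-band events map one-to-one onto SGI1-3, mirroring what the patched smp_cross_call() does.

/* Minimal user-space model of the send side (not the kernel code). */
#include <stdio.h>

#define NR_IPI      6   /* number of in-band message types (assumed) */
#define OOB_IPI_NR  3   /* reschedule, hrtimer and critical events */
#define NR_CPUS     4   /* illustrative */

static unsigned int ipi_messages[NR_CPUS];      /* per-CPU pending words */

/* Stand-in for __smp_cross_call(): just report which SGI would fire. */
static void raise_sgi(int cpu, unsigned int sgi)
{
        printf("cpu%d <- SGI%u\n", cpu, sgi);
}

/* Send message 'ipinr' (0..NR_IPI+OOB_IPI_NR-1) to 'cpu'. */
static void cross_call(int cpu, unsigned int ipinr)
{
        unsigned int sgi;

        if (ipinr < NR_IPI) {
                /* In-band: record the message, then kick the mux SGI0. */
                ipi_messages[cpu] |= 1U << ipinr;
                sgi = 0;
        } else {
                /* Out-of-band: events map one-to-one onto SGI1-3. */
                sgi = ipinr - NR_IPI + 1;
        }
        raise_sgi(cpu, sgi);
}

int main(void)
{
        cross_call(1, 3);           /* in-band message #3 -> SGI0 on cpu1 */
        cross_call(1, NR_IPI);      /* first out-of-band event -> SGI1 on cpu1 */
        return 0;
}
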
@@ -32,6 +32,7 @@
#include <linux/jump_label.h>
#include <linux/ipipe_trace.h>
#include <linux/ipipe_debug.h>
#include <asm/hardirq.h>
#define IPIPE_CORE_RELEASE 2
@@ -165,7 +166,7 @@ static inline void ipipe_unmute_pic(void)
void __ipipe_early_core_setup(void);
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
void __ipipe_root_localtimer(unsigned int irq, void *cookie);
void __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs);
void __ipipe_grab_ipi(unsigned int sgi, struct pt_regs *regs);
void __ipipe_ipis_alloc(void);
void __ipipe_ipis_request(void);
......
@@ -33,13 +33,15 @@
#ifdef CONFIG_SMP
extern unsigned __ipipe_first_ipi;
#define IPIPE_CRITICAL_IPI __ipipe_first_ipi
#define IPIPE_HRTIMER_IPI (IPIPE_CRITICAL_IPI + 1)
#define IPIPE_RESCHEDULE_IPI (IPIPE_CRITICAL_IPI + 2)
#define IPIPE_LAST_IPI IPIPE_RESCHEDULE_IPI
/*
* Out-of-band IPIs are directly mapped to SGI1-3, instead of
* multiplexed over SGI0 like regular in-band messages.
*/
#define IPIPE_IPI_BASE IPIPE_VIRQ_BASE
#define IPIPE_OOB_IPI_NR 3
#define IPIPE_CRITICAL_IPI (IPIPE_IPI_BASE + NR_IPI)
#define IPIPE_HRTIMER_IPI (IPIPE_IPI_BASE + NR_IPI + 1)
#define IPIPE_RESCHEDULE_IPI (IPIPE_IPI_BASE + NR_IPI + 2)
#ifdef CONFIG_IPIPE_LEGACY
#define hard_smp_processor_id() \
......
@@ -83,23 +83,8 @@ enum ipi_msg_type {
IPI_TIMER,
IPI_IRQ_WORK,
IPI_WAKEUP,
#ifdef CONFIG_IPIPE
IPI_IPIPE_FIRST,
#endif /* CONFIG_IPIPE */
};
#ifdef CONFIG_IPIPE
#define noipipe_irq_enter() \
do { \
} while(0)
#define noipipe_irq_exit() \
do { \
} while(0)
#else /* !CONFIG_IPIPE */
#define noipipe_irq_enter() irq_enter()
#define noipipe_irq_exit() irq_exit()
#endif /* !CONFIG_IPIPE */
#ifdef CONFIG_ARM64_VHE
/* Whether the boot CPU is running in HYP mode or not*/
@@ -791,12 +776,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_WAKEUP, "CPU wake-up interrupts"),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
trace_ipi_raise(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
@@ -823,66 +802,124 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
}
#ifdef CONFIG_IPIPE
#define IPIPE_IPI_BASE IPIPE_VIRQ_BASE
unsigned __ipipe_first_ipi;
EXPORT_SYMBOL_GPL(__ipipe_first_ipi);
static DEFINE_PER_CPU(unsigned long, ipi_messages);
#define noipipe_irq_enter() \
do { \
} while (0)
#define noipipe_irq_exit() \
do { \
} while (0)
static void __ipipe_do_IPI(unsigned virq, void *cookie)
static void __ipipe_do_IPI(unsigned int virq, void *cookie)
{
enum ipi_msg_type msg = virq - IPIPE_IPI_BASE;
handle_IPI(msg, raw_cpu_ptr(&ipipe_percpu.tick_regs));
unsigned int ipinr = virq - IPIPE_IPI_BASE;
handle_IPI(ipinr, raw_cpu_ptr(&ipipe_percpu.tick_regs));
}
void __ipipe_ipis_alloc(void)
{
unsigned virq, _virq;
unsigned ipi_nr;
unsigned int virq, ipi;
static bool done;
if (__ipipe_first_ipi)
if (done)
return;
/* __ipipe_first_ipi is 0 here */
ipi_nr = IPI_IPIPE_FIRST + IPIPE_LAST_IPI + 1;
for (virq = IPIPE_IPI_BASE; virq < IPIPE_IPI_BASE + ipi_nr; virq++) {
_virq = ipipe_alloc_virq();
if (virq != _virq)
panic("I-pipe: cannot reserve virq #%d (got #%d)\n",
virq, _virq);
if (virq - IPIPE_IPI_BASE == IPI_IPIPE_FIRST)
__ipipe_first_ipi = virq;
/*
* We have to get virtual IRQs in the range
* [ IPIPE_IPI_BASE..IPIPE_IPI_BASE + NR_IPI + IPIPE_OOB_IPI_NR - 1 ],
* otherwise something is wrong (likely someone would have
* allocated virqs before we do, and this would break our
* fixed numbering scheme for IPIs).
*/
for (ipi = 0; ipi < NR_IPI + IPIPE_OOB_IPI_NR; ipi++) {
virq = ipipe_alloc_virq();
WARN_ON_ONCE(virq != IPIPE_IPI_BASE + ipi);
}
done = true;
}
void __ipipe_ipis_request(void)
{
unsigned virq;
unsigned int virq;
for (virq = IPIPE_IPI_BASE; virq < __ipipe_first_ipi; virq++)
/*
* Attach a handler to each VIRQ mapping an IPI which might be
* posted by __ipipe_grab_ipi(). This handler will invoke
* handle_IPI() from the root stage in turn, passing it the
* corresponding IPI message number.
*/
for (virq = IPIPE_IPI_BASE;
virq < IPIPE_IPI_BASE + NR_IPI + IPIPE_OOB_IPI_NR; virq++)
ipipe_request_irq(ipipe_root_domain,
virq,
(ipipe_irq_handler_t)__ipipe_do_IPI,
NULL, NULL);
}
void ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
enum ipi_msg_type msg = ipi - IPIPE_IPI_BASE;
smp_cross_call(&cpumask, msg);
unsigned int cpu, sgi;
if (ipinr < NR_IPI) {
/* regular in-band IPI (multiplexed over SGI0). */
trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
for_each_cpu(cpu, target)
set_bit(ipinr, &per_cpu(ipi_messages, cpu));
smp_mb();
sgi = 0;
} else /* out-of-band IPI (SGI1-3). */
sgi = ipinr - NR_IPI + 1;
__smp_cross_call(target, sgi);
}
void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask)
{
unsigned int ipinr = ipi - IPIPE_IPI_BASE;
smp_cross_call(&cpumask, ipinr);
}
EXPORT_SYMBOL_GPL(ipipe_send_ipi);
/* hw IRQs off */
asmlinkage void __exception __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs)
asmlinkage void __ipipe_grab_ipi(unsigned int sgi, struct pt_regs *regs)
{
int virq = IPIPE_IPI_BASE + svc;
unsigned int ipinr, irq;
unsigned long *pmsg;
__ipipe_dispatch_irq(virq, IPIPE_IRQF_NOACK);
if (sgi) { /* SGI1-3, OOB messages. */
irq = sgi + NR_IPI - 1 + IPIPE_IPI_BASE;
__ipipe_dispatch_irq(irq, IPIPE_IRQF_NOACK);
} else {
/* In-band IPI (0..NR_IPI-1) multiplexed over SGI0. */
pmsg = raw_cpu_ptr(&ipi_messages);
while (*pmsg) {
ipinr = ffs(*pmsg) - 1;
clear_bit(ipinr, pmsg);
irq = IPIPE_IPI_BASE + ipinr;
__ipipe_dispatch_irq(irq, IPIPE_IRQF_NOACK);
}
}
__ipipe_exit_irq(regs);
}
#else
#define noipipe_irq_enter() irq_enter()
#define noipipe_irq_exit() irq_exit()
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
trace_ipi_raise(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
#endif /* CONFIG_IPIPE */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
......
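
For completeness, here is a companion sketch of the receive side that the patched __ipipe_grab_ipi() implements, under the same assumptions: a non-zero SGI number translates straight back into one of the three out-of-band events, while SGI0 drains the pending word one message at a time using ffs(). The deliver() helper and the single pending word are hypothetical simplifications of the handle_IPI() dispatching and the per-CPU ipi_messages variable.

/* Minimal user-space model of the receive side (not the kernel code). */
#include <stdio.h>
#include <strings.h>    /* ffs() */

#define NR_IPI  6       /* number of in-band message types (assumed) */

static unsigned int pending;    /* pending word of the receiving CPU */

/* Stand-in for handle_IPI()/__ipipe_dispatch_irq(): report the message. */
static void deliver(unsigned int ipinr)
{
        printf("delivering IPI message %u\n", ipinr);
}

/* Called when SGI 'sgi' fires on this CPU. */
static void grab_ipi(unsigned int sgi)
{
        unsigned int ipinr;

        if (sgi) {
                /* SGI1-3 carry the out-of-band events directly. */
                deliver(sgi + NR_IPI - 1);
                return;
        }
        /* SGI0: drain every multiplexed in-band message, lowest bit first. */
        while (pending) {
                ipinr = ffs((int)pending) - 1;
                pending &= ~(1U << ipinr);
                deliver(ipinr);
        }
}

int main(void)
{
        pending = (1U << 2) | (1U << 5);    /* two in-band messages queued */
        grab_ipi(0);                        /* SGI0: delivers 2, then 5 */
        grab_ipi(1);                        /* SGI1: out-of-band event NR_IPI */
        return 0;
}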