Commit 96df83cb authored by Philippe Gerum

ipipe: add kernel event notifiers

Add the core API for enabling (regular) kernel event notifications to
a co-kernel running over the head domain. For instance, such a
co-kernel may need to know when a task is about to be resumed upon
signal receipt, or when it gets an access fault trap.

This commit adds the client-side API for enabling such notifications
for classes of events, but does not provide the notification points
per se; those come in later commits.
parent 4cd03bbb
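
As a usage sketch, a co-kernel built on this API would enable the hook
classes it needs at init time. Only ipipe_set_hooks(), the IPIPE_*
enable bits and the domain pointers come from this commit; the init
routine itself is hypothetical:

/* Hypothetical co-kernel init (sketch, not part of this commit). */
#include <linux/ipipe.h>

static int __init my_cokernel_init(void)
{
	/*
	 * Syscall and trap interception is a head-domain business;
	 * kevents may only be enabled on the root domain, as
	 * enforced by ipipe_set_hooks() below.
	 */
	ipipe_set_hooks(ipipe_head_domain, IPIPE_SYSCALL | IPIPE_TRAP);
	ipipe_set_hooks(ipipe_root_domain, IPIPE_KEVENT);
	return 0;
}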
@@ -38,6 +38,11 @@
#include <linux/ipipe_domain.h>

/* ipipe_set_hooks(..., enables) */
#define IPIPE_SYSCALL	__IPIPE_SYSCALL_E
#define IPIPE_TRAP	__IPIPE_TRAP_E
#define IPIPE_KEVENT	__IPIPE_KEVENT_E

struct ipipe_sysinfo {
	int sys_nr_cpus;	/* Number of CPUs on board */
	int sys_hrtimer_irq;	/* hrtimer device IRQ */
@@ -108,6 +113,13 @@ void ipipe_free_irq(struct ipipe_domain *ipd,
void ipipe_raise_irq(unsigned int irq);
void ipipe_set_hooks(struct ipipe_domain *ipd,
		     int enables);

int ipipe_handle_syscall(struct thread_info *ti,
			 unsigned long nr, struct pt_regs *regs);

unsigned int ipipe_alloc_virq(void);
void ipipe_free_virq(unsigned int virq);
@@ -299,6 +311,27 @@ int ipipe_test_ti_thread_flag(struct thread_info *ti, int flag)
#define ipipe_test_thread_flag(flag) \
	ipipe_test_ti_thread_flag(current_thread_info(), flag)

#define ipipe_enable_notifier(p) \
	ipipe_set_ti_thread_flag(task_thread_info(p), TIP_NOTIFY)

#define ipipe_disable_notifier(p) \
	do { \
		struct thread_info *ti = task_thread_info(p); \
		ipipe_clear_ti_thread_flag(ti, TIP_NOTIFY); \
		ipipe_clear_ti_thread_flag(ti, TIP_MAYDAY); \
	} while (0)

#define ipipe_notifier_enabled_p(p) \
	ipipe_test_ti_thread_flag(task_thread_info(p), TIP_NOTIFY)

#define ipipe_raise_mayday(p) \
	do { \
		struct thread_info *ti = task_thread_info(p); \
		ipipe_check_irqoff(); \
		if (ipipe_test_ti_thread_flag(ti, TIP_NOTIFY)) \
			ipipe_set_ti_thread_flag(ti, TIP_MAYDAY); \
	} while (0)
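
TIP_NOTIFY is a per-thread gate: a co-kernel flips it only for tasks
under its supervision, so other threads never pay for the
notifications. Also note that ipipe_raise_mayday() must be called with
hard IRQs off, which ipipe_check_irqoff() asserts. A hypothetical
attach/detach path might read (only the ipipe_*_notifier() macros
above are from this commit):

/* Hypothetical co-kernel attach/detach (sketch). */
static void my_attach_thread(struct task_struct *p)
{
	ipipe_enable_notifier(p);	/* start sending kevents for p */
}

static void my_detach_thread(struct task_struct *p)
{
	ipipe_disable_notifier(p);	/* also clears a pending MAYDAY */
}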
#ifdef CONFIG_IPIPE_TRACE
void __ipipe_tracer_hrclock_initialized(void);
#else /* !CONFIG_IPIPE_TRACE */
@@ -322,6 +355,13 @@ static inline void ipipe_lock_irq(unsigned int irq) { }
static inline void ipipe_unlock_irq(unsigned int irq) { }
static inline
int ipipe_handle_syscall(struct thread_info *ti,
			 unsigned long nr, struct pt_regs *regs)
{
	return 0;
}
#endif /* !CONFIG_IPIPE */
#endif /* !__LINUX_IPIPE_H */
@@ -37,6 +37,11 @@ struct irq_desc;
struct pt_regs;
struct ipipe_domain;
struct ipipe_trap_data {
	int exception;
	struct pt_regs *regs;
};

static inline int ipipe_virtual_irq_p(unsigned int irq)
{
	return irq >= IPIPE_VIRQ_BASE && irq < IPIPE_NR_IRQS;
@@ -103,6 +108,65 @@ void __ipipe_flush_printk(unsigned int irq, void *cookie);
#define __ipipe_get_cpu(flags) ({ (flags) = hard_preempt_disable(); ipipe_processor_id(); })
#define __ipipe_put_cpu(flags) hard_preempt_enable(flags)
int __ipipe_notify_kevent(int event, void *data);

#define __ipipe_report_sigwake(p) \
	do { \
		if (ipipe_notifier_enabled_p(p)) \
			__ipipe_notify_kevent(IPIPE_KEVT_SIGWAKE, p); \
	} while (0)

struct ipipe_cpu_migration_data {
	struct task_struct *task;
	int dest_cpu;
};

#define __ipipe_report_setaffinity(__p, __dest_cpu) \
	do { \
		struct ipipe_cpu_migration_data d = { \
			.task = (__p), \
			.dest_cpu = (__dest_cpu), \
		}; \
		if (ipipe_notifier_enabled_p(__p)) \
			__ipipe_notify_kevent(IPIPE_KEVT_SETAFFINITY, &d); \
	} while (0)

#define __ipipe_report_exit(p) \
	do { \
		if (ipipe_notifier_enabled_p(p)) \
			__ipipe_notify_kevent(IPIPE_KEVT_EXIT, p); \
	} while (0)

#define __ipipe_report_setsched(p) \
	do { \
		if (ipipe_notifier_enabled_p(p)) \
			__ipipe_notify_kevent(IPIPE_KEVT_SETSCHED, p); \
	} while (0)

#define __ipipe_report_schedule(prev, next) \
	do { \
		if (ipipe_notifier_enabled_p(next) || \
		    ipipe_notifier_enabled_p(prev)) { \
			__this_cpu_write(ipipe_percpu.rqlock_owner, prev); \
			__ipipe_notify_kevent(IPIPE_KEVT_SCHEDULE, next); \
		} \
	} while (0)

#define __ipipe_report_cleanup(mm) \
	__ipipe_notify_kevent(IPIPE_KEVT_CLEANUP, mm)

#define __ipipe_report_clockfreq_update(freq) \
	__ipipe_notify_kevent(IPIPE_KEVT_CLOCKFREQ, &(freq))
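
The call sites for these report helpers are intentionally absent from
this commit. To illustrate the intended shape of a notification point
that a later commit would add, a signal wake-up path would do no more
than this (the surrounding function is hypothetical):

/* Illustration only: a notification point as a later commit adds it. */
static void my_signal_wakeup(struct task_struct *t)
{
	__ipipe_report_sigwake(t);	/* no-op unless TIP_NOTIFY is set */
	/* ...regular wake-up logic follows... */
}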
int __ipipe_notify_syscall(struct pt_regs *regs);

int __ipipe_notify_trap(int exception, struct pt_regs *regs);

#define __ipipe_report_trap(exception, regs) \
	__ipipe_notify_trap(exception, regs)

void __ipipe_call_mayday(struct pt_regs *regs);
#define __ipipe_serial_debug(__fmt, __args...) raw_printk(__fmt, ##__args)
#else /* !CONFIG_IPIPE */
@@ -118,6 +182,19 @@ static inline void __ipipe_init_proc(void) { }
static inline void __ipipe_idle(void) { }
static inline void __ipipe_report_sigwake(struct task_struct *p) { }

static inline void __ipipe_report_setaffinity(struct task_struct *p,
					      int dest_cpu) { }

static inline void __ipipe_report_setsched(struct task_struct *p) { }

static inline void __ipipe_report_exit(struct task_struct *p) { }

static inline void __ipipe_report_cleanup(struct mm_struct *mm) { }

#define __ipipe_report_trap(exception, regs)  0

#define hard_preempt_disable()		({ preempt_disable(); 0; })
#define hard_preempt_enable(flags)	({ preempt_enable(); (void)(flags); })
@@ -37,6 +37,8 @@
#include <linux/ipipe_trace.h>
#include <linux/ipipe.h>
#include <ipipe/setup.h>
#include <asm/syscall.h>
#include <asm/unistd.h>
struct ipipe_domain ipipe_root;
EXPORT_SYMBOL_GPL(ipipe_root);
@@ -938,6 +940,257 @@ void ipipe_free_irq(struct ipipe_domain *ipd,
}
EXPORT_SYMBOL_GPL(ipipe_free_irq);
void ipipe_set_hooks(struct ipipe_domain *ipd, int enables)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int cpu, wait;

	if (ipd == ipipe_root_domain) {
		IPIPE_WARN(enables & __IPIPE_TRAP_E);
		enables &= ~__IPIPE_TRAP_E;
	} else {
		IPIPE_WARN(enables & __IPIPE_KEVENT_E);
		enables &= ~__IPIPE_KEVENT_E;
	}

	flags = ipipe_critical_enter(NULL);

	for_each_online_cpu(cpu) {
		p = ipipe_percpu_context(ipd, cpu);
		p->coflags &= ~__IPIPE_ALL_E;
		p->coflags |= enables;
	}

	wait = (enables ^ __IPIPE_ALL_E) << __IPIPE_SHIFT_R;
	if (wait == 0 || !__ipipe_root_p) {
		ipipe_critical_exit(flags);
		return;
	}

	ipipe_this_cpu_context(ipd)->coflags &= ~wait;

	ipipe_critical_exit(flags);

	/*
	 * In case we cleared some hooks over the root domain, we have
	 * to wait for any ongoing execution to finish, since our
	 * caller might subsequently unmap the target domain code.
	 *
	 * We synchronize with the relevant __ipipe_notify_*()
	 * helpers, disabling all hooks before we start waiting for
	 * completion on all CPUs.
	 */
	for_each_online_cpu(cpu) {
		while (ipipe_percpu_context(ipd, cpu)->coflags & wait)
			schedule_timeout_interruptible(HZ / 50);
	}
}
EXPORT_SYMBOL_GPL(ipipe_set_hooks);
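
To make the wait mask concrete, the per-CPU coflags pair each
__IPIPE_*_E enable bit with a matching __IPIPE_*_R "running" bit,
__IPIPE_SHIFT_R positions to the left (this layout is implied by the
shift above and by the _R bit handling in the __ipipe_notify_*()
helpers below):

/*
 * Worked example: keep the syscall hook, drop the others.
 *
 *   enables                 = __IPIPE_SYSCALL_E
 *   enables ^ __IPIPE_ALL_E = __IPIPE_TRAP_E | __IPIPE_KEVENT_E
 *   wait                    = __IPIPE_TRAP_R | __IPIPE_KEVENT_R
 *
 * i.e. the "running" bits of every hook class just disabled: the
 * final loop spins until no CPU still executes such a hook.
 */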
int __weak ipipe_fastcall_hook(struct pt_regs *regs)
{
	return -1;	/* i.e. fall back to slow path. */
}

int __weak ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
{
	return 0;
}

static inline void sync_root_irqs(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	flags = hard_local_irq_save();

	p = ipipe_this_cpu_root_context();
	if (unlikely(__ipipe_ipending_p(p)))
		__ipipe_sync_stage();

	hard_local_irq_restore(flags);
}
int ipipe_handle_syscall(struct thread_info *ti,
			 unsigned long nr, struct pt_regs *regs)
{
	unsigned long local_flags = READ_ONCE(ti->ipipe_flags);
	int ret;

	/*
	 * NOTE: This is a backport from the DOVETAIL syscall
	 * redirector to the older pipeline implementation.
	 *
	 * ==
	 *
	 * If the syscall # is out of bounds and the current IRQ stage
	 * is not the root one, this has to be a non-native system
	 * call handled by some co-kernel on the head stage. Hand it
	 * over to the head stage via the fast syscall handler.
	 *
	 * Otherwise, if the system call is out of bounds or the
	 * current thread is shared with a co-kernel, hand the syscall
	 * over to the latter through the pipeline stages. This
	 * allows:
	 *
	 * - the co-kernel to receive the initial - foreign - syscall
	 * a thread should send for enabling syscall handling by the
	 * co-kernel.
	 *
	 * - the co-kernel to manipulate the current execution stage
	 * for handling the request, which includes switching the
	 * current thread back to the root stage if the syscall is a
	 * native one, or promoting it to the head stage if handling
	 * the foreign syscall requires this.
	 *
	 * Native syscalls from regular (non-pipeline) threads are
	 * ignored by this routine, and flow down to the regular
	 * system call handler.
	 */
	if (nr >= NR_syscalls && (local_flags & _TIP_HEAD)) {
		ipipe_fastcall_hook(regs);
		local_flags = READ_ONCE(ti->ipipe_flags);
		if (local_flags & _TIP_HEAD) {
			if (local_flags & _TIP_MAYDAY)
				__ipipe_call_mayday(regs);
			return 1; /* don't pass down, no tail work. */
		} else {
			sync_root_irqs();
			return -1; /* don't pass down, do tail work. */
		}
	}

	if ((local_flags & _TIP_NOTIFY) || nr >= NR_syscalls) {
		ret = __ipipe_notify_syscall(regs);
		local_flags = READ_ONCE(ti->ipipe_flags);
		if (local_flags & _TIP_HEAD)
			return 1; /* don't pass down, no tail work. */
		if (ret)
			return -1; /* don't pass down, do tail work. */
	}

	return 0; /* pass syscall down to the host. */
}
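
The tri-state return value is meant for each architecture's syscall
entry code; a sketch of such glue follows. ipipe_handle_syscall() and
syscall_get_nr() are real (the latter comes from asm/syscall.h,
included above); the entry point and host dispatch names are
hypothetical and arch-specific:

/* Hypothetical arch entry glue (sketch): consuming the tri-state result. */
asmlinkage long my_arch_syscall_entry(struct pt_regs *regs)
{
	unsigned long nr = syscall_get_nr(current, regs);
	int ret = ipipe_handle_syscall(current_thread_info(), nr, regs);

	if (ret > 0)
		return fast_exit(regs);		/* hypothetical: skip host
						 * syscall and tail work */
	if (ret < 0)
		return tail_work_exit(regs);	/* hypothetical: skip host
						 * syscall, run tail work */

	return do_host_syscall(nr, regs);	/* hypothetical: normal path */
}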
int __ipipe_notify_syscall(struct pt_regs *regs)
{
	struct ipipe_domain *caller_domain, *this_domain, *ipd;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret = 0;

	/*
	 * We should definitely not pipeline a syscall with IRQs off.
	 */
	IPIPE_WARN_ONCE(hard_irqs_disabled());

	flags = hard_local_irq_save();
	caller_domain = this_domain = __ipipe_current_domain;
	ipd = ipipe_head_domain;
next:
	p = ipipe_this_cpu_context(ipd);
	if (likely(p->coflags & __IPIPE_SYSCALL_E)) {
		__ipipe_set_current_context(p);
		p->coflags |= __IPIPE_SYSCALL_R;
		hard_local_irq_restore(flags);
		ret = ipipe_syscall_hook(caller_domain, regs);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_SYSCALL_R;
		if (__ipipe_current_domain != ipd)
			/* Account for domain migration. */
			this_domain = __ipipe_current_domain;
		else
			__ipipe_set_current_domain(this_domain);
	}

	if (this_domain == ipipe_root_domain) {
		if (ipd != ipipe_root_domain && ret == 0) {
			ipd = ipipe_root_domain;
			goto next;
		}
		/*
		 * Careful: we may have migrated from head->root, so p
		 * would be ipipe_this_cpu_context(head).
		 */
		p = ipipe_this_cpu_root_context();
		if (__ipipe_ipending_p(p))
			__ipipe_sync_stage();
	} else if (ipipe_test_thread_flag(TIP_MAYDAY))
		__ipipe_call_mayday(regs);

	hard_local_irq_restore(flags);

	return ret;
}
int __weak ipipe_trap_hook(struct ipipe_trap_data *data)
{
	return 0;
}

int __ipipe_notify_trap(int exception, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_trap_data data;
	unsigned long flags;
	int ret = 0;

	flags = hard_local_irq_save();

	/*
	 * We send a notification about all traps raised over a
	 * registered head domain only.
	 */
	if (__ipipe_root_p)
		goto out;

	p = ipipe_this_cpu_head_context();
	if (likely(p->coflags & __IPIPE_TRAP_E)) {
		p->coflags |= __IPIPE_TRAP_R;
		hard_local_irq_restore(flags);
		data.exception = exception;
		data.regs = regs;
		ret = ipipe_trap_hook(&data);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_TRAP_R;
	}
out:
	hard_local_irq_restore(flags);

	return ret;
}
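
A co-kernel receives traps by overriding the weak hook. A sketch, with
a plausible return convention (nonzero meaning "handled, skip the root
fault handling", which this diff does not itself establish); the body
is hypothetical:

/* Sketch: co-kernel override of the __weak trap hook. */
int ipipe_trap_hook(struct ipipe_trap_data *data)
{
	if (data->exception == IPIPE_TRAP_MAYDAY) {
		/* hypothetical: act on the pending mayday request here */
		return 1;	/* handled, skip root processing */
	}
	return 0;	/* let the root kernel handle the fault */
}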
int __weak ipipe_kevent_hook(int kevent, void *data)
{
	return 0;
}

int __ipipe_notify_kevent(int kevent, void *data)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret = 0;

	ipipe_root_only();

	flags = hard_local_irq_save();

	p = ipipe_this_cpu_root_context();
	if (likely(p->coflags & __IPIPE_KEVENT_E)) {
		p->coflags |= __IPIPE_KEVENT_R;
		hard_local_irq_restore(flags);
		ret = ipipe_kevent_hook(kevent, data);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_KEVENT_R;
	}

	hard_local_irq_restore(flags);

	return ret;
}
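
Likewise for kevents: a sketch of a co-kernel handler dispatching on
the event class. The payload types match the __ipipe_report_*()
helpers earlier in this patch; the handler bodies are hypothetical:

/* Sketch: co-kernel override of the __weak kevent hook. */
int ipipe_kevent_hook(int kevent, void *data)
{
	switch (kevent) {
	case IPIPE_KEVT_SIGWAKE:	/* data: struct task_struct * */
	case IPIPE_KEVT_EXIT:		/* data: struct task_struct * */
	case IPIPE_KEVT_SETSCHED:	/* data: struct task_struct * */
		break;
	case IPIPE_KEVT_SETAFFINITY:	/* data: struct ipipe_cpu_migration_data * */
		break;
	case IPIPE_KEVT_SCHEDULE:	/* data: the next task_struct * */
		break;
	case IPIPE_KEVT_CLEANUP:	/* data: struct mm_struct * */
		break;
	}
	return 0;	/* let the root kernel proceed */
}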
static void dispatch_irq_head(unsigned int irq) /* hw interrupts off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context(), *old;
@@ -1265,6 +1518,16 @@ void __ipipe_do_sync_stage(void)
	__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
void __ipipe_call_mayday(struct pt_regs *regs)
{
	unsigned long flags;

	ipipe_clear_thread_flag(TIP_MAYDAY);
	flags = hard_local_irq_save();
	__ipipe_notify_trap(IPIPE_TRAP_MAYDAY, regs);
	hard_local_irq_restore(flags);
}
#ifdef CONFIG_SMP
/* Always called with hw interrupts off. */