Commit 6d2dd0bf authored by Philippe Gerum, committed by Jan Kiszka

cobalt/sched: group high-level init/cleanup code

Move scheduler init/cleanup bits to dedicated routines.
This concentrates scheduler related code in the sched module.
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
[Jan: tweak commit message]
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
parent e9de2fe2
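At a glance, the hunks below replace the open-coded per-CPU loops and reschedule-IPI plumbing in sys_init()/sys_shutdown() with single calls to the new xnsched_init_all()/xnsched_destroy_all() entry points, while xnsched_init()/xnsched_destroy() become static helpers of the sched module. The following self-contained C sketch only illustrates that grouping pattern; every name, type and constant in it is a placeholder, not the actual Cobalt/I-pipe API.

/*
 * Simplified sketch of the pattern this commit applies: per-CPU
 * init/cleanup helpers become private, and a single *_all() entry
 * point drives them plus any shared resource setup.  All identifiers
 * below are illustrative placeholders.
 */
#include <stdio.h>

#define NR_CPUS 4

struct sched_stub {
	int cpu;
	int ready;
};

static struct sched_stub per_cpu_sched[NR_CPUS];

/* Per-CPU initializer, now private to the sched module. */
static void sched_init_one(struct sched_stub *sched, int cpu)
{
	sched->cpu = cpu;
	sched->ready = 1;
}

/* Per-CPU cleanup, likewise private. */
static void sched_destroy_one(struct sched_stub *sched)
{
	sched->ready = 0;
}

/* Public entry point: set up every CPU's scheduler state (stands in
 * for the per-CPU loop plus the reschedule-IPI request). */
void sched_init_all(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sched_init_one(&per_cpu_sched[cpu], cpu);
}

/* Public counterpart: tear down the per-CPU state (stands in for
 * freeing the IPI and destroying each per-CPU scheduler). */
void sched_destroy_all(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sched_destroy_one(&per_cpu_sched[cpu]);
}

int main(void)
{
	sched_init_all();	/* was: open-coded loop in sys_init() */
	printf("cpu1 ready: %d\n", per_cpu_sched[1].ready);
	sched_destroy_all();	/* was: open-coded loop in sys_shutdown() */
	return 0;
}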
@@ -410,9 +410,9 @@ void xnsched_cleanup_proc(void);
 void xnsched_register_classes(void);
-void xnsched_init(struct xnsched *sched, int cpu);
+void xnsched_init_all(void);
-void xnsched_destroy(struct xnsched *sched);
+void xnsched_destroy_all(void);
 struct xnthread *xnsched_pick_next(struct xnsched *sched);
......
@@ -124,34 +124,10 @@ EXPORT_SYMBOL_GPL(cobalt_call_state_chain);
 static void sys_shutdown(void)
 {
-	struct xnthread *thread, *tmp;
-	struct xnsched *sched;
 	void *membase;
-	int cpu;
-	spl_t s;
 	xntimer_release_hardware();
-#ifdef CONFIG_SMP
-	ipipe_free_irq(&xnsched_realtime_domain, IPIPE_RESCHEDULE_IPI);
-#endif
-	xnlock_get_irqsave(&nklock, s);
-	/* NOTE: &nkthreadq can't be empty (root thread(s)). */
-	list_for_each_entry_safe(thread, tmp, &nkthreadq, glink) {
-		if (!xnthread_test_state(thread, XNROOT))
-			xnthread_cancel(thread);
-	}
-	xnsched_run();
-	for_each_online_cpu(cpu) {
-		sched = xnsched_struct(cpu);
-		xnsched_destroy(sched);
-	}
-	xnlock_put_irqrestore(&nklock, s);
+	xnsched_destroy_all();
 	xnregistry_cleanup();
 	membase = xnheap_get_membase(&cobalt_heap);
 	xnheap_destroy(&cobalt_heap);
@@ -281,9 +257,8 @@ static void __init setup_init_state(void)
 static __init int sys_init(void)
 {
-	struct xnsched *sched;
 	void *heapaddr;
-	int ret, cpu;
+	int ret;
 	if (sysheap_size_arg == 0)
 		sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ;
@@ -295,17 +270,7 @@ static __init int sys_init(void)
 	}
 	xnheap_set_name(&cobalt_heap, "system heap");
-	for_each_online_cpu(cpu) {
-		sched = &per_cpu(nksched, cpu);
-		xnsched_init(sched, cpu);
-	}
-#ifdef CONFIG_SMP
-	ipipe_request_irq(&xnsched_realtime_domain,
-			  IPIPE_RESCHEDULE_IPI,
-			  (ipipe_irq_handler_t)__xnsched_run_handler,
-			  NULL, NULL);
-#endif
+	xnsched_init_all();
 	xnregistry_init();
......
@@ -149,7 +149,7 @@ static void roundrobin_handler(struct xntimer *timer)
 	xnsched_tick(sched);
 }
-void xnsched_init(struct xnsched *sched, int cpu)
+static void xnsched_init(struct xnsched *sched, int cpu)
 {
 	char rrbtimer_name[XNOBJECT_NAME_LEN];
 	char htimer_name[XNOBJECT_NAME_LEN];
@@ -219,7 +219,25 @@ void xnsched_init(struct xnsched *sched, int cpu)
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
 }
-void xnsched_destroy(struct xnsched *sched)
+void xnsched_init_all(void)
+{
+	struct xnsched *sched;
+	int cpu;
+	for_each_online_cpu(cpu) {
+		sched = &per_cpu(nksched, cpu);
+		xnsched_init(sched, cpu);
+	}
+#ifdef CONFIG_SMP
+	ipipe_request_irq(&xnsched_realtime_domain,
+			  IPIPE_RESCHEDULE_IPI,
+			  (ipipe_irq_handler_t)__xnsched_run_handler,
+			  NULL, NULL);
+#endif
+}
+static void xnsched_destroy(struct xnsched *sched)
 {
 	xntimer_destroy(&sched->htimer);
 	xntimer_destroy(&sched->rrbtimer);
@@ -230,6 +248,35 @@ void xnsched_destroy(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
 }
+void xnsched_destroy_all(void)
+{
+	struct xnthread *thread, *tmp;
+	struct xnsched *sched;
+	int cpu;
+	spl_t s;
+#ifdef CONFIG_SMP
+	ipipe_free_irq(&xnsched_realtime_domain, IPIPE_RESCHEDULE_IPI);
+#endif
+	xnlock_get_irqsave(&nklock, s);
+	/* NOTE: &nkthreadq can't be empty (root thread(s)). */
+	list_for_each_entry_safe(thread, tmp, &nkthreadq, glink) {
+		if (!xnthread_test_state(thread, XNROOT))
+			xnthread_cancel(thread);
+	}
+	xnsched_run();
+	for_each_online_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		xnsched_destroy(sched);
+	}
+	xnlock_put_irqrestore(&nklock, s);
+}
 static inline void set_thread_running(struct xnsched *sched,
 				      struct xnthread *thread)
 {
......