Commit c016ff2d authored by Gilles Chanteperdrix

Merge commit 'rpm/for-upstream'

parents c301fd36 036120a6
@@ -2,7 +2,7 @@
* @ingroup hal
* @file
*
- * Adeos-based Real-Time Abstraction Layer for PowerPC.
+ * Adeos-based Real-Time Abstraction Layer for ARM.
*
* ARM port
* Copyright (C) 2005 Stelian Pop
@@ -49,142 +49,78 @@
#endif /* CONFIG_PROC_FS */
#include <stdarg.h>
-static struct {
-	unsigned long flags;
-	int count;
-} rthal_linux_irq[IPIPE_NR_XIRQS];
-
-rthal_u32frac_t rthal_tsc_to_timer;
-EXPORT_SYMBOL(rthal_tsc_to_timer);
-
-enum rthal_ktimer_mode rthal_ktimer_saved_mode;
-
-int rthal_timer_request(void (*handler)(void),
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-			void (*mode_emul)(enum clock_event_mode mode,
-					  struct clock_event_device *cdev),
-			int (*tick_emul)(unsigned long delay,
-					 struct clock_event_device *cdev),
-#endif /* CONFIG_GENERIC_CLOCKEVENTS */
-			int cpu)
-{
-	int tickval, err;
-	unsigned long flags;
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-	unsigned long dummy, *tmfreq = &dummy;
-	int res;
-
-	if (rthal_timerfreq_arg == 0)
-		tmfreq = &rthal_tunables.timer_freq;
-
-	res = ipipe_request_tickdev(RTHAL_TIMER_DEVICE, mode_emul,
-				    tick_emul, cpu, tmfreq);
-	switch (res) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* oneshot tick emulation callback won't be used, ask
-		 * the caller to start an internal timer for emulating
-		 * a periodic tick. */
-		tickval = 1000000000UL / HZ;
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* oneshot tick emulation */
-		tickval = 1;
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-		/* we don't need to emulate the tick at all. */
-		tickval = 0;
-		break;
-
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		return -ENOSYS;
-
-	default:
-		return res;
-	}
-	rthal_ktimer_saved_mode = res;
-#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-	tickval = 1000000000UL / HZ;
-	rthal_ktimer_saved_mode = KTIMER_MODE_PERIODIC;
-	if (rthal_timerfreq_arg == 0)
-		rthal_tunables.timer_freq = CLOCK_TICK_RATE;
-#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
-
-	flags = rthal_critical_enter(NULL);
-
-	__ipipe_mach_timerstolen = 1;
-	rthal_timer_program_shot(__ipipe_mach_ticks_per_jiffy);
-
-	rthal_irq_release(RTHAL_TIMER_IRQ);
-
-	err = rthal_irq_request(RTHAL_TIMER_IRQ,
-				(rthal_irq_handler_t) handler, NULL, NULL);
-
-	rthal_critical_exit(flags);
-
-	return err ?: tickval;
-}
-
-void rthal_timer_release(int cpu)
-{
-	unsigned long flags;
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-	ipipe_release_tickdev(cpu);
-#endif
-	flags = rthal_critical_enter(NULL);
-	__ipipe_mach_release_timer();
-	rthal_irq_release(RTHAL_TIMER_IRQ);
-	__ipipe_mach_timerstolen = 0;
-	rthal_critical_exit(flags);
-}
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-void rthal_timer_notify_switch(enum clock_event_mode mode,
-			       struct clock_event_device *cdev)
-{
-	rthal_ktimer_saved_mode = mode;
-}
-EXPORT_SYMBOL(rthal_timer_notify_switch);
-#endif
-
+static struct {
+	unsigned long flags;
+	int count;
+} rthal_linux_irq[IPIPE_NR_XIRQS];
+
+enum rthal_ktimer_mode rthal_ktimer_saved_mode;
+
+#define RTHAL_SET_ONESHOT_XENOMAI	1
+#define RTHAL_SET_ONESHOT_LINUX		2
+#define RTHAL_SET_PERIODIC		3
+
+static inline void steal_timer(int stolen)
+{
+	/*
+	 * Some platform-specific I-pipe bits may want to know whether
+	 * non-vanilla kernel code is currently fiddling with the
+	 * timer chip; setting this flag on tells them so.
+	 */
+	__ipipe_mach_timerstolen = stolen;
+}
+
+static inline void force_oneshot_hw_mode(void)
+{
+	/*
+	 * Program next tick ahead at a sensible date. We expect
+	 * __ipipe_mach_set_dec() to switch off any auto-reload mode
+	 * if that makes sense for the hardware.
+	 */
+	__ipipe_mach_set_dec(__ipipe_mach_ticks_per_jiffy);
+}
+
+static inline void restore_normal_hw_mode(void)
+{
+	steal_timer(0);
+	/*
+	 * Ask the I-pipe to reset the normal timer operating mode at
+	 * hardware level, which should match the current logical mode
+	 * for the active clockevent.
+	 */
+	__ipipe_mach_release_timer();
+}
+
#define RTHAL_CALIBRATE_LOOPS 10

unsigned long rthal_timer_calibrate(void)
{
-	unsigned long long next_shot = 0, start, end, sum = 0, sum_sq = 0;
+	unsigned long long start, end, sum = 0, sum_sq = 0;
	volatile unsigned const_delay = 0xffffffff;
-	unsigned int delay = const_delay, diff;
	unsigned long result, flags, tsc_lat;
+	unsigned delay = const_delay;
+	unsigned diff;
	int i, j;

	flags = rthal_critical_enter(NULL);

+	/*
+	 * Hw interrupts off, other CPUs quiesced, no migration
+	 * possible. We can now fiddle with the timer chip (per-cpu
+	 * local or global, rthal_timer_program_shot() will handle
+	 * this transparently via the I-pipe).
+	 */
+	steal_timer(1);
+	force_oneshot_hw_mode();
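
	/*
	 * Measure the cost of two back-to-back tsc reads, so that
	 * this constant overhead can be subtracted from each
	 * calibration sample taken below.
	 */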
	rthal_read_tsc(start);
	barrier();
	rthal_read_tsc(end);
	tsc_lat = end - start;
	barrier();

-	if (__ipipe_mach_timerstolen) {
-		rthal_read_tsc(next_shot);
-		next_shot += rthal_imuldiv(__ipipe_mach_get_dec(),
-					   RTHAL_CLOCK_FREQ, RTHAL_TIMER_FREQ);
-		next_shot -= tsc_lat;
-	}
-
	for (i = 0; i < RTHAL_CALIBRATE_LOOPS; i++) {
		flush_cache_all();
		for (j = 0; j < RTHAL_CALIBRATE_LOOPS; j++) {
@@ -195,20 +131,12 @@ unsigned long rthal_timer_calibrate(void)
			barrier();
			rthal_read_tsc(end);
			diff = end - start - tsc_lat;
			sum += diff;
			sum_sq += diff * diff;
		}
	}

-	if (__ipipe_mach_timerstolen) {
-		delay = (next_shot > end
-			 ? rthal_nodiv_imuldiv_ceil(next_shot - end,
-						    rthal_tsc_to_timer)
-			 : 0);
-		rthal_timer_program_shot(delay);
-	} else
-		__ipipe_mach_release_timer();
+	restore_normal_hw_mode();

	rthal_critical_exit(flags);
@@ -216,79 +144,273 @@ unsigned long rthal_timer_calibrate(void)
	do_div(sum, RTHAL_CALIBRATE_LOOPS * RTHAL_CALIBRATE_LOOPS);
	do_div(sum_sq, RTHAL_CALIBRATE_LOOPS * RTHAL_CALIBRATE_LOOPS);
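
	/*
	 * sum and sum_sq now hold E[x] and E[x^2] over the 10x10
	 * samples, so the line below yields the mean latency plus one
	 * standard deviation (int_sqrt(E[x^2] - E[x]^2)), plus 1 to
	 * round up.
	 */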
	result = sum + int_sqrt(sum_sq - sum * sum) + 1;

	return result;
}

+#ifdef CONFIG_SMP
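+/*
+ * This routine runs on the other CPUs spinning in
+ * rthal_critical_enter(), so that each of them locally replays the
+ * timer mode change (recorded in rthal_sync_op) picked by the CPU
+ * which initiated the transition.
+ */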
+static void rthal_critical_sync(void)
+{
+	switch (rthal_sync_op) {
+	case RTHAL_SET_ONESHOT_XENOMAI:
+		force_oneshot_hw_mode();
+		steal_timer(1);
+		break;
+
+	case RTHAL_SET_ONESHOT_LINUX:
+		force_oneshot_hw_mode();
+		steal_timer(0);
+		/* We need to keep the timing cycle alive for the kernel. */
+		rthal_trigger_irq(RTHAL_TIMER_IRQ);
+		break;
+
+	case RTHAL_SET_PERIODIC:
+		restore_normal_hw_mode();
+		break;
+	}
+}
+#else /* CONFIG_SMP */
+#define rthal_critical_sync NULL
+#endif /* !CONFIG_SMP */
+
+static void rthal_timer_set_oneshot(int rt_mode)
+{
+	unsigned long flags;
+
+	flags = rthal_critical_enter(rthal_critical_sync);
+	if (rt_mode) {
+		rthal_sync_op = RTHAL_SET_ONESHOT_XENOMAI;
+		force_oneshot_hw_mode();
+		steal_timer(1);
+	} else {
+		rthal_sync_op = RTHAL_SET_ONESHOT_LINUX;
+		force_oneshot_hw_mode();
+		steal_timer(0);
+		/* We need to keep the timing cycle alive for the kernel. */
+		rthal_trigger_irq(RTHAL_TIMER_IRQ);
+	}
+	rthal_critical_exit(flags);
+}
+
+static void rthal_timer_set_periodic(void)
+{
+	unsigned long flags;
+
+	flags = rthal_critical_enter(rthal_critical_sync);
+	rthal_sync_op = RTHAL_SET_PERIODIC;
+	restore_normal_hw_mode();
+	rthal_critical_exit(flags);
+}
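+
+/*
+ * The timer IRQ and the underlying hardware setup are system-wide,
+ * so the per-CPU requests are refcounted: only the first request
+ * performs the actual setup, and only the last release undoes it.
+ */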
+static int cpu_timers_requested;
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+int rthal_timer_request(
+	void (*tick_handler)(void),
+	void (*mode_emul)(enum clock_event_mode mode,
+			  struct clock_event_device *cdev),
+	int (*tick_emul)(unsigned long delay,
+			 struct clock_event_device *cdev),
+	int cpu)
+{
+	unsigned long dummy, *tmfreq = &dummy;
+	int tickval, ret;
+
+	if (rthal_timerfreq_arg == 0)
+		tmfreq = &rthal_tunables.timer_freq;
+
+	ret = ipipe_request_tickdev(RTHAL_TIMER_DEVICE, mode_emul, tick_emul, cpu,
+				    tmfreq);
+	switch (ret) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* oneshot tick emulation callback won't be used, ask
+		 * the caller to start an internal timer for emulating
+		 * a periodic tick. */
+		tickval = 1000000000UL / HZ;
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* oneshot tick emulation */
+		tickval = 1;
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+		/* we don't need to emulate the tick at all. */
+		tickval = 0;
+		break;
+
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		return -ENODEV;
+
+	default:
+		return ret;
+	}
+	rthal_ktimer_saved_mode = ret;
+
+	/*
+	 * The rest of the initialization should only be performed
+	 * once by a single CPU.
+	 */
+	if (cpu_timers_requested++ > 0)
+		goto out;
+
+	ret = rthal_irq_request(RTHAL_TIMER_IRQ,
+				(rthal_irq_handler_t)tick_handler,
+				NULL, NULL);
+	if (ret)
+		return ret;
+
+	rthal_timer_set_oneshot(1);
+
+out:
+	return tickval;
+}
+
+void rthal_timer_release(int cpu)
+{
+	ipipe_release_tickdev(cpu);
+
+	if (--cpu_timers_requested > 0)
+		return;
+
+	rthal_irq_release(RTHAL_TIMER_IRQ);
+
+	if (rthal_ktimer_saved_mode == KTIMER_MODE_PERIODIC)
+		rthal_timer_set_periodic();
+	else if (rthal_ktimer_saved_mode == KTIMER_MODE_ONESHOT)
+		rthal_timer_set_oneshot(0);
+}
+
+void rthal_timer_notify_switch(enum clock_event_mode mode,
+			       struct clock_event_device *cdev)
+{
+	if (rthal_processor_id() > 0)
+		/*
+		 * We assume all CPUs switch the same way, so we only
+		 * track mode switches from the boot CPU.
+		 */
+		return;
+
+	rthal_ktimer_saved_mode = mode;
+}
+EXPORT_SYMBOL(rthal_timer_notify_switch);
+
+#else /* !CONFIG_GENERIC_CLOCKEVENTS */
+
+int rthal_timer_request(void (*handler)(void), int cpu)
+{
+	int ret;
+
+	/*
+	 * The rest of the initialization should only be performed
+	 * once by a single CPU.
+	 */
+	if (cpu_timers_requested++ > 0)
+		return 0;
+
+	rthal_ktimer_saved_mode = KTIMER_MODE_PERIODIC;
+
+	if (rthal_timerfreq_arg == 0)
+		rthal_tunables.timer_freq = rthal_cpufreq_arg;
+
+	ret = rthal_irq_request(RTHAL_TIMER_IRQ,
+				(rthal_irq_handler_t) handler,
+				NULL, NULL);
+	if (ret)
+		return ret;
+
+	rthal_timer_set_oneshot(1);
+
+	return 0;
+}
+
+void rthal_timer_release(int cpu)
+{
+	if (--cpu_timers_requested > 0)
+		return;
+
+	rthal_irq_release(RTHAL_TIMER_IRQ);
+	rthal_timer_set_periodic();
+}
+
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+
int rthal_irq_host_request(unsigned irq,
			   rthal_irq_host_handler_t handler,
			   char *name, void *dev_id)
{
	unsigned long flags;

	if (irq >= IPIPE_NR_XIRQS ||
	    handler == NULL ||
	    rthal_irq_descp(irq) == NULL)
		return -EINVAL;

	rthal_irqdesc_lock(irq, flags);

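	/*
	 * If Linux already installed a handler on this IRQ, force
	 * IRQF_SHARED on it (saving the original flags), so that the
	 * request_irq() below can attach a second, shared action.
	 */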
	if (rthal_linux_irq[irq].count++ == 0 && rthal_irq_descp(irq)->action) {
		rthal_linux_irq[irq].flags = rthal_irq_descp(irq)->action->flags;
		rthal_irq_descp(irq)->action->flags |= IRQF_SHARED;
	}

	rthal_irqdesc_unlock(irq, flags);

	return request_irq(irq, handler, IRQF_SHARED, name, dev_id);
}
int rthal_irq_host_release(unsigned irq, void *dev_id)
{
	unsigned long flags;

	if (irq >= IPIPE_NR_XIRQS ||
	    rthal_linux_irq[irq].count == 0 ||
	    rthal_irq_descp(irq) == NULL)
		return -EINVAL;

	free_irq(irq, dev_id);

	rthal_irqdesc_lock(irq, flags);

	if (--rthal_linux_irq[irq].count == 0 && rthal_irq_descp(irq)->action)
		rthal_irq_descp(irq)->action->flags = rthal_linux_irq[irq].flags;

	rthal_irqdesc_unlock(irq, flags);

	return 0;
}
-int rthal_irq_enable(unsigned irq)
+int rthal_irq_enable(unsigned int irq)
{
	if (irq >= IPIPE_NR_XIRQS || rthal_irq_descp(irq) == NULL)
		return -EINVAL;

	/*
	 * We don't care about the disable nesting level: real-time
	 * IRQ channels are not meant to be shared with the regular
	 * kernel.
	 */
	rthal_mark_irq_enabled(irq);

	return rthal_irq_chip_enable(irq);
}
-int rthal_irq_disable(unsigned irq)
+int rthal_irq_disable(unsigned int irq)
{
	if (irq >= IPIPE_NR_XIRQS || rthal_irq_descp(irq) == NULL)
		return -EINVAL;

	rthal_mark_irq_disabled(irq);

	return rthal_irq_chip_disable(irq);
}
-int rthal_irq_end(unsigned irq)
+int rthal_irq_end(unsigned int irq)
{
	if (irq >= IPIPE_NR_XIRQS || rthal_irq_descp(irq) == NULL)
		return -EINVAL;

	return rthal_irq_chip_end(irq);
}
void __rthal_arm_fault_range(struct vm_area_struct *vma)
@@ -300,28 +422,28 @@ void __rthal_arm_fault_range(struct vm_area_struct *vma)
static inline int do_exception_event(unsigned event, unsigned domid, void *data)
{
	if (domid == RTHAL_DOMAIN_ID) {
		rthal_realtime_faults[rthal_processor_id()][event]++;

		if (rthal_trap_handler != NULL &&
		    rthal_trap_handler(event, domid, data) != 0)
			return RTHAL_EVENT_STOP;
	}

	return RTHAL_EVENT_PROPAGATE;
}
RTHAL_DECLARE_EVENT(exception_event);
static inline void do_rthal_domain_entry(void)
{
	unsigned trapnr;

	/* Trap all faults. */
	for (trapnr = 0; trapnr < RTHAL_NR_FAULTS; trapnr++)
		rthal_catch_exception(trapnr, &exception_event);

	printk(KERN_INFO "Xenomai: hal/arm started.\n");
}
RTHAL_DECLARE_DOMAIN(rthal_domain_entry);
@@ -345,8 +467,8 @@ int rthal_arch_init(void)
void rthal_arch_cleanup(void)
{
	/* Nothing to cleanup so far. */
	printk(KERN_INFO "Xenomai: hal/arm stopped.\n");
}
/*@}*/
@@ -361,5 +483,3 @@ EXPORT_SYMBOL(last_VFP_context);
EXPORT_SYMBOL(rthal_vfp_save);
EXPORT_SYMBOL(rthal_vfp_load);
#endif /* CONFIG_VFP && CONFIG_XENO_HW_FPU */
-
-// vim: ts=4 et sw=4 sts=4