......@@ -53,7 +53,7 @@ config ARM
select HAVE_ARM_SMCCC if CPU_V7
select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
select HAVE_CC_STACKPROTECTOR
select HAVE_CONTEXT_TRACKING
select HAVE_CONTEXT_TRACKING if !IPIPE
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
......@@ -70,6 +70,7 @@ config ARM
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_IPIPE_SUPPORT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
......@@ -913,6 +914,15 @@ config PLAT_PXA
config PLAT_VERSATILE
bool
if IPIPE
config IPIPE_ARM_KUSER_TSC
bool
select HAVE_IPIPE_TRACER_SUPPORT
select GENERIC_TIME_VSYSCALL
select IPIPE_HAVE_HOSTRT if IPIPE
default y if ARM_TIMER_SP804 || ARCH_MXC || ARCH_OMAP
endif
source "arch/arm/firmware/Kconfig"
source arch/arm/mm/Kconfig
......@@ -1483,6 +1493,8 @@ config ARCH_NR_GPIO
If unsure, leave the default value.
source kernel/ipipe/Kconfig
source kernel/Kconfig.preempt
config HZ_FIXED
......@@ -1757,6 +1769,7 @@ config ALIGNMENT_TRAP
config UACCESS_WITH_MEMCPY
bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
depends on MMU
depends on !IPIPE
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
......
......@@ -26,6 +26,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/ipipe.h>
#include <asm/mach/pci.h>
#include <asm/hardware/it8152.h>
......@@ -75,6 +76,7 @@ static struct irq_chip it8152_irq_chip = {
.irq_ack = it8152_mask_irq,
.irq_mask = it8152_mask_irq,
.irq_unmask = it8152_unmask_irq,
.flags = IRQCHIP_PIPELINE_SAFE,
};
void it8152_init_irq(void)
......@@ -124,21 +126,21 @@ void it8152_irq_demux(struct irq_desc *desc)
bits_pd &= ((1 << IT8152_PD_IRQ_COUNT) - 1);
while (bits_pd) {
i = __ffs(bits_pd);
generic_handle_irq(IT8152_PD_IRQ(i));
ipipe_handle_demuxed_irq(IT8152_PD_IRQ(i));
bits_pd &= ~(1 << i);
}
bits_lp &= ((1 << IT8152_LP_IRQ_COUNT) - 1);
while (bits_lp) {
i = __ffs(bits_lp);
generic_handle_irq(IT8152_LP_IRQ(i));
ipipe_handle_demuxed_irq(IT8152_LP_IRQ(i));
bits_lp &= ~(1 << i);
}
bits_ld &= ((1 << IT8152_LD_IRQ_COUNT) - 1);
while (bits_ld) {
i = __ffs(bits_ld);
generic_handle_irq(IT8152_LD_IRQ(i));
ipipe_handle_demuxed_irq(IT8152_LD_IRQ(i));
bits_ld &= ~(1 << i);
}
}
......
......@@ -100,6 +100,18 @@
.macro enable_irq_notrace
cpsie i
.endm
.macro disable_irq_cond
#ifdef CONFIG_IPIPE
cpsid i
#endif /* CONFIG_IPIPE */
.endm
.macro enable_irq_cond
#ifdef CONFIG_IPIPE
cpsie i
#endif /* CONFIG_IPIPE */
.endm
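@ Illustration of intent (commentary, not from the original header):
@ the *_cond variants expand to nothing unless CONFIG_IPIPE is set.
@ Pipelined kernels need a few entry paths to run with the hard CPSR
@ I-bit masked, while non-pipelined builds leave those paths untouched.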
#else
.macro disable_irq_notrace
msr cpsr_c, #PSR_I_BIT | SVC_MODE
......@@ -108,10 +120,22 @@
.macro enable_irq_notrace
msr cpsr_c, #SVC_MODE
.endm
.macro disable_irq_cond
#ifdef CONFIG_IPIPE
msr cpsr_c, #PSR_I_BIT | SVC_MODE
#endif /* CONFIG_IPIPE */
.endm
.macro enable_irq_cond
#ifdef CONFIG_IPIPE
msr cpsr_c, #SVC_MODE
#endif /* CONFIG_IPIPE */
.endm
#endif
.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_IPIPE)
.if \save
stmdb sp!, {r0-r3, ip, lr}
.endif
......@@ -123,7 +147,7 @@
.endm
.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_IPIPE)
/*
* actually the registers should be pushed and pop'd conditionally, but
* after bl the flags are certainly clobbered
......
......@@ -168,9 +168,9 @@ static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
raw_local_irq_save(flags); \
flags = hard_local_irq_save(); \
v->counter c_op i; \
raw_local_irq_restore(flags); \
hard_local_irq_restore(flags); \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
......@@ -179,10 +179,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
unsigned long flags; \
int val; \
\
raw_local_irq_save(flags); \
flags = hard_local_irq_save(); \
v->counter c_op i; \
val = v->counter; \
raw_local_irq_restore(flags); \
hard_local_irq_restore(flags); \
\
return val; \
}
......@@ -193,10 +193,10 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
unsigned long flags; \
int val; \
\
raw_local_irq_save(flags); \
flags = hard_local_irq_save(); \
val = v->counter; \
v->counter c_op i; \
raw_local_irq_restore(flags); \
hard_local_irq_restore(flags); \
\
return val; \
}
......@@ -206,11 +206,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
int ret;
unsigned long flags;
raw_local_irq_save(flags);
flags = hard_local_irq_save();
ret = v->counter;
if (likely(ret == old))
v->counter = new;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
return ret;
}
......
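The raw_local_irq_save() → hard_local_irq_save() substitution in these helpers is the heart of the pipeline: the stock local_irq_* operations are virtualized into a per-domain stall bit and no longer mask the CPU, so primitives that must stay atomic against the head domain have to mask it for real. A minimal user-space sketch of the two levels (illustrative names only, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool cpu_irqs_masked;	/* models the hard CPSR I-bit */
static bool root_stalled;	/* models the root domain's stall bit */

/* What hard_local_irq_save()/restore() do: touch the real CPU mask. */
static bool hard_irq_save(void) { bool f = cpu_irqs_masked; cpu_irqs_masked = true; return f; }
static void hard_irq_restore(bool f) { cpu_irqs_masked = f; }

/* What the virtualized local_irq_save()/restore() become: flip the stall bit only. */
static bool virt_irq_save(void) { bool f = root_stalled; root_stalled = true; return f; }
static void virt_irq_restore(bool f) { root_stalled = f; }

int main(void)
{
	bool vf = virt_irq_save();
	/* CPU still takes interrupts; they are queued for the root domain. */
	printf("virtual save: cpu masked=%d stalled=%d\n", cpu_irqs_masked, root_stalled);
	virt_irq_restore(vf);

	bool hf = hard_irq_save();
	/* Nothing, not even the head domain's timer, can preempt here. */
	printf("hard save:    cpu masked=%d stalled=%d\n", cpu_irqs_masked, root_stalled);
	hard_irq_restore(hf);
	return 0;
}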
......@@ -40,9 +40,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
p += BIT_WORD(bit);
raw_local_irq_save(flags);
flags = hard_local_irq_save();
*p |= mask;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
}
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
......@@ -52,9 +52,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
p += BIT_WORD(bit);
raw_local_irq_save(flags);
flags = hard_local_irq_save();
*p &= ~mask;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
}
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
......@@ -64,9 +64,9 @@ static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned lon
p += BIT_WORD(bit);
raw_local_irq_save(flags);
flags = hard_local_irq_save();
*p ^= mask;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
}
static inline int
......@@ -78,10 +78,10 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
p += BIT_WORD(bit);
raw_local_irq_save(flags);
flags = hard_local_irq_save();
res = *p;
*p = res | mask;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
return (res & mask) != 0;
}
......@@ -95,10 +95,10 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
p += BIT_WORD(bit);
raw_local_irq_save(flags);
flags = hard_local_irq_save();
res = *p;
*p = res & ~mask;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
return (res & mask) != 0;
}
......@@ -112,10 +112,10 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
p += BIT_WORD(bit);
raw_local_irq_save(flags);
flags = hard_local_irq_save();
res = *p;
*p = res ^ mask;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
return (res & mask) != 0;
}
......
......@@ -77,17 +77,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error SMP is not supported on this platform
#endif
case 1:
raw_local_irq_save(flags);
flags = hard_local_irq_save();
ret = *(volatile unsigned char *)ptr;
*(volatile unsigned char *)ptr = x;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
break;
case 4:
raw_local_irq_save(flags);
flags = hard_local_irq_save();
ret = *(volatile unsigned long *)ptr;
*(volatile unsigned long *)ptr = x;
raw_local_irq_restore(flags);
hard_local_irq_restore(flags);
break;
#else
case 1:
......
......@@ -41,7 +41,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
static inline void efi_set_pgd(struct mm_struct *mm)
{
check_and_switch_context(mm, NULL);
check_and_switch_context(mm, NULL, true);
}
void efi_virtmap_load(void);
......
......@@ -12,7 +12,11 @@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
badrne lr, 1b
#ifdef CONFIG_IPIPE
bne __ipipe_grab_irq
#else
bne asm_do_IRQ
#endif
#ifdef CONFIG_SMP
/*
......@@ -25,8 +29,12 @@
ALT_UP_B(9997f)
movne r1, sp
badrne lr, 1b
#ifdef CONFIG_IPIPE
bne __ipipe_grab_ipi
#else
bne do_IPI
#endif
#endif
9997:
.endm
......
......@@ -14,4 +14,8 @@ static inline void ack_bad_irq(int irq)
#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
#define IPIPE_NR_ROOT_IRQS 1024
#define IPIPE_NR_XIRQS IPIPE_NR_ROOT_IRQS
#endif
/* -*- linux-c -*-
* arch/arm/include/asm/ipipe.h
*
* Copyright (C) 2002-2005 Philippe Gerum.
* Copyright (C) 2005 Stelian Pop.
* Copyright (C) 2006-2008 Gilles Chanteperdrix.
* Copyright (C) 2010 Philippe Gerum (SMP port).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __ARM_IPIPE_H
#define __ARM_IPIPE_H
#include <linux/irqdomain.h>
#ifdef CONFIG_IPIPE
#define BROKEN_BUILTIN_RETURN_ADDRESS
#undef __BUILTIN_RETURN_ADDRESS0
#undef __BUILTIN_RETURN_ADDRESS1
#ifdef CONFIG_FRAME_POINTER
#define __BUILTIN_RETURN_ADDRESS0 arm_return_addr(0)
#define __BUILTIN_RETURN_ADDRESS1 arm_return_addr(1)
extern unsigned long arm_return_addr(int level);
#else
#define __BUILTIN_RETURN_ADDRESS0 ((unsigned long)__builtin_return_address(0))
#define __BUILTIN_RETURN_ADDRESS1 (0)
#endif
#include <linux/jump_label.h>
#include <linux/ipipe_trace.h>
#define IPIPE_CORE_RELEASE 6
struct ipipe_domain;
struct timekeeper;
#define IPIPE_TSC_TYPE_NONE 0
#define IPIPE_TSC_TYPE_FREERUNNING 1
#define IPIPE_TSC_TYPE_DECREMENTER 2
#define IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN 3
#define IPIPE_TSC_TYPE_FREERUNNING_TWICE 4
#define IPIPE_TSC_TYPE_FREERUNNING_ARCH 5
/* tscinfo, exported to user-space */
struct __ipipe_tscinfo {
unsigned type;
unsigned freq;
unsigned long counter_vaddr;
union {
struct {
unsigned long counter_paddr;
unsigned long long mask;
};
struct {
unsigned *counter; /* Hw counter physical address */
unsigned long long mask; /* Significant bits in the hw counter. */
unsigned long long *tsc; /* 64-bit tsc value. */
} fr;
struct {
unsigned *counter; /* Hw counter physical address */
unsigned long long mask; /* Significant bits in the hw counter. */
unsigned *last_cnt; /* Counter value when updating
tsc value. */
unsigned long long *tsc; /* 64-bit tsc value. */
} dec;
} u;
unsigned int (*refresh_freq)(void);
};
struct ipipe_arch_sysinfo {
struct __ipipe_tscinfo tsc;
};
/* arch specific stuff */
extern char __ipipe_tsc_area[];
void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info);
#ifdef CONFIG_IPIPE_ARM_KUSER_TSC
unsigned long long __ipipe_tsc_get(void) __attribute__((long_call));
void __ipipe_tsc_register(struct __ipipe_tscinfo *info);
void __ipipe_tsc_update(void);
void __ipipe_update_vsyscall(struct timekeeper *tk);
extern unsigned long __ipipe_kuser_tsc_freq;
#define __ipipe_hrclock_freq __ipipe_kuser_tsc_freq
#else /* ! generic tsc */
unsigned long long __ipipe_mach_get_tsc(void);
#define __ipipe_tsc_get() __ipipe_mach_get_tsc()
static inline void __ipipe_update_vsyscall(struct timekeeper *tk) {}
#ifndef __ipipe_hrclock_freq
extern unsigned long __ipipe_hrtimer_freq;
#define __ipipe_hrclock_freq __ipipe_hrtimer_freq
#endif /* !__ipipe_hrclock_freq */
#endif /* ! generic tsc */
#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
extern void (*__ipipe_mach_hrtimer_debug)(unsigned irq);
#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */
#define ipipe_read_tsc(t) do { t = __ipipe_tsc_get(); } while(0)
#define __ipipe_read_timebase() __ipipe_tsc_get()
#define ipipe_tsc2ns(t) \
({ \
unsigned long long delta = (t)*1000; \
do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
(unsigned long)delta; \
})
#define ipipe_tsc2us(t) \
({ \
unsigned long long delta = (t); \
do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
(unsigned long)delta; \
})
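/*
 * Worked example (illustration, not from the original header): with a
 * 500 MHz hrclock, __ipipe_hrclock_freq / 1000000 + 1 == 501, so
 * ipipe_tsc2ns(1000) yields 1000 * 1000 / 501 == 1996 ns instead of
 * the exact 2000 ns. The "+ 1" keeps the divisor non-zero for
 * sub-MHz clocks at the price of a small systematic underestimate.
 */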
static inline const char *ipipe_clock_name(void)
{
return "ipipe_tsc";
}
/* Private interface -- Internal use only */
#define __ipipe_enable_irq(irq) enable_irq(irq)
#define __ipipe_disable_irq(irq) disable_irq(irq)
static inline void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{ }
static inline void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{ }
#define ipipe_notify_root_preemption() do { } while(0)
#ifdef CONFIG_SMP
void __ipipe_early_core_setup(void);
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
void __ipipe_root_localtimer(unsigned int irq, void *cookie);
void __ipipe_send_vnmi(void (*fn)(void *), cpumask_t cpumask, void *arg);
void __ipipe_do_vnmi(unsigned int irq, void *cookie);
void __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs);
void __ipipe_ipis_alloc(void);
void __ipipe_ipis_request(void);
static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
{
__ipipe_grab_ipi(irq, regs);
}
#ifdef CONFIG_SMP_ON_UP
extern struct static_key __ipipe_smp_key;
#define ipipe_smp_p (static_key_true(&__ipipe_smp_key))
#endif /* SMP_ON_UP */
#else /* !CONFIG_SMP */
#define __ipipe_early_core_setup() do { } while(0)
#define __ipipe_hook_critical_ipi(ipd) do { } while(0)
#endif /* !CONFIG_SMP */
#ifndef __ipipe_mach_init_platform
#define __ipipe_mach_init_platform() do { } while(0)
#endif
void __ipipe_enable_pipeline(void);
void __ipipe_do_critical_sync(unsigned irq, void *cookie);
void __ipipe_grab_irq(int irq, struct pt_regs *regs);
void __ipipe_exit_irq(struct pt_regs *regs);
static inline unsigned long __ipipe_ffnz(unsigned long ul)
{
return ffs(ul) - 1;
}
#define __ipipe_root_tick_p(regs) (!arch_irqs_disabled_flags(regs->ARM_cpsr))
#ifdef CONFIG_IRQ_DOMAIN
static inline
int ipipe_handle_domain_irq(struct irq_domain *domain,
unsigned int hwirq, struct pt_regs *regs)
{
unsigned int irq;
irq = irq_find_mapping(domain, hwirq);
__ipipe_grab_irq(irq, regs);
return 0;
}
#endif /* irq domains */
#else /* !CONFIG_IPIPE */
#include <linux/irq.h>
#include <linux/irqdesc.h>
#define __ipipe_tsc_update() do { } while(0)
#define hard_smp_processor_id() smp_processor_id()
#ifdef CONFIG_SMP
static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
{
handle_IPI(irq, regs);
}
#endif /* CONFIG_SMP */
struct timekeeper;
static inline void __ipipe_update_vsyscall(struct timekeeper *tk) {}
#endif /* !CONFIG_IPIPE */
#endif /* !__ARM_IPIPE_H */
/* -*- linux-c -*-
* arch/arm/include/asm/ipipe_base.h
*
* Copyright (C) 2007 Gilles Chanteperdrix.
* Copyright (C) 2010 Philippe Gerum (SMP port).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __ASM_ARM_IPIPE_BASE_H
#define __ASM_ARM_IPIPE_BASE_H
#ifndef __ASSEMBLY__
#include <asm-generic/ipipe.h>
#endif
#ifdef CONFIG_IPIPE
#ifndef __ASSEMBLY__
#ifdef CONFIG_SMP
extern unsigned __ipipe_first_ipi;
#define IPIPE_CRITICAL_IPI __ipipe_first_ipi
#define IPIPE_HRTIMER_IPI (IPIPE_CRITICAL_IPI + 1)
#define IPIPE_RESCHEDULE_IPI (IPIPE_CRITICAL_IPI + 2)
#define IPIPE_SERVICE_VNMI (IPIPE_CRITICAL_IPI + 3)
#define IPIPE_LAST_IPI IPIPE_SERVICE_VNMI
#define hard_smp_processor_id() raw_smp_processor_id()
#ifdef CONFIG_SMP_ON_UP
unsigned __ipipe_processor_id(void);
#define ipipe_processor_id() \
({ \
register unsigned int cpunum __asm__ ("r0"); \
register unsigned int r1 __asm__ ("r1"); \
register unsigned int r2 __asm__ ("r2"); \
register unsigned int r3 __asm__ ("r3"); \
register unsigned int ip __asm__ ("ip"); \
register unsigned int lr __asm__ ("lr"); \
__asm__ __volatile__ ("\n" \
"1: bl __ipipe_processor_id\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 1b\n" \
" mov %0, #0\n" \
" .popsection" \
: "=r"(cpunum), "=r"(r1), "=r"(r2), "=r"(r3), \
"=r"(ip), "=r"(lr) \
: /* */ : "cc"); \
cpunum; \
})
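/*
 * Illustration (commentary, not from the original header): the
 * ".alt.smp.init" record pairs the address of the "bl" above with a
 * replacement "mov %0, #0". On a uniprocessor boot the SMP_ON_UP
 * fixup pass patches the call site with that instruction, so
 * ipipe_processor_id() collapses to a constant zero at no extra cost.
 */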
#else /* !SMP_ON_UP */
#define ipipe_processor_id() raw_smp_processor_id()
#endif /* !SMP_ON_UP */
#define IPIPE_ARCH_HAVE_VIRQ_IPI
#else /* !CONFIG_SMP */
#define ipipe_processor_id() (0)
#endif /* !CONFIG_SMP */
#endif /* !__ASSEMBLY__ */
#define IPIPE_TRAP_MAYDAY 0 /* Internal recovery trap */
/* ARM traps */
#define IPIPE_TRAP_ACCESS 1 /* Data or instruction access exception */
#define IPIPE_TRAP_SECTION 2 /* Section fault */
#define IPIPE_TRAP_DABT 3 /* Generic data abort */
#define IPIPE_TRAP_UNKNOWN 4 /* Unknown exception */
#define IPIPE_TRAP_BREAK 5 /* Instruction breakpoint */
#define IPIPE_TRAP_FPU 6 /* Floating point exception */
#define IPIPE_TRAP_VFP 7 /* VFP floating point exception */
#define IPIPE_TRAP_UNDEFINSTR 8 /* Undefined instruction */
#define IPIPE_TRAP_ALIGNMENT 9 /* Unaligned access exception */
#define IPIPE_NR_FAULTS 10
#endif /* CONFIG_IPIPE */
#endif /* __ASM_ARM_IPIPE_BASE_H */
/* -*- linux-c -*-
* arch/arm/include/asm/ipipe_hwirq.h
*
* Copyright (C) 2002-2005 Philippe Gerum.
* Copyright (C) 2005 Stelian Pop.
* Copyright (C) 2006-2008 Gilles Chanteperdrix.
* Copyright (C) 2010 Philippe Gerum (SMP port).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _ASM_ARM_IPIPE_HWIRQ_H
#define _ASM_ARM_IPIPE_HWIRQ_H
#include <asm-generic/ipipe.h>
#ifdef CONFIG_IPIPE
#include <linux/ipipe_trace.h>
#define hard_local_irq_restore_notrace(x) \
__asm__ __volatile__( \
"msr cpsr_c, %0 @ hard_local_irq_restore\n" \
: \
: "r" (x) \
: "memory", "cc")
static inline void hard_local_irq_disable_notrace(void)
{
#if __LINUX_ARM_ARCH__ >= 6
__asm__("cpsid i @ __cli" : : : "memory", "cc");
#else /* linux arch <= 5 */
unsigned long temp;
__asm__ __volatile__(
"mrs %0, cpsr @ hard_local_irq_disable\n"
"orr %0, %0, #128\n"
"msr cpsr_c, %0"
: "=r" (temp)
:
: "memory", "cc");
#endif /* linux arch <= 5 */
}
static inline void hard_local_irq_enable_notrace(void)
{
#if __LINUX_ARM_ARCH__ >= 6
__asm__("cpsie i @ __sti" : : : "memory", "cc");
#else /* linux arch <= 5 */
unsigned long temp;
__asm__ __volatile__(
"mrs %0, cpsr @ hard_local_irq_enable\n"
"bic %0, %0, #128\n"
"msr cpsr_c, %0"
: "=r" (temp)
:
: "memory", "cc");
#endif /* linux arch <= 5 */
}
static inline void hard_local_fiq_disable_notrace(void)
{
#if __LINUX_ARM_ARCH__ >= 6
__asm__("cpsid f @ __clf" : : : "memory", "cc");
#else /* linux arch <= 5 */
unsigned long temp;
__asm__ __volatile__(
"mrs %0, cpsr @ clf\n"
"orr %0, %0, #64\n"
"msr cpsr_c, %0"
: "=r" (temp)
:
: "memory", "cc");
#endif /* linux arch <= 5 */
}
static inline void hard_local_fiq_enable_notrace(void)
{
#if __LINUX_ARM_ARCH__ >= 6
__asm__("cpsie f @ __stf" : : : "memory", "cc");
#else /* linux arch <= 5 */
unsigned long temp;
__asm__ __volatile__(
"mrs %0, cpsr @ stf\n"
"bic %0, %0, #64\n"
"msr cpsr_c, %0"
: "=r" (temp)
:
: "memory", "cc");
#endif /* linux arch <= 5 */
}
static inline unsigned long hard_local_irq_save_notrace(void)
{
unsigned long res;
#if __LINUX_ARM_ARCH__ >= 6
__asm__ __volatile__(
"mrs %0, cpsr @ hard_local_irq_save\n"
"cpsid i"
: "=r" (res) : : "memory", "cc");
#else /* linux arch <= 5 */
unsigned long temp;
__asm__ __volatile__(
"mrs %0, cpsr @ hard_local_irq_save\n"
"orr %1, %0, #128\n"
"msr cpsr_c, %1"
: "=r" (res), "=r" (temp)
:
: "memory", "cc");
#endif /* linux arch <= 5 */
return res;
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return (int)((flags) & PSR_I_BIT);
}
static inline unsigned long hard_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__(
"mrs %0, cpsr @ hard_local_save_flags"
: "=r" (flags) : : "memory", "cc");
return flags;
}
#define hard_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags)
static inline int hard_irqs_disabled(void)
{
return hard_irqs_disabled_flags(hard_local_save_flags());
}
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
static inline void hard_local_irq_disable(void)
{
if (!hard_irqs_disabled()) {
hard_local_irq_disable_notrace();
ipipe_trace_begin(0x80000000);
}
}
static inline void hard_local_irq_enable(void)
{
if (hard_irqs_disabled()) {
ipipe_trace_end(0x80000000);
hard_local_irq_enable_notrace();
}
}
static inline unsigned long hard_local_irq_save(void)
{
unsigned long flags;
flags = hard_local_irq_save_notrace();
if (!arch_irqs_disabled_flags(flags))
ipipe_trace_begin(0x80000001);
return flags;
}
static inline void hard_local_irq_restore(unsigned long x)
{
if (!arch_irqs_disabled_flags(x))
ipipe_trace_end(0x80000001);
hard_local_irq_restore_notrace(x);
}
#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
#define hard_local_irq_disable hard_local_irq_disable_notrace
#define hard_local_irq_enable hard_local_irq_enable_notrace
#define hard_local_irq_save hard_local_irq_save_notrace
#define hard_local_irq_restore hard_local_irq_restore_notrace
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
#define arch_local_irq_disable() \
({ \
ipipe_stall_root(); \
barrier(); \
})
#define arch_local_irq_enable() \
do { \
barrier(); \
ipipe_unstall_root(); \
} while (0)
#define local_fiq_enable() hard_local_fiq_enable_notrace()
#define local_fiq_disable() hard_local_fiq_disable_notrace()
#define arch_local_irq_restore(flags) \
do { \
if (!arch_irqs_disabled_flags(flags)) \
arch_local_irq_enable(); \
} while (0)
#define arch_local_irq_save() \
({ \
unsigned long _flags; \
_flags = ipipe_test_and_stall_root() << 7; \
barrier(); \
_flags; \
})
#define arch_local_save_flags() \
({ \
unsigned long _flags; \
_flags = ipipe_test_root() << 7; \
barrier(); \
_flags; \
})
#define arch_irqs_disabled() ipipe_test_root()
#define hard_irq_disable() hard_local_irq_disable()
static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
{
/* Merge virtual and real interrupt mask bits into a single
32-bit word. */
return (real & ~(1L << 8)) | ((virt != 0) << 8);
}
static inline int arch_demangle_irq_bits(unsigned long *x)
{
int virt = (*x & (1 << 8)) != 0;
*x &= ~(1L << 8);
return virt;
}
#endif /* !CONFIG_IPIPE */
#endif /* _ASM_ARM_IPIPE_HWIRQ_H */
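The mangle/demangle pair above piggybacks the root domain's virtual stall state on bit 8 of the combined flags word, next to PSR_I_BIT in bit 7. A user-space restatement of the round trip that can be compiled and run as a sanity check (a sketch, not kernel code):

#include <assert.h>

static unsigned long mangle(int virt, unsigned long real)
{
	/* Same expression as arch_mangle_irq_bits() above. */
	return (real & ~(1L << 8)) | ((virt != 0) << 8);
}

static int demangle(unsigned long *x)
{
	/* Same expression as arch_demangle_irq_bits() above. */
	int virt = (*x & (1 << 8)) != 0;
	*x &= ~(1L << 8);
	return virt;
}

int main(void)
{
	unsigned long flags = mangle(1, 0x80);	/* hard I-bit set, root stalled */
	assert(flags == 0x180);
	assert(demangle(&flags) == 1);		/* virtual bit recovered... */
	assert(flags == 0x80);			/* ...and hard bits intact */
	return 0;
}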
......@@ -7,9 +7,14 @@
#ifndef CONFIG_SPARSE_IRQ
#include <mach/irqs.h>
#else
#if !defined(CONFIG_IPIPE) || defined(CONFIG_IRQ_DOMAIN)
#define NR_IRQS NR_IRQS_LEGACY
#else
#define NR_IRQS 512
#endif
#endif
#ifndef irq_canonicalize
#define irq_canonicalize(i) (i)
#endif
......@@ -50,4 +55,3 @@ static inline int nr_legacy_irqs(void)
#endif
#endif
......@@ -6,6 +6,10 @@
#include <asm/ptrace.h>
#include <asm/ipipe_hwirq.h>
#ifndef CONFIG_IPIPE
/*
* CPU interrupt mask handling.
*/
......@@ -56,13 +60,6 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
#ifndef CONFIG_CPU_V7M
#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc")
#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc")
#else
#define local_abt_enable() do { } while (0)
#define local_abt_disable() do { } while (0)
#endif
#else
/*
......@@ -183,5 +180,15 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#include <asm-generic/irqflags.h>
#endif /* ifndef IPIPE */
#ifndef CONFIG_CPU_V7M
#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc")
#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc")
#else
#define local_abt_enable() do { } while (0)
#define local_abt_disable() do { } while (0)
#endif
#endif /* ifdef __KERNEL__ */
#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
......@@ -16,6 +16,7 @@
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/ipipe.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
......@@ -28,7 +29,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
int check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk, bool may_defer);
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
......@@ -50,13 +52,14 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
#ifdef CONFIG_MMU
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
static inline int
check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk, bool may_defer)
{
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
if (irqs_disabled())
if (may_defer && irqs_disabled()) {
/*
* cpu_switch_mm() needs to flush the VIVT caches. To avoid
* high interrupt latencies, defer the call and continue
......@@ -65,10 +68,23 @@ static inline void check_and_switch_context(struct mm_struct *mm,
* finish_arch_post_lock_switch() call.
*/
mm->context.switch_pending = 1;
else
return -EAGAIN;
} else {
cpu_switch_mm(mm->pgd, mm);
}
return 0;
}
#ifdef CONFIG_IPIPE
extern void deferred_switch_mm(struct mm_struct *mm);
#else /* !I-pipe */
static inline void deferred_switch_mm(struct mm_struct *next)
{
cpu_switch_mm(next->pgd, next);
}
#endif /* !I-pipe */
#ifndef MODULE
#define finish_arch_post_lock_switch \
finish_arch_post_lock_switch
......@@ -85,8 +101,11 @@ static inline void finish_arch_post_lock_switch(void)
*/
preempt_disable();
if (mm->context.switch_pending) {
unsigned long flags;
mm->context.switch_pending = 0;
cpu_switch_mm(mm->pgd, mm);
ipipe_mm_switch_protect(flags);
deferred_switch_mm(mm);
ipipe_mm_switch_unprotect(flags);
}
preempt_enable_no_resched();
}
......@@ -101,12 +120,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
#endif /* CONFIG_CPU_HAS_ASID */
#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
/*
* This is called when "tsk" is about to enter lazy TLB mode.
*
......@@ -127,12 +142,12 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
* calling the CPU specific function when the mm hasn't
* actually changed.
*/
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
static inline int
__do_switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk, bool may_defer)
{
#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
const unsigned int cpu = ipipe_processor_id();
/*
* __sync_icache_dcache doesn't broadcast the I-cache invalidation,
......@@ -145,13 +160,60 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
__flush_icache_all();
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_and_switch_context(next, tsk);
if (cache_is_vivt())
int rc = check_and_switch_context(next, tsk, may_defer);
if (rc < 0) {
#ifdef CONFIG_IPIPE
cpumask_clear_cpu(cpu, mm_cpumask(next));
return rc;
#endif /* CONFIG_IPIPE */
}
if (cache_is_vivt() && prev)
cpumask_clear_cpu(cpu, mm_cpumask(prev));
}
#endif
#endif /* CONFIG_MMU */
return 0;
}
#if defined(CONFIG_IPIPE) && defined(CONFIG_MMU)
extern void __switch_mm_inner(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
#else /* !I-pipe || !MMU */
#define __switch_mm_inner(prev, next, tsk) \
__do_switch_mm(prev, next, tsk, true)
#endif /* !I-pipe || !MMU */
static inline void
ipipe_switch_mm_head(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
__do_switch_mm(prev, next, tsk, false);
}
static inline void
__switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
__switch_mm_inner(prev, next, tsk);
}
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
#ifdef CONFIG_MMU
unsigned long flags;
ipipe_mm_switch_protect(flags);
__switch_mm(prev, next, tsk);
ipipe_mm_switch_unprotect(flags);
#endif /* CONFIG_MMU */
}
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) __switch_mm(prev, next, NULL)
static inline void destroy_context(struct mm_struct *mm)
{
}
#endif
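The reworked check_and_switch_context() above now reports -EAGAIN when it may not flush the VIVT caches with IRQs disabled, and the pending switch is replayed from finish_arch_post_lock_switch() under ipipe_mm_switch_protect(). A toy model of the deferral protocol (plain C with illustrative names, not the kernel implementation):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_mm { int pgd; bool switch_pending; };

static int live_pgd;		/* models the MMU's current translation base */
static bool irqs_disabled;

static void cpu_switch_mm(int pgd) { live_pgd = pgd; }

static int check_and_switch(struct toy_mm *mm, bool may_defer)
{
	if (may_defer && irqs_disabled) {
		mm->switch_pending = true;	/* too costly now: defer */
		return -EAGAIN;
	}
	cpu_switch_mm(mm->pgd);
	return 0;
}

/* Replays a deferred switch once the scheduler runs with IRQs on. */
static void finish_post_lock_switch(struct toy_mm *mm)
{
	if (mm->switch_pending) {
		mm->switch_pending = false;
		cpu_switch_mm(mm->pgd);
	}
}

int main(void)
{
	struct toy_mm next = { .pgd = 42 };
	irqs_disabled = true;
	printf("first try: %d\n", check_and_switch(&next, true));	/* -EAGAIN */
	irqs_disabled = false;
	finish_post_lock_switch(&next);
	printf("live pgd:  %d\n", live_pgd);				/* 42 */
	return 0;
}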
......@@ -16,11 +16,15 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
#include <asm/ipipe_base.h>
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
*/
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) && \
(!defined(CONFIG_IPIPE) || \
(!defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_IPIPE_TRACE)))
static inline void set_my_cpu_offset(unsigned long off)
{
/* Set TPIDRPRW */
......@@ -43,6 +47,10 @@ static inline unsigned long __my_cpu_offset(void)
}
#define __my_cpu_offset __my_cpu_offset()
#else
#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
#define __my_cpu_offset (per_cpu_offset(ipipe_processor_id()))
#endif /* SMP && IPIPE */
#define set_my_cpu_offset(x) do {} while(0)
#endif /* CONFIG_SMP */
......
......@@ -31,4 +31,10 @@ extern void save_atags(const struct tag *tags);
static inline void save_atags(const struct tag *tags) { }
#endif
#ifdef CONFIG_IPIPE
void smp_build_cpu_revmap(void);
#else
static inline void smp_build_cpu_revmap(void) { }
#endif
#endif
......@@ -23,10 +23,19 @@
*/
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
hard_cond_local_irq_disable(); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
hard_cond_local_irq_enable(); \
} while (0)
#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
#define switch_to(prev,next,last) \
do { \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
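/*
 * Illustration (commentary, not from the original header): the
 * hard_cond_local_irq_* helpers are no-ops on non-pipelined builds;
 * with CONFIG_IPIPE they mask the CPU for real, so the head domain
 * cannot preempt the low-level register switch itself even though
 * the surrounding context switch remains preemptible.
 */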
#endif /* __ASM_ARM_SWITCH_TO_H */
......@@ -25,6 +25,7 @@
struct task_struct;
#include <asm/types.h>
#include <ipipe/thread_info.h>
typedef unsigned long mm_segment_t;
......@@ -65,6 +66,10 @@ struct thread_info {
#ifdef CONFIG_ARM_THUMBEE
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
#ifdef CONFIG_IPIPE
unsigned long ipipe_flags;
struct ipipe_threadinfo ipipe_data;
#endif
};
#define INIT_THREAD_INFO(tsk) \
......@@ -149,6 +154,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
#define TIF_MMSWITCH_INT 23 /* MMU context switch preempted */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
......@@ -159,6 +166,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
#define _TIF_MMSWITCH_INT (1 << TIF_MMSWITCH_INT)
/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
......@@ -169,5 +178,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE)
/* ti->ipipe_flags */
#define TIP_MAYDAY 0 /* MAYDAY call is pending */
#define TIP_NOTIFY 1 /* Notify head domain about kernel events */
#define TIP_HEAD 2 /* Runs in head domain */
#define TIP_USERINTRET 3 /* Notify on IRQ/trap return to root userspace */
#define _TIP_MAYDAY (1 << TIP_MAYDAY)
#define _TIP_NOTIFY (1 << TIP_NOTIFY)
#define _TIP_HEAD (1 << TIP_HEAD)
#define _TIP_USERINTRET (1 << TIP_USERINTRET)
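/*
 * Illustration (commentary, not from the original header): entry code
 * tests these bits with a single instruction, as in the
 * ret_from_exception path later in this series:
 *
 *	ldr	r0, [tsk, #TI_IPIPE]
 *	tst	r0, #_TIP_HEAD		@ running over the head domain?
 *	bne	__ipipe_ret_to_user_irqs_disabled
 */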
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
......@@ -16,6 +16,7 @@
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>
#include <asm-generic/ipipe.h>
#include <asm/extable.h>
......@@ -198,7 +199,7 @@ extern int __get_user_64t_4(void *);
#define get_user(x, p) \
({ \
might_fault(); \
__ipipe_uaccess_might_fault(); \
__get_user_check(x, p); \
})
......@@ -282,7 +283,7 @@ do { \
unsigned long __gu_val; \
unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
__ipipe_uaccess_might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
......@@ -343,7 +344,7 @@ do { \
const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
unsigned int __ua_flags; \
might_fault(); \
__ipipe_uaccess_might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
......@@ -454,7 +455,6 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);
......
......@@ -37,4 +37,10 @@
#define __ARM_NR_usr32 (__ARM_NR_BASE+4)
#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
/*
* This SWI is IPIPE private, for dispatching syscalls to the head
* domain.
*/
#define __ARM_NR_ipipe (__ARM_NR_BASE+66)
#endif /* _UAPI__ASM_ARM_UNISTD_H */
......@@ -88,6 +88,9 @@ obj-$(CONFIG_PARAVIRT) += paravirt.o
head-y := head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_RAW_PRINTK) += raw_printk.o
obj-$(CONFIG_IPIPE) += ipipe.o
obj-$(CONFIG_IPIPE_ARM_KUSER_TSC) += ipipe_tsc.o ipipe_tsc_asm.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a
......
......@@ -64,6 +64,9 @@ int main(void)
#endif
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
#ifdef CONFIG_IPIPE
DEFINE(TI_IPIPE, offsetof(struct thread_info, ipipe_flags));
#endif
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
......
......@@ -188,6 +188,8 @@ void __init arm_dt_init_cpu_maps(void)
cpu_logical_map(i) = tmp_map[i];
pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
}
smp_build_cpu_revmap();
}
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
......
......@@ -4,6 +4,7 @@
* Copyright (C) 1996,1997,1998 Russell King.
* ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
* nommu support by Hyok S. Choi (hyok.choi@samsung.com)
* Copyright (C) 2005 Stelian Pop.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -34,6 +35,7 @@
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>
#include <asm/ipipe_base.h>
/*
* Interrupt handling.
......@@ -48,6 +50,10 @@
arch_irq_handler_default
#endif
9997:
#ifdef CONFIG_IPIPE
bl __ipipe_check_root_interruptible
cmp r0, #1
#endif /* CONFIG_IPIPE */
.endm
.macro pabt_helper
......@@ -200,6 +206,14 @@ ENDPROC(__und_invalid)
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
mov r0, #1 /* IPIPE_TRACE_BEGIN */
mov r3, #0x90000000
ldr r2, [sp, #S_PC]
mov r1, pc
bl ipipe_trace_asm
ldmia r7, {r2 - r6}
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
.endif
.endm
......@@ -217,6 +231,9 @@ ENDPROC(__dabt_svc)
__irq_svc:
svc_entry
irq_handler
#ifdef CONFIG_IPIPE
bne __ipipe_fast_svc_irq_exit
#endif
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
......@@ -227,6 +244,9 @@ __irq_svc:
blne svc_preempt
#endif
#ifdef CONFIG_IPIPE
__ipipe_fast_svc_irq_exit:
#endif
svc_exit r5, irq = 1 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
......@@ -236,12 +256,16 @@ ENDPROC(__irq_svc)
#ifdef CONFIG_PREEMPT
svc_preempt:
mov r8, lr
#ifdef CONFIG_IPIPE
1: bl __ipipe_preempt_schedule_irq @ irq en/disable is done inside
#else /* CONFIG_IPIPE */
1: bl preempt_schedule_irq @ irq en/disable is done inside
#endif /* CONFIG_IPIPE */
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
reteq r8 @ go again
b 1b
#endif
#endif /* CONFIG_PREEMPT */
__und_fault:
@ Correct the PC such that it is pointing at the instruction
......@@ -266,6 +290,14 @@ __und_svc:
#else
svc_entry
#endif
#ifdef CONFIG_IPIPE
mov r0, #IPIPE_TRAP_UNDEFINSTR
mov r1, sp @ r1 = &regs
bl __ipipe_notify_trap @ branch to trap handler
cmp r0, #0
bne __und_svc_finish
#endif /* CONFIG_IPIPE */
@
@ call emulation code, which returns using r9 if it has emulated
@ the instruction, or the more conventional lr if we are to treat
......@@ -385,6 +417,15 @@ ENDPROC(__fiq_abt)
sub sp, sp, #PT_REGS_SIZE
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
mov r4, r0
mov r0, #1 /* IPIPE_TRACE_BEGIN */
mov r3, #0x90000000
ldr r2, [r4, #4] /* lr_<exception> */
mov r1, pc
bl ipipe_trace_asm
mov r0, r4
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
ATRAP( mrc p15, 0, r7, c1, c0, 0)
ATRAP( ldr r8, .LCcralign)
......@@ -462,6 +503,10 @@ __irq_usr:
usr_entry
kuser_cmpxchg_check
irq_handler
#ifdef CONFIG_IPIPE
THUMB( it ne)
bne __ipipe_ret_to_user_irqs_disabled
#endif /* CONFIG_IPIPE */
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
......@@ -474,6 +519,15 @@ ENDPROC(__irq_usr)
__und_usr:
usr_entry uaccess=0
#ifdef CONFIG_IPIPE
mov r0, #IPIPE_TRAP_UNDEFINSTR
mov r1, sp @ r1 = &regs
bl __ipipe_notify_trap @ branch to trap handler
cmp r0, #0
bne ret_from_exception
uaccess_enable ip
#endif /* CONFIG_IPIPE */
mov r2, r4
mov r3, r5
......@@ -756,7 +810,16 @@ __pabt_usr:
ENTRY(ret_from_exception)
UNWIND(.fnstart )
UNWIND(.cantunwind )
#ifdef CONFIG_IPIPE
disable_irq
get_thread_info tsk
ldr r0, [tsk, #TI_IPIPE]
tst r0, #_TIP_HEAD
THUMB( it ne)
bne __ipipe_ret_to_user_irqs_disabled @ Fast exit path over non-root domains
#else /* !CONFIG_IPIPE */
get_thread_info tsk
#endif /* !CONFIG_IPIPE */
mov why, #0
b ret_to_user
UNWIND(.fnend )
......@@ -810,7 +873,11 @@ ENTRY(__switch_to)
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
#ifdef CONFIG_IPIPE
bl __ipipe_switch_to_notifier_call_chain
#else /* CONFIG_IPIPE */
bl atomic_notifier_call_chain
#endif /* CONFIG_IPIPE */
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
str r7, [r8]
#endif
......@@ -845,6 +912,50 @@ ENDPROC(__switch_to)
#endif
.endm
#ifdef CONFIG_IPIPE
/*
I-pipe tsc area: here we store data shared with user-space for
tsc emulation. If CONFIG_IPIPE_ARM_KUSER_TSC is enabled,
__ipipe_kuser_get_tsc will be overwritten with the real TSC
emulation code.
*/
.globl __ipipe_tsc_area
.equ __ipipe_tsc_area, CONFIG_VECTORS_BASE + 0x1000 + __ipipe_tsc_area_start - __kuser_helper_end
#ifdef CONFIG_IPIPE_ARM_KUSER_TSC
.globl __ipipe_tsc_addr
.equ __ipipe_tsc_addr, CONFIG_VECTORS_BASE + 0x1000 + .LCcntr_addr - __kuser_helper_end
.globl __ipipe_tsc_get
.equ __ipipe_tsc_get, CONFIG_VECTORS_BASE + 0x1000 + __ipipe_kuser_get_tsc - __kuser_helper_end
#endif
.align 5
.globl __ipipe_tsc_area_start
__ipipe_tsc_area_start: