Commit 5d5e25dc authored by Philippe Gerum, committed by Dmitriy Cherkasov

arm64: ipipe: add pipeline core

Introduce the arm64-specific bits enabling the interrupt pipeline
exclusively.
parent 89ec8d23
......@@ -108,6 +108,8 @@ config ARM64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_IPIPE_SUPPORT
select HAVE_IPIPE_TRACER_SUPPORT
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
......@@ -694,6 +696,7 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
config HOLES_IN_ZONE
def_bool y
source kernel/ipipe/Kconfig
source kernel/Kconfig.preempt
source kernel/Kconfig.hz
......
......@@ -51,6 +51,18 @@
msr daif, \flags
.endm
/*
 * Conditionally hard-mask/unmask IRQs at the CPU (PSTATE.I via DAIF).
 * These expand to nothing unless the interrupt pipeline is enabled,
 * in which case callers need the real CPU mask toggled, not the
 * virtual (root domain) one.
 */
.macro disable_irq_cond
#ifdef CONFIG_IPIPE
msr daifset, #2		// set PSTATE.I: hard-disable IRQs
#endif
.endm
.macro enable_irq_cond
#ifdef CONFIG_IPIPE
msr daifclr, #2		// clear PSTATE.I: hard-enable IRQs
#endif
.endm
/*
* Enable and disable debug exceptions.
*/
......
/* -*- linux-c -*-
* arch/arm64/include/asm/ipipe.h
*
* Copyright (C) 2002-2005 Philippe Gerum.
* Copyright (C) 2005 Stelian Pop.
* Copyright (C) 2006-2008 Gilles Chanteperdrix.
* Copyright (C) 2010 Philippe Gerum (SMP port).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __ARM_IPIPE_H
#define __ARM_IPIPE_H
#include <linux/irqdomain.h>
#ifdef CONFIG_IPIPE
#include <linux/jump_label.h>
#include <linux/ipipe_trace.h>
#include <linux/ipipe_debug.h>
#define IPIPE_CORE_RELEASE 2
struct ipipe_domain;
#define IPIPE_TSC_TYPE_NONE 0
#define IPIPE_TSC_TYPE_FREERUNNING 1
#define IPIPE_TSC_TYPE_DECREMENTER 2
#define IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN 3
#define IPIPE_TSC_TYPE_FREERUNNING_TWICE 4
#define IPIPE_TSC_TYPE_FREERUNNING_ARCH 5
/* tscinfo, exported to user-space */
/*
 * Description of the timestamp counter backing the pipeline clock.
 * 'type' is one of the IPIPE_TSC_TYPE_* values above; 'freq' is the
 * counter frequency (presumably in Hz — confirm against users).
 * This layout is shared with user-space, so it must not be changed
 * incompatibly.
 */
struct __ipipe_tscinfo {
unsigned type;
unsigned freq;
unsigned long counter_vaddr; /* mapping of the counter exposed to user-space */
union {
struct {
unsigned long counter_paddr;
unsigned long long mask;
};
struct {
/* Free-running counter variant. */
unsigned *counter; /* Hw counter physical address */
unsigned long long mask; /* Significant bits in the hw counter. */
unsigned long long *tsc; /* 64 bits tsc value. */
} fr;
struct {
/* Decrementer-based counter variant. */
unsigned *counter; /* Hw counter physical address */
unsigned long long mask; /* Significant bits in the hw counter. */
unsigned *last_cnt; /* Counter value when updating
tsc value. */
unsigned long long *tsc; /* 64 bits tsc value. */
} dec;
} u;
};
/* Arch-specific block of the sysinfo data returned to clients. */
struct ipipe_arch_sysinfo {
struct __ipipe_tscinfo tsc;
};
/* arch specific stuff */
void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info);
/* No-op on arm64: nothing to refresh before reading the counter. */
static inline void __ipipe_mach_update_tsc(void) {}
/*
 * Read the 64bit architected counter (arch_counter_get_cntvct(),
 * i.e. the virtual counter).  Marked notrace so the irqsoff tracer
 * can safely call it for timestamping.
 */
static inline notrace unsigned long long __ipipe_mach_get_tsc(void)
{
return arch_counter_get_cntvct();
}
#define __ipipe_tsc_get() __ipipe_mach_get_tsc()
void __ipipe_tsc_register(struct __ipipe_tscinfo *info);
/* No-op on arm64: the counter is read directly, no cached tsc state. */
static inline void __ipipe_tsc_update(void) {}
#ifndef __ipipe_hrclock_freq
extern unsigned long __ipipe_hrtimer_freq;
#define __ipipe_hrclock_freq __ipipe_hrtimer_freq
#endif /* !__ipipe_hrclock_freq */
#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
extern void (*__ipipe_mach_hrtimer_debug)(unsigned irq);
#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */
#define ipipe_read_tsc(t) do { t = __ipipe_tsc_get(); } while(0)
#define __ipipe_read_timebase() __ipipe_tsc_get()
#define ipipe_tsc2ns(t) \
({ \
unsigned long long delta = (t)*1000; \
do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
(unsigned long)delta; \
})
#define ipipe_tsc2us(t) \
({ \
unsigned long long delta = (t); \
do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
(unsigned long)delta; \
})
/*
 * Report the name under which the pipeline's clock source is
 * advertised to clients.
 */
static inline const char *ipipe_clock_name(void)
{
	const char *name = "ipipe_tsc";

	return name;
}
/* Private interface -- Internal use only */
#define __ipipe_enable_irq(irq) enable_irq(irq)
#define __ipipe_disable_irq(irq) disable_irq(irq)
#define ipipe_notify_root_preemption() do { } while(0)
#ifdef CONFIG_SMP
void __ipipe_early_core_setup(void);
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
void __ipipe_root_localtimer(unsigned int irq, void *cookie);
void __ipipe_grab_ipi(unsigned sgi, struct pt_regs *regs);
void __ipipe_ipis_alloc(void);
void __ipipe_ipis_request(void);
#ifdef CONFIG_SMP_ON_UP
extern struct static_key __ipipe_smp_key;
#define ipipe_smp_p (static_key_true(&__ipipe_smp_key))
#endif /* SMP_ON_UP */
#else /* !CONFIG_SMP */
#define __ipipe_early_core_setup() do { } while(0)
#define __ipipe_hook_critical_ipi(ipd) do { } while(0)
#endif /* !CONFIG_SMP */
#ifndef __ipipe_mach_init_platform
#define __ipipe_mach_init_platform() do { } while(0)
#endif
void __ipipe_enable_pipeline(void);
void __ipipe_do_critical_sync(unsigned irq, void *cookie);
void __ipipe_grab_irq(int irq, struct pt_regs *regs);
void __ipipe_exit_irq(struct pt_regs *regs);
/*
 * Pipeline-aware replacement for the generic domain IRQ entry:
 * translate the hw irq number into its Linux irq within @domain,
 * then feed it to the pipeline dispatcher instead of the regular
 * flow.  Always returns 0.
 * NOTE(review): a failed lookup (irq_find_mapping() returning 0) is
 * not checked and would be passed to __ipipe_grab_irq() as-is —
 * confirm callers guarantee a valid mapping.
 */
static inline
int ipipe_handle_domain_irq(struct irq_domain *domain,
unsigned int hwirq, struct pt_regs *regs)
{
unsigned int irq;
irq = irq_find_mapping(domain, hwirq);
__ipipe_grab_irq(irq, regs);
return 0;
}
/* Route an incoming IPI into the pipeline's IPI dispatcher. */
static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
{
__ipipe_grab_ipi(irq, regs);
}
/*
 * Find-first-nonzero: return the bit index (0..63) of the least
 * significant set bit in @ul, computed as bit-reversal (rbit)
 * followed by count-leading-zeros (clz).
 */
static inline unsigned long __ipipe_ffnz(unsigned long ul)
{
int __r;
/* zero input is not valid */
IPIPE_WARN(ul == 0);
__asm__ ("rbit\t%0, %1\n"
"clz\t%0, %0\n"
: "=r" (__r) : "r"(ul) : "cc");
return __r;
}
#define __ipipe_syscall_watched_p(p, sc) \
(ipipe_notifier_enabled_p(p) || (unsigned long)sc >= __NR_syscalls)
#define __ipipe_root_tick_p(regs) (!arch_irqs_disabled_flags(regs->pstate))
#else /* !CONFIG_IPIPE */
#ifdef CONFIG_SMP
/* Without the pipeline, IPIs go straight to the generic handler. */
static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
{
handle_IPI(irq, regs);
}
#endif /* CONFIG_SMP */
#define __ipipe_tsc_update() do { } while(0)
#endif /* CONFIG_IPIPE */
#endif /* !__ARM_IPIPE_H */
/* -*- linux-c -*-
* arch/arm64/include/asm/ipipe_base.h
*
* Copyright (C) 2007 Gilles Chanteperdrix.
* Copyright (C) 2010 Philippe Gerum (SMP port).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __ASM_ARM_IPIPE_BASE_H
#define __ASM_ARM_IPIPE_BASE_H
#include <asm-generic/ipipe.h>
#ifdef CONFIG_IPIPE
#include <asm/hardirq.h>
#define IPIPE_NR_ROOT_IRQS 1024
#define IPIPE_NR_XIRQS IPIPE_NR_ROOT_IRQS
#ifdef CONFIG_SMP
/*
* Out-of-band IPIs are directly mapped to SGI1-3, instead of
* multiplexed over SGI0 like regular in-band messages.
*/
#define IPIPE_IPI_BASE IPIPE_VIRQ_BASE
#define IPIPE_OOB_IPI_NR 3
#define IPIPE_CRITICAL_IPI (IPIPE_IPI_BASE + NR_IPI)
#define IPIPE_HRTIMER_IPI (IPIPE_IPI_BASE + NR_IPI + 1)
#define IPIPE_RESCHEDULE_IPI (IPIPE_IPI_BASE + NR_IPI + 2)
#define hard_smp_processor_id() raw_smp_processor_id()
#ifdef CONFIG_SMP_ON_UP
unsigned __ipipe_processor_id(void);
#define ipipe_processor_id() \
({ \
register unsigned int cpunum __asm__ ("r0"); \
register unsigned int r1 __asm__ ("r1"); \
register unsigned int r2 __asm__ ("r2"); \
register unsigned int r3 __asm__ ("r3"); \
register unsigned int ip __asm__ ("ip"); \
register unsigned int lr __asm__ ("lr"); \
__asm__ __volatile__ ("\n" \
"1: bl __ipipe_processor_id\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 1b\n" \
" mov %0, #0\n" \
" .popsection" \
: "=r"(cpunum), "=r"(r1), "=r"(r2), "=r"(r3), \
"=r"(ip), "=r"(lr) \
: /* */ : "cc"); \
cpunum; \
})
#else /* !SMP_ON_UP */
#define ipipe_processor_id() raw_smp_processor_id()
#endif /* !SMP_ON_UP */
#define IPIPE_ARCH_HAVE_VIRQ_IPI
#else /* !CONFIG_SMP */
#define ipipe_processor_id() (0)
#endif /* !CONFIG_SMP */
/* ARM64 traps */
#define IPIPE_TRAP_MAYDAY 0 /* Internal recovery trap */
#endif /* CONFIG_IPIPE */
#endif /* __ASM_ARM_IPIPE_BASE_H */
/* -*- linux-c -*-
* arch/arm64/include/asm/ipipe_hwirq.h
*
* Copyright (C) 2002-2005 Philippe Gerum.
* Copyright (C) 2005 Stelian Pop.
* Copyright (C) 2006-2008 Gilles Chanteperdrix.
* Copyright (C) 2010 Philippe Gerum (SMP port).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _ASM_ARM_IPIPE_HWIRQ_H
#define _ASM_ARM_IPIPE_HWIRQ_H
#include <asm-generic/ipipe.h>
#ifdef CONFIG_IPIPE
/*
 * "Hard" interrupt state accessors: these act on the real CPU mask
 * bits (PSTATE.I/F through DAIF), bypassing the virtual disable flag
 * the pipeline maintains for the root domain, and without any
 * tracing instrumentation ("_notrace").
 */
#define hard_local_irq_restore_notrace(x) \
__asm__ __volatile__( \
"msr daif, %0" \
: \
: "r" (x) \
: "memory", "cc")
/* Hard-mask IRQs (set PSTATE.I). */
static inline void hard_local_irq_disable_notrace(void)
{
__asm__ __volatile__("msr daifset, #2" : : : "memory", "cc");
}
/* Hard-unmask IRQs (clear PSTATE.I). */
static inline void hard_local_irq_enable_notrace(void)
{
__asm__ __volatile__("msr daifclr, #2" : : : "memory", "cc");
}
/* Hard-mask FIQs (set PSTATE.F). */
static inline void hard_local_fiq_disable_notrace(void)
{
__asm__ __volatile__("msr daifset, #1" : : : "memory", "cc");
}
/* Hard-unmask FIQs (clear PSTATE.F). */
static inline void hard_local_fiq_enable_notrace(void)
{
__asm__ __volatile__("msr daifclr, #1" : : : "memory", "cc");
}
/*
 * Save the DAIF word then hard-mask IRQs, returning the previous
 * state for a later hard_local_irq_restore_notrace().
 */
static inline unsigned long hard_local_irq_save_notrace(void)
{
unsigned long res;
__asm__ __volatile__(
"mrs %0, daif\n"
"msr daifset, #2"
: "=r" (res) : : "memory", "cc");
return res;
}
#include <linux/ipipe_trace.h>
/*
 * Test the IRQ mask bit in a saved flags word.  Only PSR_I_BIT is
 * checked; FIQ and async-abort masking are ignored.  Returns a
 * nonzero value (the masked bit itself) when IRQs are disabled.
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return (int)((flags) & PSR_I_BIT);
}
/* Read the current DAIF word without modifying it. */
static inline unsigned long hard_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__(
"mrs %0, daif"
: "=r" (flags) : : "memory", "cc");
return flags;
}
#define hard_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags)
/* True when IRQs are currently hard-masked at the CPU. */
static inline int hard_irqs_disabled(void)
{
return hard_irqs_disabled_flags(hard_local_save_flags());
}
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
/*
 * Traced variants of the hard-irq accessors: same effect as the
 * _notrace versions, but they log state transitions to the latency
 * tracer.  Marker 0x80000000 tags disable/enable pairs, 0x80000001
 * tags save/restore pairs; the conditionals ensure begin/end events
 * are emitted only on an actual masked<->unmasked transition.
 */
static inline void hard_local_irq_disable(void)
{
if (!hard_irqs_disabled()) {
hard_local_irq_disable_notrace();
ipipe_trace_begin(0x80000000);
}
}
static inline void hard_local_irq_enable(void)
{
if (hard_irqs_disabled()) {
ipipe_trace_end(0x80000000);
hard_local_irq_enable_notrace();
}
}
static inline unsigned long hard_local_irq_save(void)
{
unsigned long flags;
flags = hard_local_irq_save_notrace();
if (!arch_irqs_disabled_flags(flags))
ipipe_trace_begin(0x80000001);
return flags;
}
static inline void hard_local_irq_restore(unsigned long x)
{
if (!arch_irqs_disabled_flags(x))
ipipe_trace_end(0x80000001);
hard_local_irq_restore_notrace(x);
}
#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
#define hard_local_irq_disable hard_local_irq_disable_notrace
#define hard_local_irq_enable hard_local_irq_enable_notrace
#define hard_local_irq_save hard_local_irq_save_notrace
#define hard_local_irq_restore hard_local_irq_restore_notrace
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
#define arch_local_irq_disable() \
({ \
ipipe_stall_root(); \
barrier(); \
})
#define arch_local_irq_enable() \
do { \
barrier(); \
ipipe_unstall_root(); \
} while (0)
#define arch_local_irq_restore(flags) \
do { \
if (!arch_irqs_disabled_flags(flags)) \
arch_local_irq_enable(); \
} while (0)
#define arch_local_irq_save() \
({ \
unsigned long _flags; \
_flags = ipipe_test_and_stall_root() << 7; \
barrier(); \
_flags; \
})
#define arch_local_save_flags() \
({ \
unsigned long _flags; \
_flags = ipipe_test_root() << 7; \
barrier(); \
_flags; \
})
#define arch_irqs_disabled() ipipe_test_root()
#define hard_irq_disable() hard_local_irq_disable()
/*
 * Fold the virtual (root domain) interrupt state into bit 8 of the
 * hardware flags word, leaving every other bit untouched.  @virt is
 * treated as a boolean; @real supplies the remaining bits.
 */
static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
{
	unsigned long merged = real & ~(1L << 8);

	if (virt)
		merged |= 1L << 8;

	return merged;
}
/*
 * Extract and clear the virtual interrupt bit (bit 8) from a flags
 * word previously produced by arch_mangle_irq_bits().  Returns 1 if
 * the bit was set, 0 otherwise; *x is left holding the real bits.
 */
static inline int arch_demangle_irq_bits(unsigned long *x)
{
	const unsigned long vmask = 1UL << 8;
	int stalled = (*x & vmask) ? 1 : 0;

	*x &= ~vmask;
	return stalled;
}
#endif /* !CONFIG_IPIPE */
#endif /* _ASM_ARM_IPIPE_HWIRQ_H */
......@@ -20,6 +20,10 @@
#include <asm/ptrace.h>
#include <asm/ipipe_hwirq.h>
#ifndef CONFIG_IPIPE
/*
* CPU interrupt mask handling.
*/
......@@ -53,12 +57,6 @@ static inline void arch_local_irq_disable(void)
: "memory");
}
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
#define local_async_disable() asm("msr daifset, #4" : : : "memory")
/*
* Save the current interrupt enable state.
*/
......@@ -90,6 +88,14 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return flags & PSR_I_BIT;
}
#endif /* !IPIPE */
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
#define local_async_disable() asm("msr daifset, #4" : : : "memory")
/*
* save and restore debug state
*/
......
......@@ -18,6 +18,7 @@
#include <asm/alternative.h>
#include <asm/stack_pointer.h>
#include <asm/ipipe_base.h>
static inline void set_my_cpu_offset(unsigned long off)
{
......
......@@ -30,6 +30,7 @@ struct task_struct;
#include <asm/memory.h>
#include <asm/stack_pointer.h>
#include <asm/types.h>
#include <ipipe/thread_info.h>
typedef unsigned long mm_segment_t;
......@@ -43,6 +44,10 @@ struct thread_info {
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
#ifdef CONFIG_IPIPE
unsigned long ipipe_flags;
#endif
struct ipipe_threadinfo ipipe_data;
};
#define INIT_THREAD_INFO(tsk) \
......@@ -115,5 +120,14 @@ void arch_setup_new_exec(void);
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
_TIF_NOHZ)
/* ti->ipipe_flags */
#define TIP_MAYDAY 0 /* MAYDAY call is pending */
#define TIP_NOTIFY 1 /* Notify head domain about kernel events */
#define TIP_HEAD 2 /* Runs in head domain */
#define _TIP_MAYDAY (1 << TIP_MAYDAY)
#define _TIP_NOTIFY (1 << TIP_NOTIFY)
#define _TIP_HEAD (1 << TIP_HEAD)
#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
......@@ -29,6 +29,7 @@
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm-generic/ipipe.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
......@@ -297,7 +298,7 @@ do { \
#define __get_user_check(x, ptr, err) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
__ipipe_uaccess_might_fault(); \
if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
__get_user_err((x), __p, (err)); \
......@@ -366,7 +367,7 @@ do { \
#define __put_user_check(x, ptr, err) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
__ipipe_uaccess_might_fault(); \
if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
__put_user_err((x), __p, (err)); \
......
......@@ -62,6 +62,7 @@ endif
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
obj-$(CONFIG_IPIPE) += ipipe.o
head-y := head.o
extra-y += $(head-y) vmlinux.lds
......
......@@ -38,6 +38,9 @@ int main(void)
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
#ifdef CONFIG_IPIPE
DEFINE(TSK_TI_IPIPE, offsetof(struct task_struct, thread_info.ipipe_flags));
#endif
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
......
......@@ -416,7 +416,11 @@ tsk .req x28 // current thread_info
* Interrupt handling.
*/
.macro irq_handler
#ifdef CONFIG_IPIPE
ldr x1, =handle_arch_irq_pipelined
#else
ldr_l x1, handle_arch_irq
#endif
mov x0, sp
irq_stack_entry
blr x1
......@@ -629,12 +633,15 @@ ENDPROC(el1_sync)
el1_irq:
kernel_entry 1
enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
irq_handler
#ifdef CONFIG_IPIPE
cbz w0, 2f
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
......@@ -644,15 +651,20 @@ el1_irq:
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
bl trace_hardirqs_on_virt
#endif
2:
kernel_exit 1
ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
mov x24, lr
#ifdef CONFIG_IPIPE
1: bl __ipipe_preempt_schedule_irq
#else
1: bl preempt_schedule_irq // irq en/disable is done inside
#endif
ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
......@@ -858,10 +870,13 @@ el0_irq_naked:
#endif
irq_handler
#ifdef CONFIG_IPIPE
cbz w0, work_done
#endif /* CONFIG_IPIPE */
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
bl trace_hardirqs_on_virt
#endif
b ret_to_user
b ret_to_user_nocheck
ENDPROC(el0_irq)
/*
......@@ -891,13 +906,23 @@ work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on // enabled while in userspace
#endif
work_done:
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
/*
* "slow" syscall return path.
*/
ret_to_user:
#ifdef CONFIG_IPIPE
disable_irq
ldr x0, [tsk, #TSK_TI_IPIPE]
tst x0, #_TIP_HEAD
b.eq ret_to_user_noirq
kernel_exit 0
#endif
ret_to_user_nocheck:
disable_irq // disable interrupts
ret_to_user_noirq:
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
......@@ -1128,6 +1153,7 @@ NOKPROBE(cpu_switch_to)
* This is how we return from a fork.
*/
ENTRY(ret_from_fork)
enable_irq_cond
bl schedule_tail
cbz x19, 1f // not a kernel thread
mov x0, x20
......
/* -*- linux-c -*-
* linux/arch/arm64/kernel/ipipe.c
*
* Copyright (C) 2002-2005 Philippe Gerum.
* Copyright (C) 2004 Wolfgang Grandegger (Adeos/arm port over 2.4).
* Copyright (C) 2005 Heikki Lindholm (PowerPC 970 fixes).
* Copyright (C) 2005 Stelian Pop.
* Copyright (C) 2006-2008 Gilles Chanteperdrix.
* Copyright (C) 2010 Philippe Gerum (SMP port).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later