Commit d0feba4f authored by Gilles Chanteperdrix, committed by Philippe Gerum

ARM: ipipe: share context switch code with the head domain

parent 1fffbb3a
@@ -41,7 +41,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
-        check_and_switch_context(mm, NULL);
+        check_and_switch_context(mm, NULL, true);
 }
 
 void efi_virtmap_load(void);
......
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/ipipe.h>
 #include <linux/preempt.h>
 
 #include <asm/cacheflush.h>
@@ -28,7 +29,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID
 
-void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+int check_and_switch_context(struct mm_struct *mm,
+                             struct task_struct *tsk, bool may_defer);
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -50,13 +52,14 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
 #ifdef CONFIG_MMU
 
-static inline void check_and_switch_context(struct mm_struct *mm,
-                                            struct task_struct *tsk)
+static inline int
+check_and_switch_context(struct mm_struct *mm,
+                         struct task_struct *tsk, bool may_defer)
 {
         if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                 __check_vmalloc_seq(mm);
 
-        if (irqs_disabled())
+        if (may_defer && irqs_disabled()) {
                 /*
                  * cpu_switch_mm() needs to flush the VIVT caches. To avoid
                  * high interrupt latencies, defer the call and continue
@@ -65,10 +68,23 @@ static inline void check_and_switch_context(struct mm_struct *mm,
                  * finish_arch_post_lock_switch() call.
                  */
                 mm->context.switch_pending = 1;
-        else
+                return -EAGAIN;
+        } else {
                 cpu_switch_mm(mm->pgd, mm);
+        }
+
+        return 0;
 }
 
+#ifdef CONFIG_IPIPE
+extern void deferred_switch_mm(struct mm_struct *mm);
+#else /* !I-pipe */
+static inline void deferred_switch_mm(struct mm_struct *next)
+{
+        cpu_switch_mm(next->pgd, next);
+}
+#endif /* !I-pipe */
+
 #ifndef MODULE
 #define finish_arch_post_lock_switch \
         finish_arch_post_lock_switch
@@ -85,8 +101,11 @@ static inline void finish_arch_post_lock_switch(void)
          */
         preempt_disable();
         if (mm->context.switch_pending) {
+                unsigned long flags;
                 mm->context.switch_pending = 0;
-                cpu_switch_mm(mm->pgd, mm);
+                ipipe_mm_switch_protect(flags);
+                deferred_switch_mm(mm);
+                ipipe_mm_switch_unprotect(flags);
         }
         preempt_enable_no_resched();
 }
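
Note: the two hunks above define the deferred-switch contract used on the root (Linux) side. When the VIVT cache flush cannot run right away because interrupts are disabled, check_and_switch_context() only records the request and returns -EAGAIN; finish_arch_post_lock_switch() completes the switch later through deferred_switch_mm(), bracketed by ipipe_mm_switch_protect()/unprotect(). The stand-alone user-space sketch below models that contract only; every name in it (mm_model, model_*) is an illustrative stand-in, not kernel API.

/* Minimal user-space model of the deferred mm-switch contract. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_model {
        int pgd;                /* stand-in for mm->pgd */
        int switch_pending;     /* mirrors mm->context.switch_pending */
};

static int current_pgd;         /* stand-in for the MMU state */
static bool irqs_off;           /* stand-in for irqs_disabled() */

static void model_cpu_switch_mm(struct mm_model *mm)
{
        current_pgd = mm->pgd;  /* the "expensive" part, done with IRQs on */
}

/* Mirrors check_and_switch_context(): defer when it cannot run now. */
static int model_check_and_switch(struct mm_model *mm, bool may_defer)
{
        if (may_defer && irqs_off) {
                mm->switch_pending = 1;
                return -EAGAIN; /* caller completes it on the deferred path */
        }
        model_cpu_switch_mm(mm);
        return 0;
}

/* Mirrors finish_arch_post_lock_switch() / deferred_switch_mm(). */
static void model_finish_post_lock_switch(struct mm_model *mm)
{
        if (mm->switch_pending) {
                mm->switch_pending = 0;
                model_cpu_switch_mm(mm);
        }
}

int main(void)
{
        struct mm_model next = { .pgd = 42 };

        irqs_off = true;
        printf("immediate switch attempt: %d\n",
               model_check_and_switch(&next, true));
        irqs_off = false;
        model_finish_post_lock_switch(&next);
        printf("pgd after deferred switch: %d\n", current_pgd);
        return 0;
}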
@@ -101,12 +120,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         return 0;
 }
 
 #endif  /* CONFIG_CPU_HAS_ASID */
 
-#define destroy_context(mm)             do { } while(0)
-#define activate_mm(prev,next)  switch_mm(prev, next, NULL)
-
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
  *
@@ -127,12 +142,12 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  * calling the CPU specific function when the mm hasn't
  * actually changed.
  */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-          struct task_struct *tsk)
+static inline int
+__do_switch_mm(struct mm_struct *prev, struct mm_struct *next,
+               struct task_struct *tsk, bool may_defer)
 {
 #ifdef CONFIG_MMU
-        unsigned int cpu = smp_processor_id();
+        const unsigned int cpu = ipipe_processor_id();
 
         /*
          * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
@@ -145,13 +160,60 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 __flush_icache_all();
 
         if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-                check_and_switch_context(next, tsk);
-                if (cache_is_vivt())
+                int rc = check_and_switch_context(next, tsk, may_defer);
+                if (rc < 0) {
+#ifdef CONFIG_IPIPE
+                        cpumask_clear_cpu(cpu, mm_cpumask(next));
+                        return rc;
+#endif /* CONFIG_IPIPE */
+                }
+                if (cache_is_vivt() && prev)
                         cpumask_clear_cpu(cpu, mm_cpumask(prev));
         }
-#endif
+#endif /* CONFIG_MMU */
+        return 0;
 }
 
+#if defined(CONFIG_IPIPE) && defined(CONFIG_MMU)
+extern void __switch_mm_inner(struct mm_struct *prev, struct mm_struct *next,
+                              struct task_struct *tsk);
+#else /* !I-pipe || !MMU */
+#define __switch_mm_inner(prev, next, tsk) \
+        __do_switch_mm(prev, next, tsk, true)
+#endif /* !I-pipe || !MMU */
+
+static inline void
+ipipe_switch_mm_head(struct mm_struct *prev, struct mm_struct *next,
+                     struct task_struct *tsk)
+{
+        __do_switch_mm(prev, next, tsk, false);
+}
+
+static inline void
+__switch_mm(struct mm_struct *prev, struct mm_struct *next,
+            struct task_struct *tsk)
+{
+        __switch_mm_inner(prev, next, tsk);
+}
+
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+          struct task_struct *tsk)
+{
+#ifdef CONFIG_MMU
+        unsigned long flags;
+
+        ipipe_mm_switch_protect(flags);
+        __switch_mm(prev, next, tsk);
+        ipipe_mm_switch_unprotect(flags);
+#endif /* CONFIG_MMU */
+}
+
 #define deactivate_mm(tsk,mm)   do { } while (0)
+#define activate_mm(prev,next)  __switch_mm(prev, next, NULL)
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
 
 #endif
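
Note: the header now layers the mm switch as follows. Ordinary kernel paths call switch_mm(), which brackets the work with ipipe_mm_switch_protect()/unprotect() and is allowed to defer (may_defer = true); the head domain calls ipipe_switch_mm_head(), which must complete immediately (may_defer = false) since it already runs with hard interrupts off. The sketch below is a user-space model of that split; the model_* names and the printouts are illustrative assumptions, not the kernel interfaces.

/* Illustrative user-space model of the switch_mm() layering. */
#include <stdbool.h>
#include <stdio.h>

struct mm { int pgd; };

/* Stand-ins for the I-pipe protection macros. */
#define model_mm_switch_protect()   puts("hard IRQs off (root domain)")
#define model_mm_switch_unprotect() puts("hard IRQs restored")

static int model_do_switch_mm(struct mm *next, bool may_defer)
{
        printf("switching to pgd %d (may_defer=%d)\n", next->pgd, may_defer);
        return 0;
}

/* Root domain: protected, may defer if the switch cannot run right now. */
static void model_switch_mm(struct mm *next)
{
        model_mm_switch_protect();
        model_do_switch_mm(next, true);
        model_mm_switch_unprotect();
}

/* Head domain: already runs with hard IRQs off, must never defer. */
static void model_switch_mm_head(struct mm *next)
{
        model_do_switch_mm(next, false);
}

int main(void)
{
        struct mm task_a = { .pgd = 1 }, rt_task = { .pgd = 2 };

        model_switch_mm(&task_a);       /* ordinary Linux context switch */
        model_switch_mm_head(&rt_task); /* head-domain (co-kernel) switch */
        return 0;
}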
@@ -23,10 +23,19 @@
  */
 extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
 
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+#define switch_to(prev,next,last) \
+do { \
+        __complete_pending_tlbi(); \
+        hard_cond_local_irq_disable(); \
+        last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+        hard_cond_local_irq_enable(); \
+} while (0)
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 #define switch_to(prev,next,last) \
 do { \
         last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
 } while (0)
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #endif /* __ASM_ARM_SWITCH_TO_H */
@@ -856,7 +856,11 @@ ENTRY(__switch_to)
         add     r4, r2, #TI_CPU_SAVE
         ldr     r0, =thread_notify_head
         mov     r1, #THREAD_NOTIFY_SWITCH
+#ifdef CONFIG_IPIPE
+        bl      __ipipe_switch_to_notifier_call_chain
+#else /* CONFIG_IPIPE */
         bl      atomic_notifier_call_chain
+#endif /* CONFIG_IPIPE */
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
         str     r7, [r8]
 #endif
......
@@ -50,6 +50,7 @@
 #include <asm/unistd.h>
 #include <asm/mach/irq.h>
 #include <asm/exception.h>
+#include <asm/mmu_context.h>
 
 static void __ipipe_do_IRQ(unsigned irq, void *cookie);
@@ -350,11 +351,116 @@ void __ipipe_root_sync(void)
         hard_local_irq_restore(flags);
 }
 
+#ifdef CONFIG_MMU
+void __switch_mm_inner(struct mm_struct *prev, struct mm_struct *next,
+                       struct task_struct *tsk)
+{
+        struct mm_struct ** const active_mm =
+                raw_cpu_ptr(&ipipe_percpu.active_mm);
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+        struct thread_info *const tip = current_thread_info();
+        prev = *active_mm;
+        clear_bit(TIF_MMSWITCH_INT, &tip->flags);
+        barrier();
+        *active_mm = NULL;
+        barrier();
+        for (;;) {
+                unsigned long flags;
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+                int rc __maybe_unused = __do_switch_mm(prev, next, tsk, true);
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+                /*
+                 * Reading thread_info flags and setting active_mm
+                 * must be done atomically.
+                 */
+                flags = hard_local_irq_save();
+                if (__test_and_clear_bit(TIF_MMSWITCH_INT, &tip->flags) == 0) {
+                        if (rc < 0)
+                                *active_mm = prev;
+                        else
+                                *active_mm = next;
+                        hard_local_irq_restore(flags);
+                        return;
+                }
+                hard_local_irq_restore(flags);
+
+                if (rc < 0)
+                        /*
+                         * We were interrupted by head domain, which
+                         * may have changed the mm context, mm context
+                         * is now unknown, but will be switched in
+                         * deferred_switch_mm
+                         */
+                        return;
+
+                prev = NULL;
+        }
+#else
+        if (rc < 0)
+                *active_mm = prev;
+        else
+                *active_mm = next;
+#endif /* !IPIPE_WANT_PREEMPTIBLE_SWITCH */
+}
+
+#ifdef finish_arch_post_lock_switch
+void deferred_switch_mm(struct mm_struct *next)
+{
+        struct mm_struct ** const active_mm =
+                raw_cpu_ptr(&ipipe_percpu.active_mm);
+        struct mm_struct *prev = *active_mm;
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+        struct thread_info *const tip = current_thread_info();
+        clear_bit(TIF_MMSWITCH_INT, &tip->flags);
+        barrier();
+        *active_mm = NULL;
+        barrier();
+        for (;;) {
+                unsigned long flags;
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+                __do_switch_mm(prev, next, NULL, false);
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+                /*
+                 * Reading thread_info flags and setting active_mm
+                 * must be done atomically.
+                 */
+                flags = hard_local_irq_save();
+                if (__test_and_clear_bit(TIF_MMSWITCH_INT, &tip->flags) == 0) {
+                        *active_mm = next;
+                        hard_local_irq_restore(flags);
+                        return;
+                }
+                hard_local_irq_restore(flags);
+                prev = NULL;
+        }
+#else
+        *active_mm = next;
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+}
+#endif
+#endif /* CONFIG_MMU */
+
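
Note: the subtle part of the code added above is the retry protocol shared by __switch_mm_inner() and deferred_switch_mm(): clear TIF_MMSWITCH_INT, invalidate the per-CPU active_mm pointer, perform the switch, then (with hard interrupts off) check whether the head domain switched the mm underneath; if it did, the MMU state is unknown and the switch is redone with prev = NULL. Below is a minimal single-threaded user-space model of that loop. It drops the hard-IRQ masking and the rc < 0 early return the real code takes when the deferred path will finish the job, uses a test hook instead of a real interrupt, and all names are illustrative.

/* Stand-alone user-space model of the __switch_mm_inner() retry loop. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mm { int pgd; };

static struct mm *active_mm;           /* models ipipe_percpu.active_mm */
static bool mmswitch_int;              /* models TIF_MMSWITCH_INT */
static bool simulate_head_preemption;  /* test hook for this model only */

/* Models __do_switch_mm(); always succeeds here (nothing can defer). */
static int model_do_switch_mm(struct mm *prev, struct mm *next)
{
        (void)prev;
        if (simulate_head_preemption) {
                /* Pretend a head-domain interrupt switched the mm while
                 * we were in the middle of our own switch. */
                mmswitch_int = true;
                simulate_head_preemption = false;
        }
        printf("switch to pgd %d\n", next->pgd);
        return 0;
}

static void model_switch_mm_inner(struct mm *next)
{
        struct mm *prev = active_mm;

        mmswitch_int = false;   /* forget stale interruptions */
        active_mm = NULL;       /* the active mm is "in flux" from here on */

        for (;;) {
                int rc = model_do_switch_mm(prev, next);

                /* In the kernel this test runs with hard IRQs off. */
                if (!mmswitch_int) {
                        active_mm = (rc < 0) ? prev : next;
                        return;
                }
                mmswitch_int = false;

                /* The head domain ran in between: the MMU state is
                 * unknown, so redo the whole switch from scratch. */
                prev = NULL;
        }
}

int main(void)
{
        struct mm a = { .pgd = 1 };

        simulate_head_preemption = true;
        model_switch_mm_inner(&a);      /* switches twice: once redone */
        printf("active pgd: %d\n", active_mm->pgd);
        return 0;
}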
 EXPORT_SYMBOL_GPL(do_munmap);
 EXPORT_SYMBOL_GPL(show_stack);
 EXPORT_SYMBOL_GPL(init_mm);
 #ifndef MULTI_CPU
 EXPORT_SYMBOL_GPL(cpu_do_switch_mm);
 #endif
 EXPORT_SYMBOL_GPL(__check_vmalloc_seq);
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 EXPORT_SYMBOL_GPL(tasklist_lock);
 #endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */
 
 #ifndef CONFIG_SPARSE_IRQ
 EXPORT_SYMBOL_GPL(irq_desc);
 #endif
 
+#ifdef CONFIG_CPU_HAS_ASID
+EXPORT_SYMBOL_GPL(check_and_switch_context);
+#endif /* CONFIG_CPU_HAS_ASID */
+
 EXPORT_SYMBOL_GPL(cpu_architecture);
@@ -105,6 +105,9 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
                 return;
         }
 
+        if (IS_ENABLED(CONFIG_IPIPE))
+                return;
+
         arm_get_current_stackframe(regs, &fr);
         walk_stackframe(&fr, callchain_trace, entry);
 }
......
@@ -42,7 +42,7 @@
 #define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
 #define NUM_USER_ASIDS          ASID_FIRST_VERSION
 
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
@@ -237,15 +237,18 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
         return asid | generation;
 }
 
-void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+int check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk, bool root_p)
 {
         unsigned long flags;
-        unsigned int cpu = smp_processor_id();
+        unsigned int cpu = ipipe_processor_id();
         u64 asid;
 
         if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                 __check_vmalloc_seq(mm);
 
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+        flags = hard_local_irq_save();
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
         /*
          * We cannot update the pgd and the ASID atomicly with classic
          * MMU, so switch exclusively to global mappings to avoid
@@ -258,7 +261,11 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
             && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                 goto switch_mm_fastpath;
 
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+        raw_spin_lock(&cpu_asid_lock);
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
         raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
         /* Check that our ASID belongs to the current generation. */
         asid = atomic64_read(&mm->context.id);
         if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
@@ -273,8 +280,17 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
                 atomic64_set(&per_cpu(active_asids, cpu), asid);
         cpumask_set_cpu(cpu, mm_cpumask(mm));
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+        raw_spin_unlock(&cpu_asid_lock);
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
         raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 switch_mm_fastpath:
         cpu_switch_mm(mm->pgd, mm);
+
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+        hard_local_irq_restore(flags);
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+        return 0;
 }
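
Note: the context.c changes boil down to two locking strategies for the ASID slow path. Without CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH the lock is taken with raw_spin_lock_irqsave() as in mainline; with it, hard interrupts are masked across the whole of check_and_switch_context(), so the inner lock can be a plain raw_spin_lock(). The sketch below only contrasts the two shapes, using pthread mutexes and stub IRQ helpers in place of the kernel primitives; it is a structural illustration under those stated assumptions, not kernel code.

/* Side-by-side model of the two locking strategies used above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t asid_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the IRQ-masking primitives. */
static void model_hard_irq_save(void)    { puts("hard IRQs off"); }
static void model_hard_irq_restore(void) { puts("hard IRQs on"); }

/* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH: IRQs are masked only around
 * the slow path, together with taking the ASID lock. */
static void model_slowpath_irqsave(void)
{
        model_hard_irq_save();
        pthread_mutex_lock(&asid_lock);
        puts("allocate/roll over ASID");
        pthread_mutex_unlock(&asid_lock);
        model_hard_irq_restore();
}

/* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH: hard IRQs stay masked for the
 * whole call, so the inner lock need not save/restore IRQ state again. */
static void model_whole_function_masked(void)
{
        model_hard_irq_save();
        pthread_mutex_lock(&asid_lock);
        puts("allocate/roll over ASID");
        pthread_mutex_unlock(&asid_lock);
        puts("switch MMU to new pgd/ASID");
        model_hard_irq_restore();
}

int main(void)
{
        model_slowpath_irqsave();
        model_whole_function_masked();
        return 0;
}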