context_tracking.c
/*
 * Context tracking: Probe on high level context boundaries, such as between
 * kernel and userspace. This includes syscall and exception entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 *  Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>

struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;

DEFINE_PER_CPU(struct context_tracking, context_tracking);

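/**
 * context_tracking_cpu_set - Enable context tracking on a CPU
 * @cpu: the CPU to mark as context tracking aware
 *
 * Mark the per-cpu tracking state of @cpu as active and bump the
 * context_tracking_enabled static key so that the user enter/exit
 * probes below actually start doing work.
 */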
void context_tracking_cpu_set(int cpu)
{
	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_key_slow_inc(&context_tracking_enabled);
	}
}

/**
 * context_tracking_user_enter - Inform the context tracking that the CPU is going to
 *                               enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed that the remaining kernel instructions
 * to execute won't use any RCU read side critical section, because this
 * function sets RCU in an extended quiescent state.
 */
void context_tracking_user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) != IN_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			vtime_user_enter(current);
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, then sleeps in the
		 * exception handler and migrates to another CPU, that new CPU must know
		 * where the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, IN_USER);
	}
	local_irq_restore(flags);
}
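/*
 * Illustrative sketch only (not lifted from any particular arch): a context
 * tracking aware architecture brackets its kernel work with the
 * user_exit()/user_enter() wrappers, which call into the probes in this file
 * once context_tracking_enabled is set:
 *
 *	user_exit();		(syscall/exception entry from userspace)
 *	... kernel work, RCU read side sections allowed again ...
 *	user_enter();		(about to resume userspace)
 */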

#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, preempt_enable_notrace() will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
void __sched notrace preempt_schedule_context(void)
{
	enum ctx_state prev_ctx;

	if (likely(!preemptible()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
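/*
 * Rough sketch, not the actual macro: the notrace preempt enabling path is
 * expected to end up doing something along the lines of:
 *
 *	if (need_resched() && preemptible())
 *		preempt_schedule_context();
 *
 * so that a preemption point hit while context tracking still considers the
 * CPU to be in userspace first switches the state back to kernel mode.
 */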
#endif /* CONFIG_PREEMPT */

/**
 * context_tracking_user_exit - Inform the context tracking that the CPU is
 *                              exiting userspace mode and entering the kernel.
 *
 * This function must be called after we enter the kernel from userspace and
 * before any use of an RCU read side critical section. This potentially
 * includes any high level kernel code such as syscalls, exceptions, signal
 * handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_user_exit(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			vtime_user_exit(current);
		}
		__this_cpu_write(context_tracking.state, IN_KERNEL);
	}
	local_irq_restore(flags);
}
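/*
 * Illustrative sketch only: an exception handler that may fire from either
 * userspace or kernel mode typically relies on the re-entrancy of this probe
 * through the exception_enter()/exception_exit() helpers:
 *
 *	enum ctx_state prev_state = exception_enter();
 *	... handle the fault, RCU read side sections are fine here ...
 *	exception_exit(prev_state);
 */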

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
void guest_enter(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
}
EXPORT_SYMBOL_GPL(guest_enter);

void guest_exit(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}
EXPORT_SYMBOL_GPL(guest_exit);
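/*
 * Usage note (illustrative): a hypervisor such as KVM is expected to call
 * guest_enter() right before entering guest execution and guest_exit()
 * right after leaving it, so guest time gets accounted either by the
 * generic vtime code or through the PF_VCPU flag.
 */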
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */


/**
 * context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * Context tracking uses the syscall slow path to implement its user/kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void context_tracking_task_switch(struct task_struct *prev,
			     struct task_struct *next)
{
	clear_tsk_thread_flag(prev, TIF_NOHZ);
	set_tsk_thread_flag(next, TIF_NOHZ);
}
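/*
 * Note (illustrative, arch details vary): TIF_NOHZ is expected to be part of
 * the arch's syscall entry work mask, so a task carrying it takes the syscall
 * slow path and hits the user_exit()/user_enter() probes above.
 */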

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
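/*
 * CONFIG_CONTEXT_TRACKING_FORCE enables context tracking on every CPU at
 * boot, mostly useful to widely exercise these probes without requiring a
 * full dynticks setup.
 */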
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif