/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get a sort of weak cpu binding, though it is
     still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
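
/*
 * For illustration only (hypothetical names, not part of this file): a
 * softirq action that needs serialization simply takes its own lock
 * around its own data, e.g.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static LIST_HEAD(my_work);
 *
 *	static void my_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_lock);
 *		... drain my_work ...
 *		spin_unlock(&my_lock);
 *	}
 */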

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
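
/*
 * For illustration: given the convention above, generic code can tell the
 * two states apart from preempt_count alone.  The helpers (defined
 * elsewhere, in preempt.h; shown here only as a sketch) behave roughly as:
 *
 *	in_softirq()		- softirq_count() != 0, i.e. serving softirqs
 *				  OR running with bottom halves disabled.
 *	in_serving_softirq()	- the SOFTIRQ_OFFSET bit is set, i.e. we are
 *				  actually processing a softirq right now.
 */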

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order
 * not to mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC, as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which can
		 * already be quite deep. So run softirqs on their own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
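
/*
 * Usage example (handlers live elsewhere in the tree; shown only for
 * illustration): a subsystem registers its handler once at init time,
 * e.g. the networking core does
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 *
 * and softirq_init() below registers the tasklet vectors the same way.
 */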

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
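		/*
		 * Requeue path: either tasklet_trylock() failed because the
		 * tasklet is running on another CPU, or the tasklet is
		 * currently disabled.  Put it back on this CPU's list and
		 * re-raise TASKLET_SOFTIRQ so it is retried on a later pass.
		 */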
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
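
/*
 * Typical usage (illustrative sketch only, all names below are made up):
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		... bottom-half work, runs in softirq context ...
 *	}
 *
 *	tasklet_init(&dev->tl, my_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->tl);	(typically from the hard irq handler)
 *
 * A tasklet can also be declared statically with DECLARE_TASKLET().
 */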

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
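
/*
 * Illustrative usage (hypothetical names): after init, the embedded hrtimer
 * is started as usual and the callback then runs from HI_SOFTIRQ context:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		... softirq context ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_start(&th.timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */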

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}