/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
7
#include <linux/export.h>
8
#include <linux/sched/signal.h>
9
#include <linux/sched/debug.h>
Linus Torvalds's avatar
Linus Torvalds committed
10 11 12
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
13
#include <linux/kthread.h>
Linus Torvalds's avatar
Linus Torvalds committed
14

15
void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
16
{
17 18
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
19
	INIT_LIST_HEAD(&wq_head->head);
20
}
21

22
EXPORT_SYMBOL(__init_waitqueue_head);
23

24
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Linus Torvalds's avatar
Linus Torvalds committed
25 26 27
{
	unsigned long flags;

28
	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
29 30 31
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
32 33 34
}
EXPORT_SYMBOL(add_wait_queue);

35
void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Linus Torvalds's avatar
Linus Torvalds committed
36 37 38
{
	unsigned long flags;

39
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
40 41 42
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
43 44 45
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

46
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Linus Torvalds's avatar
Linus Torvalds committed
47 48 49
{
	unsigned long flags;

50 51 52
	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
53 54 55
}
EXPORT_SYMBOL(remove_wait_queue);

56 57 58 59 60 61
/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64
Linus Torvalds's avatar
Linus Torvalds committed
62

63 64 65 66 67 68 69 70 71
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
72 73 74
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
75
{
76
	wait_queue_entry_t *curr, *next;
77 78 79 80
	int cnt = 0;

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);
81

82 83 84 85 86
		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

87 88
	ipipe_root_only();

89 90 91 92
	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
93
		unsigned flags = curr->flags;
94 95 96 97 98 99
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
Linus Torvalds's avatar
Linus Torvalds committed
100 101 102
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
103
			break;
104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}
	return nr_exclusive;
}

/*
 * Wrapper around __wake_up_common() that takes (and, when the walk is
 * broken up, re-takes) wq_head->lock.  The on-stack bookmark entry lets
 * __wake_up_common() suspend a long walk so the irq-disabling lock is
 * dropped periodically instead of being held across the whole queue.
 */
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long irqflags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	/*
	 * The first pass always runs; further passes happen only while
	 * __wake_up_common() left the bookmark queued.
	 */
	do {
		spin_lock_irqsave(&wq_head->lock, irqflags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, irqflags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	/* Non-sync wakeup: no wake_flags. */
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
158
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
159
{
160
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
161 162 163
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

164
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
165
{
166
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
167 168 169
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

170 171 172 173 174 175 176
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags;

	if (unlikely(!wq_head))
		return;

	/* Only a plain wake-one can honour the "sync" hint. */
	wake_flags = (nr_exclusive == 1) ? 1 /* XXX WF_SYNC */ : 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
212
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
213
{
214
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
215 216 217
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

Linus Torvalds's avatar
Linus Torvalds committed
218 219 220 221 222 223 224 225 226 227
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
228
 * loads to move into the critical region).
Linus Torvalds's avatar
Linus Torvalds committed
229
 */
230
void
231
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
Linus Torvalds's avatar
Linus Torvalds committed
232 233 234
{
	unsigned long flags;

235
	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
236
	spin_lock_irqsave(&wq_head->lock, flags);
237
	if (list_empty(&wq_entry->entry))
238
		__add_wait_queue(wq_head, wq_entry);
Tejun Heo's avatar
Tejun Heo committed
239
	set_current_state(state);
240
	spin_unlock_irqrestore(&wq_head->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
241 242 243
}
EXPORT_SYMBOL(prepare_to_wait);

244
void
245
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
Linus Torvalds's avatar
Linus Torvalds committed
246 247 248
{
	unsigned long flags;

249
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
250
	spin_lock_irqsave(&wq_head->lock, flags);
251
	if (list_empty(&wq_entry->entry))
252
		__add_wait_queue_entry_tail(wq_head, wq_entry);
Tejun Heo's avatar
Tejun Heo committed
253
	set_current_state(state);
254
	spin_unlock_irqrestore(&wq_head->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
255 256 257
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

258
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
259
{
260 261 262
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
263
	INIT_LIST_HEAD(&wq_entry->entry);
264 265 266
}
EXPORT_SYMBOL(init_wait_entry);

267
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
268 269
{
	unsigned long flags;
270
	long ret = 0;
271

272
	spin_lock_irqsave(&wq_head->lock, flags);
273 274 275 276 277 278 279
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
280
		 * wakeup locks/unlocks the same wq_head->lock.
281 282 283 284 285
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
286
		list_del_init(&wq_entry->entry);
287 288
		ret = -ERESTARTSYS;
	} else {
289
		if (list_empty(&wq_entry->entry)) {
290
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
291
				__add_wait_queue_entry_tail(wq_head, wq_entry);
292
			else
293
				__add_wait_queue(wq_head, wq_entry);
294 295
		}
		set_current_state(state);
296
	}
297
	spin_unlock_irqrestore(&wq_head->lock, flags);
298

299
	return ret;
300 301 302
}
EXPORT_SYMBOL(prepare_to_wait_event);

303 304 305 306 307 308 309
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
310
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
311
{
312
	if (likely(list_empty(&wait->entry)))
313
		__add_wait_queue_entry_tail(wq, wait);
314 315 316 317 318 319 320 321 322 323 324 325

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

326
int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
327
{
328
	if (likely(list_empty(&wait->entry)))
329
		__add_wait_queue_entry_tail(wq, wait);
330 331 332 333 334 335 336 337 338 339 340 341

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

342
/**
343
 * finish_wait - clean up after waiting in a queue
344
 * @wq_head: waitqueue waited on
345
 * @wq_entry: wait descriptor
346 347 348 349 350
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
351
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Linus Torvalds's avatar
Linus Torvalds committed
352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area.
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
369
	if (!list_empty_careful(&wq_entry->entry)) {
370
		spin_lock_irqsave(&wq_head->lock, flags);
371
		list_del_init(&wq_entry->entry);
372
		spin_unlock_irqrestore(&wq_head->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
373 374 375 376
	}
}
EXPORT_SYMBOL(finish_wait);

377
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
Linus Torvalds's avatar
Linus Torvalds committed
378
{
379
	int ret = default_wake_function(wq_entry, mode, sync, key);
Linus Torvalds's avatar
Linus Torvalds committed
380 381

	if (ret)
382
		list_del_init(&wq_entry->entry);
Linus Torvalds's avatar
Linus Torvalds committed
383 384 385 386
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

387 388 389 390
static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}
391 392 393 394

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
395
 * add_wait_queue(&wq_head, &wait);
396 397 398 399 400 401
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;				condition = true;
 *     smp_mb(); // A				smp_wmb(); // C
402
 *     if (!wq_entry->flags & WQ_FLAG_WOKEN)	wq_entry->flags |= WQ_FLAG_WOKEN;
403 404
 *         schedule()				try_to_wake_up();
 *     p->state = TASK_RUNNING;		    ~~~~~~~~~~~~~~~~~~
405
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;		condition = true;
406
 *     smp_mb() // B				smp_wmb(); // C
407
 *						wq_entry->flags |= WQ_FLAG_WOKEN;
408
 * }
409
 * remove_wait_queue(&wq_head, &wait);
410 411
 *
 */
412
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
413 414 415 416 417 418 419
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
420
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
421 422 423 424 425 426 427 428 429
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
430
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
431 432 433 434 435

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

436
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
437 438 439 440 441 442
{
	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expects write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
443
	 * and is paired with smp_store_mb() in wait_woken().
444 445
	 */
	smp_wmb(); /* C */
446
	wq_entry->flags |= WQ_FLAG_WOKEN;
447

448
	return default_wake_function(wq_entry, mode, sync, key);
449 450
}
EXPORT_SYMBOL(woken_wake_function);