// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and per-semaphore list (stored in the array). This allows achieving FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
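
/*
 * Illustrative sketch of the user-space view described above (not kernel
 * code; the initial semval of 0 and the two-op array are assumptions made
 * only for this example). Both operations in one semop() call are applied
 * atomically, see perform_atomic_semop() below.
 *
 *	#include <sys/sem.h>
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	struct sembuf sops[2] = {
 *		{ .sem_num = 0, .sem_op = 0, .sem_flg = 0 },	    // wait for zero
 *		{ .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO }, // then increment
 *	};
 *	semop(id, sops, 2);	// succeeds atomically, or the caller sleeps
 */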

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>

#include <linux/uaccess.h>
#include "util.h"

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	bool			alter;	 /* does *sops alter the array? */
	bool			dupsop;	 /* sops on more than one sem_num */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	refcount_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};
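
/*
 * Illustrative note (not kernel code): a task created with
 * clone(..., CLONE_SYSVSEM, ...) shares its parent's sem_undo_list, so
 * the whole task group accumulates SEM_UNDO adjustments in one place
 * (see the copy_semundo/CLONE_SYSVSEM reference above).
 */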


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10

/*
 * Locking:
 * a) global sem_lock() for read/write
 *	sem_undo.id_next,
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *	sem_array.sem_undo
 *
 * b) global or semaphore sem_lock() for read/write:
 *	sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *	sem_undo_list.list_proc:
 *	* undo_list->lock for write
 *	* rcu for read
 *	use_global_lock:
 *	* global sem_lock() for write
 *	* either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 * The special case is use_global_lock:
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release().
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regards to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 */
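
/*
 * A minimal sketch of the release/acquire pairing described above
 * (illustration only; the real code paths are complexmode_tryleave()
 * and sem_lock() below):
 *
 *	CPU 0 (complex op finished)		CPU 1 (simple op starting)
 *	... writes under global lock ...	spin_lock(&sem->lock);
 *	smp_store_release(			if (!smp_load_acquire(
 *		&sma->use_global_lock, 0);		&sma->use_global_lock))
 *						// CPU 0's writes are visible,
 *						// the per-semaphore fast path
 *						// is safe to use
 */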

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

int sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	return ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif

int __init sem_init(void)
{
	const int err = sem_init_ns(&init_ipc_ns);

	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
	return err;
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;

	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}

static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(sma);
	kvfree(sma);
}

/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0)  {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}

/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count)  {
		/* Complex ops are sleeping.
		 * We must stay in complex mode
		 */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Immediately after setting use_global_lock to 0,
		 * a simple op can start. Thus: all memory writes
		 * performed by the current operation must be visible
		 * before we set use_global_lock to 0.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		sma->use_global_lock--;
	}
}

#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	sem = &sma->sems[sops->sem_num];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->use_global_lock) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* pairs with smp_store_release() */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];

		spin_unlock(&sem->lock);
	}
}
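
/*
 * Illustrative sketch of the calling convention (not a real caller):
 * the locknum returned by sem_lock() - either a semaphore index or
 * SEM_GLOBAL_LOCK - must be passed back unchanged to sem_unlock() so
 * that the matching per-semaphore or global unlock is performed.
 *
 *	rcu_read_lock();
 *	locknum = sem_lock(sma, sops, nsops);
 *	... operate on the array ...
 *	sem_unlock(sma, locknum);
 *	rcu_read_unlock();
 */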

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;
	size_t size;

	/* reject nsems that would overflow the size computation below */
	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
	sma = kvmalloc(size, GFP_KERNEL);
	if (unlikely(!sma))
		return NULL;

	memset(sma, 0, size);

	return sma;
}

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = sem_security,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks depends on the value indicated by the
 * semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): blocks if semval is non-zero.
 *  (3) <0 blocks if decrementing would take semval below zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops, pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		sma->sems[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}

static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		curr->sempid = q->pid;
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
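
/*
 * Illustrative example (values assumed only for the example): why the
 * two-pass scan above avoids roll-back. With
 *	sops = { { .sem_num = 0, .sem_op = -1 },
 *		 { .sem_num = 1, .sem_op = -1 } }
 * and semval[0] == 1, semval[1] == 0, the second op would block. The first
 * pass detects this before anything is written, so semval[0] is never
 * decremented and, unlike in perform_atomic_semop_slow(), nothing needs
 * to be undone.
 */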

static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	wake_q_add(wake_q, q->sleeper);
	/*
	 * Rely on the above implicit barrier, such that we can
	 * ensure that we hold reference to the task before setting
	 * q->status. Otherwise we could race with do_exit if the
	 * task is awoken by an external event before calling
	 * wake_up_process().
	 */
	WRITE_ONCE(q->status, error);
}
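
/*
 * A minimal sketch of how callers use the wake-queue (illustration only):
 * wakeups are collected while the locks are held and performed only after
 * every lock has been dropped, cf. the implementation notes at the top of
 * this file.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	locknum = sem_lock(sma, sops, nsops);
 *	...
 *	wake_up_sem_queue_prepare(q, error, &wake_q);
 *	...
 *	sem_unlock(sma, locknum);
 *	rcu_read_unlock();
 *	wake_up_q(&wake_q);
 */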

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented the value - thus they won't proceed either.
	 */
	return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}

/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sems[0].sem_otime = get_seconds();
	} else {
		sma->sems[sops[0].sem_num].sem_otime = get_seconds();
	}
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed
 * on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decreases.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all tasks on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
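
/*
 * Illustrative example (not kernel code): these counts back the
 * semctl() queries
 *
 *	int ncnt = semctl(id, 0, GETNCNT);  // tasks waiting for semval > 0
 *	int zcnt = semctl(id, 0, GETZCNT);  // tasks waiting for semval == 0
 *
 * and, per the SUS-compliant behavior checked in check_qop(), a task
 * blocked on a multi-sop request is counted only against the first
 * operation that could not proceed.
 */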

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);