signal.c 97.4 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
14
#include <linux/export.h>
Linus Torvalds's avatar
Linus Torvalds committed
15
#include <linux/init.h>
16
#include <linux/sched/mm.h>
17
#include <linux/sched/user.h>
18
#include <linux/sched/debug.h>
19
#include <linux/sched/task.h>
20
#include <linux/sched/task_stack.h>
21
#include <linux/sched/cputime.h>
Linus Torvalds's avatar
Linus Torvalds committed
22 23 24
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
25
#include <linux/coredump.h>
Linus Torvalds's avatar
Linus Torvalds committed
26 27 28
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
29
#include <linux/signal.h>
30
#include <linux/signalfd.h>
31
#include <linux/ratelimit.h>
32
#include <linux/tracehook.h>
33
#include <linux/capability.h>
34
#include <linux/freezer.h>
35
#include <linux/ipipe.h>
36 37
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
38
#include <linux/user_namespace.h>
39
#include <linux/uprobes.h>
40
#include <linux/compat.h>
41
#include <linux/cn_proc.h>
42
#include <linux/compiler.h>
43
#include <linux/posix-timers.h>
44

45 46
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
47

Linus Torvalds's avatar
Linus Torvalds committed
48
#include <asm/param.h>
49
#include <linux/uaccess.h>
Linus Torvalds's avatar
Linus Torvalds committed
50 51
#include <asm/unistd.h>
#include <asm/siginfo.h>
52
#include <asm/cacheflush.h>
53
#include "audit.h"	/* audit_signal_info() */
Linus Torvalds's avatar
Linus Torvalds committed
54 55 56 57 58

/*
 * SLAB caches for signal bits.
 */

59
static struct kmem_cache *sigqueue_cachep;
Linus Torvalds's avatar
Linus Torvalds committed
60

61 62
int print_fatal_signals __read_mostly;

63
/* Return the userspace handler currently installed for @sig in @t. */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	struct k_sigaction *ka = &t->sighand->action[sig - 1];

	return ka->sa.sa_handler;
}
67

68 69
/*
 * Does @handler mean @sig is ignored — either explicitly (SIG_IGN) or
 * implicitly (SIG_DFL for a signal whose default action is to ignore)?
 */
static int sig_handler_ignored(void __user *handler, int sig)
{
	if (handler == SIG_IGN)
		return 1;

	return handler == SIG_DFL && sig_kernel_ignore(sig);
}
Linus Torvalds's avatar
Linus Torvalds committed
74

75
/* Would @t ignore @sig based purely on its installed handler and flags? */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/*
	 * A SIGNAL_UNKILLABLE task (e.g. global init) with the default
	 * handler ignores the signal, unless it was forced AND is one of
	 * the signals only the kernel may deliver (sig_kernel_only()).
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}

88
/* Should @sig be dropped if generated for @t right now? */
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	/*
	 * The switch on the compile-time constant _NSIG_WORDS lets the
	 * common word counts be fully unrolled; only the default arm
	 * actually loops.
	 */
	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready !=	0;
}

/* Any signal in @p's pending set that is not masked by @b? */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

141
/*
 * Recompute whether @t has work pending (jobctl stop/trap, private or
 * shared signals).  Sets TIF_SIGPENDING and returns 1 if so; returns 0
 * otherwise without clearing the flag (see comment below).
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

/*
 * Recompute TIF_SIGPENDING for current.  Safe to clear the flag here
 * because it is our own; a freezing task keeps it set so it notices
 * the freeze request.
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}

/* Given the mask, find the first available signal that should be serviced. */

176 177
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
178
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
179

180
/*
 * Return the lowest-priority deliverable signal number in @pending that
 * is not blocked by @mask, or 0 if none.  Synchronous (fault) signals in
 * the first word are preferred over everything else.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	/* Remaining words: unrolled on the compile-time _NSIG_WORDS. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

226 227 228 229 230 231 232 233 234 235
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

236
	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
237 238 239
				current->comm, current->pid, sig);
}

240
/**
241
 * task_set_jobctl_pending - set jobctl pending bits
242
 * @task: target task
243
 * @mask: pending bits to set
244
 *
245 246 247 248 249 250 251 252 253 254 255 256
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
257
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	/* Only the documented bit subset may be requested. */
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	/* TRAPPING makes no sense without a pending stop/trap. */
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	/* A dying or exiting task no longer participates in job control. */
	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* A new stop signo replaces any previously recorded one. */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

273
/**
274
 * task_clear_jobctl_trapping - clear jobctl trapping bit
275 276
 * @task: target task
 *
277 278 279 280
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
281 282 283 284
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
285
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		/* Order the clear before the waker sees it. */
		smp_mb();	/* advised by wake_up_bit() */
		/* Wake the ptracer sleeping in e.g. ptrace_attach(). */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

294
/**
295
 * task_clear_jobctl_pending - clear jobctl pending bits
296
 * @task: target task
297
 * @mask: pending bits to clear
298
 *
299 300 301
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
302
 *
303 304
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
305 306 307 308
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
309
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	/* Clearing STOP_PENDING also retires the related STOP state bits. */
	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	/* No stop or trap left pending: release a waiting ptracer. */
	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
326
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
327
 * Group stop states are cleared and the group stop count is consumed if
328
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
329
 * stop, the appropriate %SIGNAL_* flags are set.
330 331 332
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
333 334 335 336
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
337 338 339 340
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	/* Whether this task still owes a decrement of group_stop_count. */
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

364 365 366
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
367
 *   appropriate lock must be held to stop the target task from exiting
368
 */
369 370
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	/*
	 * The per-user pending count is bumped optimistically *before*
	 * the rlimit check; it is rolled back below if we end up not
	 * allocating a queue entry.
	 */
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		/* Over RLIMIT_SIGPENDING: the signal will be dropped. */
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* Undo the optimistic accounting and the uid reference. */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;		/* q holds the uid reference now */
	}

	return q;
}

404
/*
 * Release a sigqueue entry and its uid accounting.  Preallocated
 * (timer-owned) entries are never freed here; their owner frees them.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

413
void flush_sigqueue(struct sigpending *queue)
Linus Torvalds's avatar
Linus Torvalds committed
414 415 416 417 418 419 420 421 422 423 424 425
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
426
 * Flush all pending signals for this kthread.
Linus Torvalds's avatar
Linus Torvalds committed
427
 */
428
/*
 * Discard everything pending for @t: the TIF flag, its private queue
 * and the thread group's shared queue, under siglock.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

439
#ifdef CONFIG_POSIX_TIMERS
440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472
/*
 * Remove only SI_TIMER-coded entries from @pending.  A signal number
 * stays in the pending mask if at least one non-timer entry for it
 * remains queued (tracked via @retain).
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* Non-timer entry: keep this signo pending. */
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

/* Flush pending itimer signals from current's private and shared queues. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
473
#endif
474

475 476 477 478 479 480 481 482 483 484
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

Linus Torvalds's avatar
Linus Torvalds committed
485 486 487 488 489 490 491 492 493 494 495 496 497
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;

	for (i = 0; i < _NSIG; i++) {
		struct k_sigaction *ka = &t->sighand->action[i];

		/* SIG_IGN survives exec unless a full reset is forced. */
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
	}
}

506 507
int unhandled_signal(struct task_struct *tsk, int sig)
{
508
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
509
	if (is_global_init(tsk))
510
		return 1;
511
	if (handler != SIG_IGN && handler != SIG_DFL)
512
		return 0;
513 514
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
515 516
}

517 518
/*
 * Pop the first queued entry for @sig off @list into @info.  @sig is
 * removed from the pending mask only if no further entry for it remains
 * queued (the goto skips the sigdelset in that case).
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		/* Preallocated timer entry: caller must rearm the timer. */
		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
562
			siginfo_t *info, bool *resched_timer)
Linus Torvalds's avatar
Linus Torvalds committed
563
{
564
	int sig = next_signal(pending, mask);
Linus Torvalds's avatar
Linus Torvalds committed
565

566
	if (sig)
567
		collect_signal(sig, pending, info, resched_timer);
Linus Torvalds's avatar
Linus Torvalds committed
568 569 570 571
	return sig;
}

/*
572
 * Dequeue a signal and return the element to the caller, which is
Linus Torvalds's avatar
Linus Torvalds committed
573 574 575 576 577 578
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		/* Fall back to the thread group's shared queue. */
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
662
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/* TIF_SIGPENDING must be prior to reporting. */
	/* ipipe: tell the co-kernel layer a signal wakeup is in flight. */
	__ipipe_report_sigwake(t);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

680 681 682 683 684 685
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	/* Fast path: nothing in @s->signal intersects @mask. */
	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	/* Clear the masked bits, then free the matching queue entries. */
	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
Linus Torvalds's avatar
Linus Torvalds committed
704

705 706 707 708 709 710 711 712 713 714 715
/* True for the special sentinel infos (SEND_SIG_NOINFO/PRIV/FORCED). */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

/* Does @info describe a signal originating from userspace? */
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

716 717 718 719 720 721 722 723
/*
 * called with RCU read lock from check_kill_permission()
 *
 * Returns 1 when current's credentials permit signalling @t: the
 * classic uid/euid-vs-uid/suid matches, or CAP_KILL in the target's
 * user namespace.
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
736 737
/*
 * Bad permissions for sending the signal
738
 * - the caller must hold the RCU read lock
Linus Torvalds's avatar
Linus Torvalds committed
739 740 741 742
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-generated signals bypass permission checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through: different session -> refuse */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

775 776 777 778 779 780 781 782
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
783 784 785 786 787
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
788 789 790 791 792 793 794 795 796 797
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	/* Arm the sticky trap; wake the tracee if its tracer is listening. */
	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

Linus Torvalds's avatar
Linus Torvalds committed
801
/*
802 803
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
Linus Torvalds's avatar
Linus Torvalds committed
804 805
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
806 807 808 809
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
Linus Torvalds's avatar
Linus Torvalds committed
810
 */
811
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/* Mid-coredump: only SIGKILL is still worth delivering. */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			/* SEIZED tracees re-trap instead of simply resuming. */
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	/* Deliver only if the target would not ignore the signal. */
	return !sig_ignored(p, sig, force);
}

876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	/* SIGKILL is always wanted, even by a stopped task. */
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p)) {
		/* ipipe: report the would-be wakeup for co-kernel tracking. */
		if (!signal_pending(p))
			__ipipe_report_sigwake(p);
		return 0;
	}
	/* Running now, or idle with no other signal already pending. */
	return task_curr(p) || !signal_pending(p);
}

900
/*
 * Pick a thread to receive @sig, which the caller has already queued on
 * @p's pending set (shared set when @group), and wake it.  If the signal
 * turns out to be fatal to the whole group, convert it into a group exit
 * here instead.  Called with @p->sighand->siglock held.
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target round-robins delivery across the group.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				/* Cancel any pending job control state, then kill. */
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

976 977 978 979 980
/*
 * Legacy (non-realtime) signals do not queue: report whether @sig is a
 * legacy signal that is already pending in @signals.
 */
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	if (sig >= SIGRTMIN)
		return 0;

	return sigismember(&signals->signal, sig);
}

981 982 983 984 985 986 987 988 989
#ifdef CONFIG_USER_NS
/*
 * Translate info->si_uid from the sender's user namespace into the
 * receiver @t's user namespace, so the uid the target sees is meaningful
 * there.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	/* Kernel-generated siginfo carries no userspace uid to translate. */
	if (SI_FROMKERNEL(info))
		return;

	/* Same namespace on both ends: nothing to convert. */
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
/* Without user namespaces there is only one uid space: no-op. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
}
#endif

1002 1003
/*
 * Core signal-generation path: queue @sig (with @info) on @t's private or
 * group-shared pending set and pick a thread to take it.
 *
 * @group:            queue on signal->shared_pending instead of t->pending
 * @from_ancestor_ns: sender lives in an ancestor pid namespace; the
 *                    receiver cannot interpret the sender's pid
 *
 * Returns 0 on success or -EAGAIN on rt-signal queue overflow.
 * Caller must hold @t->sighand->siglock.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* Fill in siginfo according to the sentinel value of @info. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			/* Report the sender's tgid as seen from @t's pid namespace. */
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			/* The sender's pid is meaningless in a descendant pid ns. */
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

1105 1106 1107
/*
 * Wrapper around __send_signal() that detects whether the sender sits in
 * an ancestor pid namespace relative to the target @t.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int cross_ns = 0;

#ifdef CONFIG_PID_NS
	/* A user-originated signal whose sender has no pid in @t's ns. */
	if (si_fromuser(info) &&
	    !task_pid_nr_ns(current, task_active_pid_ns(t)))
		cross_ns = 1;
#endif

	return __send_signal(sig, info, t, group, cross_ns);
}

1118
/*
 * Log a fatal signal number together with a register dump; on native
 * i386 additionally dump up to 16 code bytes at the faulting IP.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	{
		int pos;

		pr_info("code at %08lx: ", regs->ip);
		for (pos = 0; pos < 16; pos++) {
			unsigned char insn;

			/* Stop at the first unreadable byte. */
			if (get_user(insn, (unsigned char *)(regs->ip + pos)))
				break;
			pr_cont("%02x ", insn);
		}
		pr_cont("\n");
	}
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

/*
 * Parse the "print-fatal-signals=" kernel boot parameter into the
 * print_fatal_signals flag.
 */
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;	/* option handled */
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds's avatar
Linus Torvalds committed
1150

1151 1152 1153 1154 1155 1156
/*
 * Send @sig to the whole thread group of @p (shared pending queue).
 * No permission checking; callers are responsible for that.
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

Linus Torvalds's avatar
Linus Torvalds committed
1157 1158 1159
/* Queue @sig on @t's private (per-thread) pending set. */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176
/*
 * Send @sig to @p (group-wide when @group) while safely taking and
 * releasing the sighand lock.  Returns -ESRCH if the task's sighand is
 * already gone, otherwise the send_signal() result.
 */
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret;

	if (!lock_task_sighand(p, &flags))
		return -ESRCH;

	ret = send_signal(sig, info, p, group);
	unlock_task_sighand(p, &flags);

	return ret;
}

Linus Torvalds's avatar
Linus Torvalds committed
1177 1178 1179
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		/* Reset disposition to default so the signal cannot be dodged. */
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			/* Re-evaluate TIF_SIGPENDING and kick the task if needed. */
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 *
 * Queues SIGKILL on every other live thread of @p's group and wakes it;
 * also clears any pending job-control state.  Returns the number of
 * other threads iterated (including ones already in exit_state).
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	/* A pending group stop is moot once the group is being killed. */
	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

1242 1243
/*
 * Safely acquire @tsk->sighand->siglock even though @tsk may be exiting
 * concurrently.  Returns the locked sighand_struct with interrupts
 * disabled (saved in *flags), or NULL with *flags restored if the task
 * no longer has a sighand.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		/* Lost the race: drop everything and retry from scratch. */
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

1284 1285 1286
/*
 * send signal info to all the members of a group
 */
Linus Torvalds's avatar
Linus Torvalds committed
1287 1288
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
1289 1290 1291 1292 1293
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();
1294

1295 1296
	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);
Linus Torvalds's avatar
Linus Torvalds committed
1297 1298 1299 1300 1301

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 *
 * Returns 0 if the signal reached at least one member; otherwise the
 * error from the last member tried (-ESRCH when the group is empty).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

1321
/*
 * Send @sig to the thread group identified by @pid.  Retries if the
 * group leader is unhashed underneath us (e.g. by de_thread()).
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

1343
/*
 * kill_pid_info() by numeric pid: resolve @pid in the caller's pid
 * namespace under RCU, then deliver.
 */
static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();

	return ret;
}

1352 1353 1354 1355
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
1356 1357
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1358 1359 1360 1361
		return 0;
	return 1;
}

1362
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1363 1364
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
1365 1366 1367
{
	int ret = -EINVAL;
	struct task_struct *p;