/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
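
/*
 * This file implements the process_vm_readv() and process_vm_writev()
 * system calls, which copy data directly between the address space of
 * the calling task and that of another task, pinning the remote pages
 * with get_user_pages().
 */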

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			if (copy > iov_iter_count(iter))
				copy = iov_iter_count(iter);
			copied = iov_iter_copy_from_user(page, iter,
					offset, copy);
			iov_iter_advance(iter, copied);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
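		/* A short copy with iov space still left means the local iovec faulted */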
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/* Get the pages we're interested in */
		down_read(&mm->mmap_sem);
		pages = get_user_pages(task, mm, pa, pages,
				      vm_write, 0, process_pages, NULL);
		up_read(&mm->mmap_sem);

		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
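		/* Drop the references taken by get_user_pages() above */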
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or an error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
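	/* iov space before the copy; used to work out bytes actually copied */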
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or an error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	if (vm_write)
		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	else
		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	if (rc <= 0)
		goto free_iovecs;

	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt,	unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt,	unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
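
/*
 * Illustrative userspace usage (a sketch only, not part of this file; it
 * assumes the glibc wrappers declared in <sys/uio.h> and placeholder
 * values for the target pid and remote address):
 *
 *	#include <sys/uio.h>
 *
 *	char buf[128];
 *	struct iovec local  = { .iov_base = buf,  .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = addr, .iov_len = sizeof(buf) };
 *	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *
 * process_vm_writev() is called the same way with the copy direction
 * reversed. A return value smaller than the amount requested means the
 * copy stopped part way, e.g. because the remote range was not fully
 * mapped; -1 with errno set indicates no data was transferred.
 */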

#ifdef CONFIG_COMPAT

asmlinkage ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (vm_write)
		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	else
		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	if (rc <= 0)
		goto free_iovecs;
	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);
	return rc;
}

asmlinkage ssize_t
compat_sys_process_vm_readv(compat_pid_t pid,
			    const struct compat_iovec __user *lvec,
			    unsigned long liovcnt,
			    const struct compat_iovec __user *rvec,
			    unsigned long riovcnt,
			    unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

asmlinkage ssize_t
compat_sys_process_vm_writev(compat_pid_t pid,
			     const struct compat_iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct compat_iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif