/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
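
/*
 * For illustration (a sketch; the actual init lives outside this hunk):
 * a service tree is reset with the compound literal above, e.g.
 *
 *	cfqd->service_tree = CFQ_RB_ROOT;
 *
 * which empties the tree and clears the cached leftmost node.
 */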

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;
	unsigned int slice_dispatch;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;

	int rq_in_driver[2];
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_latency;

	struct list_head cic_list;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_end_sync_rq;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
#undef CFQ_CFQQ_FNS
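
/*
 * For illustration: each CFQ_CFQQ_FNS(name) invocation above expands to
 * three helpers. CFQ_CFQQ_FNS(on_rr), for instance, generates:
 *
 *	cfq_mark_cfqq_on_rr(cfqq);	- set the on_rr bit in ->flags
 *	cfq_clear_cfqq_on_rr(cfqq);	- clear the bit
 *	cfq_cfqq_on_rr(cfqq);		- test the bit, returning 0 or 1
 */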

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
						struct io_context *);

static inline int rq_in_driver(struct cfq_data *cfqd)
{
	return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
		return 1;

	return 0;
}

/*
 * schedule a run of the queue if there are requests pending and nothing
 * in the driver will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale the schedule slice based on io priority. Use the sync time slice
 * only if a queue is marked sync and has sync io queued. A sync queue with
 * async io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
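
/*
 * Worked example (a sketch, assuming HZ == 1000 so the default
 * cfq_slice_sync of HZ / 10 gives base_slice == 100ms): one priority
 * step is worth base_slice / CFQ_SLICE_SCALE == 20ms, so prio 0 gets
 * 100 + 4 * 20 == 180ms, the default prio 4 gets exactly 100ms, and
 * prio 7 gets 100 - 3 * 20 == 40ms.
 */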

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
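
/*
 * Worked example of the back seek penalty above (hypothetical numbers):
 * with cfq_back_penalty == 2 and the head at sector 1000, rq1 at sector
 * 1200 gives d1 == 200, and rq2 at sector 900 gives d2 == (1000 - 900) * 2
 * == 200; the tie goes to the higher sector, so rq1 is chosen.
 */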

/*
 * Below are the leftmost-cache rbtree helpers
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(&cfqd->service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back;
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
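
/*
 * To summarize the ordering above: RT queues always sort ahead of BE
 * queues and IDLE queues always sort behind them; within a class, a
 * smaller rb_key means earlier service, and cfq_rb_first() returns the
 * cached leftmost entry.
 */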

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector.  Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}
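
/*
 * Note on the else branch above: if another queue already sits at the
 * same next_rq sector in this prio tree, the lookup returns it and this
 * cfqq is left out of the tree (p_root is cleared) until its position
 * changes.
 */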

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias, *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver[rq_is_sync(rq)]++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						rq_in_driver(cfqd));

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);

	WARN_ON(!cfqd->rq_in_driver[sync]);
	cfqd->rq_in_driver[sync]--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						rq_in_driver(cfqd));
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active");
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		del_timer(&cfqd->idle_slice_timer);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq) {
		cfqq = cfq_get_next_queue(cfqd);
		if (cfqq)
			cfq_clear_cfqq_coop(cfqq);
	}

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);