/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */

static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
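
/*
 * For reference: a jiffy is 1/HZ seconds, so regardless of HZ the defaults
 * above work out to fifo expiry times of roughly 250ms and 125ms, a 100ms
 * sync slice, a 40ms async slice and an 8ms idle wait.
 */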

#define CFQ_IDLE_GRACE		(HZ / 10)
#define CFQ_SLICE_SCALE		(5)

#define CFQ_KEY_ASYNC		(0)

/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)

#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_dispatched(cfqq)	\
	((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])

#define cfq_cfqq_class_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define cfq_cfqq_sync(cfqq)		\
	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])

#define sample_valid(samples)	((samples) > 80)

/*
 * Per block device queue structure
 */
struct cfq_data {
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct list_head rr_list[CFQ_PRIO_LISTS];
	struct list_head busy_rr;
	struct list_head cur_rr;
	struct list_head idle_rr;
	unsigned int busy_queues;

	/*
	 * cfqq lookup hash
	 */
	struct hlist_head *cfq_hash;

	int rq_in_driver;
	int hw_tag;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	int cur_prio, cur_end_prio;
	unsigned int dispatch_slice;

	struct timer_list idle_class_timer;

	sector_t last_sector;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* cfqq lookup hash */
	struct hlist_node cfq_hash;
	/* hash key */
	unsigned int key;
	/* member of the rr/busy/cur/idle cfqd list */
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	unsigned long service_last;
	long slice_resid;

	/* number of requests that are on the dispatch list */
	int on_dispatch[2];

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
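
/*
 * As an example of the macro above, CFQ_CFQQ_FNS(on_rr) expands to
 * cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(),
 * which set, clear and test the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */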

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
#undef CFQ_CFQQ_FNS

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
{
	/*
	 * Use the per-process queue, for read requests and synchronous writes
	 */
	if (!(rw & REQ_RW) || is_sync)
		return task->pid;

	return CFQ_KEY_ASYNC;
}
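
/*
 * Illustration: two tasks doing buffered (async) writes end up sharing the
 * single CFQ_KEY_ASYNC queue, while each task's reads and synchronous
 * writes go to its own per-pid queue.
 */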

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}
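
/*
 * Example of the scaling above: with the default 100ms sync base slice and
 * CFQ_SLICE_SCALE = 5, ioprio 0 gets 100 + 20 * 4 = 180ms, ioprio 4 gets
 * 100ms and ioprio 7 gets 100 - 60 = 40ms, i.e. each priority step is
 * worth base_slice/5.
 */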

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfqq->slice_end += cfqq->slice_resid;

	/*
	 * Don't carry over residual for more than one slice, we only want
	 * to slightly correct the fairness. Carrying over forever would
	 * easily introduce oscillations.
	 */
	cfqq->slice_resid = 0;
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_sector;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
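	/*
	 * Worked example: with the head at sector 1000 and the default
	 * cfq_back_penalty of 2, a request at sector 1100 gets d = 100,
	 * while one at sector 950 (within back_max) gets
	 * d = (1000 - 950) * 2 = 100; the tie is then broken below in
	 * favour of the higher sector, i.e. the forward request.
	 */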
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct list_head *list, *n;
	struct cfq_queue *__cfqq;

	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (!cfq_cfqq_on_rr(cfqq))
		return;

	list_del(&cfqq->cfq_list);

	if (cfq_class_rt(cfqq))
		list = &cfqd->cur_rr;
	else if (cfq_class_idle(cfqq))
		list = &cfqd->idle_rr;
	else {
		/*
		 * if cfqq has requests in flight, don't allow it to be
		 * found in cfq_set_active_queue before it has finished them.
		 * this is done to increase fairness between a process that
		 * has lots of io pending vs one that only generates one
		 * sporadically or synchronously
		 */
		if (cfq_cfqq_dispatched(cfqq))
			list = &cfqd->busy_rr;
		else
			list = &cfqd->rr_list[cfqq->ioprio];
	}

	if (preempted || cfq_cfqq_queue_new(cfqq)) {
		/*
		 * If this queue was preempted or is new (never been serviced),
		 * let it be added first for fairness but behind other new
		 * queues.
		 */
		n = list;
		while (n->next != list) {
			__cfqq = list_entry_cfqq(n->next);
			if (!cfq_cfqq_queue_new(__cfqq))
				break;

			n = n->next;
		}
		list_add_tail(&cfqq->cfq_list, n);
	} else if (!cfq_cfqq_class_sync(cfqq)) {
		/*
		 * async queue always goes to the end. this won't be overly
		 * unfair to writes, as the sort of the sync queue won't be
		 * allowed to pass the async queue again.
		 */
		list_add_tail(&cfqq->cfq_list, list);
	} else {
		/*
		 * sort by last service, but don't cross a new or async
		 * queue. we don't cross a new queue because it hasn't been
		 * serviced before, and we don't cross an async queue because
		 * it gets added to the end on expire.
		 */
		n = list;
		while ((n = n->prev) != list) {
			struct cfq_queue *__cfqq = list_entry_cfqq(n);

			if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
				break;
			if (time_before(__cfqq->service_last, cfqq->service_last))
				break;
		}
		list_add(&cfqq->cfq_list, n);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqq, 0);
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);
	list_del_init(&cfqq->cfq_list);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static inline void
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
	struct cfq_queue *cfqq;

	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But let's
	 * make the mark a little higher - idling could still be good for
	 * low queueing, and a low queueing number could also just indicate
	 * a SCSI mid layer like behaviour where limit+1 is often seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(request_queue_t *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	const int rw = bio_data_dir(bio);
	struct cfq_queue *cfqq;
	pid_t key;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	key = cfq_queue_pid(current, rw, bio_sync(bio));
	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);

	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int preempted, int timed_out)
{
	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_queue_new(cfqq);

	/*
	 * store what was left of this slice, if the queue idled out
	 * or was preempted
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfqq->slice_end - jiffies;

	cfq_resort_rr_list(cfqq, preempted);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}

	cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
				     int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, preempted, timed_out);
}

/*
 * The [cur_prio, cur_end_prio] scan window widens by one priority level
 * per pass, so successive rounds cover:
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
	int prio, wrap;

	prio = -1;
	wrap = 0;
	do {
		int p;

		for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
			if (!list_empty(&cfqd->rr_list[p])) {
				prio = p;
				break;
			}
		}

		if (prio != -1)
			break;
		cfqd->cur_prio = 0;
		if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
			cfqd->cur_end_prio = 0;
			if (wrap)
				break;
			wrap = 1;
		}
	} while (1);

	if (unlikely(prio == -1))
		return -1;

	BUG_ON(prio >= CFQ_PRIO_LISTS);

	list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

	cfqd->cur_prio = prio + 1;
	if (cfqd->cur_prio > cfqd->cur_end_prio) {
		cfqd->cur_end_prio = cfqd->cur_prio;
		cfqd->cur_prio = 0;
	}
	if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
		cfqd->cur_prio = 0;
		cfqd->cur_end_prio = 0;
	}

	return prio;
}

static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = NULL;

	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
		/*
		 * if current list is non-empty, grab first entry. if it is
		 * empty, get next prio level and grab first entry then if any
		 * are spliced
		 */
		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
	} else if (!list_empty(&cfqd->busy_rr)) {
		/*
		 * If no new queues are available, check if the busy list has
		 * some before falling back to idle io.
		 */
		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
	} else if (!list_empty(&cfqd->idle_rr)) {
		/*
		 * if we have idle queues and no rt or be queues had pending
		 * requests, either allow immediate service if the grace period
		 * has passed or arm the idle grace timer
		 */
		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

		if (time_after_eq(jiffies, end))
			cfqq = list_entry_cfqq(cfqd->idle_rr.next);
		else
			mod_timer(&cfqd->idle_class_timer, end);
	}

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))

static int cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle)
		return 0;
	if (!cfq_cfqq_idle_window(cfqq))
		return 0;
	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
		return 0;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(2));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	return 1;
}

static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_remove_request(rq);
	cfqq->on_dispatch[rq_is_sync(rq)]++;
	elv_dispatch_sort(q, rq);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_class_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		return rq;

	return NULL;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
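
/*
 * Example: with the default cfq_slice_async_rq = 2 and CFQ_PRIO_LISTS = 8,
 * ioprio 7 may dispatch up to 2 * (2 + 2 * 0) = 4 requests per slice,
 * ioprio 4 up to 2 * (2 + 2 * 3) = 16 and ioprio 0 up to
 * 2 * (2 + 2 * 7) = 32.
 */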

/*
 * get next queue for service
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * slice has expired
	 */
	if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq))
		goto expire;

	/*
	 * if queue has requests, dispatch one. if not, check if
	 * enough slice is left to wait for one
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;
	else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	} else if (cfq_cfqq_class_sync(cfqq)) {
		if (cfq_arm_slice_timer(cfqd))
			return NULL;
	}

expire:
	cfq_slice_expired(cfqd, 0, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		cfqd->dispatch_slice++;
		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0, 0);
	}

	return dispatched;
}

static int
cfq_forced_dispatch_cfqqs(struct list_head *list)
{
	struct cfq_queue *cfqq, *next;
	int dispatched;

	dispatched = 0;
	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
		while (cfqq->next_rq) {
			cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
			dispatched++;
		}
		BUG_ON(!list_empty(&cfqq->fifo));
	}

	return dispatched;
}

static int
cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int i, dispatched = 0;

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);

	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);

	cfq_slice_expired(cfqd, 0, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}

static int
cfq_dispatch_requests(request_queue_t *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq, *prev_cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	prev_cfqq = NULL;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		if (cfqd->busy_queues > 1) {
			/*
			 * Don't repeat dispatch from the previous queue.
			 */
			if (prev_cfqq == cfqq)
				break;

			/*
			 * So we have dispatched before in this round, if the
			 * next queue has idling enabled (must be sync), don't
			 * allow it service until the previous have continued.
			 */
			if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
				break;
		}

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
		prev_cfqq = cfqq;
	}

	return dispatched;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{