Commit 7eaceacc authored by Jens Axboe

block: remove per-queue plugging

Code has been converted over to the new explicit on-stack plugging,
and delay users have been converted to use the new API for that.
So let's kill off the old plugging along with aops->sync_page().
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 73c10101
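
For context: the replacement API comes from the parent commit (73c10101). Instead of plugging the queue implicitly, a submitter now keeps a plug on its own stack and flushes it when done. A minimal sketch of a caller, assuming the blk_start_plug()/blk_finish_plug() interface that the parent commit introduces (the helper and bio loop below are illustrative only, not part of this commit):

    #include <linux/blkdev.h>

    /* Hypothetical caller: submit a batch of bios under one on-stack plug. */
    static void example_submit_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* requests collect on the task's plug list */
            for (i = 0; i < nr; i++)
                    submit_bio(READ, bios[i]);  /* merged against the plug, not a queue flag */
            blk_finish_plug(&plug);         /* push the whole batch down to the driver */
    }

Because pending requests now live with the submitting task rather than the queue, the queue_sync_plugs() shim added below only has to flush the current task's plug via blk_flush_plug(current).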
@@ -963,11 +963,6 @@ elevator_dispatch_fn*    fills the dispatch queue with ready requests.
 
 elevator_add_req_fn*     called to add a new request into the scheduler
 
-elevator_queue_empty_fn  returns true if the merge queue is empty.
-                         Drivers shouldn't use this, but rather check
-                         if elv_next_request is NULL (without losing the
-                         request if one exists!)
-
 elevator_former_req_fn
 elevator_latter_req_fn   These return the request before or after the
                          one specified in disk sort order. Used by the
......
@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+/*
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+ */
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+        /*
+         * If the current process is plugged and has barriers submitted,
+         * we will livelock if we don't unplug first.
+         */
+        blk_flush_plug(current);
+}
+
 static void blk_delay_work(struct work_struct *work)
 {
         struct request_queue *q;
@@ -224,137 +237,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-        WARN_ON(!irqs_disabled());
-
-        /*
-         * don't plug a stopped queue, it must be paired with blk_start_queue()
-         * which will restart the queueing
-         */
-        if (blk_queue_stopped(q))
-                return;
-
-        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-                trace_block_plug(q);
-        }
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q:    The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(q->queue_lock, flags);
-        blk_plug_device(q);
-        spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-        WARN_ON(!irqs_disabled());
-
-        if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-                return 0;
-
-        del_timer(&q->unplug_timer);
-        return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-        if (unlikely(blk_queue_stopped(q)))
-                return;
-        if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-                return;
-
-        q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q:    The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-        if (blk_queue_plugged(q)) {
-                spin_lock_irq(q->queue_lock);
-                __generic_unplug_device(q);
-                spin_unlock_irq(q->queue_lock);
-        }
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-                                   struct page *page)
-{
-        struct request_queue *q = bdi->unplug_io_data;
-
-        blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-        struct request_queue *q =
-                container_of(work, struct request_queue, unplug_work);
-
-        trace_block_unplug_io(q);
-        q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-        struct request_queue *q = (struct request_queue *)data;
-
-        trace_block_unplug_timer(q);
-        kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-        /*
-         * devices don't necessarily have an ->unplug_fn defined
-         */
-        if (q->unplug_fn) {
-                trace_block_unplug_io(q);
-                q->unplug_fn(q);
-        }
-}
-EXPORT_SYMBOL(blk_unplug);
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-        blk_remove_plug(q);
         cancel_delayed_work(&q->delay_work);
         queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-        del_timer_sync(&q->unplug_timer);
         del_timer_sync(&q->timeout);
-        cancel_work_sync(&q->unplug_work);
         throtl_shutdown_timer_wq(q);
         cancel_delayed_work_sync(&q->delay_work);
+        queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
@@ -430,14 +310,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
-        blk_remove_plug(q);
-
         if (unlikely(blk_queue_stopped(q)))
                 return;
 
-        if (elv_queue_empty(q))
-                return;
-
         /*
          * Only recurse once to avoid overrunning the stack, let the unplug
          * handling reinvoke the handler shortly if we already got there.
@@ -445,10 +320,8 @@ void __blk_run_queue(struct request_queue *q)
         if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                 q->request_fn(q);
                 queue_flag_clear(QUEUE_FLAG_REENTER, q);
-        } else {
-                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-                kblockd_schedule_work(q, &q->unplug_work);
-        }
+        } else
+                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         if (!q)
                 return NULL;
 
-        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-        q->backing_dev_info.unplug_io_data = q;
         q->backing_dev_info.ra_pages =
                         (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
         q->backing_dev_info.state = 0;
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                     laptop_mode_timer_fn, (unsigned long) q);
-        init_timer(&q->unplug_timer);
         setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
         INIT_LIST_HEAD(&q->timeout_list);
         INIT_LIST_HEAD(&q->flush_queue[0]);
         INIT_LIST_HEAD(&q->flush_queue[1]);
         INIT_LIST_HEAD(&q->flush_data_in_flight);
-        INIT_WORK(&q->unplug_work, blk_unplug_work);
         INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
         kobject_init(&q->kobj, &blk_queue_ktype);
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
         q->request_fn           = rfn;
         q->prep_rq_fn           = NULL;
         q->unprep_rq_fn         = NULL;
-        q->unplug_fn            = generic_unplug_device;
         q->queue_flags          = QUEUE_FLAG_DEFAULT;
         q->queue_lock           = lock;
@@ -910,8 +778,8 @@ out:
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                 trace_block_sleeprq(q, bio, rw_flags & 1);
 
-                __generic_unplug_device(q);
                 spin_unlock_irq(q->queue_lock);
                 io_schedule();
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
                             int where)
 {
         drive_stat_acct(rq, 1);
-        __elv_add_request(q, rq, where, 0);
+        __elv_add_request(q, rq, where);
 }
 
 /**
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug)
                 /*
                  * rq is already accounted, so use raw insert
                  */
-                __elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
+                __elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
         }
 
         if (q) {
......
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         rq->end_io = done;
         WARN_ON(irqs_disabled());
         spin_lock_irq(q->queue_lock);
-        __elv_add_request(q, rq, where, 1);
-        __generic_unplug_device(q);
+        __elv_add_request(q, rq, where);
+        __blk_run_queue(q);
         /* the queue is stopped so it won't be plugged+unplugged */
         if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                 q->request_fn(q);
......
@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error)
 {
         struct request_queue *q = flush_rq->q;
         struct list_head *running = &q->flush_queue[q->flush_running_idx];
-        bool was_empty = elv_queue_empty(q);
         bool queued = false;
         struct request *rq, *n;
 
@@ -213,7 +212,7 @@ static void flush_end_io(struct request *flush_rq, int error)
         }
 
         /* after populating an empty queue, kick it to avoid stall */
-        if (queued && was_empty)
+        if (queued)
                 __blk_run_queue(q);
 }
......
@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
         blk_queue_congestion_threshold(q);
         q->nr_batching = BLK_BATCH_REQ;
 
-        q->unplug_thresh = 4;           /* hmm */
-        q->unplug_delay = msecs_to_jiffies(3);  /* 3 milliseconds */
-        if (q->unplug_delay == 0)
-                q->unplug_delay = 1;
-
-        q->unplug_timer.function = blk_unplug_timeout;
-        q->unplug_timer.data = (unsigned long)q;
-
         blk_set_default_limits(&q->limits);
         blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
......
@@ -800,7 +800,6 @@ out:
         if (nr_disp) {
                 while((bio = bio_list_pop(&bio_list_on_stack)))
                         generic_make_request(bio);
-                blk_unplug(q);
         }
         return nr_disp;
 }
......
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
......
@@ -499,13 +499,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
         }
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-        struct cfq_data *cfqd = q->elevator->elevator_data;
-
-        return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -4061,7 +4054,6 @@ static struct elevator_type iosched_cfq = {
         .elevator_add_req_fn =          cfq_insert_request,
         .elevator_activate_req_fn =     cfq_activate_request,
         .elevator_deactivate_req_fn =   cfq_deactivate_request,
-        .elevator_queue_empty_fn =      cfq_queue_empty,
         .elevator_completed_req_fn =    cfq_completed_request,
         .elevator_former_req_fn =       elv_rb_former_request,
         .elevator_latter_req_fn =       elv_rb_latter_request,
......
@@ -326,14 +326,6 @@ dispatch_request:
         return 1;
 }
 
-static int deadline_queue_empty(struct request_queue *q)
-{
-        struct deadline_data *dd = q->elevator->elevator_data;
-
-        return list_empty(&dd->fifo_list[WRITE])
-                && list_empty(&dd->fifo_list[READ]);
-}
-
 static void deadline_exit_queue(struct elevator_queue *e)
 {
         struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
         .elevator_merge_req_fn =        deadline_merged_requests,
         .elevator_dispatch_fn =         deadline_dispatch_requests,
         .elevator_add_req_fn =          deadline_add_request,
-        .elevator_queue_empty_fn =      deadline_queue_empty,
         .elevator_former_req_fn =       elv_rb_former_request,
         .elevator_latter_req_fn =       elv_rb_latter_request,
         .elevator_init_fn =             deadline_init_queue,
......
@@ -619,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-        int unplug_it = 1;
-
         trace_block_rq_insert(q, rq);
 
         rq->q = q;
 
         switch (where) {
         case ELEVATOR_INSERT_REQUEUE:
-                /*
-                 * Most requeues happen because of a busy condition,
-                 * don't force unplug of the queue for that case.
-                 * Clear unplug_it and fall through.
-                 */
-                unplug_it = 0;
-
         case ELEVATOR_INSERT_FRONT:
                 rq->cmd_flags |= REQ_SOFTBARRIER;
                 list_add(&rq->queuelist, &q->queue_head);
@@ -679,24 +670,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 rq->cmd_flags |= REQ_SOFTBARRIER;
                 blk_insert_flush(rq);
                 break;
-
         default:
                 printk(KERN_ERR "%s: bad insertion point %d\n",
                        __func__, where);
                 BUG();
         }
-
-        if (unplug_it && blk_queue_plugged(q)) {
-                int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-                                - queue_in_flight(q);
-
-                if (nrq >= q->unplug_thresh)
-                        __generic_unplug_device(q);
-        }
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-                       int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
         BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
 
@@ -711,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
             where == ELEVATOR_INSERT_SORT)
                 where = ELEVATOR_INSERT_BACK;
 
-        if (plug)
-                blk_plug_device(q);
-
         elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-                     int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
         unsigned long flags;
 
         spin_lock_irqsave(q->queue_lock, flags);
-        __elv_add_request(q, rq, where, plug);
+        __elv_add_request(q, rq, where);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-        struct elevator_queue *e = q->elevator;
-
-        if (!list_empty(&q->queue_head))
-                return 0;
-
-        if (e->ops->elevator_queue_empty_fn)
-                return e->ops->elevator_queue_empty_fn(q);
-
-        return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
         struct elevator_queue *e = q->elevator;
......
@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq)
         list_add_tail(&rq->queuelist, &nd->queue);
 }
 
-static int noop_queue_empty(struct request_queue *q)
-{
-        struct noop_data *nd = q->elevator->elevator_data;
-
-        return list_empty(&nd->queue);
-}
-
 static struct request *
 noop_former_request(struct request_queue *q, struct request *rq)
 {
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = {
         .elevator_merge_req_fn =        noop_merged_requests,
         .elevator_dispatch_fn =         noop_dispatch,