Commit c5869807 authored by Tejun Heo, committed by Jens Axboe

block, cfq: reorganize cfq_io_context into generic and cfq specific parts

Currently, io_context and cfq logic are mixed without a clear boundary.
Most of io_context is independent from cfq, but the cfq_io_context
handling logic is dispersed between the generic ioc code and cfq.

cfq_io_context represents the association between an io_context and a
request_queue, a concept useful outside of cfq, but it also contains
fields which are useful only to cfq.

This patch takes out the generic part and puts it into io_cq (io
context-queue), and the rest into cfq_io_cq (the cic moniker remains the
same), which contains io_cq.  The following changes are made together.

* cfq_ttime and cfq_io_cq now live in cfq-iosched.c.

* All related fields, functions and constants are renamed accordingly.

* ioc->ioc_data is now "struct io_cq *" instead of "void *" and
  renamed to icq_hint.

This prepares for io_context API cleanup.  Documentation is currently
sparse.  It will be added later.

Changes in this patch are mechanical and don't cause functional
change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 22f746e2
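For orientation, here is a minimal sketch of the split this patch creates. The io_cq fields are abridged from the iocontext.h hunk below; the cfq_ttime fields come from the lines removed from iocontext.h, while the cfq_io_cq layout and the icq_to_cic() helper are illustrative reconstructions of what now lives in cfq-iosched.c (that diff is collapsed below), not verbatim patch code:

	/* Generic ioc <-> queue association, visible to block core (iocontext.h). */
	struct io_cq {
		struct request_queue	*q;
		struct io_context	*ioc;
		/* ... linkage into q and ioc lists, changed bits, exit/release hooks ... */
	};

	struct cfq_ttime {			/* moved from iocontext.h to cfq-iosched.c */
		unsigned long last_end_request;
		unsigned long ttime_total;
		unsigned long ttime_samples;
		unsigned long ttime_mean;
	};

	/* cfq-private part; the "cic" moniker stays.  Lives in cfq-iosched.c. */
	struct cfq_io_cq {
		struct io_cq		icq;	/* must be the first member */
		struct cfq_queue	*cfqq[2];
		struct cfq_ttime	ttime;
	};

	/* Generic code hands cfq an io_cq; cfq recovers its cic by containment. */
	static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
	{
		return container_of(icq, struct cfq_io_cq, icq);
	}

Because io_cq is the first member, a NULL io_cq pointer also converts to a NULL cfq_io_cq pointer, which keeps the generic-to-cfq handoff cheap.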
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(get_io_context);
 /*
  * Slow path for ioc release in put_io_context(). Performs double-lock
- * dancing to unlink all cic's and then frees ioc.
+ * dancing to unlink all icq's and then frees ioc.
  */
 static void ioc_release_fn(struct work_struct *work)
 {
@@ -56,11 +56,10 @@ static void ioc_release_fn(struct work_struct *work)
 	spin_lock_irq(&ioc->lock);

-	while (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
-							 struct cfq_io_context,
-							 cic_list);
-		struct request_queue *this_q = cic->q;
+	while (!hlist_empty(&ioc->icq_list)) {
+		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+						struct io_cq, ioc_node);
+		struct request_queue *this_q = icq->q;

 		if (this_q != last_q) {
 			/*
@@ -89,8 +88,8 @@ static void ioc_release_fn(struct work_struct *work)
 			continue;
 		}
 		ioc_release_depth_inc(this_q);
-		cic->exit(cic);
-		cic->release(cic);
+		icq->exit(icq);
+		icq->release(icq);
 		ioc_release_depth_dec(this_q);
 	}
@@ -131,10 +130,10 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 		return;

 	/*
-	 * Destroy @ioc. This is a bit messy because cic's are chained
+	 * Destroy @ioc. This is a bit messy because icq's are chained
 	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our cic_list and then
-	 * for each cic the outer matching queue_lock should be grabbed.
+	 * The inner ioc->lock should be held to walk our icq_list and then
+	 * for each icq the outer matching queue_lock should be grabbed.
 	 * ie. We need to do reverse-order double lock dancing.
 	 *
 	 * Another twist is that we are often called with one of the
@@ -153,11 +152,10 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 		spin_lock_irqsave_nested(&ioc->lock, flags,
 					 ioc_release_depth(locked_q));

-		while (!hlist_empty(&ioc->cic_list)) {
-			struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
-								 struct cfq_io_context,
-								 cic_list);
-			struct request_queue *this_q = cic->q;
+		while (!hlist_empty(&ioc->icq_list)) {
+			struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+							struct io_cq, ioc_node);
+			struct request_queue *this_q = icq->q;

 			if (this_q != last_q) {
 				if (last_q && last_q != locked_q)
@@ -170,8 +168,8 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 				continue;
 			}
 			ioc_release_depth_inc(this_q);
-			cic->exit(cic);
-			cic->release(cic);
+			icq->exit(icq);
+			icq->release(icq);
 			ioc_release_depth_dec(this_q);
 		}
@@ -180,8 +178,8 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 	spin_unlock_irqrestore(&ioc->lock, flags);

-	/* if no cic's left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->cic_list))
+	/* if no icq is left, we're done; otherwise, kick release_work */
+	if (hlist_empty(&ioc->icq_list))
 		kmem_cache_free(iocontext_cachep, ioc);
 	else
 		schedule_work(&ioc->release_work);
@@ -219,8 +217,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	atomic_long_set(&ioc->refcount, 1);
 	atomic_set(&ioc->nr_tasks, 1);
 	spin_lock_init(&ioc->lock);
-	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
-	INIT_HLIST_HEAD(&ioc->cic_list);
+	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
+	INIT_HLIST_HEAD(&ioc->icq_list);
 	INIT_WORK(&ioc->release_work, ioc_release_fn);

 	/* try to install, somebody might already have beaten us to it */
@@ -270,11 +268,11 @@ EXPORT_SYMBOL(get_task_io_context);
 void ioc_set_changed(struct io_context *ioc, int which)
 {
-	struct cfq_io_context *cic;
+	struct io_cq *icq;
 	struct hlist_node *n;

-	hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list)
-		set_bit(which, &cic->changed);
+	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
+		set_bit(which, &icq->changed);
 }

 /**
@@ -282,8 +280,8 @@ void ioc_set_changed(struct io_context *ioc, int which)
  * @ioc: io_context of interest
  * @ioprio: new ioprio
  *
- * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all
- * cic's. iosched is responsible for checking the bit and applying it on
+ * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
+ * icq's. iosched is responsible for checking the bit and applying it on
  * request issue path.
  */
 void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
@@ -292,7 +290,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
 	spin_lock_irqsave(&ioc->lock, flags);
 	ioc->ioprio = ioprio;
-	ioc_set_changed(ioc, CIC_IOPRIO_CHANGED);
+	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
@@ -300,7 +298,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
  * ioc_cgroup_changed - notify cgroup change
  * @ioc: io_context of interest
  *
- * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's.
+ * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
  * iosched is responsible for checking the bit and applying it on request
  * issue path.
  */
@@ -309,7 +307,7 @@ void ioc_cgroup_changed(struct io_context *ioc)
 	unsigned long flags;

 	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_changed(ioc, CIC_CGROUP_CHANGED);
+	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
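ioc_ioprio_changed() and ioc_cgroup_changed() above only mark the icq's under ioc->lock; per the docstrings, the iosched consumes the bits on the request issue path. A hedged sketch of that consumer side, with hypothetical helper names (the real cfq code is in the collapsed cfq-iosched.c diff):

	/* Sketch only: how an iosched might honor the changed bits at issue time. */
	static void cic_apply_changes(struct cfq_io_cq *cic)
	{
		unsigned long *changed = &cic->icq.changed;

		if (likely(!*changed))
			return;

		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, changed))
			changed_ioprio(cic);	/* hypothetical: re-read ioc->ioprio */
		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, changed))
			changed_cgroup(cic);	/* hypothetical: drop cached cgroup link */
	}

Using atomic test_and_clear_bit() lets the issue path pick the notifications up locklessly, which is why the setters only need ioc->lock.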
This diff is collapsed. (It is presumably the large cfq-iosched.c portion of the patch, where cfq_ttime and cfq_io_cq now live.)
@@ -5,38 +5,23 @@
 #include <linux/rcupdate.h>
 #include <linux/workqueue.h>

-struct cfq_queue;
-struct cfq_ttime {
-	unsigned long last_end_request;
-
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-};
-
 enum {
-	CIC_IOPRIO_CHANGED,
-	CIC_CGROUP_CHANGED,
+	ICQ_IOPRIO_CHANGED,
+	ICQ_CGROUP_CHANGED,
 };

-struct cfq_io_context {
-	struct request_queue *q;
-
-	struct cfq_queue *cfqq[2];
-
-	struct io_context *ioc;
-
-	struct cfq_ttime ttime;
-
-	struct list_head queue_list;
-	struct hlist_node cic_list;
-
-	void (*exit)(struct cfq_io_context *);
-	void (*release)(struct cfq_io_context *);
-
-	unsigned long changed;
-
-	struct rcu_head rcu_head;
+struct io_cq {
+	struct request_queue	*q;
+	struct io_context	*ioc;
+
+	struct list_head	q_node;
+	struct hlist_node	ioc_node;
+
+	unsigned long		changed;
+	struct rcu_head		rcu_head;
+
+	void (*exit)(struct io_cq *);
+	void (*release)(struct io_cq *);
 };
/*
......@@ -58,9 +43,9 @@ struct io_context {
int nr_batch_requests; /* Number of requests left in the batch */
unsigned long last_waited; /* Time last woken after wait for request */
struct radix_tree_root radix_root;
struct hlist_head cic_list;
void __rcu *ioc_data;
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
};
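The renamed fields pair up for lookup: icq_hint caches the most recently used icq as a fast path in front of icq_tree. A rough sketch of the resulting fast/slow path, assuming the queue id is the radix-tree key (the function shape and locking details here are illustrative, not this patch's exact code):

	/* Sketch: find the icq linking @ioc to @q, trying the hint first. */
	static struct io_cq *lookup_icq(struct io_context *ioc, struct request_queue *q)
	{
		struct io_cq *icq;

		rcu_read_lock();
		icq = rcu_dereference(ioc->icq_hint);	/* fast path: last-used icq */
		if (!icq || icq->q != q)
			icq = radix_tree_lookup(&ioc->icq_tree, q->id);	/* slow path */
		rcu_read_unlock();

		return icq;	/* NULL if @ioc has no association with @q yet */
	}

Since a task typically issues many requests to the same queue in a row, the hint avoids walking the radix tree on the common path; updates to both structures stay under ioc->lock.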