author	Tejun Heo	2011-12-13 17:33:41 -0600
committer	Jens Axboe	2011-12-13 17:33:41 -0600
commit	a612fddf0d8090f2877305c9168b6c1a34fb5d90 (patch)
tree	b59047a1670469362e1ea44093522224bdcf5aae /block/cfq-iosched.c
parent	c58698073218f2c8f2fc5982fa3938c2d3803b9f (diff)
download	kernel-common-a612fddf0d8090f2877305c9168b6c1a34fb5d90.tar.gz
kernel-common-a612fddf0d8090f2877305c9168b6c1a34fb5d90.tar.xz
kernel-common-a612fddf0d8090f2877305c9168b6c1a34fb5d90.zip
block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq
Most of icq management is about to be moved out of cfq into blk-ioc.  This patch prepares for it.

* Move cfqd->icq_list to request_queue->icq_list.

* Make request explicitly point to the icq instead of going through elevator private data.  ->elevator_private[3] is replaced with a sub struct elv which contains the icq pointer and priv[2].  cfq is updated accordingly.

* Remove the meaningless clearing of ->elevator_private[0] from elv_set_request().  At that point in the code, the field was guaranteed to be %NULL anyway.

This patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
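For orientation, the sketch below shows the shape of the fields this change relies on.  The diffstat below is limited to block/cfq-iosched.c, so the matching request_queue/request declarations (normally in include/linux/blkdev.h) are not part of the hunks shown here; the layout is assumed from the commit message and from the RQ_CIC()/RQ_CFQQ()/RQ_CFQG() macros in the first hunk, not taken from the patch itself.

/*
 * Illustrative sketch only, not the patch: field names follow the commit
 * message ("sub struct elv which contains icq pointer and priv[2]").
 */
struct io_cq;					/* per (io_context, queue) link */

struct request_queue {
	/* ... */
	struct list_head	icq_list;	/* takes over from cfqd->icq_list */
	/* ... */
};

struct request {
	/* ... */
	/* replaces void *elevator_private[3] */
	struct {
		struct io_cq	*icq;		/* what RQ_CIC(rq) now reads */
		void		*priv[2];	/* RQ_CFQQ(rq), RQ_CFQG(rq) */
	} elv;
	/* ... */
};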
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	28
1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d2f16fcdec7..9bc5ecc1b33 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,9 +54,9 @@ static const int cfq_hist_divisor = 4;
 #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
 #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
 
-#define RQ_CIC(rq)		icq_to_cic((rq)->elevator_private[0])
-#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private[1])
-#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elevator_private[2])
+#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
+#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
+#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_icq_pool;
@@ -297,8 +297,6 @@ struct cfq_data {
 	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
 
-	struct list_head icq_list;
-
 	/*
 	 * Fallback dummy cfqq for extreme OOM conditions
 	 */
@@ -3053,7 +3051,7 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 	ret = radix_tree_insert(&ioc->icq_tree, q->id, icq);
 	if (likely(!ret)) {
 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
-		list_add(&icq->q_node, &cfqd->icq_list);
+		list_add(&icq->q_node, &q->icq_list);
 		icq = NULL;
 	} else if (ret == -EEXIST) {
 		/* someone else already did it */
@@ -3605,12 +3603,10 @@ static void cfq_put_request(struct request *rq)
 
 	put_io_context(RQ_CIC(rq)->icq.ioc, cfqq->cfqd->queue);
 
-	rq->elevator_private[0] = NULL;
-	rq->elevator_private[1] = NULL;
-
 	/* Put down rq reference on cfqg */
 	cfq_put_cfqg(RQ_CFQG(rq));
-	rq->elevator_private[2] = NULL;
+	rq->elv.priv[0] = NULL;
+	rq->elv.priv[1] = NULL;
 
 	cfq_put_queue(cfqq);
 }
@@ -3696,9 +3692,9 @@ new_queue:
 	cfqq->allocated[rw]++;
 
 	cfqq->ref++;
-	rq->elevator_private[0] = &cic->icq;
-	rq->elevator_private[1] = cfqq;
-	rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
+	rq->elv.icq = &cic->icq;
+	rq->elv.priv[0] = cfqq;
+	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
 
@@ -3810,8 +3806,8 @@ static void cfq_exit_queue(struct elevator_queue *e)
 	if (cfqd->active_queue)
 		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
-	while (!list_empty(&cfqd->icq_list)) {
-		struct io_cq *icq = list_entry(cfqd->icq_list.next,
+	while (!list_empty(&q->icq_list)) {
+		struct io_cq *icq = list_entry(q->icq_list.next,
 						struct io_cq, q_node);
 		struct io_context *ioc = icq->ioc;
 
@@ -3922,8 +3918,6 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->oom_cfqq.ref++;
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
-	INIT_LIST_HEAD(&cfqd->icq_list);
-
 	cfqd->queue = q;
 
 	init_timer(&cfqd->idle_slice_timer);