aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTejun Heo2011-12-13 17:33:42 -0600
committerJens Axboe2011-12-13 17:33:42 -0600
commit47fdd4ca96bf4b28ac4d05d7a6e382df31d3d758 (patch)
treebcb928575b66511345b00102a7e8cace84526e3e /block/cfq-iosched.c
parenta612fddf0d8090f2877305c9168b6c1a34fb5d90 (diff)
downloadkernel-common-47fdd4ca96bf4b28ac4d05d7a6e382df31d3d758.tar.gz
kernel-common-47fdd4ca96bf4b28ac4d05d7a6e382df31d3d758.tar.xz
kernel-common-47fdd4ca96bf4b28ac4d05d7a6e382df31d3d758.zip
block, cfq: move io_cq lookup to blk-ioc.c
Now that all io_cq related data structures are in block core layer, io_cq lookup can be moved from cfq-iosched.c to blk-ioc.c. Lookup logic from cfq_cic_lookup() is moved to ioc_lookup_icq() with parameter and return type changes (cfqd -> request_queue, cfq_io_cq -> io_cq), and cfq_cic_lookup() becomes a thin wrapper around ioc_lookup_icq(). Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--block/cfq-iosched.c48
1 file changed, 8 insertions, 40 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9bc5ecc1b33..048fa699adf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -468,7 +468,6 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
468static void cfq_dispatch_insert(struct request_queue *, struct request *); 468static void cfq_dispatch_insert(struct request_queue *, struct request *);
469static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, 469static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
470 struct io_context *, gfp_t); 470 struct io_context *, gfp_t);
471static struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *, struct io_context *);
472 471
473static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq) 472static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
474{ 473{
@@ -476,6 +475,14 @@ static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
476 return container_of(icq, struct cfq_io_cq, icq); 475 return container_of(icq, struct cfq_io_cq, icq);
477} 476}
478 477
478static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
479 struct io_context *ioc)
480{
481 if (ioc)
482 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
483 return NULL;
484}
485
479static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync) 486static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
480{ 487{
481 return cic->cfqq[is_sync]; 488 return cic->cfqq[is_sync];
@@ -2971,45 +2978,6 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2971} 2978}
2972 2979
2973/** 2980/**
2974 * cfq_cic_lookup - lookup cfq_io_cq
2975 * @cfqd: the associated cfq_data
2976 * @ioc: the associated io_context
2977 *
2978 * Look up cfq_io_cq associated with @cfqd - @ioc pair. Must be called
2979 * with queue_lock held.
2980 */
2981static struct cfq_io_cq *
2982cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
2983{
2984 struct request_queue *q = cfqd->queue;
2985 struct io_cq *icq;
2986
2987 lockdep_assert_held(cfqd->queue->queue_lock);
2988 if (unlikely(!ioc))
2989 return NULL;
2990
2991 /*
2992 * icq's are indexed from @ioc using radix tree and hint pointer,
2993 * both of which are protected with RCU. All removals are done
2994 * holding both q and ioc locks, and we're holding q lock - if we
2995 * find a icq which points to us, it's guaranteed to be valid.
2996 */
2997 rcu_read_lock();
2998 icq = rcu_dereference(ioc->icq_hint);
2999 if (icq && icq->q == q)
3000 goto out;
3001
3002 icq = radix_tree_lookup(&ioc->icq_tree, cfqd->queue->id);
3003 if (icq && icq->q == q)
3004 rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
3005 else
3006 icq = NULL;
3007out:
3008 rcu_read_unlock();
3009 return icq_to_cic(icq);
3010}
3011
3012/**
3013 * cfq_create_cic - create and link a cfq_io_cq 2981 * cfq_create_cic - create and link a cfq_io_cq
3014 * @cfqd: cfqd of interest 2982 * @cfqd: cfqd of interest
3015 * @gfp_mask: allocation mask 2983 * @gfp_mask: allocation mask