author     Tejun Heo   2012-04-13 16:50:53 -0500
committer  Jens Axboe  2012-04-20 03:06:06 -0500
commit     80fd99792b0b9f162abdf3da12fb10eb9eb5f321 (patch)
tree       3273581c76d9bcad18f0668f6707be9323e650fb /block/blk-core.c
parent     da8b066262e12d1d0a3b1e6d3486e500169bf730 (diff)
blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing
Currently, blkg_lookup() doesn't check @q bypass state.  This patch
updates blk_queue_bypass_start() to do synchronize_rcu() before
returning and updates blkg_lookup() to check blk_queue_bypass() and
return %NULL if bypassing.  This ensures blkg_lookup() returns %NULL
if @q is bypassing.

This is to guarantee that nobody is accessing policy data while @q is
bypassing, which is necessary to allow replacing blkio_cgroup->pd[] in
place on policy [de]activation.

v2: Added more comments explaining bypass guarantees as suggested by
    Vivek.

v3: Added more comments explaining why there's no synchronize_rcu() in
    blk_cleanup_queue() as suggested by Vivek.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
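For reference, a rough sketch of the lookup-side check described above; the
actual change lives in block/blk-cgroup.c and is not part of this diffstat.
The exact signature and the __blkg_lookup() helper are assumptions for
illustration, not quoted from the patch:

/*
 * Sketch only.  Must be called under rcu_read_lock().  Because
 * blk_queue_bypass_start() now does synchronize_rcu() after raising
 * bypass_depth, any RCU reader that observes !blk_queue_bypass(q) here
 * is guaranteed to have started before bypassing was relied upon.
 */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	/* blkgs must not be handed out while @q is bypassing */
	if (unlikely(blk_queue_bypass(q)))
		return NULL;

	return __blkg_lookup(blkcg, q);	/* hypothetical helper doing the radix/hlist walk */
}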
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  15
1 files changed, 13 insertions, 2 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 991c1d6ef24..f2db628aa50 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -416,7 +416,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  * In bypass mode, only the dispatch FIFO queue of @q is used.  This
  * function makes @q enter bypass mode and drains all requests which were
  * throttled or issued before.  On return, it's guaranteed that no request
- * is being throttled or has ELVPRIV set.
+ * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
+ * inside queue or RCU read lock.
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
@@ -426,6 +427,8 @@ void blk_queue_bypass_start(struct request_queue *q)
 	spin_unlock_irq(q->queue_lock);
 
 	blk_drain_queue(q, false);
+	/* ensure blk_queue_bypass() is %true inside RCU read lock */
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
 
@@ -462,7 +465,15 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	spin_lock_irq(lock);
 
-	/* dead queue is permanently in bypass mode till released */
+	/*
+	 * Dead queue is permanently in bypass mode till released.  Note
+	 * that, unlike blk_queue_bypass_start(), we aren't performing
+	 * synchronize_rcu() after entering bypass mode to avoid the delay
+	 * as some drivers create and destroy a lot of queues while
+	 * probing.  This is still safe because blk_release_queue() will be
+	 * called only after the queue refcnt drops to zero and nothing,
+	 * RCU or not, would be traversing the queue by then.
+	 */
 	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 