Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 42 ++++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index db2db0b70d34..ae70b4809bec 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1118,6 +1118,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
 
 #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
 
+static void blk_mq_handle_dev_resource(struct request *rq,
+				       struct list_head *list)
+{
+	struct request *next =
+		list_first_entry_or_null(list, struct request, queuelist);
+
+	/*
+	 * If an I/O scheduler has been configured and we got a driver tag for
+	 * the next request already, free it.
+	 */
+	if (next)
+		blk_mq_put_driver_tag(next);
+
+	list_add(&rq->queuelist, list);
+	__blk_mq_requeue_request(rq);
+}
+
 /*
  * Returns true if we did some work AND can potentially do more.
  */
@@ -1185,17 +1202,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
 		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
-			/*
-			 * If an I/O scheduler has been configured and we got a
-			 * driver tag for the next request already, free it
-			 * again.
-			 */
-			if (!list_empty(list)) {
-				nxt = list_first_entry(list, struct request, queuelist);
-				blk_mq_put_driver_tag(nxt);
-			}
-			list_add(&rq->queuelist, list);
-			__blk_mq_requeue_request(rq);
+			blk_mq_handle_dev_resource(rq, list);
 			break;
 		}
 
@@ -2317,11 +2324,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	/*
-	 * Avoid others reading imcomplete hctx->cpumask through sysfs
-	 */
-	mutex_lock(&q->sysfs_lock);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
 		hctx->nr_ctx = 0;
@@ -2355,8 +2357,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
 	}
 
-	mutex_unlock(&q->sysfs_lock);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
 		 * If no software queues are mapped to this hardware queue,
@@ -2673,10 +2673,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 /* tags can _not_ be used after returning from blk_mq_exit_queue */
 void blk_mq_exit_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	blk_mq_del_queue_tag_set(q);
-	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
+	blk_mq_del_queue_tag_set(q);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */