author    Jens Axboe 2011-05-20 13:52:16 -0500
committer Jens Axboe 2011-05-20 13:52:16 -0500
commit    771949d03b4f5295f648f09141325fd478f6c7ce (patch)
tree      5e69b743566cc8f1eaafffcd03554bbd2b84e028 /block
parent    0eb8e885726a3a93206510092bbc7e39e272f6ef (diff)
block: get rid of on-stack plugging debug checks
We don't need them anymore, so kill:

- REQ_ON_PLUG checks in various places
- !rq_mergeable() check in plug merging

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  27
-rw-r--r--  block/elevator.c   4
2 files changed, 0 insertions, 31 deletions
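
For context: on-stack plugging, the mechanism whose debug checks are removed here, keeps a struct blk_plug on the submitter's stack and batches requests on plug->list until blk_finish_plug() (or a schedule) flushes them through blk_flush_plug_list(). A minimal usage sketch, not part of this commit; submit_batch() is a hypothetical caller, while blk_start_plug(), blk_finish_plug() and submit_bio() are the real kernel APIs of this era:

/*
 * Hypothetical caller illustrating on-stack plugging. Requests issued
 * between blk_start_plug() and blk_finish_plug() collect on the
 * on-stack plug->list and are dispatched in one batch by
 * blk_flush_plug_list(); REQ_ON_PLUG only ever marked requests as
 * sitting on such a list for debugging, which is why it can go.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

static void submit_batch(struct bio *bios[], int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);              /* begin batching on our stack */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);  /* queued on the plug, not yet dispatched */
	blk_finish_plug(&plug);             /* flush the batch to the queue(s) */
}
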
diff --git a/block/blk-core.c b/block/blk-core.c
index 9e8e297374b..7369eeeafe2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -569,8 +569,6 @@ int blk_get_queue(struct request_queue *q)
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
 	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
@@ -1110,14 +1108,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 {
 	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 
-	/*
-	 * Debug stuff, kill later
-	 */
-	if (!rq_mergeable(req)) {
-		blk_dump_rq_flags(req, "back");
-		return false;
-	}
-
 	if (!ll_back_merge_fn(q, req, bio))
 		return false;
 
@@ -1141,14 +1131,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	sector_t sector;
 
-	/*
-	 * Debug stuff, kill later
-	 */
-	if (!rq_mergeable(req)) {
-		blk_dump_rq_flags(req, "front");
-		return false;
-	}
-
 	if (!ll_front_merge_fn(q, req, bio))
 		return false;
 
@@ -1258,14 +1240,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	el_ret = elv_merge(q, &req, bio);
 	if (el_ret == ELEVATOR_BACK_MERGE) {
-		BUG_ON(req->cmd_flags & REQ_ON_PLUG);
 		if (bio_attempt_back_merge(q, req, bio)) {
 			if (!attempt_back_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
 		}
 	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-		BUG_ON(req->cmd_flags & REQ_ON_PLUG);
 		if (bio_attempt_front_merge(q, req, bio)) {
 			if (!attempt_front_merge(q, req))
 				elv_merged_request(q, req, el_ret);
@@ -1320,10 +1300,6 @@ get_rq:
 			if (__rq->q != q)
 				plug->should_sort = 1;
 		}
-		/*
-		 * Debug flag, kill later
-		 */
-		req->cmd_flags |= REQ_ON_PLUG;
 		list_add_tail(&req->queuelist, &plug->list);
 		drive_stat_acct(req, 1);
 	} else {
@@ -2749,7 +2725,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
-		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			/*
@@ -2761,8 +2736,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
-		rq->cmd_flags &= ~REQ_ON_PLUG;
-
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
diff --git a/block/elevator.c b/block/elevator.c
index 2a0b653c90f..b0b38ce0dcb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -416,8 +416,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	struct list_head *entry;
 	int stop_flags;
 
-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
 
@@ -656,8 +654,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 
 	rq->q = q;
 
-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
 	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||