author	Jens Axboe	2006-12-13 06:02:26 -0600
committer	Jens Axboe	2006-12-13 06:02:26 -0600
commit	7749a8d423c483a51983b666613acda1a4dd9c1b (patch)
tree	5a2a20640cca9ca519324b7933005f6fd9c4a6a5	/block/ll_rw_blk.c
parent	445722f97a0ecd3aed3f53d9f0dcaacaef8c6223 (diff)
[PATCH] Propagate down request sync flag
We need to do this, otherwise the io schedulers don't get access to the sync flag. Then they cannot tell the difference between a regular write and an O_DIRECT write, which can cause a performance loss.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
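For illustration only (not part of the patch): once the full flags word reaches the elevator, a scheduler's may_queue hook can branch on REQ_RW_SYNC instead of seeing only the bare read/write bit. The hook name below is hypothetical; REQ_RW_SYNC, request_queue_t and the ELV_MQUEUE_* codes are the real symbols of this kernel era.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Hypothetical elevator may_queue hook: with rw_flags propagated, the
 * scheduler can tell a sync (e.g. O_DIRECT) write from a buffered one. */
static int example_may_queue(request_queue_t *q, int rw_flags)
{
	if (rw_flags & REQ_RW_SYNC)
		return ELV_MQUEUE_MUST;	/* latency-sensitive: let it queue */

	return ELV_MQUEUE_MAY;		/* normal policy for async writes */
}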
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	28
1 file changed, 20 insertions, 8 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a541b42c08e..79807dbc306 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
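A note on the new const int rw = rw_flags & 0x01; line, illustrated below under the kernel's READ == 0 / WRITE == 1 convention (the example values are assumptions, not text from the patch): the data direction lives in bit 0, so masking with 0x01 recovers the plain direction used to index the per-direction request_list bookkeeping, while the unmasked rw_flags still carries REQ_RW_SYNC down to elv_may_queue() and blk_alloc_request().

	int rw_flags = WRITE | REQ_RW_SYNC;	/* e.g. an O_DIRECT write */
	int rw = rw_flags & 0x01;		/* == WRITE: indexes rl->count[rw], rl->wait[rw] */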
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2984,10 +2987,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 
 get_rq:
 	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
+	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
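Putting the hunks together, a paraphrased sketch of the resulting flow in __make_request() (a summary of this patch, not literal kernel source):

	/* Build the flags once, early, so the allocator and scheduler see them. */
	rw_flags = bio_data_dir(bio);		/* READ or WRITE */
	if (sync)
		rw_flags |= REQ_RW_SYNC;	/* mark sync writes, e.g. O_DIRECT */

	/*
	 * The full word now travels the whole allocation path:
	 *   __make_request() -> get_request_wait() -> get_request()
	 *     -> elv_may_queue(q, rw_flags)
	 *     -> blk_alloc_request(q, rw_flags, priv, gfp_mask)
	 */
	req = get_request_wait(q, rw_flags, bio);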