author		James Bottomley		2009-05-17 10:55:18 -0500
committer	Jens Axboe		2009-05-19 05:14:55 -0500
commit		3a5a39276d2a32b05b1ee25b384516805b17cf87 (patch)
tree		8a52a4a158ed341b7c3049826c549c2a210386e3
parent		b2858d7d1639c04ca3c54988d76c5f7300b76f1c (diff)
download	kernel-common-3a5a39276d2a32b05b1ee25b384516805b17cf87.tar.gz
		kernel-common-3a5a39276d2a32b05b1ee25b384516805b17cf87.tar.xz
		kernel-common-3a5a39276d2a32b05b1ee25b384516805b17cf87.zip
block: allow blk_rq_map_kern to append to requests
Use blk_rq_append_bio() internally instead of blk_rq_bio_prep() so
blk_rq_map_kern() can be called multiple times, to map multiple buffers.

This is in the effort to un-export blk_rq_append_bio().

Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
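For illustration only (not part of the patch): once blk_rq_map_kern() appends rather than replaces, a driver could map two separate kernel buffers, say a header and a payload, into a single request with back-to-back calls. The helper name and buffers below are hypothetical; only blk_rq_map_kern() and its (q, rq, kbuf, len, gfp_mask) signature come from the code being changed.

	/* Hypothetical caller sketch -- not from this commit. */
	static int example_map_two_buffers(struct request_queue *q, struct request *rq,
					   void *hdr, unsigned int hdr_len,
					   void *data, unsigned int data_len)
	{
		int ret;

		ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
		if (ret)
			return ret;

		/* With this patch the second call appends another bio to rq
		 * instead of overwriting the first mapping. */
		return blk_rq_map_kern(q, rq, data, data_len, GFP_KERNEL);
	}

Before this change the second call would have re-run blk_rq_bio_prep() and discarded the first mapping; after it, the second buffer is merged into the same request, and the error from blk_rq_append_bio() is returned if the request would grow past the queue limits.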
-rw-r--r--	block/blk-map.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 56082bea450..caa05a66774 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -282,7 +282,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  *
  * Description:
  *    Data will be mapped directly if possible. Otherwise a bounce
- *    buffer is used.
+ *    buffer is used. Can be called multiple times to append multiple
+ *    buffers.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
@@ -290,6 +291,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
@@ -311,7 +313,13 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	blk_rq_bio_prep(q, rq, bio);
+	ret = blk_rq_append_bio(q, rq, bio);
+	if (unlikely(ret)) {
+		/* request is too big */
+		bio_put(bio);
+		return ret;
+	}
+
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = NULL;
 	return 0;