Diffstat (limited to 'block')
 block/blk-barrier.c  |  8 ++++----
 block/blk-core.c     | 16 ++++++++--------
 block/blk-map.c      |  4 ++--
 block/blk-merge.c    | 27 ++++++++++++++-------------
 block/blk-settings.c | 15 ++++++++++++---
 block/blk-sysfs.c    |  8 ++++----
 block/compat_ioctl.c |  2 +-
 block/ioctl.c        | 10 +++++-----
 block/scsi_ioctl.c   |  8 ++++----
 9 files changed, 54 insertions(+), 44 deletions(-)
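
This patch replaces direct reads of the request_queue limit fields (max_sectors, max_hw_sectors, max_segment_size, the segment-count limits, and bounce_pfn) with accessor functions. The accessors live in include/linux/blkdev.h and are outside this diffstat; a minimal sketch of the pattern, assuming they are trivial inline wrappers over the existing fields:

	static inline unsigned int queue_max_sectors(struct request_queue *q)
	{
		return q->max_sectors;		/* soft per-request cap, in 512-byte sectors */
	}

	static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
	{
		return q->max_hw_sectors;	/* hardware ceiling, in 512-byte sectors */
	}

Funneling every reader through a helper lets the underlying storage move later (for example into an embedded limits structure) without touching the call sites again.
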
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0d98054cdbd..30022b4e2f6 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
 
 		bio->bi_sector = sector;
 
-		if (nr_sects > q->max_hw_sectors) {
-			bio->bi_size = q->max_hw_sectors << 9;
-			nr_sects -= q->max_hw_sectors;
-			sector += q->max_hw_sectors;
+		if (nr_sects > queue_max_hw_sectors(q)) {
+			bio->bi_size = queue_max_hw_sectors(q) << 9;
+			nr_sects -= queue_max_hw_sectors(q);
+			sector += queue_max_hw_sectors(q);
 		} else {
 			bio->bi_size = nr_sects << 9;
 			nr_sects = 0;
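
The discard loop above issues one bio per chunk, each covering at most queue_max_hw_sectors(q) sectors; bi_size is in bytes, hence the << 9 (a 512-byte sector is 2^9 bytes). A condensed sketch of the same splitting arithmetic, with a hypothetical issue_chunk() standing in for bio submission:

	/* Hypothetical distillation of the splitting loop above. */
	static void split_discard(sector_t sector, sector_t nr_sects,
				  unsigned int max_hw_sectors)
	{
		while (nr_sects) {
			sector_t chunk = nr_sects > max_hw_sectors ?
					 max_hw_sectors : nr_sects;

			issue_chunk(sector, chunk << 9);	/* bytes = sectors << 9 */
			sector += chunk;
			nr_sects -= chunk;
		}
	}
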
diff --git a/block/blk-core.c b/block/blk-core.c
index 59c4af52311..7a4c40184a6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(nr_sectors > q->max_hw_sectors)) {
+	if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-			bdevname(bio->bi_bdev, b),
-			bio_sectors(bio),
-			q->max_hw_sectors);
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
 		goto end_io;
 	}
 
@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (blk_rq_sectors(rq) > q->max_sectors ||
-	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > q->max_phys_segments ||
-	    rq->nr_phys_segments > q->max_hw_segments) {
+	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
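
blk_rq_check_limits() validates a pre-built request on two axes: size (the soft queue_max_sectors() cap on the sector count, plus the hard queue_max_hw_sectors() ceiling on the byte length) and segment count after recalculation. A hedged, stand-alone sketch of the size half, with hypothetical parameter names:

	/* Hypothetical stand-alone version of the size check above. */
	static int rq_size_ok(unsigned int rq_sectors, unsigned int rq_bytes,
			      unsigned int max_sectors, unsigned int max_hw_sectors)
	{
		if (rq_sectors > max_sectors)
			return 0;			/* over the soft cap */
		if (rq_bytes > max_hw_sectors << 9)
			return 0;			/* over the hardware ceiling */
		return 1;
	}
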
diff --git a/block/blk-map.c b/block/blk-map.c
index ef2492adca7..9083cf0180c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	struct bio *bio = NULL;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len)
 		return -EINVAL;
@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	struct bio *bio;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4974dd5767e..39ce64432ba 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		 * never considered part of another segment, since that
 		 * might change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 		if (high || highprv)
 			goto new_segment;
 		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
+			if (seg_size + bv->bv_len
+			    > queue_max_segment_size(q))
 				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 				goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    q->max_segment_size)
+	    queue_max_segment_size(q))
 		return 0;
 
 	if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
-				if (sg->length + nbytes > q->max_segment_size)
+				if (sg->length + nbytes > queue_max_segment_size(q))
 					goto new_segment;
 
 				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > q->max_phys_segments)
+	if (total_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
-	if (total_phys_segments > q->max_hw_segments)
+	if (total_phys_segments > queue_max_hw_segments(q))
 		return 0;
 
 	/* Merge is OK... */
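
Every merge helper in this file asks the same question against the now accessor-wrapped limits: would the combined segment exceed queue_max_segment_size(q), and would the combined request exceed the sector or segment-count caps. A condensed, hypothetical distillation of the request-level decision:

	/* Hypothetical distillation of the ll_*_merge_fn checks above. */
	static int may_merge(unsigned int req_sectors, unsigned int bio_sectors,
			     unsigned int req_segs, unsigned int bio_segs,
			     unsigned int max_sectors, unsigned int max_segs)
	{
		if (req_sectors + bio_sectors > max_sectors)
			return 0;	/* merged request would be too long */
		if (req_segs + bio_segs > max_segs)
			return 0;	/* too many scatter-gather segments */
		return 1;
	}
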
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 15c3164537b..0b32f984eed 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+	else
+		q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q: the request queue for the device
@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
-	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	--q->max_hw_segments;
-	--q->max_phys_segments;
+	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
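
Two things worth noting here: the new blk_queue_max_hw_sectors() setter never lets the limit drop below BLK_DEF_MAX_SECTORS, and blk_queue_dma_drain() now reserves its drain segment through the setters instead of decrementing the fields directly. A hedged sketch of how a driver might use the drain hook (the policy callback and buffer size are hypothetical):

	/* Hypothetical driver setup reserving a drain buffer. */
	static int my_drain_needed(struct request *rq)
	{
		return 1;	/* placeholder policy: always append the drain */
	}

	static int my_init_queue(struct request_queue *q)
	{
		void *buf = kmalloc(4096, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		return blk_queue_dma_drain(q, my_drain_needed, buf, 4096);
	}
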
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 13d38b7e4d0..142a4acddd4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
-	int max_sectors_kb = q->max_sectors >> 1;
+	int max_sectors_kb = queue_max_sectors(q) >> 1;
 
 	return queue_var_show(max_sectors_kb, (page));
 }
@@ -109,7 +109,7 @@ static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long max_sectors_kb,
-		max_hw_sectors_kb = q->max_hw_sectors >> 1,
+		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
 			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
@@ -117,7 +117,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 		return -EINVAL;
 
 	spin_lock_irq(q->queue_lock);
-	q->max_sectors = max_sectors_kb << 1;
+	blk_queue_max_sectors(q, max_sectors_kb << 1);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -125,7 +125,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 {
-	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
 
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
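
The sysfs attributes trade in kilobytes while the queue stores 512-byte sectors, hence >> 1 on show and << 1 on store (two sectors per KB). A userspace sketch reading the corresponding attribute (the device name sda is an example):

	#include <stdio.h>

	int main(void)
	{
		unsigned long kb;
		FILE *f = fopen("/sys/block/sda/queue/max_sectors_kb", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &kb) != 1) {
			fclose(f);
			return 1;
		}
		printf("max_sectors_kb=%lu (= %lu sectors)\n", kb, kb << 1);
		fclose(f);
		return 0;
	}
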
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 9eaa1940273..df18a156d01 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
-					 bdev_get_queue(bdev)->max_sectors);
+					 queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET: /* compatible, but no compat_ptr (!) */
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))
diff --git a/block/ioctl.c b/block/ioctl.c
index 7aa97f65da8..500e4c73cc5 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 		bio->bi_private = &wait;
 		bio->bi_sector = start;
 
-		if (len > q->max_hw_sectors) {
-			bio->bi_size = q->max_hw_sectors << 9;
-			len -= q->max_hw_sectors;
-			start += q->max_hw_sectors;
+		if (len > queue_max_hw_sectors(q)) {
+			bio->bi_size = queue_max_hw_sectors(q) << 9;
+			len -= queue_max_hw_sectors(q);
+			start += queue_max_hw_sectors(q);
 		} else {
 			bio->bi_size = len << 9;
 			len = 0;
@@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKSSZGET: /* get block device hardware sector size */
 		return put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
-		return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET:
 	case BLKFRASET:
 		if(!capable(CAP_SYS_ADMIN))
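
BLKSECTGET reports the soft max_sectors limit to userspace, still squeezed through an unsigned short. A minimal userspace sketch (the device path is an example):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* BLKSECTGET */

	int main(void)
	{
		unsigned short sectors;
		int fd = open("/dev/sda", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, BLKSECTGET, &sectors) == 0)
			printf("max %u sectors (%u KB) per request\n",
			       sectors, sectors >> 1);
		close(fd);
		return 0;
	}
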
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a9670dd4b5d..5f8e798ede4 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
 
 static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
-	unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+	unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
 
 	return put_user(val, p);
 }
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 
 	if (size < 0)
 		return -EINVAL;
-	if (size > (q->max_sectors << 9))
-		size = q->max_sectors << 9;
+	if (size > (queue_max_sectors(q) << 9))
+		size = queue_max_sectors(q) << 9;
 
 	q->sg_reserved_size = size;
 	return 0;
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->cmd_len > BLK_MAX_CDB)
 		return -EINVAL;
 
-	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
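
SG_IO requests whose dxfer_len exceeds queue_max_hw_sectors(q) worth of bytes fail with EIO, so userspace should size transfers against the queue limit. A hedged sketch of the same guard from the submitter's side (sg_io_hdr comes from scsi/sg.h; max_hw_sectors would be read from sysfs max_hw_sectors_kb, times two):

	#include <scsi/sg.h>

	/* Hypothetical pre-check before issuing an SG_IO request. */
	static int dxfer_len_ok(const struct sg_io_hdr *hdr,
				unsigned int max_hw_sectors)
	{
		return hdr->dxfer_len <= (max_hw_sectors << 9);	/* sectors -> bytes */
	}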