about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMartin K. Petersen2009-11-10 04:50:21 -0600
committerJens Axboe2009-11-10 04:50:21 -0600
commit86b37281411cf1e9bc0a6b5406c45edb7bd9ea5d (patch)
tree729db57dd52054af1bc16b4afb131093dfc9d255 /block/blk-sysfs.c
parentcf7c25cf91f632a3528669fc0876e1fc8355ff9b (diff)
downloadkernel-common-86b37281411cf1e9bc0a6b5406c45edb7bd9ea5d.tar.gz
kernel-common-86b37281411cf1e9bc0a6b5406c45edb7bd9ea5d.tar.xz
kernel-common-86b37281411cf1e9bc0a6b5406c45edb7bd9ea5d.zip
block: Expose discard granularity
While SSDs track block usage on a per-sector basis, RAID arrays often have allocation blocks that are bigger. Allow the discard granularity and alignment to be set and teach the topology stacking logic how to handle them. Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r-- block/blk-sysfs.c 22
1 file changed, 22 insertions, 0 deletions
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8a6d81afb28..3147145edc1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -126,6 +126,16 @@ static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
126 return queue_var_show(queue_io_opt(q), page); 126 return queue_var_show(queue_io_opt(q), page);
127} 127}
128 128
129static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
130{
131 return queue_var_show(q->limits.discard_granularity, page);
132}
133
134static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
135{
136 return queue_var_show(q->limits.max_discard_sectors << 9, page);
137}
138
129static ssize_t 139static ssize_t
130queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) 140queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
131{ 141{
@@ -293,6 +303,16 @@ static struct queue_sysfs_entry queue_io_opt_entry = {
293 .show = queue_io_opt_show, 303 .show = queue_io_opt_show,
294}; 304};
295 305
306static struct queue_sysfs_entry queue_discard_granularity_entry = {
307 .attr = {.name = "discard_granularity", .mode = S_IRUGO },
308 .show = queue_discard_granularity_show,
309};
310
311static struct queue_sysfs_entry queue_discard_max_entry = {
312 .attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
313 .show = queue_discard_max_show,
314};
315
296static struct queue_sysfs_entry queue_nonrot_entry = { 316static struct queue_sysfs_entry queue_nonrot_entry = {
297 .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, 317 .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
298 .show = queue_nonrot_show, 318 .show = queue_nonrot_show,
@@ -328,6 +348,8 @@ static struct attribute *default_attrs[] = {
328 &queue_physical_block_size_entry.attr, 348 &queue_physical_block_size_entry.attr,
329 &queue_io_min_entry.attr, 349 &queue_io_min_entry.attr,
330 &queue_io_opt_entry.attr, 350 &queue_io_opt_entry.attr,
351 &queue_discard_granularity_entry.attr,
352 &queue_discard_max_entry.attr,
331 &queue_nonrot_entry.attr, 353 &queue_nonrot_entry.attr,
332 &queue_nomerges_entry.attr, 354 &queue_nomerges_entry.attr,
333 &queue_rq_affinity_entry.attr, 355 &queue_rq_affinity_entry.attr,