about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe2010-06-09 03:42:09 -0500
committerJens Axboe2010-08-07 11:13:00 -0500
commite2e1a148bc45855816ae6b4692ce29d0020fa22e (patch)
treefd5ec8a580d4333b471acfe50f6f92b4cc880087 /block
parent841fdffdd382722d33579a6aa1487e8a4e526dbd (diff)
downloadkernel-common-e2e1a148bc45855816ae6b4692ce29d0020fa22e.tar.gz
kernel-common-e2e1a148bc45855816ae6b4692ce29d0020fa22e.tar.xz
kernel-common-e2e1a148bc45855816ae6b4692ce29d0020fa22e.zip
block: add sysfs knob for turning off disk entropy contributions
There are two reasons for doing this:

- On SSD disks, the completion times aren't as random as they are for
  rotational drives. So it's questionable whether they should contribute
  to the random pool in the first place.

- Calling add_disk_randomness() has a lot of overhead.

This adds /sys/block/<dev>/queue/add_random that will allow you to
switch off on a per-device basis. The default setting is on, so there
should be no functional changes from this patch.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c3
-rw-r--r--block/blk-sysfs.c28
2 files changed, 30 insertions, 1 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index f0640d7f800..b4131d29148 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2111,7 +2111,8 @@ static bool blk_update_bidi_request(struct request *rq, int error,
2111 blk_update_request(rq->next_rq, error, bidi_bytes)) 2111 blk_update_request(rq->next_rq, error, bidi_bytes))
2112 return true; 2112 return true;
2113 2113
2114 add_disk_randomness(rq->rq_disk); 2114 if (blk_queue_add_random(rq->q))
2115 add_disk_randomness(rq->rq_disk);
2115 2116
2116 return false; 2117 return false;
2117} 2118}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 306759bbdf1..58b53c354c2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -250,6 +250,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
250 return ret; 250 return ret;
251} 251}
252 252
253static ssize_t queue_random_show(struct request_queue *q, char *page)
254{
255 return queue_var_show(blk_queue_add_random(q), page);
256}
257
258static ssize_t queue_random_store(struct request_queue *q, const char *page,
259 size_t count)
260{
261 unsigned long val;
262 ssize_t ret = queue_var_store(&val, page, count);
263
264 spin_lock_irq(q->queue_lock);
265 if (val)
266 queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
267 else
268 queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
269 spin_unlock_irq(q->queue_lock);
270
271 return ret;
272}
273
253static ssize_t queue_iostats_show(struct request_queue *q, char *page) 274static ssize_t queue_iostats_show(struct request_queue *q, char *page)
254{ 275{
255 return queue_var_show(blk_queue_io_stat(q), page); 276 return queue_var_show(blk_queue_io_stat(q), page);
@@ -374,6 +395,12 @@ static struct queue_sysfs_entry queue_iostats_entry = {
374 .store = queue_iostats_store, 395 .store = queue_iostats_store,
375}; 396};
376 397
398static struct queue_sysfs_entry queue_random_entry = {
399 .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
400 .show = queue_random_show,
401 .store = queue_random_store,
402};
403
377static struct attribute *default_attrs[] = { 404static struct attribute *default_attrs[] = {
378 &queue_requests_entry.attr, 405 &queue_requests_entry.attr,
379 &queue_ra_entry.attr, 406 &queue_ra_entry.attr,
@@ -394,6 +421,7 @@ static struct attribute *default_attrs[] = {
394 &queue_nomerges_entry.attr, 421 &queue_nomerges_entry.attr,
395 &queue_rq_affinity_entry.attr, 422 &queue_rq_affinity_entry.attr,
396 &queue_iostats_entry.attr, 423 &queue_iostats_entry.attr,
424 &queue_random_entry.attr,
397 NULL, 425 NULL,
398}; 426};
399 427