Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--	drivers/md/dm-table.c	183
1 file changed, 82 insertions, 101 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 729936139138..e8f24fca958c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -433,14 +433,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 {
 	int r;
 	dev_t dev;
+	unsigned int major, minor;
+	char dummy;
 	struct dm_dev_internal *dd;
 	struct dm_table *t = ti->table;
 
 	BUG_ON(!t);
 
-	dev = dm_get_dev_t(path);
-	if (!dev)
-		return -ENODEV;
+	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+		/* Extract the major/minor numbers */
+		dev = MKDEV(major, minor);
+		if (MAJOR(dev) != major || MINOR(dev) != minor)
+			return -EOVERFLOW;
+	} else {
+		dev = dm_get_dev_t(path);
+		if (!dev)
+			return -ENODEV;
+	}
 
 	dd = find_device(&t->devices, dev);
 	if (!dd) {
@@ -882,10 +891,10 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 }
 EXPORT_SYMBOL_GPL(dm_table_set_type);
 
-static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
-			       sector_t start, sector_t len, void *data)
+static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
 {
-	return bdev_dax_supported(dev->bdev, PAGE_SIZE);
+	return !bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -901,7 +910,7 @@ static bool dm_table_supports_dax(struct dm_table *t)
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
+		    ti->type->iterate_devices(ti, device_not_dax_capable, NULL))
 			return false;
 	}
 
@@ -1344,6 +1353,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+/*
+ * type->iterate_devices() should be called when the sanity check needs to
+ * iterate and check all underlying data devices. iterate_devices() will
+ * iterate all underlying data devices until it encounters a non-zero return
+ * code, returned by whether the input iterate_devices_callout_fn, or
+ * iterate_devices() itself internally.
+ *
+ * For some target type (e.g. dm-stripe), one call of iterate_devices() may
+ * iterate multiple underlying devices internally, in which case a non-zero
+ * return code returned by iterate_devices_callout_fn will stop the iteration
+ * in advance.
+ *
+ * Cases requiring _any_ underlying device supporting some kind of attribute,
+ * should use the iteration structure like dm_table_any_dev_attr(), or call
+ * it directly. @func should handle semantics of positive examples, e.g.
+ * capable of something.
+ *
+ * Cases requiring _all_ underlying devices supporting some kind of attribute,
+ * should use the iteration structure like dm_table_supports_nowait() or
+ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
+ * uses an @anti_func that handle semantics of counter examples, e.g. not
+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
+ */
+static bool dm_table_any_dev_attr(struct dm_table *t,
+				  iterate_devices_callout_fn func, void *data)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, func, data))
+			return true;
+	}
+
+	return false;
+}
+
 static int count_device(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
@@ -1380,13 +1429,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
 	return true;
 }
 
-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
-				 sector_t start, sector_t len, void *data)
+static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	enum blk_zoned_model *zoned_model = data;
 
-	return q && blk_queue_zoned_model(q) == *zoned_model;
+	return !q || blk_queue_zoned_model(q) != *zoned_model;
 }
 
 static bool dm_table_supports_zoned_model(struct dm_table *t,
@@ -1403,37 +1452,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
+		    ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
 			return false;
 	}
 
 	return true;
 }
 
-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
-				       sector_t start, sector_t len, void *data)
+static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+					   sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	unsigned int *zone_sectors = data;
 
-	return q && blk_queue_zone_sectors(q) == *zone_sectors;
-}
-
-static bool dm_table_matches_zone_sectors(struct dm_table *t,
-					  unsigned int zone_sectors)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
-			return false;
-	}
-
-	return true;
+	return !q || blk_queue_zone_sectors(q) != *zone_sectors;
 }
 
 static int validate_hardware_zoned_model(struct dm_table *table,
@@ -1453,7 +1485,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
 	if (!zone_sectors || !is_power_of_2(zone_sectors))
 		return -EINVAL;
 
-	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
+	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
 		DMERR("%s: zone sectors is not consistent across all devices",
 		      dm_device_name(table->md));
 		return -EINVAL;
@@ -1691,29 +1723,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
 	return false;
 }
 
-static int dm_table_supports_dax_write_cache(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti,
-				device_dax_write_cache_enabled, NULL))
-			return true;
-	}
-
-	return false;
-}
-
-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
-			    sector_t start, sector_t len, void *data)
+static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && blk_queue_nonrot(q);
+	return q && !blk_queue_nonrot(q);
 }
 
 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1724,29 +1739,12 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
-				   sector_t start, sector_t len, void *data)
+static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev,
+			     sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
-}
-
-static bool dm_table_all_devices_attribute(struct dm_table *t,
-					   iterate_devices_callout_fn func)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, func, NULL))
-			return false;
-	}
-
-	return true;
+	return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }
 
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1845,27 +1843,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
 	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
 }
 
-/*
- * If any underlying device requires stable pages, a table must require
- * them as well. Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
-static bool dm_table_requires_stable_pages(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
-			return true;
-	}
-
-	return false;
-}
-
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1893,24 +1870,24 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q);
 
-	if (dm_table_supports_dax_write_cache(t))
+	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
-	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	else
+	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
 	if (!dm_table_supports_write_zeroes(t))
 		q->limits.max_write_zeroes_sectors = 0;
 
-	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
-	else
+	if (dm_table_any_dev_attr(t, queue_no_sg_merge, NULL))
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+	else
+		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
 	dm_table_verify_integrity(t);
 
@@ -1919,8 +1896,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	/*
 	 * Some devices don't use blk_integrity but still want stable pages
 	 * because they do their own checksumming.
+	 * If any underlying device requires stable pages, a table must require
+	 * them as well. Only targets that support iterate_devices are considered:
+	 * don't want error, zero, etc to require stable pages.
 	 */
-	if (dm_table_requires_stable_pages(t))
+	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 	else
 		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
@@ -1931,7 +1911,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
 	 * have it set.
 	 */
-	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+	if (blk_queue_add_random(q) &&
+	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
 		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
 	/*
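
The pattern the patch converges on is simple once stated: every per-device callout now reports the negative property (device_not_dax_capable, device_is_rotational, queue_no_sg_merge, device_not_matches_zone_sectors), and the single helper dm_table_any_dev_attr() returns true as soon as any underlying device matches. An "all devices support X" check is then written as the negation of an "any device lacks X" query, which is what lets one helper replace dm_table_all_devices_attribute(), dm_table_supports_dax_write_cache(), dm_table_requires_stable_pages() and dm_table_matches_zone_sectors() above. The following standalone C sketch models that convention outside the kernel; struct fake_dev, fake_target, fake_table, callout_fn and any_dev_attr() are simplified stand-ins invented for illustration, not the dm-table definitions, and the program compiles as ordinary userspace C.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types -- illustration only. */
struct fake_dev { bool dax_capable; bool rotational; };

struct fake_target {
	struct fake_dev *devs;
	int ndevs;
};

struct fake_table {
	struct fake_target *targets;
	int ntargets;
};

/* Mirrors iterate_devices_callout_fn: a non-zero return stops the walk. */
typedef int (*callout_fn)(struct fake_dev *dev, void *data);

/* Negative-property callouts, following the patch's naming convention. */
static int device_not_dax_capable(struct fake_dev *dev, void *data)
{
	(void)data;
	return !dev->dax_capable;
}

static int device_is_rotational(struct fake_dev *dev, void *data)
{
	(void)data;
	return dev->rotational;
}

/*
 * Model of dm_table_any_dev_attr(): true if any underlying device
 * satisfies @func.  "All devices support X" is then expressed as
 * !any_dev_attr(t, device_not_x_capable, NULL).
 */
static bool any_dev_attr(struct fake_table *t, callout_fn func, void *data)
{
	for (int i = 0; i < t->ntargets; i++) {
		struct fake_target *ti = &t->targets[i];

		for (int d = 0; d < ti->ndevs; d++)
			if (func(&ti->devs[d], data))
				return true;
	}
	return false;
}

int main(void)
{
	struct fake_dev devs[] = {
		{ .dax_capable = true,  .rotational = false },
		{ .dax_capable = false, .rotational = true  },
	};
	struct fake_target tgt = { .devs = devs, .ndevs = 2 };
	struct fake_table tbl = { .targets = &tgt, .ntargets = 1 };

	/* All-devices check expressed as a negated any-device query. */
	bool all_dax = !any_dev_attr(&tbl, device_not_dax_capable, NULL);
	/* Any-device check used directly. */
	bool any_rotational = any_dev_attr(&tbl, device_is_rotational, NULL);

	printf("all DAX capable: %d, any rotational: %d\n",
	       all_dax, any_rotational);
	return 0;
}

With the mixed device set above, all_dax is false and any_rotational is true, mirroring why the patch flips QUEUE_FLAG_NONROT and the other table-wide flags off as soon as a single underlying device lacks the capability.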