Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--	fs/f2fs/data.c	259	
1 file changed, 166 insertions(+), 93 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index be4da52604ed..d1e83f119338 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -27,6 +27,7 @@
 #include "segment.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 #define NUM_PREALLOC_POST_READ_CTXS	128
 
@@ -202,7 +203,7 @@ static void f2fs_verify_bio(struct bio *bio)
 		dic = (struct decompress_io_ctx *)page_private(page);
 
 		if (dic) {
-			if (atomic_dec_return(&dic->pending_pages))
+			if (atomic_dec_return(&dic->verity_pages))
 				continue;
 			f2fs_verify_pages(dic->rpages,
 					dic->cluster_size);
@@ -471,6 +472,8 @@ static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
 	 */
 	if (!fio || !fio->encrypted_page)
 		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
+	else if (fscrypt_inode_should_skip_dm_default_key(inode))
+		bio_set_skip_dm_default_key(bio);
 }
 
 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -482,7 +485,9 @@ static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
 	 * read/write raw data without encryption.
 	 */
 	if (fio && fio->encrypted_page)
-		return !bio_has_crypt_ctx(bio);
+		return !bio_has_crypt_ctx(bio) &&
+			(bio_should_skip_dm_default_key(bio) ==
+			 fscrypt_inode_should_skip_dm_default_key(inode));
 
 	return fscrypt_mergeable_bio(bio, inode, next_idx);
 }
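
Restated outside the diff: once fio->encrypted_page is set, the data was already encrypted at the fs level, so a bio is only mergeable when it carries no inline-crypto context and agrees with the inode on the dm-default-key skip policy. A condensed sketch of the predicate (bio_should_skip_dm_default_key() and fscrypt_inode_should_skip_dm_default_key() come from the Android dm-default-key patches used by this hunk, not mainline):

	/* Sketch: merge rule for pre-encrypted f2fs pages. */
	bool mergeable = !bio_has_crypt_ctx(bio) &&	/* no inline-crypto key attached */
		(bio_should_skip_dm_default_key(bio) ==
		 fscrypt_inode_should_skip_dm_default_key(inode));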
@@ -736,6 +741,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
 				block_t last_blkaddr, block_t cur_blkaddr)
 {
+	if (unlikely(sbi->max_io_bytes &&
+			bio->bi_iter.bi_size >= sbi->max_io_bytes))
+		return false;
 	if (last_blkaddr + 1 != cur_blkaddr)
 		return false;
 	return __same_bdev(sbi, cur_blkaddr, bio);
@@ -1027,7 +1035,8 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
 
 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 				      unsigned nr_pages, unsigned op_flag,
-				      pgoff_t first_idx, bool for_write)
+				      pgoff_t first_idx, bool for_write,
+				      bool for_verity)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct bio *bio;
@@ -1049,7 +1058,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 		post_read_steps |= 1 << STEP_DECRYPT;
 	if (f2fs_compressed_file(inode))
 		post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
-	if (f2fs_need_verity(inode, first_idx))
+	if (for_verity && f2fs_need_verity(inode, first_idx))
 		post_read_steps |= 1 << STEP_VERITY;
 
 	if (post_read_steps) {
@@ -1079,7 +1088,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 	struct bio *bio;
 
 	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
-					page->index, for_write);
+					page->index, for_write, true);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -1750,6 +1759,16 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
 	return true;
 }
 
+static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
+{
+	return (bytes >> inode->i_blkbits);
+}
+
+static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
+{
+	return (blks << inode->i_blkbits);
+}
+
 static int __get_data_block(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh, int create, int flag,
 			pgoff_t *next_pgofs, int seg_type, bool may_write)
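
The two new helpers are plain shifts by i_blkbits, mirroring the logical_to_blk()/blk_to_logical() pair they replace below. For illustration only, assuming the common 4 KiB f2fs block size (i_blkbits == 12, not something the patch itself guarantees):

	bytes_to_blks(inode, 8192);	/* 8192 >> 12 == 2 blocks */
	blks_to_bytes(inode, 3);	/* 3 << 12 == 12288 bytes */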
@@ -1758,7 +1777,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 	int err;
 
 	map.m_lblk = iblock;
-	map.m_len = bh->b_size >> inode->i_blkbits;
+	map.m_len = bytes_to_blks(inode, bh->b_size);
 	map.m_next_pgofs = next_pgofs;
 	map.m_next_extent = NULL;
 	map.m_seg_type = seg_type;
@@ -1768,20 +1787,11 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 	if (!err) {
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
-		bh->b_size = (u64)map.m_len << inode->i_blkbits;
+		bh->b_size = blks_to_bytes(inode, map.m_len);
 	}
 	return err;
 }
 
-static int get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create, int flag,
-			pgoff_t *next_pgofs)
-{
-	return __get_data_block(inode, iblock, bh_result, create,
-				flag, next_pgofs,
-				NO_CHECK_TYPE, create);
-}
-
 static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {
@@ -1800,24 +1810,6 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
 				false);
 }
 
-static int get_data_block_bmap(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
-{
-	return __get_data_block(inode, iblock, bh_result, create,
-				F2FS_GET_BLOCK_BMAP, NULL,
-				NO_CHECK_TYPE, create);
-}
-
-static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
-{
-	return (offset >> inode->i_blkbits);
-}
-
-static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
-{
-	return (blk << inode->i_blkbits);
-}
-
 static int f2fs_xattr_fiemap(struct inode *inode,
 				struct fiemap_extent_info *fieinfo)
 {
@@ -1843,7 +1835,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
 			return err;
 		}
 
-		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+		phys = blks_to_bytes(inode, ni.blk_addr);
 		offset = offsetof(struct f2fs_inode, i_addr) +
 				sizeof(__le32) * (DEF_ADDRS_PER_INODE -
 						get_inline_xattr_addrs(inode));
@@ -1875,7 +1867,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
 			return err;
 		}
 
-		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+		phys = blks_to_bytes(inode, ni.blk_addr);
 		len = inode->i_sb->s_blocksize;
 
 		f2fs_put_page(page, 1);
@@ -1913,7 +1905,7 @@ static loff_t max_inode_blocks(struct inode *inode)
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		u64 start, u64 len)
 {
-	struct buffer_head map_bh;
+	struct f2fs_map_blocks map;
 	sector_t start_blk, last_blk;
 	pgoff_t next_pgofs;
 	u64 logical = 0, phys = 0, size = 0;
@@ -1945,29 +1937,31 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		goto out;
 	}
 
-	if (logical_to_blk(inode, len) == 0)
-		len = blk_to_logical(inode, 1);
+	if (bytes_to_blks(inode, len) == 0)
+		len = blks_to_bytes(inode, 1);
 
-	start_blk = logical_to_blk(inode, start);
-	last_blk = logical_to_blk(inode, start + len - 1);
+	start_blk = bytes_to_blks(inode, start);
+	last_blk = bytes_to_blks(inode, start + len - 1);
 
 next:
-	memset(&map_bh, 0, sizeof(struct buffer_head));
-	map_bh.b_size = len;
+	memset(&map, 0, sizeof(map));
+	map.m_lblk = start_blk;
+	map.m_len = bytes_to_blks(inode, len);
+	map.m_next_pgofs = &next_pgofs;
+	map.m_seg_type = NO_CHECK_TYPE;
 
 	if (compr_cluster)
-		map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
+		map.m_len = cluster_size - 1;
 
-	ret = get_data_block(inode, start_blk, &map_bh, 0,
-					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
+	ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
 	if (ret)
 		goto out;
 
 	/* HOLE */
-	if (!buffer_mapped(&map_bh)) {
+	if (!(map.m_flags & F2FS_MAP_FLAGS)) {
 		start_blk = next_pgofs;
 
-		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+		if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
 					max_inode_blocks(inode)))
 			goto prep_next;
 
@@ -1993,9 +1987,9 @@ next:
 		compr_cluster = false;
 
 
-		logical = blk_to_logical(inode, start_blk - 1);
-		phys = blk_to_logical(inode, map_bh.b_blocknr);
-		size = blk_to_logical(inode, cluster_size);
+		logical = blks_to_bytes(inode, start_blk - 1);
+		phys = blks_to_bytes(inode, map.m_pblk);
+		size = blks_to_bytes(inode, cluster_size);
 
 		flags |= FIEMAP_EXTENT_ENCODED;
 
@@ -2007,20 +2001,20 @@ next:
 		goto prep_next;
 	}
 
-	if (map_bh.b_blocknr == COMPRESS_ADDR) {
+	if (map.m_pblk == COMPRESS_ADDR) {
 		compr_cluster = true;
 		start_blk++;
 		goto prep_next;
 	}
 
-	logical = blk_to_logical(inode, start_blk);
-	phys = blk_to_logical(inode, map_bh.b_blocknr);
-	size = map_bh.b_size;
+	logical = blks_to_bytes(inode, start_blk);
+	phys = blks_to_bytes(inode, map.m_pblk);
+	size = blks_to_bytes(inode, map.m_len);
 	flags = 0;
-	if (buffer_unwritten(&map_bh))
+	if (map.m_flags & F2FS_MAP_UNWRITTEN)
 		flags = FIEMAP_EXTENT_UNWRITTEN;
 
-	start_blk += logical_to_blk(inode, size);
+	start_blk += bytes_to_blks(inode, size);
 
 prep_next:
 	cond_resched();
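
Every buffer_head-to-f2fs_map_blocks conversion in this patch follows the same shape; a minimal sketch of one lookup with the field roles spelled out (field names and flags exactly as used in these hunks, consumer hypothetical):

	struct f2fs_map_blocks map;

	memset(&map, 0, sizeof(map));
	map.m_lblk = start_blk;				/* first logical block to map */
	map.m_len = bytes_to_blks(inode, len);		/* number of blocks to look up */
	map.m_next_pgofs = &next_pgofs;			/* out: where the next hole/extent begins */
	map.m_seg_type = NO_CHECK_TYPE;

	ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
	if (!ret && (map.m_flags & F2FS_MAP_FLAGS))
		handle_extent(map.m_pblk, map.m_len);	/* hypothetical consumer */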
@@ -2053,8 +2047,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 					bool is_readahead)
 {
 	struct bio *bio = *bio_ret;
-	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocksize = 1 << blkbits;
+	const unsigned blocksize = blks_to_bytes(inode, 1);
 	sector_t block_in_file;
 	sector_t last_block;
 	sector_t last_block_in_file;
@@ -2063,8 +2056,8 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 
 	block_in_file = (sector_t)page_index(page);
 	last_block = block_in_file + nr_pages;
-	last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
-							blkbits;
+	last_block_in_file = bytes_to_blks(inode,
+			f2fs_readpage_limit(inode) + blocksize - 1);
 	if (last_block > last_block_in_file)
 		last_block = last_block_in_file;
 
@@ -2133,7 +2126,7 @@ submit_and_realloc:
 	if (bio == NULL) {
 		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
 				is_readahead ? REQ_RAHEAD : 0, page->index,
-				false);
+				false, true);
 		if (IS_ERR(bio)) {
 			ret = PTR_ERR(bio);
 			bio = NULL;
@@ -2177,16 +2170,17 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	struct bio *bio = *bio_ret;
 	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
 	sector_t last_block_in_file;
-	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocksize = 1 << blkbits;
+	const unsigned blocksize = blks_to_bytes(inode, 1);
 	struct decompress_io_ctx *dic = NULL;
+	struct bio_post_read_ctx *ctx;
+	bool for_verity = false;
 	int i;
 	int ret = 0;
 
 	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
 
-	last_block_in_file = (f2fs_readpage_limit(inode) +
-					blocksize - 1) >> blkbits;
+	last_block_in_file = bytes_to_blks(inode,
+			f2fs_readpage_limit(inode) + blocksize - 1);
 
 	/* get rid of pages beyond EOF */
 	for (i = 0; i < cc->cluster_size; i++) {
@@ -2245,10 +2239,29 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		goto out_put_dnode;
 	}
 
+	/*
+	 * It's possible to enable fsverity on the fly when handling a cluster,
+	 * which requires complicated error handling. Instead of adding more
+	 * complexity, let's give a rule where end_io post-processes fsverity
+	 * per cluster. In order to do that, we need to submit bio, if previous
+	 * bio sets a different post-process policy.
+	 */
+	if (fsverity_active(cc->inode)) {
+		atomic_set(&dic->verity_pages, cc->nr_cpages);
+		for_verity = true;
+
+		if (bio) {
+			ctx = bio->bi_private;
+			if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
+				__submit_bio(sbi, bio, DATA);
+				bio = NULL;
+			}
+		}
+	}
+
 	for (i = 0; i < dic->nr_cpages; i++) {
 		struct page *page = dic->cpages[i];
 		block_t blkaddr;
-		struct bio_post_read_ctx *ctx;
 
 		blkaddr = data_blkaddr(dn.inode, dn.node_page,
 						dn.ofs_in_node + i + 1);
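
Together with the f2fs_verify_bio() hunk at the top, the new counter makes per-cluster verity a countdown: verity_pages starts at the cluster's compressed page count, and the page that drives it to zero verifies all raw pages at once. Condensed here for clarity (dic fields as in this patch; surrounding control flow omitted):

	/* setup in f2fs_read_multi_pages(): one count per compressed page */
	atomic_set(&dic->verity_pages, cc->nr_cpages);

	/* completion in f2fs_verify_bio(): the last finisher verifies the cluster */
	if (!atomic_dec_return(&dic->verity_pages))
		f2fs_verify_pages(dic->rpages, dic->cluster_size);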
@@ -2264,17 +2277,31 @@ submit_and_realloc:
 		if (!bio) {
 			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
 					is_readahead ? REQ_RAHEAD : 0,
-					page->index, for_write);
+					page->index, for_write, for_verity);
 			if (IS_ERR(bio)) {
+				unsigned int remained = dic->nr_cpages - i;
+				bool release = false;
+
 				ret = PTR_ERR(bio);
 				dic->failed = true;
-				if (!atomic_sub_return(dic->nr_cpages - i,
-							&dic->pending_pages)) {
+
+				if (for_verity) {
+					if (!atomic_sub_return(remained,
+							&dic->verity_pages))
+						release = true;
+				} else {
+					if (!atomic_sub_return(remained,
+							&dic->pending_pages))
+						release = true;
+				}
+
+				if (release) {
 					f2fs_decompress_end_io(dic->rpages,
 							cc->cluster_size, true,
 							false);
 					f2fs_free_dic(dic);
 				}
+
 				f2fs_put_dnode(&dn);
 				*bio_ret = NULL;
 				return ret;
@@ -3164,7 +3191,7 @@ static inline bool __should_serialize_io(struct inode *inode,
 	if (IS_NOQUOTA(inode))
 		return false;
 
-	if (f2fs_compressed_file(inode))
+	if (f2fs_need_compress_data(inode))
 		return true;
 	if (wbc->sync_mode != WB_SYNC_ALL)
 		return true;
@@ -3371,6 +3398,16 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 	block_t blkaddr = NULL_ADDR;
 	int err = 0;
 
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						MAX_TRACE_PATHBUF_LEN,
+						inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						current->pid, path,
+						current->comm);
+	}
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
 	if (!f2fs_is_checkpoint_ready(sbi)) {
@@ -3498,6 +3535,7 @@ static int f2fs_write_end(struct file *file,
 {
 	struct inode *inode = page->mapping->host;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_f2fs_write_end(inode, pos, len, copied);
 
 	/*
@@ -3631,6 +3669,29 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
 
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (rw == READ)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						MAX_TRACE_PATHBUF_LEN,
+						inode);
+		trace_android_fs_dataread_start(inode, offset,
+						count, current->pid, path,
+						current->comm);
+	}
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (rw == WRITE)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						MAX_TRACE_PATHBUF_LEN,
+						inode);
+		trace_android_fs_datawrite_start(inode, offset, count,
+						current->pid, path,
+						current->comm);
+	}
+
 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
 
@@ -3686,6 +3747,13 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	}
 
 out:
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (rw == READ))
+		trace_android_fs_dataread_end(inode, offset, count);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (rw == WRITE))
+		trace_android_fs_datawrite_end(inode, offset, count);
+
 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
 
 	return err;
@@ -3799,9 +3867,6 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
 {
 	struct inode *inode = mapping->host;
-	struct buffer_head tmp = {
-		.b_size = i_blocksize(inode),
-	};
 	sector_t blknr = 0;
 
 	if (f2fs_has_inline_data(inode))
@@ -3818,8 +3883,16 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
 	if (f2fs_compressed_file(inode)) {
 		blknr = f2fs_bmap_compress(inode, block);
 	} else {
-		if (!get_data_block_bmap(inode, block, &tmp, 0))
-			blknr = tmp.b_blocknr;
+		struct f2fs_map_blocks map;
+
+		memset(&map, 0, sizeof(map));
+		map.m_lblk = block;
+		map.m_len = 1;
+		map.m_next_pgofs = NULL;
+		map.m_seg_type = NO_CHECK_TYPE;
+
+		if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
+			blknr = map.m_pblk;
 	}
 out:
 	trace_f2fs_bmap(inode, block, blknr);
@@ -3895,7 +3968,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
 	sector_t highest_pblock = 0;
 	int nr_extents = 0;
 	unsigned long nr_pblocks;
-	unsigned long len;
+	u64 len;
 	int ret;
 
 	/*
@@ -3903,29 +3976,31 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
 	 * to be very smart.
 	 */
 	cur_lblock = 0;
-	last_lblock = logical_to_blk(inode, i_size_read(inode));
+	last_lblock = bytes_to_blks(inode, i_size_read(inode));
 	len = i_size_read(inode);
 
 	while (cur_lblock <= last_lblock && cur_lblock < sis->max) {
-		struct buffer_head map_bh;
+		struct f2fs_map_blocks map;
 		pgoff_t next_pgofs;
 
 		cond_resched();
 
-		memset(&map_bh, 0, sizeof(struct buffer_head));
-		map_bh.b_size = len - cur_lblock;
+		memset(&map, 0, sizeof(map));
+		map.m_lblk = cur_lblock;
+		map.m_len = bytes_to_blks(inode, len) - cur_lblock;
+		map.m_next_pgofs = &next_pgofs;
+		map.m_seg_type = NO_CHECK_TYPE;
 
-		ret = get_data_block(inode, cur_lblock, &map_bh, 0,
-					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
+		ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
 		if (ret)
 			goto err_out;
 
 		/* hole */
-		if (!buffer_mapped(&map_bh))
+		if (!(map.m_flags & F2FS_MAP_FLAGS))
 			goto err_out;
 
-		pblock = map_bh.b_blocknr;
-		nr_pblocks = logical_to_blk(inode, map_bh.b_size);
+		pblock = map.m_pblk;
+		nr_pblocks = map.m_len;
 
 		if (cur_lblock + nr_pblocks >= sis->max)
 			nr_pblocks = sis->max - cur_lblock;
@@ -3968,7 +4043,6 @@ static int check_swap_activate(struct swap_info_struct *sis,
 	struct inode *inode = mapping->host;
 	unsigned blocks_per_page;
 	unsigned long page_no;
-	unsigned blkbits;
 	sector_t probe_block;
 	sector_t last_block;
 	sector_t lowest_block = -1;
@@ -3979,8 +4053,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
 	if (PAGE_SIZE == F2FS_BLKSIZE)
 		return check_swap_activate_fast(sis, swap_file, span);
 
-	blkbits = inode->i_blkbits;
-	blocks_per_page = PAGE_SIZE >> blkbits;
+	blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);
 
 	/*
 	 * Map all the blocks into the extent list. This code doesn't try
@@ -3988,7 +4061,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
 	 */
 	probe_block = 0;
 	page_no = 0;
-	last_block = i_size_read(inode) >> blkbits;
+	last_block = bytes_to_blks(inode, i_size_read(inode));
 	while ((probe_block + blocks_per_page) <= last_block &&
 			page_no < sis->max) {
 		unsigned block_in_page;
@@ -4028,7 +4101,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
 		}
 	}
 
-	first_block >>= (PAGE_SHIFT - blkbits);
+	first_block >>= (PAGE_SHIFT - inode->i_blkbits);
 	if (page_no) {	/* exclude the header page */
 		if (first_block < lowest_block)
 			lowest_block = first_block;