Skip to content

Commit cc95425

Browse files
Christoph Hellwig authored and brauner committed
iomap: move all remaining per-folio logic into iomap_writepage_map
Move the tracepoint and the iomap check from iomap_do_writepage into iomap_writepage_map. This keeps all logic in one places, and leaves iomap_do_writepage just as the wrapper for the callback conventions of write_cache_pages, which will go away when that is converted to an iterator. Signed-off-by: Christoph Hellwig <hch@lst.de> Link: https://lore.kernel.org/r/20231207072710.176093-7-hch@lst.de Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Christian Brauner <brauner@kernel.org>
1 parent e3a491a commit cc95425

File tree

1 file changed

+11
-23
lines changed

1 file changed

+11
-23
lines changed

fs/iomap/buffered-io.c

Lines changed: 11 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1832,19 +1832,25 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
18321832
* At the end of a writeback pass, there will be a cached ioend remaining on the
18331833
* writepage context that the caller will need to submit.
18341834
*/
1835-
static int
1836-
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1837-
struct writeback_control *wbc, struct inode *inode,
1838-
struct folio *folio, u64 end_pos)
1835+
static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1836+
struct writeback_control *wbc, struct folio *folio)
18391837
{
18401838
struct iomap_folio_state *ifs = folio->private;
1839+
struct inode *inode = folio->mapping->host;
18411840
struct iomap_ioend *ioend, *next;
18421841
unsigned len = i_blocksize(inode);
18431842
unsigned nblocks = i_blocks_per_folio(inode, folio);
18441843
u64 pos = folio_pos(folio);
1844+
u64 end_pos = pos + folio_size(folio);
18451845
int error = 0, count = 0, i;
18461846
LIST_HEAD(submit_list);
18471847

1848+
trace_iomap_writepage(inode, pos, folio_size(folio));
1849+
1850+
if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
1851+
folio_unlock(folio);
1852+
return 0;
1853+
}
18481854
WARN_ON_ONCE(end_pos <= pos);
18491855

18501856
if (!ifs && nblocks > 1) {
@@ -1944,28 +1950,10 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
19441950
return error;
19451951
}
19461952

1947-
/*
1948-
* Write out a dirty page.
1949-
*
1950-
* For delalloc space on the page, we need to allocate space and flush it.
1951-
* For unwritten space on the page, we need to start the conversion to
1952-
* regular allocated space.
1953-
*/
19541953
static int iomap_do_writepage(struct folio *folio,
19551954
struct writeback_control *wbc, void *data)
19561955
{
1957-
struct iomap_writepage_ctx *wpc = data;
1958-
struct inode *inode = folio->mapping->host;
1959-
u64 end_pos = folio_pos(folio) + folio_size(folio);
1960-
1961-
trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
1962-
1963-
if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
1964-
folio_unlock(folio);
1965-
return 0;
1966-
}
1967-
1968-
return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
1956+
return iomap_writepage_map(data, wbc, folio);
19691957
}
19701958

19711959
int

0 commit comments

Comments (0)