@@ -824,12 +824,11 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 
 out_unlock:
         __iomap_put_folio(iter, pos, 0, folio);
-        iomap_write_failed(iter->inode, pos, len);
 
         return status;
 }
 
-static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
                 size_t copied, struct folio *folio)
 {
         flush_dcache_folio(folio);
@@ -846,14 +845,14 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
          * redo the whole thing.
          */
         if (unlikely(copied < len && !folio_test_uptodate(folio)))
-                return 0;
+                return false;
         iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
         iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
         filemap_dirty_folio(inode->i_mapping, folio);
-        return copied;
+        return true;
 }
 
-static size_t iomap_write_end_inline(const struct iomap_iter *iter,
+static void iomap_write_end_inline(const struct iomap_iter *iter,
                 struct folio *folio, loff_t pos, size_t copied)
 {
         const struct iomap *iomap = &iter->iomap;
@@ -868,59 +867,51 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
         kunmap_local(addr);
 
         mark_inode_dirty(iter->inode);
-        return copied;
 }
 
-/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
-static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * otherwise return false.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
                 size_t copied, struct folio *folio)
 {
         const struct iomap *srcmap = iomap_iter_srcmap(iter);
-        loff_t old_size = iter->inode->i_size;
-        size_t ret;
 
         if (srcmap->type == IOMAP_INLINE) {
-                ret = iomap_write_end_inline(iter, folio, pos, copied);
-        } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
-                ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
-                                copied, &folio->page, NULL);
-        } else {
-                ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
+                iomap_write_end_inline(iter, folio, pos, copied);
+                return true;
         }
 
-        /*
-         * Update the in-memory inode size after copying the data into the page
-         * cache.  It's up to the file system to write the updated size to disk,
-         * preferably after I/O completion so that no stale data is exposed.
-         */
-        if (pos + ret > old_size) {
-                i_size_write(iter->inode, pos + ret);
-                iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+        if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+                size_t bh_written;
+
+                bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
+                                len, copied, &folio->page, NULL);
+                WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+                return bh_written == copied;
         }
-        __iomap_put_folio(iter, pos, ret, folio);
 
-        if (old_size < pos)
-                pagecache_isize_extended(iter->inode, old_size, pos);
-        if (ret < len)
-                iomap_write_failed(iter->inode, pos + ret, len - ret);
-        return ret;
+        return __iomap_write_end(iter->inode, pos, len, copied, folio);
 }
 
 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 {
         loff_t length = iomap_length(iter);
         size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
         loff_t pos = iter->pos;
-        ssize_t written = 0;
+        ssize_t total_written = 0;
         long status = 0;
         struct address_space *mapping = iter->inode->i_mapping;
         unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
 
         do {
                 struct folio *folio;
+                loff_t old_size;
                 size_t offset;          /* Offset into folio */
                 size_t bytes;           /* Bytes to write to folio */
                 size_t copied;          /* Bytes copied from user */
+                size_t written;         /* Bytes have been written */
 
                 bytes = iov_iter_count(i);
 retry:
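The contract change above is all-or-nothing: iomap_write_end() now answers "did every copied byte make it into the pagecache?", and the byte count callers used to receive is recomputed at the call site. A minimal userspace sketch of that translation, assuming the folio is not uptodate (demo_write_end() and its arguments are hypothetical stand-ins, not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Stand-in for the reworked iomap_write_end(): all-or-nothing.
 * Mirrors __iomap_write_end(), where a short copy into a folio
 * that is not uptodate cannot be committed partially.
 */
static bool demo_write_end(size_t len, size_t copied, bool folio_uptodate)
{
        if (copied < len && !folio_uptodate)
                return false;   /* reject the whole write */
        return true;            /* all @copied bytes are in the pagecache */
}

int main(void)
{
        size_t len = 4096, copied = 1024;

        /* The caller-side translation now done in iomap_write_iter(). */
        size_t written = demo_write_end(len, copied, false) ? copied : 0;

        printf("written = %zu\n", written);     /* prints 0: rejected */
        return 0;
}

The buffer-head path collapses the same way: block_write_end() either commits everything or, on a short copy into a non-uptodate folio, treats it as a zero-length copy and returns 0, which is exactly the invariant the new WARN_ON_ONCE() encodes.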
@@ -950,8 +941,10 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
                 }
 
                 status = iomap_write_begin(iter, pos, bytes, &folio);
-                if (unlikely(status))
+                if (unlikely(status)) {
+                        iomap_write_failed(iter->inode, pos, bytes);
                         break;
+                }
                 if (iter->iomap.flags & IOMAP_F_STALE)
                         break;
 
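Pairing iomap_write_failed() with this error leg, instead of with iomap_write_begin() itself (see the first hunk), confines the punch-out of freshly reserved blocks to the buffered write path. The unshare and zero loops further down also call iomap_write_begin(), but they operate on existing data within EOF, where discarding extents on a begin failure would be wrong.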
@@ -963,37 +956,55 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
                 flush_dcache_folio(folio);
 
                 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
-                status = iomap_write_end(iter, pos, bytes, copied, folio);
+                written = iomap_write_end(iter, pos, bytes, copied, folio) ?
+                          copied : 0;
+
+                /*
+                 * Update the in-memory inode size after copying the data into
+                 * the page cache.  It's up to the file system to write the
+                 * updated size to disk, preferably after I/O completion so that
+                 * no stale data is exposed.  Only once that's done can we
+                 * unlock and release the folio.
+                 */
+                old_size = iter->inode->i_size;
+                if (pos + written > old_size) {
+                        i_size_write(iter->inode, pos + written);
+                        iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+                }
+                __iomap_put_folio(iter, pos, written, folio);
 
-                if (unlikely(copied != status))
-                        iov_iter_revert(i, copied - status);
+                if (old_size < pos)
+                        pagecache_isize_extended(iter->inode, old_size, pos);
 
                 cond_resched();
-                if (unlikely(status == 0)) {
+                if (unlikely(written == 0)) {
                         /*
                          * A short copy made iomap_write_end() reject the
                          * thing entirely.  Might be memory poisoning
                          * halfway through, might be a race with munmap,
                          * might be severe memory pressure.
                          */
+                        iomap_write_failed(iter->inode, pos, bytes);
+                        iov_iter_revert(i, copied);
+
                         if (chunk > PAGE_SIZE)
                                 chunk /= 2;
                         if (copied) {
                                 bytes = copied;
                                 goto retry;
                         }
                 } else {
-                        pos += status;
-                        written += status;
-                        length -= status;
+                        pos += written;
+                        total_written += written;
+                        length -= written;
                 }
         } while (iov_iter_count(i) && length);
 
         if (status == -EAGAIN) {
-                iov_iter_revert(i, written);
+                iov_iter_revert(i, total_written);
                 return -EAGAIN;
         }
-        return written ? written : status;
+        return total_written ? total_written : status;
 }
 
 ssize_t
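Two things stand out in the reworked loop. First, the i_size update and the folio release now happen directly in iomap_write_iter(), in the order the relocated comment insists on: size first, unlock and put after. Second, on a rejected write the iov_iter is reverted by the full @copied (nothing was committed, so there is no partial count to subtract) and the copy window shrinks before retrying. A compressed userspace model of that halve-and-retry logic, where try_copy() is a hypothetical stand-in for copy_folio_from_iter_atomic() and any short copy is assumed to be rejected outright (as happens when the folio is not uptodate):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Hypothetical copier: faults after 1000 bytes on the first pass. */
static size_t try_copy(size_t bytes)
{
        return bytes < 1000 ? bytes : 1000;
}

int main(void)
{
        size_t chunk = 16 * PAGE_SIZE;
        size_t bytes = chunk;

        for (;;) {
                size_t copied = try_copy(bytes);

                if (copied == bytes)
                        break;          /* whole window committed */
                /*
                 * Rejected: the kernel code reverts the iov_iter by
                 * @copied, tells the fs the range failed, halves the
                 * window, and retries with exactly what did copy.
                 */
                if (chunk > PAGE_SIZE)
                        chunk /= 2;
                bytes = copied;
        }
        printf("committed %zu bytes this iteration\n", bytes);
        return 0;
}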
@@ -1322,6 +1333,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
                 int status;
                 size_t offset;
                 size_t bytes = min_t(u64, SIZE_MAX, length);
+                bool ret;
 
                 status = iomap_write_begin(iter, pos, bytes, &folio);
                 if (unlikely(status))
@@ -1333,8 +1345,9 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
                 if (bytes > folio_size(folio) - offset)
                         bytes = folio_size(folio) - offset;
 
-                bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
-                if (WARN_ON_ONCE(bytes == 0))
+                ret = iomap_write_end(iter, pos, bytes, bytes, folio);
+                __iomap_put_folio(iter, pos, bytes, folio);
+                if (WARN_ON_ONCE(!ret))
                         return -EIO;
 
                 cond_resched();
@@ -1383,6 +1396,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                 int status;
                 size_t offset;
                 size_t bytes = min_t(u64, SIZE_MAX, length);
+                bool ret;
 
                 status = iomap_write_begin(iter, pos, bytes, &folio);
                 if (status)
@@ -1397,8 +1411,9 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                 folio_zero_range(folio, offset, bytes);
                 folio_mark_accessed(folio);
 
-                bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
-                if (WARN_ON_ONCE(bytes == 0))
+                ret = iomap_write_end(iter, pos, bytes, bytes, folio);
+                __iomap_put_folio(iter, pos, bytes, folio);
+                if (WARN_ON_ONCE(!ret))
                         return -EIO;
 
                 pos += bytes;
@@ -1958,18 +1973,13 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
         return error;
 }
 
-static int iomap_do_writepage(struct folio *folio,
-                struct writeback_control *wbc, void *data)
-{
-        return iomap_writepage_map(data, wbc, folio);
-}
-
 int
 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
                 struct iomap_writepage_ctx *wpc,
                 const struct iomap_writeback_ops *ops)
 {
-        int ret;
+        struct folio *folio = NULL;
+        int error;
 
         /*
          * Writeback from reclaim context should never happen except in the case
@@ -1980,8 +1990,9 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
                 return -EIO;
 
         wpc->ops = ops;
-        ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
-        return iomap_submit_ioend(wpc, ret);
+        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+                error = iomap_writepage_map(wpc, wbc, folio);
+        return iomap_submit_ioend(wpc, error);
 }
 EXPORT_SYMBOL_GPL(iomap_writepages);
 
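The last two hunks replace the write_cache_pages() callback indirection with writeback_iter(), which hands ->writepages one locked, dirty folio at a time and lets the loop body live where its state does. A hedged kernel-context sketch of the resulting shape (example_write_folio() is a made-up body standing in for whatever per-folio work a filesystem does; this shows the pattern, not a buildable module):

/*
 * writeback_iter() returns the next dirty folio for the writeback
 * described by @wbc.  Passing a NULL folio starts the sweep; the
 * caller stores each folio's writeback result back through *error,
 * and the iterator returns NULL once the range has been walked.
 */
static int example_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct folio *folio = NULL;     /* NULL kicks off the iteration */
        int error = 0;

        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                error = example_write_folio(folio, wbc);  /* hypothetical */

        return error;
}

iomap keeps its trailing iomap_submit_ioend() call so the ioend accumulated across folios is still flushed once the walk finishes.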