@@ -425,7 +425,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
         struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 
         ASSERT(folio_pos(folio) <= start &&
-               start + len <= folio_pos(folio) + PAGE_SIZE);
+               start + len <= folio_pos(folio) + folio_size(folio));
 
         if (uptodate && btrfs_verify_folio(folio, start, len))
                 btrfs_folio_set_uptodate(fs_info, folio, start, len);
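
The recurring pattern in this patch: a folio's byte range is [folio_pos(folio), folio_pos(folio) + folio_size(folio)), which coincides with a PAGE_SIZE-wide range only for order-0 folios. Below is a minimal userspace sketch of the bounds check end_folio_read() now performs; the 16K folio, the check_range() helper, and the hard-coded 4096 are illustrative assumptions, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the ASSERT in end_folio_read(): the I/O range
 * [start, start + len) must lie inside the folio's byte range
 * [pos, pos + size). With large folios, size can be any
 * power-of-two multiple of the base page size, so bounding the
 * check at a hard-coded 4K PAGE_SIZE would reject valid I/O. */
static void check_range(uint64_t pos, uint64_t size,
                        uint64_t start, uint32_t len)
{
        assert(pos <= start && start + len <= pos + size);
}

int main(void)
{
        const uint64_t pos = 65536, size = 16384; /* a 16K folio at 64K */

        /* Valid for the 16K folio, but start + len exceeds pos + 4096,
         * so the old PAGE_SIZE-based bound would have fired. */
        check_range(pos, size, pos + 8192, 4096);
        printf("bounds check passed\n");
        return 0;
}
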
@@ -492,7 +492,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
                 return;
 
         ASSERT(folio_test_private(folio));
-        btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
+        btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), folio_size(folio));
 }
 
 /*
@@ -753,7 +753,7 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
 {
         struct btrfs_inode *inode = folio_to_inode(folio);
 
-        ASSERT(pg_offset + size <= PAGE_SIZE);
+        ASSERT(pg_offset + size <= folio_size(folio));
         ASSERT(bio_ctrl->end_io_func);
 
         if (bio_ctrl->bbio &&
@@ -935,7 +935,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
         struct inode *inode = folio->mapping->host;
         struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
         u64 start = folio_pos(folio);
-        const u64 end = start + PAGE_SIZE - 1;
+        const u64 end = start + folio_size(folio) - 1;
         u64 extent_offset;
         u64 last_byte = i_size_read(inode);
         struct extent_map *em;
@@ -1275,7 +1275,7 @@ static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bit
         unsigned int start_bit;
         unsigned int nbits;
 
-        ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
+        ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio));
         start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
         nbits = len >> fs_info->sectorsize_bits;
         ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
@@ -1293,7 +1293,7 @@ static bool find_next_delalloc_bitmap(struct folio *folio,
         unsigned int first_zero;
         unsigned int first_set;
 
-        ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
+        ASSERT(start >= folio_start && start < folio_start + folio_size(folio));
 
         start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
         first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
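
Both delalloc bitmap helpers convert byte offsets inside the folio to sector-granularity bit indices by shifting with fs_info->sectorsize_bits, so the assertions above are the only lines that need to learn the folio's real size. A small userspace sketch of that index arithmetic, assuming 4K sectors (sectorsize_bits = 12); btrfs takes the real value from the superblock.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned int sectorsize_bits = 12;   /* 4K sectors assumed */
        const uint64_t folio_start = 65536;        /* folio_pos(folio) */
        const uint64_t start = folio_start + 8192; /* delalloc range start */
        const uint32_t len = 12288;                /* delalloc range length */

        /* Same arithmetic as set_delalloc_bitmap(): one bit per sector,
         * indexed relative to the start of the folio. */
        unsigned int start_bit = (start - folio_start) >> sectorsize_bits;
        unsigned int nbits = len >> sectorsize_bits;

        printf("bits [%u, %u)\n", start_bit, start_bit + nbits); /* [2, 5) */
        return 0;
}
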
@@ -1495,7 +1495,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
                         delalloc_end = page_end;
                 /*
                  * delalloc_end is already one less than the total length, so
-                 * we don't subtract one from PAGE_SIZE
+                 * we don't subtract one from PAGE_SIZE.
                  */
                 delalloc_to_write +=
                         DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
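
The comment fix here is cosmetic, but the arithmetic it documents deserves one worked line: delalloc_end is an inclusive offset, so adding 1 yields the byte length of [page_start, delalloc_end], and DIV_ROUND_UP then counts whole pages with no further correction. A quick userspace check of that claim, assuming 4K pages.

#include <stdio.h>

/* Same rounding helper as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        const unsigned long page_start = 0;
        const unsigned long delalloc_end = 8191; /* inclusive: two 4K pages */

        /* +1 converts the inclusive end into a byte count first. */
        unsigned long pages = DIV_ROUND_UP(delalloc_end + 1 - page_start, 4096);

        printf("%lu pages\n", pages); /* prints 2 */
        return 0;
}
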
@@ -1761,7 +1761,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
                 goto done;
 
         ret = extent_writepage_io(inode, folio, folio_pos(folio),
-                                  PAGE_SIZE, bio_ctrl, i_size);
+                                  folio_size(folio), bio_ctrl, i_size);
         if (ret == 1)
                 return 0;
         if (ret < 0)
@@ -2488,8 +2488,8 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
         ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
 
         while (cur <= end) {
-                u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
-                u32 cur_len = cur_end + 1 - cur;
+                u64 cur_end;
+                u32 cur_len;
                 struct folio *folio;
 
                 folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
@@ -2499,13 +2499,18 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
                  * code is just in case, but shouldn't actually be run.
                  */
                 if (IS_ERR(folio)) {
+                        cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
+                        cur_len = cur_end + 1 - cur;
                         btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
                                                        cur, cur_len, false);
                         mapping_set_error(mapping, PTR_ERR(folio));
-                        cur = cur_end + 1;
+                        cur = cur_end;
                         continue;
                 }
 
+                cur_end = min_t(u64, folio_pos(folio) + folio_size(folio) - 1, end);
+                cur_len = cur_end + 1 - cur;
+
                 ASSERT(folio_test_locked(folio));
                 if (pages_dirty && folio != locked_folio)
                         ASSERT(folio_test_dirty(folio));
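
With cur_end and cur_len now computed per iteration, the loop sizes each step from the folio it actually found: cur_end is clamped to the smaller of the folio's last byte and end, so a large folio is consumed in one pass rather than PAGE_SIZE at a time. A userspace model of that stepping follows; folio_size_at() is a hypothetical stand-in for filemap_get_folio() that fixes every folio at 16K purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Stand-in for filemap_get_folio(): reports the position and size of
 * the folio covering offset cur. Every folio is 16K here; in the
 * kernel each folio can have a different order. */
static uint64_t folio_size_at(uint64_t cur, uint64_t *pos)
{
        *pos = cur & ~((uint64_t)16384 - 1);
        return 16384;
}

int main(void)
{
        const uint64_t start = 0, end = 65535; /* inclusive 64K range */
        uint64_t cur = start;

        while (cur <= end) {
                uint64_t pos;
                uint64_t size = folio_size_at(cur, &pos);
                uint64_t cur_end = MIN(pos + size - 1, end);
                uint32_t cur_len = (uint32_t)(cur_end + 1 - cur);

                printf("write [%llu, %llu] len %u\n",
                       (unsigned long long)cur,
                       (unsigned long long)cur_end, cur_len);
                cur = cur_end + 1; /* advance past this folio */
        }
        return 0;
}
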
@@ -2617,7 +2622,7 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
                                      struct folio *folio)
 {
         u64 start = folio_pos(folio);
-        u64 end = start + PAGE_SIZE - 1;
+        u64 end = start + folio_size(folio) - 1;
         bool ret;
 
         if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
@@ -2655,7 +2660,7 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
 bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
 {
         u64 start = folio_pos(folio);
-        u64 end = start + PAGE_SIZE - 1;
+        u64 end = start + folio_size(folio) - 1;
         struct btrfs_inode *inode = folio_to_inode(folio);
         struct extent_io_tree *io_tree = &inode->io_tree;
 