Commit bc33723

Merge tag 'for-6.15-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs fixes from David Sterba:

 - subpage mode fixes:
     - access correct object (folio) when looking up bit offset
     - fix assertion condition for number of blocks per folio
     - fix upper boundary of locking range in hole punch

 - zoned fixes:
     - fix potential deadlock caught by lockdep when zone reporting and
       device freeze run in parallel
     - fix zone write pointer mismatch and NULL pointer dereference when
       metadata are converted from DUP to RAID1

 - fix error handling when reloc inode creation fails

 - in tree-checker, unify error code for header level check

 - block layer: add helpers to read zone capacity

* tag 'for-6.15-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: zoned: skip reporting zone for new block group
  block: introduce zone capacity helper
  btrfs: tree-checker: adjust error code for header level check
  btrfs: fix invalid inode pointer after failure to create reloc inode
  btrfs: zoned: return EIO on RAID1 block group write pointer mismatch
  btrfs: fix the ASSERT() inside GET_SUBPAGE_BITMAP()
  btrfs: avoid page_lockend underflow in btrfs_punch_hole_lock_range()
  btrfs: subpage: access correct object when reading bitmap start in subpage_calc_start_bit()
2 parents: e4b51cb + 866bafa

File tree: 6 files changed, 72 insertions(+), 31 deletions(-)


fs/btrfs/file.c

Lines changed: 7 additions & 2 deletions
@@ -2104,15 +2104,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
      * will always return true.
      * So here we need to do extra page alignment for
      * filemap_range_has_page().
+     *
+     * And do not decrease page_lockend right now, as it can be 0.
      */
     const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
-    const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+    const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE);
 
     while (1) {
         truncate_pagecache_range(inode, lockstart, lockend);
 
         lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                 cached_state);
+        /* The same page or adjacent pages. */
+        if (page_lockend <= page_lockstart)
+            break;
         /*
          * We can't have ordered extents in the range, nor dirty/writeback
          * pages, because we have locked the inode's VFS lock in exclusive
@@ -2124,7 +2129,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
          * we do, unlock the range and retry.
          */
         if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
-                        page_lockend))
+                        page_lockend - 1))
             break;
 
         unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
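
Why the removed "- 1" was dangerous: page_lockend is a u64, and when the punched range lies entirely within the first page of the file, round_down(lockend + 1, PAGE_SIZE) evaluates to 0, so subtracting 1 wrapped around to U64_MAX and filemap_range_has_page() was asked about an enormous bogus range. A minimal userspace sketch of the arithmetic (hypothetical offsets, 4K pages; not kernel code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define round_down(x, a) (((x) / (a)) * (a))

int main(void)
{
    /* A hole punch entirely inside the first page of the file. */
    uint64_t lockstart = 512, lockend = 2047;

    uint64_t page_lockstart = round_up(lockstart, PAGE_SIZE);  /* 4096 */
    uint64_t old_end = round_down(lockend + 1, PAGE_SIZE) - 1; /* 0 - 1 wraps to U64_MAX */
    uint64_t new_end = round_down(lockend + 1, PAGE_SIZE);     /* 0 */

    printf("old inclusive end: %llu (underflowed)\n", (unsigned long long)old_end);
    /* Fixed code keeps an exclusive bound: end <= start breaks out of the
     * loop early, and "- 1" is applied only to a known-nonzero bound. */
    printf("new exclusive end %llu <= start %llu -> break early\n",
           (unsigned long long)new_end, (unsigned long long)page_lockstart);
    return 0;
}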

fs/btrfs/relocation.c

Lines changed: 1 addition & 1 deletion
@@ -3803,7 +3803,7 @@ static noinline_for_stack struct inode *create_reloc_inode(
     if (ret) {
         if (inode)
             iput(&inode->vfs_inode);
-        inode = ERR_PTR(ret);
+        return ERR_PTR(ret);
     }
     return &inode->vfs_inode;
 }
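
What the one-line fix avoids: the old code stored ERR_PTR(ret) in inode and then fell through to "return &inode->vfs_inode;", i.e. the error pointer plus the offset of the vfs_inode member, a value that callers' IS_ERR() checks no longer recognize. A userspace model of the failure (stub types and a made-up 64-byte offset; the real offset is offsetof(struct btrfs_inode, vfs_inode)):

#include <stdio.h>

#define MAX_ERRNO   4095UL
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct vfs_inode_stub { int i; };
struct btrfs_inode_stub {
    char pad[64];                 /* vfs_inode does not sit at offset 0 */
    struct vfs_inode_stub vfs_inode;
};

int main(void)
{
    struct btrfs_inode_stub *inode = ERR_PTR(-12); /* pretend -ENOMEM */
    void *old_ret = &inode->vfs_inode;  /* what the buggy code handed back */

    printf("IS_ERR(error pointer) = %d\n", IS_ERR(inode));   /* 1 */
    printf("IS_ERR(old return)    = %d\n", IS_ERR(old_ret)); /* 0: looks valid */
    return 0;
}

Returning ERR_PTR(ret) directly, before the member address is ever formed, keeps the error visible to IS_ERR() in the callers.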

fs/btrfs/subpage.c

Lines changed: 2 additions & 2 deletions
@@ -204,7 +204,7 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
             btrfs_blocks_per_folio(fs_info, folio);         \
                                                             \
     btrfs_subpage_assert(fs_info, folio, start, len);       \
-    __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
+    __start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits; \
     __start_bit += blocks_per_folio * btrfs_bitmap_nr_##name; \
     __start_bit;                                             \
 })
@@ -666,7 +666,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
             btrfs_blocks_per_folio(fs_info, folio);         \
     const struct btrfs_subpage *subpage = folio_get_private(folio); \
                                                             \
-    ASSERT(blocks_per_folio < BITS_PER_LONG);                \
+    ASSERT(blocks_per_folio <= BITS_PER_LONG);               \
     *dst = bitmap_read(subpage->bitmaps,                     \
                blocks_per_folio * btrfs_bitmap_nr_##name,    \
                blocks_per_folio);                            \
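
The subpage_calc_start_bit() fix matters once folios are larger than a page: offset_in_page() masks the position to a single page, discarding which page of the folio the block lives in, so the computed bit index landed in the wrong slot of the per-folio bitmap. A sketch with hypothetical sizes (4K blocks, a 16K folio; userspace stand-ins for the kernel helpers, which take a page/folio rather than a size):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE   4096ULL
#define FOLIO_SIZE  16384ULL    /* an order-2 large folio */

static uint64_t offset_in_page(uint64_t pos)  { return pos % PAGE_SIZE; }
static uint64_t offset_in_folio(uint64_t pos) { return pos % FOLIO_SIZE; }

int main(void)
{
    unsigned int sectorsize_bits = 12;      /* 4K blocks */
    uint64_t start = 3 * PAGE_SIZE + 1024;  /* block 3 of the folio */

    printf("offset_in_page  -> bit %llu (wrong: page index lost)\n",
           (unsigned long long)(offset_in_page(start) >> sectorsize_bits));
    printf("offset_in_folio -> bit %llu (matches the bitmap layout)\n",
           (unsigned long long)(offset_in_folio(start) >> sectorsize_bits));
    return 0;
}

The relaxed ASSERT in the second hunk covers the boundary case of a folio holding exactly BITS_PER_LONG blocks, which bitmap_read() handles, since it can read up to a full unsigned long.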

fs/btrfs/tree-checker.c

Lines changed: 1 addition & 1 deletion
@@ -2235,7 +2235,7 @@ int btrfs_verify_level_key(struct extent_buffer *eb,
         btrfs_err(fs_info,
 "tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
               eb->start, check->level, found_level);
-        return -EIO;
+        return -EUCLEAN;
     }
 
     if (!check->has_first_key)
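
For context: within btrfs, EUCLEAN ("Structure needs cleaning") is the error code used when metadata fails a consistency check, while EIO signals an actual I/O failure; this brings the header level check in line with the rest of the tree checker, per the merge summary ("unify error code for header level check"). How the two codes read from userspace on Linux:

#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
    /* EUCLEAN is Linux-specific; glibc exposes it via <errno.h>. */
    printf("EIO     = %d (%s)\n", EIO, strerror(EIO));
    printf("EUCLEAN = %d (%s)\n", EUCLEAN, strerror(EUCLEAN));
    return 0;
}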

fs/btrfs/zoned.c

Lines changed: 16 additions & 3 deletions
@@ -1277,7 +1277,7 @@ struct zone_info {
 
 static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
                 struct zone_info *info, unsigned long *active,
-                struct btrfs_chunk_map *map)
+                struct btrfs_chunk_map *map, bool new)
 {
     struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
     struct btrfs_device *device;
@@ -1307,6 +1307,8 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
         return 0;
     }
 
+    ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
+
     /* This zone will be used for allocation, so mark this zone non-empty. */
     btrfs_dev_clear_zone_empty(device, info->physical);
 
@@ -1319,6 +1321,18 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
      * to determine the allocation offset within the zone.
      */
     WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+
+    if (new) {
+        sector_t capacity;
+
+        capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
+        up_read(&dev_replace->rwsem);
+        info->alloc_offset = 0;
+        info->capacity = capacity << SECTOR_SHIFT;
+
+        return 0;
+    }
+
     nofs_flag = memalloc_nofs_save();
     ret = btrfs_get_dev_zone(device, info->physical, &zone);
     memalloc_nofs_restore(nofs_flag);
@@ -1588,7 +1602,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
     }
 
     for (i = 0; i < map->num_stripes; i++) {
-        ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
+        ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
         if (ret)
             goto out;
 
@@ -1659,7 +1673,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
          * stripe.
          */
         cache->alloc_offset = cache->zone_capacity;
-        ret = 0;
     }
 
 out:
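
Context for the new "if (new)" branch: a block group that is just being created sits on zones known to be empty (the added ASSERT documents exactly that), so the write pointer must be at the zone start and a zone report has nothing to teach us; returning early with alloc_offset = 0 and the cached bdev_zone_capacity() value avoids calling btrfs_get_dev_zone(), which is the zone-reporting path involved in the lockdep-reported deadlock with device freeze mentioned in the merge summary. One unit-handling detail worth spelling out: the block layer helper speaks 512-byte sectors while btrfs tracks byte offsets, hence the SECTOR_SHIFT conversions on both sides of the call. A worked example with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9  /* 512-byte block layer sectors */

int main(void)
{
    uint64_t physical = 1ULL << 30;             /* zone start in bytes (1 GiB) */
    uint64_t sector = physical >> SECTOR_SHIFT; /* 2097152, passed to the helper */
    uint64_t cap_sectors = 524288;              /* capacity the helper might return */
    uint64_t cap_bytes = cap_sectors << SECTOR_SHIFT; /* 268435456 bytes = 256 MiB */

    printf("sector=%llu, capacity=%llu bytes\n",
           (unsigned long long)sector, (unsigned long long)cap_bytes);
    return 0;
}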

include/linux/blkdev.h

Lines changed: 45 additions & 22 deletions
@@ -712,35 +712,13 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
            (q->limits.features & BLK_FEAT_ZONED);
 }
 
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
-    return disk->nr_zones;
-}
-bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
-    return 0;
-}
-static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
-{
-    return false;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
 static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
 {
     if (!blk_queue_is_zoned(disk->queue))
         return 0;
     return sector >> ilog2(disk->queue->limits.chunk_sectors);
 }
 
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
-    return disk_nr_zones(bdev->bd_disk);
-}
-
 static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
 {
     return bdev->bd_disk->queue->limits.max_open_zones;
@@ -847,6 +825,51 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
         (sb->s_blocksize_bits - SECTOR_SHIFT);
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+    return disk->nr_zones;
+}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
+/**
+ * disk_zone_capacity - returns the zone capacity of zone containing @sector
+ * @disk:   disk to work with
+ * @sector: sector number within the querying zone
+ *
+ * Returns the zone capacity of a zone containing @sector. @sector can be any
+ * sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+                                              sector_t sector)
+{
+    sector_t zone_sectors = disk->queue->limits.chunk_sectors;
+
+    if (sector + zone_sectors >= get_capacity(disk))
+        return disk->last_zone_capacity;
+    return disk->zone_capacity;
+}
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+                                              sector_t pos)
+{
+    return disk_zone_capacity(bdev->bd_disk, pos);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+    return 0;
+}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+    return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+    return disk_nr_zones(bdev->bd_disk);
+}
+
 int bdev_disk_changed(struct gendisk *disk, bool invalidate);
 
 void put_disk(struct gendisk *disk);
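
The one subtlety in disk_zone_capacity() is the final zone: on devices whose total capacity is not a multiple of the zone size, the last zone is a runt, and its usable capacity is cached separately in disk->last_zone_capacity. The "sector + zone_sectors >= get_capacity(disk)" test selects that cached value for sectors at the tail of the device without issuing a zone report. A userspace model with hypothetical geometry (zone size 524288 sectors, eight full zones plus a 262144-sector runt):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical zoned-device geometry, standing in for the gendisk fields. */
#define ZONE_SECTORS        524288ULL                    /* limits.chunk_sectors */
#define DISK_CAPACITY       (8 * ZONE_SECTORS + 262144ULL) /* get_capacity() */
#define ZONE_CAPACITY       499712ULL                    /* disk->zone_capacity */
#define LAST_ZONE_CAPACITY  262144ULL                    /* disk->last_zone_capacity */

static uint64_t zone_capacity(uint64_t sector)
{
    /* Mirrors the helper: sectors near the device end fall in the last zone. */
    if (sector + ZONE_SECTORS >= DISK_CAPACITY)
        return LAST_ZONE_CAPACITY;
    return ZONE_CAPACITY;
}

int main(void)
{
    printf("first zone:  %llu sectors usable\n",
           (unsigned long long)zone_capacity(0));
    printf("last sector: %llu sectors usable\n",
           (unsigned long long)zone_capacity(DISK_CAPACITY - 1));
    return 0;
}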
