Commit c0e473a

Darrick J. Wong authored and Jens Axboe committed
block: fix race between set_blocksize and read paths
With the new large sector size support, it's now the case that
set_blocksize can change i_blksize and the folio order in a manner that
conflicts with a concurrent reader and causes a kernel crash.

Specifically, let's say that udev-worker calls libblkid to detect the
labels on a block device.  The read call can create an order-0 folio to
read the first 4096 bytes from the disk.  But then udev is preempted.

Next, someone tries to mount an 8k-sectorsize filesystem from the same
block device.  The filesystem calls set_blksize, which sets i_blksize to
8192 and the minimum folio order to 1.

Now udev resumes, still holding the order-0 folio it allocated.  It then
tries to schedule a read bio and do_mpage_readahead tries to create
bufferheads for the folio.  Unfortunately, blocks_per_folio == 0 because
the page size is 4096 but the blocksize is 8192 so no bufferheads are
attached and the bh walk never sets bdev.  We then submit the bio with a
NULL block device and crash.

Therefore, truncate the page cache after flushing but before updating
i_blksize.  However, that's not enough -- we also need to lock out file
IO and page faults during the update.  Take both the i_rwsem and the
invalidate_lock in exclusive mode for invalidations, and in shared mode
for read/write operations.  I don't know if this is the correct fix, but
xfs/259 found it.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Link: https://lore.kernel.org/r/174543795699.4139148.2086129139322431423.stgit@frogsfrogsfrogs
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 81dd1fe commit c0e473a
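
Editor's note: the "zero blocks per folio" failure described in the message is plain
integer arithmetic. The stand-alone sketch below (illustrative variable names only,
not the kernel's mpage/buffer_head code) shows how a stale 4096-byte order-0 folio
combined with the new 8192-byte block size (i_blkbits == 13) truncates the per-folio
block count to zero:

#include <stdio.h>

int main(void)
{
        unsigned int blkbits = 13;              /* blksize_bits(8192) after set_blocksize */
        unsigned int folio_size = 4096;         /* stale order-0 folio from the earlier read */
        unsigned int blocks_per_folio = folio_size >> blkbits;  /* 4096 >> 13 == 0 */

        /* With zero blocks per folio, no bufferheads get attached, the bh
         * walk never sets bdev, and the bio is submitted with a NULL
         * block device -- the crash this commit fixes. */
        printf("blocks per folio = %u\n", blocks_per_folio);
        return 0;
}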

File tree

4 files changed: +43, -1 lines changed

block/bdev.c (17 additions, 0 deletions)

@@ -169,10 +169,27 @@ int set_blocksize(struct file *file, int size)
 
 	/* Don't change the size if it is same as current */
 	if (inode->i_blkbits != blksize_bits(size)) {
+		/*
+		 * Flush and truncate the pagecache before we reconfigure the
+		 * mapping geometry because folio sizes are variable now. If a
+		 * reader has already allocated a folio whose size is smaller
+		 * than the new min_order but invokes readahead after the new
+		 * min_order becomes visible, readahead will think there are
+		 * "zero" blocks per folio and crash. Take the inode and
+		 * invalidation locks to avoid racing with
+		 * read/write/fallocate.
+		 */
+		inode_lock(inode);
+		filemap_invalidate_lock(inode->i_mapping);
+
 		sync_blockdev(bdev);
+		kill_bdev(bdev);
+
 		inode->i_blkbits = blksize_bits(size);
 		mapping_set_folio_min_order(inode->i_mapping, get_order(size));
 		kill_bdev(bdev);
+		filemap_invalidate_unlock(inode->i_mapping);
+		inode_unlock(inode);
 	}
 	return 0;
 }
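
Editor's note: read together with the block/fops.c and block/ioctl.c hunks below, the
patch applies one consistent locking discipline to the block device inode. The
following is a condensed, hand-written sketch of that protocol (not a verbatim
excerpt from any single file):

/* Invalidation side (set_blocksize, fallocate, discard, secure erase,
 * zeroout, zone reset): take both locks exclusively, in this order. */
inode_lock(inode);                              /* i_rwsem, exclusive */
filemap_invalidate_lock(inode->i_mapping);      /* invalidate_lock, exclusive */
/* ... flush and truncate the pagecache, update i_blkbits and the
 * minimum folio order ... */
filemap_invalidate_unlock(inode->i_mapping);
inode_unlock(inode);

/* I/O side (buffered reads and writes through the bdev pagecache):
 * hold i_rwsem shared so they cannot overlap a geometry change. */
inode_lock_shared(bd_inode);
/* ... filemap_read() or blkdev_buffered_write() ... */
inode_unlock_shared(bd_inode);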

block/blk-zoned.c (4 additions, 1 deletion)

@@ -343,6 +343,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
 		op = REQ_OP_ZONE_RESET;
 
 		/* Invalidate the page cache, including dirty pages. */
+		inode_lock(bdev->bd_mapping->host);
 		filemap_invalidate_lock(bdev->bd_mapping);
 		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
 		if (ret)

@@ -364,8 +365,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
 	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
 
 fail:
-	if (cmd == BLKRESETZONE)
+	if (cmd == BLKRESETZONE) {
 		filemap_invalidate_unlock(bdev->bd_mapping);
+		inode_unlock(bdev->bd_mapping->host);
+	}
 
 	return ret;
 }

block/fops.c (16 additions, 0 deletions)

@@ -746,7 +746,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			ret = direct_write_fallback(iocb, from, ret,
 					blkdev_buffered_write(iocb, from));
 	} else {
+		/*
+		 * Take i_rwsem and invalidate_lock to avoid racing with
+		 * set_blocksize changing i_blkbits/folio order and punching
+		 * out the pagecache.
+		 */
+		inode_lock_shared(bd_inode);
 		ret = blkdev_buffered_write(iocb, from);
+		inode_unlock_shared(bd_inode);
 	}
 
 	if (ret > 0)

@@ -757,6 +764,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
 	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
 	loff_t size = bdev_nr_bytes(bdev);
 	loff_t pos = iocb->ki_pos;

@@ -793,7 +801,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			goto reexpand;
 	}
 
+	/*
+	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
+	 * changing i_blkbits/folio order and punching out the pagecache.
+	 */
+	inode_lock_shared(bd_inode);
 	ret = filemap_read(iocb, to, ret);
+	inode_unlock_shared(bd_inode);
 
 reexpand:
 	if (unlikely(shorted))

@@ -836,6 +850,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
+	inode_lock(inode);
 	filemap_invalidate_lock(inode->i_mapping);
 
 	/*

@@ -868,6 +883,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 
 fail:
 	filemap_invalidate_unlock(inode->i_mapping);
+	inode_unlock(inode);
 	return error;
 }
 
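Editor's note: for context on how the racing reader is reached, the udev-worker/libblkid
scan from the commit message is just buffered I/O on the device node. A minimal
userspace sketch of that access pattern (illustrative only, not the actual xfs/259
reproducer) is shown below; racing it against a mount that switches the same device to
an 8k sector size is what previously produced the NULL-bdev bio.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        char buf[4096];
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s /dev/DEVICE\n", argv[0]);
                return 1;
        }

        /* Buffered (non-O_DIRECT) read of the first 4096 bytes -- the same
         * access pattern a label scan performs.  This populates the bdev
         * pagecache with an order-0 folio via blkdev_read_iter(). */
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (read(fd, buf, sizeof(buf)) < 0)
                perror("read");
        close(fd);
        return 0;
}

With the hunks above applied, such a read holds the inode lock shared across
filemap_read(), and set_blocksize() truncates the pagecache while holding both locks
exclusively, so a reader can no longer be caught holding a folio whose order predates
the new block size.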

block/ioctl.c (6 additions, 0 deletions)

@@ -142,6 +142,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
 	if (err)
 		return err;
 
+	inode_lock(bdev->bd_mapping->host);
 	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 	if (err)

@@ -174,6 +175,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
 	blk_finish_plug(&plug);
 fail:
 	filemap_invalidate_unlock(bdev->bd_mapping);
+	inode_unlock(bdev->bd_mapping->host);
 	return err;
 }
 
@@ -199,12 +201,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
 	    end > bdev_nr_bytes(bdev))
 		return -EINVAL;
 
+	inode_lock(bdev->bd_mapping->host);
 	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, end - 1);
 	if (!err)
 		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
 						GFP_KERNEL);
 	filemap_invalidate_unlock(bdev->bd_mapping);
+	inode_unlock(bdev->bd_mapping->host);
 	return err;
 }
 
@@ -236,6 +240,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
 		return -EINVAL;
 
 	/* Invalidate the page cache, including dirty pages */
+	inode_lock(bdev->bd_mapping->host);
 	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, end);
 	if (err)

@@ -246,6 +251,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
 
 fail:
 	filemap_invalidate_unlock(bdev->bd_mapping);
+	inode_unlock(bdev->bd_mapping->host);
 	return err;
 }
 