
Commit 4b90de5

Christoph Hellwig authored and cmaiolino committed
xfs: reduce context switches for synchronous buffered I/O
Currently all metadata I/O completions happen in the m_buf_workqueue workqueue. But for synchronous I/O (i.e. all buffer reads) there is no need for that, as there always is a caller in process context that is waiting for the I/O. Factor out the guts of xfs_buf_ioend into a separate helper and call it from xfs_buf_iowait to avoid an extra context switch to the workqueue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
1 parent 2d873ef commit 4b90de5
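For orientation, here is a minimal, hedged sketch of the completion-dispatch pattern the patch moves to. It is not the actual XFS code: all demo_* identifiers, the DEMO_ASYNC flag, and the stubbed-out bodies are invented for illustration, standing in for the real XBF_ASYNC, b_iowait, b_ioend_work, and m_buf_workqueue. The point it shows: the interrupt-time I/O completion only defers processing to a workqueue when the I/O is asynchronous; a synchronous caller is already sleeping in process context, so it is just woken via the completion and runs the completion processing itself.

/*
 * Simplified sketch of the pattern; demo_* names are illustrative only.
 */
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

struct demo_buf {
	unsigned int		flags;		/* DEMO_ASYNC set for async I/O */
#define DEMO_ASYNC		(1u << 0)
	int			error;
	struct work_struct	ioend_work;
	struct completion	iowait;
};

/* common completion processing; returns false if the I/O was resubmitted */
static bool __demo_buf_ioend(struct demo_buf *bp)
{
	/* error handling, retries, verifiers, flag clearing ... elided */
	return true;
}

static void demo_buf_release(struct demo_buf *bp)
{
	/* unlock / drop the buffer reference; elided in this sketch */
}

/* async completions still run from a workqueue, in process context */
static void demo_buf_ioend_work(struct work_struct *work)
{
	struct demo_buf *bp = container_of(work, struct demo_buf, ioend_work);

	if (__demo_buf_ioend(bp))
		demo_buf_release(bp);
}

/* called from the bio ->bi_end_io handler, possibly in interrupt context */
static void demo_buf_io_done(struct demo_buf *bp, struct workqueue_struct *wq)
{
	if (bp->flags & DEMO_ASYNC) {
		/* nobody is waiting: defer the processing to a workqueue */
		INIT_WORK(&bp->ioend_work, demo_buf_ioend_work);
		queue_work(wq, &bp->ioend_work);
	} else {
		/* a synchronous caller sleeps in demo_buf_iowait(): wake it */
		complete(&bp->iowait);
	}
}

/* synchronous callers wait here and do the completion work themselves */
static int demo_buf_iowait(struct demo_buf *bp)
{
	do {
		wait_for_completion(&bp->iowait);
	} while (!__demo_buf_ioend(bp));	/* false: I/O was resubmitted */

	return bp->error;
}

The do/while in the waiter mirrors the new xfs_buf_iowait() below: when __xfs_buf_ioend() reports that the error handler resubmitted the I/O, the resubmit path has already re-armed the completion via reinit_completion(&bp->b_iowait), so the caller simply waits again for the retried I/O.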

1 file changed, 27 insertions(+), 16 deletions(-)

fs/xfs/xfs_buf.c

@@ -1345,6 +1345,7 @@ xfs_buf_ioend_handle_error(
 resubmit:
 	xfs_buf_ioerror(bp, 0);
 	bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
+	reinit_completion(&bp->b_iowait);
 	xfs_buf_submit(bp);
 	return true;
 out_stale:
@@ -1355,8 +1356,9 @@ xfs_buf_ioend_handle_error(
 	return false;
 }
 
-static void
-xfs_buf_ioend(
+/* returns false if the caller needs to resubmit the I/O, else true */
+static bool
+__xfs_buf_ioend(
 	struct xfs_buf	*bp)
 {
 	trace_xfs_buf_iodone(bp, _RET_IP_);
@@ -1376,7 +1378,7 @@ xfs_buf_ioend(
 	}
 
 	if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
-		return;
+		return false;
 
 	/* clear the retry state */
 	bp->b_last_error = 0;
@@ -1397,7 +1399,15 @@ xfs_buf_ioend(
 
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
 			 _XBF_LOGRECOVERY);
+	return true;
+}
 
+static void
+xfs_buf_ioend(
+	struct xfs_buf	*bp)
+{
+	if (!__xfs_buf_ioend(bp))
+		return;
 	if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
 	else
@@ -1411,15 +1421,8 @@ xfs_buf_ioend_work(
 	struct xfs_buf		*bp =
 		container_of(work, struct xfs_buf, b_ioend_work);
 
-	xfs_buf_ioend(bp);
-}
-
-static void
-xfs_buf_ioend_async(
-	struct xfs_buf	*bp)
-{
-	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
-	queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+	if (__xfs_buf_ioend(bp))
+		xfs_buf_relse(bp);
 }
 
 void
@@ -1491,7 +1494,13 @@ xfs_buf_bio_end_io(
 	    XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
 		xfs_buf_ioerror(bp, -EIO);
 
-	xfs_buf_ioend_async(bp);
+	if (bp->b_flags & XBF_ASYNC) {
+		INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+		queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+	} else {
+		complete(&bp->b_iowait);
+	}
+
 	bio_put(bio);
 }
 
@@ -1568,9 +1577,11 @@ xfs_buf_iowait(
 {
 	ASSERT(!(bp->b_flags & XBF_ASYNC));
 
-	trace_xfs_buf_iowait(bp, _RET_IP_);
-	wait_for_completion(&bp->b_iowait);
-	trace_xfs_buf_iowait_done(bp, _RET_IP_);
+	do {
+		trace_xfs_buf_iowait(bp, _RET_IP_);
+		wait_for_completion(&bp->b_iowait);
+		trace_xfs_buf_iowait_done(bp, _RET_IP_);
+	} while (!__xfs_buf_ioend(bp));
 
 	return bp->b_error;
 }
