Skip to content

Commit 0d1120b

Browse files
Christoph Hellwig authored and cmaiolino committed
xfs: remove most in-flight buffer accounting
The buffer cache keeps a bt_io_count per-CPU counter to track all in-flight I/O, which is used to ensure no I/O is in flight when unmounting the file system. For most I/O we already keep track of in-flight I/O at higher levels: - for synchronous I/O (xfs_buf_read/xfs_bwrite/xfs_buf_delwri_submit), the caller has a reference and waits for I/O completions using xfs_buf_iowait - for xfs_buf_delwri_submit_nowait the only caller (AIL writeback) tracks the log items that the buffer is attached to This leaves only xfs_buf_readahead_map as a submitter of asynchronous I/O that is not tracked by anything else. Replace the bt_io_count per-CPU counter with a more specific bt_readahead_count counter that only tracks readahead I/O. This allows us to simply increment it when submitting readahead I/O and decrement it when it completes, and thus simplifies xfs_buf_rele and removes the need for the XBF_NO_IOACCT flag and the XFS_BSTATE_IN_FLIGHT buffer state. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Carlos Maiolino <cem@kernel.org>
1 parent efc5f7a commit 0d1120b

File tree

5 files changed

+20
-86
lines changed

5 files changed

+20
-86
lines changed

fs/xfs/xfs_buf.c

Lines changed: 15 additions & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,6 @@ struct kmem_cache *xfs_buf_cache;
2929
/*
3030
* Locking orders
3131
*
32-
* xfs_buf_ioacct_inc:
33-
* xfs_buf_ioacct_dec:
34-
* b_sema (caller holds)
35-
* b_lock
36-
*
3732
* xfs_buf_stale:
3833
* b_sema (caller holds)
3934
* b_lock
@@ -81,51 +76,6 @@ xfs_buf_vmap_len(
8176
return (bp->b_page_count * PAGE_SIZE);
8277
}
8378

84-
/*
85-
* Bump the I/O in flight count on the buftarg if we haven't yet done so for
86-
* this buffer. The count is incremented once per buffer (per hold cycle)
87-
* because the corresponding decrement is deferred to buffer release. Buffers
88-
* can undergo I/O multiple times in a hold-release cycle and per buffer I/O
89-
* tracking adds unnecessary overhead. This is used for sychronization purposes
90-
* with unmount (see xfs_buftarg_drain()), so all we really need is a count of
91-
* in-flight buffers.
92-
*
93-
* Buffers that are never released (e.g., superblock, iclog buffers) must set
94-
* the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
95-
* never reaches zero and unmount hangs indefinitely.
96-
*/
97-
static inline void
98-
xfs_buf_ioacct_inc(
99-
struct xfs_buf *bp)
100-
{
101-
if (bp->b_flags & XBF_NO_IOACCT)
102-
return;
103-
104-
ASSERT(bp->b_flags & XBF_ASYNC);
105-
spin_lock(&bp->b_lock);
106-
if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
107-
bp->b_state |= XFS_BSTATE_IN_FLIGHT;
108-
percpu_counter_inc(&bp->b_target->bt_io_count);
109-
}
110-
spin_unlock(&bp->b_lock);
111-
}
112-
113-
/*
114-
* Clear the in-flight state on a buffer about to be released to the LRU or
115-
* freed and unaccount from the buftarg.
116-
*/
117-
static inline void
118-
__xfs_buf_ioacct_dec(
119-
struct xfs_buf *bp)
120-
{
121-
lockdep_assert_held(&bp->b_lock);
122-
123-
if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
124-
bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
125-
percpu_counter_dec(&bp->b_target->bt_io_count);
126-
}
127-
}
128-
12979
/*
13080
* When we mark a buffer stale, we remove the buffer from the LRU and clear the
13181
* b_lru_ref count so that the buffer is freed immediately when the buffer
@@ -156,8 +106,6 @@ xfs_buf_stale(
156106
* status now to preserve accounting consistency.
157107
*/
158108
spin_lock(&bp->b_lock);
159-
__xfs_buf_ioacct_dec(bp);
160-
161109
atomic_set(&bp->b_lru_ref, 0);
162110
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
163111
(list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -946,6 +894,7 @@ xfs_buf_readahead_map(
946894
bp->b_ops = ops;
947895
bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
948896
bp->b_flags |= flags;
897+
percpu_counter_inc(&target->bt_readahead_count);
949898
xfs_buf_submit(bp);
950899
}
951900

@@ -1002,10 +951,12 @@ xfs_buf_get_uncached(
1002951
struct xfs_buf *bp;
1003952
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
1004953

954+
/* there are currently no valid flags for xfs_buf_get_uncached */
955+
ASSERT(flags == 0);
956+
1005957
*bpp = NULL;
1006958

1007-
/* flags might contain irrelevant bits, pass only what we care about */
1008-
error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
959+
error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
1009960
if (error)
1010961
return error;
1011962

@@ -1059,7 +1010,6 @@ xfs_buf_rele_uncached(
10591010
spin_unlock(&bp->b_lock);
10601011
return;
10611012
}
1062-
__xfs_buf_ioacct_dec(bp);
10631013
spin_unlock(&bp->b_lock);
10641014
xfs_buf_free(bp);
10651015
}
@@ -1078,19 +1028,11 @@ xfs_buf_rele_cached(
10781028
spin_lock(&bp->b_lock);
10791029
ASSERT(bp->b_hold >= 1);
10801030
if (bp->b_hold > 1) {
1081-
/*
1082-
* Drop the in-flight state if the buffer is already on the LRU
1083-
* and it holds the only reference. This is racy because we
1084-
* haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
1085-
* ensures the decrement occurs only once per-buf.
1086-
*/
1087-
if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
1088-
__xfs_buf_ioacct_dec(bp);
1031+
bp->b_hold--;
10891032
goto out_unlock;
10901033
}
10911034

10921035
/* we are asked to drop the last reference */
1093-
__xfs_buf_ioacct_dec(bp);
10941036
if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
10951037
/*
10961038
* If the buffer is added to the LRU, keep the reference to the
@@ -1370,6 +1312,8 @@ __xfs_buf_ioend(
13701312
bp->b_ops->verify_read(bp);
13711313
if (!bp->b_error)
13721314
bp->b_flags |= XBF_DONE;
1315+
if (bp->b_flags & XBF_READ_AHEAD)
1316+
percpu_counter_dec(&bp->b_target->bt_readahead_count);
13731317
} else {
13741318
if (!bp->b_error) {
13751319
bp->b_flags &= ~XBF_WRITE_FAIL;
@@ -1658,9 +1602,6 @@ xfs_buf_submit(
16581602
*/
16591603
bp->b_error = 0;
16601604

1661-
if (bp->b_flags & XBF_ASYNC)
1662-
xfs_buf_ioacct_inc(bp);
1663-
16641605
if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
16651606
xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
16661607
xfs_buf_ioend(bp);
@@ -1786,9 +1727,8 @@ xfs_buftarg_wait(
17861727
struct xfs_buftarg *btp)
17871728
{
17881729
/*
1789-
* First wait on the buftarg I/O count for all in-flight buffers to be
1790-
* released. This is critical as new buffers do not make the LRU until
1791-
* they are released.
1730+
* First wait for all in-flight readahead buffers to be released. This is
1731+
* critical as new buffers do not make the LRU until they are released.
17921732
*
17931733
* Next, flush the buffer workqueue to ensure all completion processing
17941734
* has finished. Just waiting on buffer locks is not sufficient for
@@ -1797,7 +1737,7 @@ xfs_buftarg_wait(
17971737
* all reference counts have been dropped before we start walking the
17981738
* LRU list.
17991739
*/
1800-
while (percpu_counter_sum(&btp->bt_io_count))
1740+
while (percpu_counter_sum(&btp->bt_readahead_count))
18011741
delay(100);
18021742
flush_workqueue(btp->bt_mount->m_buf_workqueue);
18031743
}
@@ -1914,8 +1854,8 @@ xfs_destroy_buftarg(
19141854
struct xfs_buftarg *btp)
19151855
{
19161856
shrinker_free(btp->bt_shrinker);
1917-
ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
1918-
percpu_counter_destroy(&btp->bt_io_count);
1857+
ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
1858+
percpu_counter_destroy(&btp->bt_readahead_count);
19191859
list_lru_destroy(&btp->bt_lru);
19201860
}
19211861

@@ -1969,7 +1909,7 @@ xfs_init_buftarg(
19691909

19701910
if (list_lru_init(&btp->bt_lru))
19711911
return -ENOMEM;
1972-
if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1912+
if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
19731913
goto out_destroy_lru;
19741914

19751915
btp->bt_shrinker =
@@ -1983,7 +1923,7 @@ xfs_init_buftarg(
19831923
return 0;
19841924

19851925
out_destroy_io_count:
1986-
percpu_counter_destroy(&btp->bt_io_count);
1926+
percpu_counter_destroy(&btp->bt_readahead_count);
19871927
out_destroy_lru:
19881928
list_lru_destroy(&btp->bt_lru);
19891929
return -ENOMEM;

fs/xfs/xfs_buf.h

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@ struct xfs_buf;
2727
#define XBF_READ (1u << 0) /* buffer intended for reading from device */
2828
#define XBF_WRITE (1u << 1) /* buffer intended for writing to device */
2929
#define XBF_READ_AHEAD (1u << 2) /* asynchronous read-ahead */
30-
#define XBF_NO_IOACCT (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
3130
#define XBF_ASYNC (1u << 4) /* initiator will not wait for completion */
3231
#define XBF_DONE (1u << 5) /* all pages in the buffer uptodate */
3332
#define XBF_STALE (1u << 6) /* buffer has been staled, do not find it */
@@ -58,7 +57,6 @@ typedef unsigned int xfs_buf_flags_t;
5857
{ XBF_READ, "READ" }, \
5958
{ XBF_WRITE, "WRITE" }, \
6059
{ XBF_READ_AHEAD, "READ_AHEAD" }, \
61-
{ XBF_NO_IOACCT, "NO_IOACCT" }, \
6260
{ XBF_ASYNC, "ASYNC" }, \
6361
{ XBF_DONE, "DONE" }, \
6462
{ XBF_STALE, "STALE" }, \
@@ -77,7 +75,6 @@ typedef unsigned int xfs_buf_flags_t;
7775
* Internal state flags.
7876
*/
7977
#define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */
80-
#define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */
8178

8279
struct xfs_buf_cache {
8380
struct rhashtable bc_hash;
@@ -116,7 +113,7 @@ struct xfs_buftarg {
116113
struct shrinker *bt_shrinker;
117114
struct list_lru bt_lru;
118115

119-
struct percpu_counter bt_io_count;
116+
struct percpu_counter bt_readahead_count;
120117
struct ratelimit_state bt_ioerror_rl;
121118

122119
/* Atomic write unit values */

fs/xfs/xfs_buf_mem.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ xmbuf_free(
117117
struct xfs_buftarg *btp)
118118
{
119119
ASSERT(xfs_buftarg_is_mem(btp));
120-
ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
120+
ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
121121

122122
trace_xmbuf_free(btp);
123123

fs/xfs/xfs_mount.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -181,14 +181,11 @@ xfs_readsb(
181181

182182
/*
183183
* Allocate a (locked) buffer to hold the superblock. This will be kept
184-
* around at all times to optimize access to the superblock. Therefore,
185-
* set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
186-
* elevated.
184+
* around at all times to optimize access to the superblock.
187185
*/
188186
reread:
189187
error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
190-
BTOBB(sector_size), XBF_NO_IOACCT, &bp,
191-
buf_ops);
188+
BTOBB(sector_size), 0, &bp, buf_ops);
192189
if (error) {
193190
if (loud)
194191
xfs_warn(mp, "SB validate failed with error %d.", error);

fs/xfs/xfs_rtalloc.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1407,7 +1407,7 @@ xfs_rtmount_readsb(
14071407

14081408
/* m_blkbb_log is not set up yet */
14091409
error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
1410-
mp->m_sb.sb_blocksize >> BBSHIFT, XBF_NO_IOACCT, &bp,
1410+
mp->m_sb.sb_blocksize >> BBSHIFT, 0, &bp,
14111411
&xfs_rtsb_buf_ops);
14121412
if (error) {
14131413
xfs_warn(mp, "rt sb validate failed with error %d.", error);

0 commit comments

Comments
 (0)