@@ -29,11 +29,6 @@ struct kmem_cache *xfs_buf_cache;
/*
 * Locking orders
 *
- * xfs_buf_ioacct_inc:
- * xfs_buf_ioacct_dec:
- *	b_sema (caller holds)
- *	  b_lock
- *
 * xfs_buf_stale:
 *	b_sema (caller holds)
 *	  b_lock
@@ -81,51 +76,6 @@ xfs_buf_vmap_len(
	return (bp->b_page_count * PAGE_SIZE);
}

-/*
- * Bump the I/O in flight count on the buftarg if we haven't yet done so for
- * this buffer. The count is incremented once per buffer (per hold cycle)
- * because the corresponding decrement is deferred to buffer release. Buffers
- * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
- * tracking adds unnecessary overhead. This is used for synchronization purposes
- * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
- * in-flight buffers.
- *
- * Buffers that are never released (e.g., superblock, iclog buffers) must set
- * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
- * never reaches zero and unmount hangs indefinitely.
- */
-static inline void
-xfs_buf_ioacct_inc(
-	struct xfs_buf	*bp)
-{
-	if (bp->b_flags & XBF_NO_IOACCT)
-		return;
-
-	ASSERT(bp->b_flags & XBF_ASYNC);
-	spin_lock(&bp->b_lock);
-	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
-		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
-		percpu_counter_inc(&bp->b_target->bt_io_count);
-	}
-	spin_unlock(&bp->b_lock);
-}
-
-/*
- * Clear the in-flight state on a buffer about to be released to the LRU or
- * freed and unaccount from the buftarg.
- */
-static inline void
-__xfs_buf_ioacct_dec(
-	struct xfs_buf	*bp)
-{
-	lockdep_assert_held(&bp->b_lock);
-
-	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
-		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
-		percpu_counter_dec(&bp->b_target->bt_io_count);
-	}
-}
-
/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
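
The two helpers removed above worked as a pair across a buffer's hold cycle.
A condensed sketch of the old flow, reconstructed from the removed code rather
than quoted from it (xfs_buf_rele() stands in for the release paths changed
further down):

	/* old model: one bt_io_count tick per async hold cycle */
	xfs_buf_submit(bp);	/* XBF_ASYNC: xfs_buf_ioacct_inc() set
				 * XFS_BSTATE_IN_FLIGHT and bumped bt_io_count,
				 * but only on the first submission per hold */
	/* the buffer could be resubmitted while held; no further accounting */
	xfs_buf_rele(bp);	/* __xfs_buf_ioacct_dec() cleared the state and
				 * dropped bt_io_count at release time */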
@@ -156,8 +106,6 @@ xfs_buf_stale(
	 * status now to preserve accounting consistency.
	 */
	spin_lock(&bp->b_lock);
-	__xfs_buf_ioacct_dec(bp);
-
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -946,6 +894,7 @@ xfs_buf_readahead_map(
	bp->b_ops = ops;
	bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
	bp->b_flags |= flags;
+	percpu_counter_inc(&target->bt_readahead_count);
	xfs_buf_submit(bp);
}

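With this change the buftarg counter tracks only readahead I/O. A condensed
sketch of the new lifecycle, pieced together from the hunks in this diff: the
count is bumped at readahead submission, dropped on read completion, and
drained at unmount.

	/* submission side, xfs_buf_readahead_map() above */
	percpu_counter_inc(&target->bt_readahead_count);
	xfs_buf_submit(bp);

	/* completion side, __xfs_buf_ioend() below */
	if (bp->b_flags & XBF_READ_AHEAD)
		percpu_counter_dec(&bp->b_target->bt_readahead_count);

	/* unmount/drain side, xfs_buftarg_wait() below */
	while (percpu_counter_sum(&btp->bt_readahead_count))
		delay(100);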
@@ -1002,10 +951,12 @@ xfs_buf_get_uncached(
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

+	/* there are currently no valid flags for xfs_buf_get_uncached */
+	ASSERT(flags == 0);
+
	*bpp = NULL;

-	/* flags might contain irrelevant bits, pass only what we care about */
-	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+	error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
	if (error)
		return error;

@@ -1059,7 +1010,6 @@ xfs_buf_rele_uncached(
		spin_unlock(&bp->b_lock);
		return;
	}
-	__xfs_buf_ioacct_dec(bp);
	spin_unlock(&bp->b_lock);
	xfs_buf_free(bp);
}
@@ -1078,19 +1028,11 @@ xfs_buf_rele_cached(
	spin_lock(&bp->b_lock);
	ASSERT(bp->b_hold >= 1);
	if (bp->b_hold > 1) {
-		/*
-		 * Drop the in-flight state if the buffer is already on the LRU
-		 * and it holds the only reference. This is racy because we
-		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
-		 * ensures the decrement occurs only once per-buf.
-		 */
-		if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
-			__xfs_buf_ioacct_dec(bp);
+		bp->b_hold--;
		goto out_unlock;
	}

	/* we are asked to drop the last reference */
-	__xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU, keep the reference to the
@@ -1370,6 +1312,8 @@ __xfs_buf_ioend(
			bp->b_ops->verify_read(bp);
		if (!bp->b_error)
			bp->b_flags |= XBF_DONE;
+		if (bp->b_flags & XBF_READ_AHEAD)
+			percpu_counter_dec(&bp->b_target->bt_readahead_count);
	} else {
		if (!bp->b_error) {
			bp->b_flags &= ~XBF_WRITE_FAIL;
@@ -1658,9 +1602,6 @@ xfs_buf_submit(
	 */
	bp->b_error = 0;

-	if (bp->b_flags & XBF_ASYNC)
-		xfs_buf_ioacct_inc(bp);
-
	if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
		xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
		xfs_buf_ioend(bp);
@@ -1786,9 +1727,8 @@ xfs_buftarg_wait(
	struct xfs_buftarg	*btp)
{
	/*
-	 * First wait on the buftarg I/O count for all in-flight buffers to be
-	 * released. This is critical as new buffers do not make the LRU until
-	 * they are released.
+	 * First wait for all in-flight readahead buffers to be released. This is
+	 * critical as new buffers do not make the LRU until they are released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
@@ -1797,7 +1737,7 @@ xfs_buftarg_wait(
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
	 */
-	while (percpu_counter_sum(&btp->bt_io_count))
+	while (percpu_counter_sum(&btp->bt_readahead_count))
		delay(100);
	flush_workqueue(btp->bt_mount->m_buf_workqueue);
}
@@ -1914,8 +1854,8 @@ xfs_destroy_buftarg(
	struct xfs_buftarg	*btp)
{
	shrinker_free(btp->bt_shrinker);
-	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
-	percpu_counter_destroy(&btp->bt_io_count);
+	ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
+	percpu_counter_destroy(&btp->bt_readahead_count);
	list_lru_destroy(&btp->bt_lru);
}

@@ -1969,7 +1909,7 @@ xfs_init_buftarg(

	if (list_lru_init(&btp->bt_lru))
		return -ENOMEM;
-	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+	if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
		goto out_destroy_lru;

	btp->bt_shrinker =
@@ -1983,7 +1923,7 @@ xfs_init_buftarg(
	return 0;

out_destroy_io_count:
-	percpu_counter_destroy(&btp->bt_io_count);
+	percpu_counter_destroy(&btp->bt_readahead_count);
out_destroy_lru:
	list_lru_destroy(&btp->bt_lru);
	return -ENOMEM;
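
The hunks above reference btp->bt_readahead_count, but the header change that
renames the field is not part of this excerpt. Assuming struct xfs_buftarg
otherwise keeps its layout in fs/xfs/xfs_buf.h, the companion change would look
roughly like the sketch below; XBF_NO_IOACCT and XFS_BSTATE_IN_FLIGHT lose
their last users in this diff and would presumably be retired there as well.

	struct xfs_buftarg {
		/* ... unrelated fields unchanged ... */
-		struct percpu_counter	bt_io_count;
+		struct percpu_counter	bt_readahead_count;
		/* ... unrelated fields unchanged ... */
	};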