Skip to content

Commit 105549d

Browse files
dhowells authored and brauner committed
netfs: Fix enomem handling in buffered reads
If netfs_read_to_pagecache() gets an error from either ->prepare_read() or from netfs_prepare_read_iterator(), it needs to decrement ->nr_outstanding, cancel the subrequest and break out of the issuing loop. Currently, it only does this for two of the cases, but there are two more that aren't handled. Fix this by moving the handling to a common place and jumping to it from all four places. This is in preference to inserting a wrapper around netfs_prepare_read_iterator() as proposed by Dmitry Antipov[1]. Link: https://lore.kernel.org/r/20241202093943.227786-1-dmantipov@yandex.ru/ [1] Fixes: ee4cdf7 ("netfs: Speed up buffered reading") Reported-by: syzbot+404b4b745080b6210c6c@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=404b4b745080b6210c6c Signed-off-by: David Howells <dhowells@redhat.com> Link: https://lore.kernel.org/r/20241213135013.2964079-4-dhowells@redhat.com Tested-by: syzbot+404b4b745080b6210c6c@syzkaller.appspotmail.com cc: Dmitry Antipov <dmantipov@yandex.ru> cc: Jeff Layton <jlayton@kernel.org> cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org Signed-off-by: Christian Brauner <brauner@kernel.org>
1 parent c8b90d4 commit 105549d

File tree

1 file changed

+16
-12
lines changed

1 file changed

+16
-12
lines changed

fs/netfs/buffered_read.c

Lines changed: 16 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
275275
netfs_stat(&netfs_n_rh_download);
276276
if (rreq->netfs_ops->prepare_read) {
277277
ret = rreq->netfs_ops->prepare_read(subreq);
278-
if (ret < 0) {
279-
atomic_dec(&rreq->nr_outstanding);
280-
netfs_put_subrequest(subreq, false,
281-
netfs_sreq_trace_put_cancel);
282-
break;
283-
}
278+
if (ret < 0)
279+
goto prep_failed;
284280
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
285281
}
286282

287283
slice = netfs_prepare_read_iterator(subreq);
288-
if (slice < 0) {
289-
atomic_dec(&rreq->nr_outstanding);
290-
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
291-
ret = slice;
292-
break;
293-
}
284+
if (slice < 0)
285+
goto prep_iter_failed;
294286

295287
rreq->netfs_ops->issue_read(subreq);
296288
goto done;
@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
302294
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
303295
netfs_stat(&netfs_n_rh_zero);
304296
slice = netfs_prepare_read_iterator(subreq);
297+
if (slice < 0)
298+
goto prep_iter_failed;
305299
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
306300
netfs_read_subreq_terminated(subreq, 0, false);
307301
goto done;
@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
310304
if (source == NETFS_READ_FROM_CACHE) {
311305
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
312306
slice = netfs_prepare_read_iterator(subreq);
307+
if (slice < 0)
308+
goto prep_iter_failed;
313309
netfs_read_cache_to_pagecache(rreq, subreq);
314310
goto done;
315311
}
@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
318314
WARN_ON_ONCE(1);
319315
break;
320316

317+
prep_iter_failed:
318+
ret = slice;
319+
prep_failed:
320+
subreq->error = ret;
321+
atomic_dec(&rreq->nr_outstanding);
322+
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
323+
break;
324+
321325
done:
322326
size -= slice;
323327
start += slice;

0 commit comments

Comments (0)