Skip to content

Commit 4090b31

Browse files
committed
netfs: Add a function to consolidate beginning a read
Add a function to do the steps needed to begin a read request, allowing
this code to be removed from several other functions and consolidated.

Changes
=======
ver #2)
 - Move before the unstaticking patch so that some functions can be left
   static.
 - Set uninitialised return code in netfs_begin_read()[1][2].
 - Fixed a refleak caused by non-removal of a get from netfs_write_begin()
   when the request submission code got moved to netfs_begin_read().
 - Use INIT_WORK() to (re-)init the request work_struct[3].

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/20220303163826.1120936-1-nathan@kernel.org/ [1]
Link: https://lore.kernel.org/r/20220303235647.1297171-1-colin.i.king@gmail.com/ [2]
Link: https://lore.kernel.org/r/9d69be49081bccff44260e4c6e0049c63d6d04a1.camel@redhat.com/ [3]
Link: https://lore.kernel.org/r/164623004355.3564931.7275693529042495641.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678214287.1200972.16734134007649832160.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692911113.2099075.1060868473229451371.stgit@warthog.procyon.org.uk/ # v3
1 parent bc899ee commit 4090b31

File tree

4 files changed

+76
-76
lines changed

4 files changed

+76
-76
lines changed

fs/netfs/internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
3939
*/
4040
extern unsigned int netfs_debug;
4141

42-
void netfs_rreq_work(struct work_struct *work);
42+
int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
4343

4444
/*
4545
* stats.c

fs/netfs/objects.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
3535
rreq->i_size = i_size_read(inode);
3636
rreq->debug_id = atomic_inc_return(&debug_ids);
3737
INIT_LIST_HEAD(&rreq->subrequests);
38-
INIT_WORK(&rreq->work, netfs_rreq_work);
3938
refcount_set(&rreq->ref, 1);
4039
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
4140
if (rreq->netfs_ops->init_request) {

fs/netfs/read_helper.c

Lines changed: 72 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -443,7 +443,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
443443
netfs_rreq_completed(rreq, was_async);
444444
}
445445

446-
void netfs_rreq_work(struct work_struct *work)
446+
static void netfs_rreq_work(struct work_struct *work)
447447
{
448448
struct netfs_io_request *rreq =
449449
container_of(work, struct netfs_io_request, work);
@@ -688,6 +688,69 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
688688
return false;
689689
}
690690

691+
/*
692+
* Begin the process of reading in a chunk of data, where that data may be
693+
* stitched together from multiple sources, including multiple servers and the
694+
* local cache.
695+
*/
696+
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
697+
{
698+
unsigned int debug_index = 0;
699+
int ret;
700+
701+
_enter("R=%x %llx-%llx",
702+
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
703+
704+
if (rreq->len == 0) {
705+
pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
706+
netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
707+
return -EIO;
708+
}
709+
710+
INIT_WORK(&rreq->work, netfs_rreq_work);
711+
712+
if (sync)
713+
netfs_get_request(rreq, netfs_rreq_trace_get_hold);
714+
715+
/* Chop the read into slices according to what the cache and the netfs
716+
* want and submit each one.
717+
*/
718+
atomic_set(&rreq->nr_outstanding, 1);
719+
do {
720+
if (!netfs_rreq_submit_slice(rreq, &debug_index))
721+
break;
722+
723+
} while (rreq->submitted < rreq->len);
724+
725+
if (sync) {
726+
/* Keep nr_outstanding incremented so that the ref always belongs to
727+
* us, and the service code isn't punted off to a random thread pool to
728+
* process.
729+
*/
730+
for (;;) {
731+
wait_var_event(&rreq->nr_outstanding,
732+
atomic_read(&rreq->nr_outstanding) == 1);
733+
netfs_rreq_assess(rreq, false);
734+
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
735+
break;
736+
cond_resched();
737+
}
738+
739+
ret = rreq->error;
740+
if (ret == 0 && rreq->submitted < rreq->len) {
741+
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
742+
ret = -EIO;
743+
}
744+
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
745+
} else {
746+
/* If we decrement nr_outstanding to 0, the ref belongs to us. */
747+
if (atomic_dec_and_test(&rreq->nr_outstanding))
748+
netfs_rreq_assess(rreq, false);
749+
ret = 0;
750+
}
751+
return ret;
752+
}
753+
691754
static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
692755
loff_t *_start, size_t *_len, loff_t i_size)
693756
{
@@ -750,7 +813,6 @@ void netfs_readahead(struct readahead_control *ractl)
750813
{
751814
struct netfs_io_request *rreq;
752815
struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
753-
unsigned int debug_index = 0;
754816
int ret;
755817

756818
_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -777,22 +839,13 @@ void netfs_readahead(struct readahead_control *ractl)
777839

778840
netfs_rreq_expand(rreq, ractl);
779841

780-
atomic_set(&rreq->nr_outstanding, 1);
781-
do {
782-
if (!netfs_rreq_submit_slice(rreq, &debug_index))
783-
break;
784-
785-
} while (rreq->submitted < rreq->len);
786-
787842
/* Drop the refs on the folios here rather than in the cache or
788843
* filesystem. The locks will be dropped in netfs_rreq_unlock().
789844
*/
790845
while (readahead_folio(ractl))
791846
;
792847

793-
/* If we decrement nr_outstanding to 0, the ref belongs to us. */
794-
if (atomic_dec_and_test(&rreq->nr_outstanding))
795-
netfs_rreq_assess(rreq, false);
848+
netfs_begin_read(rreq, false);
796849
return;
797850

798851
cleanup_free:
@@ -821,7 +874,6 @@ int netfs_readpage(struct file *file, struct page *subpage)
821874
struct address_space *mapping = folio->mapping;
822875
struct netfs_io_request *rreq;
823876
struct netfs_i_context *ctx = netfs_i_context(mapping->host);
824-
unsigned int debug_index = 0;
825877
int ret;
826878

827879
_enter("%lx", folio_index(folio));
@@ -836,42 +888,16 @@ int netfs_readpage(struct file *file, struct page *subpage)
836888

837889
if (ctx->ops->begin_cache_operation) {
838890
ret = ctx->ops->begin_cache_operation(rreq);
839-
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
840-
folio_unlock(folio);
841-
goto out;
842-
}
891+
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
892+
goto discard;
843893
}
844894

845895
netfs_stat(&netfs_n_rh_readpage);
846896
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
897+
return netfs_begin_read(rreq, true);
847898

848-
netfs_get_request(rreq, netfs_rreq_trace_get_hold);
849-
850-
atomic_set(&rreq->nr_outstanding, 1);
851-
do {
852-
if (!netfs_rreq_submit_slice(rreq, &debug_index))
853-
break;
854-
855-
} while (rreq->submitted < rreq->len);
856-
857-
/* Keep nr_outstanding incremented so that the ref always belongs to us, and
858-
* the service code isn't punted off to a random thread pool to
859-
* process.
860-
*/
861-
do {
862-
wait_var_event(&rreq->nr_outstanding,
863-
atomic_read(&rreq->nr_outstanding) == 1);
864-
netfs_rreq_assess(rreq, false);
865-
} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
866-
867-
ret = rreq->error;
868-
if (ret == 0 && rreq->submitted < rreq->len) {
869-
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
870-
ret = -EIO;
871-
}
872-
out:
873-
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
874-
return ret;
899+
discard:
900+
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
875901
alloc_error:
876902
folio_unlock(folio);
877903
return ret;
@@ -966,7 +992,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
966992
struct netfs_io_request *rreq;
967993
struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
968994
struct folio *folio;
969-
unsigned int debug_index = 0, fgp_flags;
995+
unsigned int fgp_flags;
970996
pgoff_t index = pos >> PAGE_SHIFT;
971997
int ret;
972998

@@ -1029,39 +1055,13 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
10291055
*/
10301056
ractl._nr_pages = folio_nr_pages(folio);
10311057
netfs_rreq_expand(rreq, &ractl);
1032-
netfs_get_request(rreq, netfs_rreq_trace_get_hold);
10331058

10341059
/* We hold the folio locks, so we can drop the references */
10351060
folio_get(folio);
10361061
while (readahead_folio(&ractl))
10371062
;
10381063

1039-
atomic_set(&rreq->nr_outstanding, 1);
1040-
do {
1041-
if (!netfs_rreq_submit_slice(rreq, &debug_index))
1042-
break;
1043-
1044-
} while (rreq->submitted < rreq->len);
1045-
1046-
/* Keep nr_outstanding incremented so that the ref always belongs to
1047-
* us, and the service code isn't punted off to a random thread pool to
1048-
* process.
1049-
*/
1050-
for (;;) {
1051-
wait_var_event(&rreq->nr_outstanding,
1052-
atomic_read(&rreq->nr_outstanding) == 1);
1053-
netfs_rreq_assess(rreq, false);
1054-
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
1055-
break;
1056-
cond_resched();
1057-
}
1058-
1059-
ret = rreq->error;
1060-
if (ret == 0 && rreq->submitted < rreq->len) {
1061-
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
1062-
ret = -EIO;
1063-
}
1064-
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
1064+
ret = netfs_begin_read(rreq, true);
10651065
if (ret < 0)
10661066
goto error;
10671067

include/trace/events/netfs.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,17 +56,18 @@
5656
EM(netfs_fail_check_write_begin, "check-write-begin") \
5757
EM(netfs_fail_copy_to_cache, "copy-to-cache") \
5858
EM(netfs_fail_read, "read") \
59-
EM(netfs_fail_short_readpage, "short-readpage") \
60-
EM(netfs_fail_short_write_begin, "short-write-begin") \
59+
EM(netfs_fail_short_read, "short-read") \
6160
E_(netfs_fail_prepare_write, "prep-write")
6261

6362
#define netfs_rreq_ref_traces \
6463
EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
6564
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
6665
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
66+
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
6767
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
6868
EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
6969
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
70+
EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
7071
E_(netfs_rreq_trace_new, "NEW ")
7172

7273
#define netfs_sreq_ref_traces \

0 commit comments

Comments
 (0)