@@ -443,7 +443,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 	netfs_rreq_completed(rreq, was_async);
 }
 
-void netfs_rreq_work(struct work_struct *work)
+static void netfs_rreq_work(struct work_struct *work)
 {
 	struct netfs_io_request *rreq =
 		container_of(work, struct netfs_io_request, work);
@@ -688,6 +688,69 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 	return false;
 }
 
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+	unsigned int debug_index = 0;
+	int ret;
+
+	_enter("R=%x %llx-%llx",
+	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+	if (rreq->len == 0) {
+		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+		return -EIO;
+	}
+
+	INIT_WORK(&rreq->work, netfs_rreq_work);
+
+	if (sync)
+		netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+	/* Chop the read into slices according to what the cache and the netfs
+	 * want and submit each one.
+	 */
+	atomic_set(&rreq->nr_outstanding, 1);
+	do {
+		if (!netfs_rreq_submit_slice(rreq, &debug_index))
+			break;
+
+	} while (rreq->submitted < rreq->len);
+
+	if (sync) {
+		/* Keep nr_outstanding incremented so that the ref always belongs to
+		 * us, and the service code isn't punted off to a random thread pool to
+		 * process.
+		 */
+		for (;;) {
+			wait_var_event(&rreq->nr_outstanding,
+				       atomic_read(&rreq->nr_outstanding) == 1);
+			netfs_rreq_assess(rreq, false);
+			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+				break;
+			cond_resched();
+		}
+
+		ret = rreq->error;
+		if (ret == 0 && rreq->submitted < rreq->len) {
+			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+			ret = -EIO;
+		}
+		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+	} else {
+		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
+		if (atomic_dec_and_test(&rreq->nr_outstanding))
+			netfs_rreq_assess(rreq, false);
+		ret = 0;
+	}
+	return ret;
+}
+
 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
 					 loff_t *_start, size_t *_len, loff_t i_size)
 {
@@ -750,7 +813,6 @@ void netfs_readahead(struct readahead_control *ractl)
 {
 	struct netfs_io_request *rreq;
 	struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
-	unsigned int debug_index = 0;
 	int ret;
 
 	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -777,22 +839,13 @@ void netfs_readahead(struct readahead_control *ractl)
 
 	netfs_rreq_expand(rreq, ractl);
 
-	atomic_set(&rreq->nr_outstanding, 1);
-	do {
-		if (!netfs_rreq_submit_slice(rreq, &debug_index))
-			break;
-
-	} while (rreq->submitted < rreq->len);
-
 	/* Drop the refs on the folios here rather than in the cache or
 	 * filesystem. The locks will be dropped in netfs_rreq_unlock().
 	 */
 	while (readahead_folio(ractl))
 		;
 
-	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
-	if (atomic_dec_and_test(&rreq->nr_outstanding))
-		netfs_rreq_assess(rreq, false);
+	netfs_begin_read(rreq, false);
 	return;
 
 cleanup_free:
@@ -821,7 +874,6 @@ int netfs_readpage(struct file *file, struct page *subpage)
 	struct address_space *mapping = folio->mapping;
 	struct netfs_io_request *rreq;
 	struct netfs_i_context *ctx = netfs_i_context(mapping->host);
-	unsigned int debug_index = 0;
 	int ret;
 
 	_enter("%lx", folio_index(folio));
@@ -836,42 +888,16 @@ int netfs_readpage(struct file *file, struct page *subpage)
 
 	if (ctx->ops->begin_cache_operation) {
 		ret = ctx->ops->begin_cache_operation(rreq);
-		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
-			folio_unlock(folio);
-			goto out;
-		}
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+			goto discard;
 	}
 
 	netfs_stat(&netfs_n_rh_readpage);
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+	return netfs_begin_read(rreq, true);
 
-	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
-
-	atomic_set(&rreq->nr_outstanding, 1);
-	do {
-		if (!netfs_rreq_submit_slice(rreq, &debug_index))
-			break;
-
-	} while (rreq->submitted < rreq->len);
-
-	/* Keep nr_outstanding incremented so that the ref always belongs to us, and
-	 * the service code isn't punted off to a random thread pool to
-	 * process.
-	 */
-	do {
-		wait_var_event(&rreq->nr_outstanding,
-			       atomic_read(&rreq->nr_outstanding) == 1);
-		netfs_rreq_assess(rreq, false);
-	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
-	ret = rreq->error;
-	if (ret == 0 && rreq->submitted < rreq->len) {
-		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
-		ret = -EIO;
-	}
-out:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
-	return ret;
+discard:
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
 alloc_error:
 	folio_unlock(folio);
 	return ret;
@@ -966,7 +992,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	struct netfs_io_request *rreq;
 	struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
 	struct folio *folio;
-	unsigned int debug_index = 0, fgp_flags;
+	unsigned int fgp_flags;
 	pgoff_t index = pos >> PAGE_SHIFT;
 	int ret;
 
@@ -1029,39 +1055,13 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	 */
 	ractl._nr_pages = folio_nr_pages(folio);
 	netfs_rreq_expand(rreq, &ractl);
-	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 
 	/* We hold the folio locks, so we can drop the references */
 	folio_get(folio);
 	while (readahead_folio(&ractl))
 		;
 
-	atomic_set(&rreq->nr_outstanding, 1);
-	do {
-		if (!netfs_rreq_submit_slice(rreq, &debug_index))
-			break;
-
-	} while (rreq->submitted < rreq->len);
-
-	/* Keep nr_outstanding incremented so that the ref always belongs to
-	 * us, and the service code isn't punted off to a random thread pool to
-	 * process.
-	 */
-	for (;;) {
-		wait_var_event(&rreq->nr_outstanding,
-			       atomic_read(&rreq->nr_outstanding) == 1);
-		netfs_rreq_assess(rreq, false);
-		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-			break;
-		cond_resched();
-	}
-
-	ret = rreq->error;
-	if (ret == 0 && rreq->submitted < rreq->len) {
-		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
-		ret = -EIO;
-	}
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+	ret = netfs_begin_read(rreq, true);
 	if (ret < 0)
 		goto error;
 
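
For reference, a minimal sketch (not part of the patch) of how the call sites shown above end up using the consolidated helper after this change: netfs_readahead() issues the read asynchronously, while netfs_readpage() and netfs_write_begin() pass sync = true and wait for the result.

	/* Illustrative only, mirroring the call sites in this diff. */
	netfs_begin_read(rreq, false);		/* readahead: async; completion may run via rreq->work */
	ret = netfs_begin_read(rreq, true);	/* readpage/write_begin: wait, returns rreq->error */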