@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/uio.h>
+#include <linux/bio.h>
 #include <linux/falloc.h>
 #include <linux/sched/mm.h>
 #include <trace/events/fscache.h>
@@ -622,6 +623,77 @@ static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
 	return ret;
 }
 
+static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
+{
+	struct netfs_io_request *wreq = subreq->rreq;
+	struct netfs_cache_resources *cres = &wreq->cache_resources;
+
+	_enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
+
+	subreq->max_len = ULONG_MAX;
+	subreq->max_nr_segs = BIO_MAX_VECS;
+
+	if (!cachefiles_cres_file(cres)) {
+		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+			return netfs_prepare_write_failed(subreq);
+		if (!cachefiles_cres_file(cres))
+			return netfs_prepare_write_failed(subreq);
+	}
+}
+
+static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
+{
+	struct netfs_io_request *wreq = subreq->rreq;
+	struct netfs_cache_resources *cres = &wreq->cache_resources;
+	struct cachefiles_object *object = cachefiles_cres_object(cres);
+	struct cachefiles_cache *cache = object->volume->cache;
+	const struct cred *saved_cred;
+	size_t off, pre, post, len = subreq->len;
+	loff_t start = subreq->start;
+	int ret;
+
+	_enter("W=%x[%x] %llx-%llx",
+	       wreq->debug_id, subreq->debug_index, start, start + len - 1);
+
+	/* We need to start on the cache granularity boundary */
+	off = start & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+	if (off) {
+		pre = CACHEFILES_DIO_BLOCK_SIZE - off;
+		if (pre >= len) {
+			netfs_write_subrequest_terminated(subreq, len, false);
+			return;
+		}
+		subreq->transferred += pre;
+		start += pre;
+		len -= pre;
+		iov_iter_advance(&subreq->io_iter, pre);
+	}
+
+	/* We also need to end on the cache granularity boundary */
+	post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+	if (post) {
+		len -= post;
+		if (len == 0) {
+			netfs_write_subrequest_terminated(subreq, post, false);
+			return;
+		}
+		iov_iter_truncate(&subreq->io_iter, len);
+	}
+
+	cachefiles_begin_secure(cache, &saved_cred);
+	ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
+					 &start, &len, len, true);
+	cachefiles_end_secure(cache, saved_cred);
+	if (ret < 0) {
+		netfs_write_subrequest_terminated(subreq, ret, false);
+		return;
+	}
+
+	cachefiles_write(&subreq->rreq->cache_resources,
+			 subreq->start, &subreq->io_iter,
+			 netfs_write_subrequest_terminated, subreq);
+}
+
 /*
  * Clean up an operation.
  */
@@ -638,8 +710,10 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
 	.end_operation		= cachefiles_end_operation,
 	.read			= cachefiles_read,
 	.write			= cachefiles_write,
+	.issue_write		= cachefiles_issue_write,
 	.prepare_read		= cachefiles_prepare_read,
 	.prepare_write		= cachefiles_prepare_write,
+	.prepare_write_subreq	= cachefiles_prepare_write_subreq,
 	.prepare_ondemand_read	= cachefiles_prepare_ondemand_read,
 	.query_occupancy	= cachefiles_query_occupancy,
 };
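
Note: the pre/post trimming in cachefiles_issue_write() can be illustrated on its own. The sketch below is a minimal userspace stand-in, not part of the patch: BLOCK_SIZE stands in for CACHEFILES_DIO_BLOCK_SIZE (assumed here to be 4096), and the start/length values are hypothetical.

/* Illustrative only: how a subrequest is trimmed to cache DIO block boundaries.
 * Leading bytes up to the next block boundary (pre) are skipped, and the
 * length is rounded down to whole blocks (post is dropped from the tail).
 */
#include <stdio.h>
#include <stddef.h>

#define BLOCK_SIZE 4096UL	/* stand-in for CACHEFILES_DIO_BLOCK_SIZE */

int main(void)
{
	unsigned long long start = 5000;	/* hypothetical subrequest start */
	size_t len = 20000;			/* hypothetical subrequest length */
	size_t off, pre = 0, post;

	/* Round the start up to the next block boundary. */
	off = start & (BLOCK_SIZE - 1);
	if (off) {
		pre = BLOCK_SIZE - off;
		start += pre;
		len -= pre;
	}

	/* Round the length down to a whole number of blocks. */
	post = len & (BLOCK_SIZE - 1);
	len -= post;

	printf("skip %zu bytes at the front, %zu at the back; write %llu..%llu\n",
	       pre, post, start, start + len - 1);
	return 0;
}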