
Commit 2df8654

netfs: Cut over to using new writeback code
Cut over to using the new writeback code. The old code is #ifdef'd out or
otherwise removed from compilation to avoid conflicts and will be removed in a
future patch.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
1 parent 64e64e6 commit 2df8654

File tree

11 files changed: 62 additions, 78 deletions

fs/9p/vfs_addr.c

Lines changed: 2 additions & 4 deletions
@@ -60,6 +60,7 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
 	netfs_write_subrequest_terminated(subreq, len ?: err, false);
 }
 
+#if 0 // TODO: Remove
 static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
 	struct p9_fid *fid = subreq->rreq->netfs_priv;
@@ -91,6 +92,7 @@ static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t sta
 	if (subreq)
 		netfs_queue_write_request(subreq);
 }
+#endif
 
 /**
  * v9fs_issue_read - Issue a read from 9P
@@ -121,18 +123,15 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
 	struct p9_fid *fid;
 	bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
-			rreq->origin == NETFS_WRITEBACK ||
 			rreq->origin == NETFS_WRITETHROUGH ||
 			rreq->origin == NETFS_UNBUFFERED_WRITE ||
 			rreq->origin == NETFS_DIO_WRITE);
 
-#if 0 // TODO: Cut over
 	if (rreq->origin == NETFS_WRITEBACK)
 		return 0; /* We don't get the write handle until we find we
 			   * have actually dirty data and not just
 			   * copy-to-cache data.
			   */
-#endif
 
 	if (file) {
 		fid = file->private_data;
@@ -179,7 +178,6 @@ const struct netfs_request_ops v9fs_req_ops = {
 	.issue_read = v9fs_issue_read,
 	.begin_writeback = v9fs_begin_writeback,
 	.issue_write = v9fs_issue_write,
-	.create_write_requests = v9fs_create_write_requests,
 };
 
 const struct address_space_operations v9fs_addr_operations = {
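Taken together with the afs hunks below, the ops-table change is the visible part of the cutover: a filesystem now describes writeback through ->begin_writeback and ->issue_write and drops ->create_write_requests. A rough, hedged sketch of the shape a converted filesystem ends up with; the myfs_* names are placeholders and the begin_writeback prototype is assumed from the surrounding series rather than shown in this hunk:

#include <linux/netfs.h>

/* Illustrative only: the hook names and the completion call come from this
 * commit; myfs_* and the "send to server" step are invented stand-ins.
 */
static void myfs_begin_writeback(struct netfs_io_request *wreq)
{
	/* Attach whatever creds/handles the whole request will need. */
}

static void myfs_issue_write(struct netfs_io_subrequest *subreq)
{
	ssize_t len = 0, err = 0;

	/* ... transmit the subrequest's data to the server ... */

	netfs_write_subrequest_terminated(subreq, len ?: err, false);
}

static const struct netfs_request_ops myfs_req_ops = {
	.begin_writeback = myfs_begin_writeback,
	.issue_write = myfs_issue_write,
	/* no .create_write_requests: the old hook is on its way out */
};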

fs/afs/file.c

Lines changed: 1 addition & 2 deletions
@@ -353,7 +353,7 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 	if (file)
 		rreq->netfs_priv = key_get(afs_file_key(file));
 	rreq->rsize = 256 * 1024;
-	rreq->wsize = 256 * 1024;
+	rreq->wsize = 256 * 1024 * 1024;
 	return 0;
 }
 
@@ -399,7 +399,6 @@ const struct netfs_request_ops afs_req_ops = {
 	.issue_read = afs_issue_read,
 	.update_i_size = afs_update_i_size,
 	.invalidate_cache = afs_netfs_invalidate_cache,
-	.create_write_requests = afs_create_write_requests,
 	.begin_writeback = afs_begin_writeback,
 	.prepare_write = afs_prepare_write,
 	.issue_write = afs_issue_write,

fs/afs/internal.h

Lines changed: 0 additions & 1 deletion
@@ -1605,7 +1605,6 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
 extern void afs_prune_wb_keys(struct afs_vnode *);
-void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len);
 
 /*
  * xattr.c

fs/afs/write.c

Lines changed: 2 additions & 0 deletions
@@ -156,6 +156,7 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
 	return afs_put_operation(op);
 }
 
+#if 0 // TODO: Remove
 static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
 	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
@@ -193,6 +194,7 @@ void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size
 	if (subreq)
 		netfs_queue_write_request(subreq);
 }
+#endif
 
 /*
  * Writeback calls this when it finds a folio that needs uploading. This isn't

fs/netfs/Makefile

Lines changed: 0 additions & 1 deletion
@@ -11,7 +11,6 @@ netfs-y := \
 	main.o \
 	misc.o \
 	objects.o \
-	output.o \
 	write_collect.o \
 	write_issue.o
 

fs/netfs/buffered_write.c

Lines changed: 24 additions & 21 deletions
@@ -26,8 +26,6 @@ enum netfs_how_to_modify {
 	NETFS_FLUSH_CONTENT,	/* Flush incompatible content. */
 };
 
-static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
-
 static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
 {
 	void *priv = folio_get_private(folio);
@@ -180,7 +178,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	};
 	struct netfs_io_request *wreq = NULL;
 	struct netfs_folio *finfo;
-	struct folio *folio;
+	struct folio *folio, *writethrough = NULL;
 	enum netfs_how_to_modify howto;
 	enum netfs_folio_trace trace;
 	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
@@ -209,7 +207,6 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		}
 		if (!is_sync_kiocb(iocb))
 			wreq->iocb = iocb;
-		wreq->cleanup = netfs_cleanup_buffered_write;
 		netfs_stat(&netfs_n_wh_writethrough);
 	} else {
 		netfs_stat(&netfs_n_wh_buffered_write);
@@ -253,6 +250,16 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		offset = pos & (flen - 1);
 		part = min_t(size_t, flen - offset, part);
 
+		/* Wait for writeback to complete. The writeback engine owns
+		 * the info in folio->private and may change it until it
+		 * removes the WB mark.
+		 */
+		if (folio_get_private(folio) &&
+		    folio_wait_writeback_killable(folio)) {
+			ret = written ? -EINTR : -ERESTARTSYS;
+			goto error_folio_unlock;
+		}
+
 		if (signal_pending(current)) {
 			ret = written ? -EINTR : -ERESTARTSYS;
 			goto error_folio_unlock;
@@ -327,6 +334,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			maybe_trouble = true;
 			iov_iter_revert(iter, copied);
 			copied = 0;
+			folio_unlock(folio);
 			goto retry;
 		}
 		netfs_set_group(folio, netfs_group);
@@ -382,23 +390,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 
 		if (likely(!wreq)) {
 			folio_mark_dirty(folio);
+			folio_unlock(folio);
 		} else {
-			if (folio_test_dirty(folio))
-				/* Sigh. mmap. */
-				folio_clear_dirty_for_io(folio);
-			/* We make multiple writes to the folio... */
-			if (!folio_test_writeback(folio)) {
-				folio_start_writeback(folio);
-				if (wreq->iter.count == 0)
-					trace_netfs_folio(folio, netfs_folio_trace_wthru);
-				else
-					trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
-			}
-			netfs_advance_writethrough(wreq, copied,
-						   offset + copied == flen);
+			netfs_advance_writethrough(wreq, &wbc, folio, copied,
						   offset + copied == flen,
						   &writethrough);
+			/* Folio unlocked */
 		}
 	retry:
-		folio_unlock(folio);
 		folio_put(folio);
 		folio = NULL;
 
@@ -407,7 +406,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 
 out:
 	if (unlikely(wreq)) {
-		ret2 = netfs_end_writethrough(wreq, iocb);
+		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
 		wbc_detach_inode(&wbc);
 		if (ret2 == -EIOCBQUEUED)
 			return ret2;
@@ -529,11 +528,13 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 
 	sb_start_pagefault(inode->i_sb);
 
-	if (folio_wait_writeback_killable(folio))
+	if (folio_lock_killable(folio) < 0)
 		goto out;
 
-	if (folio_lock_killable(folio) < 0)
+	if (folio_wait_writeback_killable(folio)) {
+		ret = VM_FAULT_LOCKED;
 		goto out;
+	}
 
 	/* Can we see a streaming write here? */
 	if (WARN_ON(!folio_test_uptodate(folio))) {
@@ -573,6 +574,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 }
 EXPORT_SYMBOL(netfs_page_mkwrite);
 
+#if 0 // TODO: Remove
 /*
  * Kill all the pages in the given range
 */
@@ -1199,3 +1201,4 @@ int netfs_writepages(struct address_space *mapping,
 	return ret;
 }
 EXPORT_SYMBOL(netfs_writepages);
+#endif
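The net effect of the netfs_perform_write() hunks above is that the writethrough helpers now carry the writeback_control and a cached folio pointer, while starting writeback and unlocking the folio move into the helpers themselves. A condensed, hedged sketch of the calling sequence, not a verbatim excerpt; the signatures match the fs/netfs/internal.h hunk below and everything else is elided:

/* Sketch only: setup, the writethrough-mode test and error handling omitted. */
struct netfs_io_request *wreq = NULL;
struct folio *writethrough = NULL;	/* folio the helpers are accumulating */
struct writeback_control wbc = { /* set up for writethrough */ };

wreq = netfs_begin_writethrough(iocb, iov_iter_count(iter));

/* Per folio modified by the copy loop; the helper starts/extends writeback
 * on the folio and unlocks it, so this branch no longer calls
 * folio_start_writeback() or folio_unlock() directly.
 */
netfs_advance_writethrough(wreq, &wbc, folio, copied,
			   offset + copied == flen, &writethrough);

/* After the loop: flush whatever is still held in the cached folio. */
ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
wbc_detach_inode(&wbc);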

fs/netfs/direct_write.c

Lines changed: 14 additions & 12 deletions
@@ -34,6 +34,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	unsigned long long start = iocb->ki_pos;
 	unsigned long long end = start + iov_iter_count(iter);
 	ssize_t ret, n;
+	size_t len = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);
 
 	_enter("");
@@ -46,13 +47,17 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 
 	_debug("uw %llx-%llx", start, end);
 
-	wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
-				   start, end - start,
-				   iocb->ki_flags & IOCB_DIRECT ?
-				   NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
+				      iocb->ki_flags & IOCB_DIRECT ?
+				      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
 	if (IS_ERR(wreq))
 		return PTR_ERR(wreq);
 
+	wreq->io_streams[0].avail = true;
+	trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
+				 netfs_write_trace_dio_write :
+				 netfs_write_trace_unbuffered_write));
+
 	{
 		/* If this is an async op and we're not using a bounce buffer,
 		 * we have to save the source buffer as the iterator is only
@@ -63,22 +68,23 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 		 * request.
 		 */
 		if (async || user_backed_iter(iter)) {
-			n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
+			n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
 			if (n < 0) {
 				ret = n;
 				goto out;
 			}
 			wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
 			wreq->direct_bv_count = n;
 			wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
-			wreq->len = iov_iter_count(&wreq->iter);
 		} else {
 			wreq->iter = *iter;
 		}
 
 		wreq->io_iter = wreq->iter;
 	}
 
+	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+
 	/* Copy the data into the bounce buffer and encrypt it. */
 	// TODO
 
@@ -87,10 +93,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	if (async)
 		wreq->iocb = iocb;
 	wreq->cleanup = netfs_cleanup_dio_write;
-	ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
-				iocb->ki_flags & IOCB_DIRECT ?
-				netfs_write_trace_dio_write :
-				netfs_write_trace_unbuffered_write);
+	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
 	if (ret < 0) {
 		_debug("begin = %zd", ret);
 		goto out;
@@ -100,9 +103,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
 	wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
 		    TASK_UNINTERRUPTIBLE);
-
+	smp_rmb(); /* Read error/transferred after RIP flag */
 	ret = wreq->error;
-	_debug("waited = %zd", ret);
 	if (ret == 0) {
 		ret = wreq->transferred;
 		iocb->ki_pos += ret;
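Condensed from the hunks above: the unbuffered/DIO path now builds the request with netfs_create_write_req(), marks the single upload stream available and hands off to netfs_unbuffered_write() instead of netfs_begin_write(). A hedged sketch with bounce-buffer and iterator-extraction details omitted:

/* Sketch only; condensed from netfs_unbuffered_write_iter_locked() above. */
wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
			      iocb->ki_flags & IOCB_DIRECT ?
			      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
if (IS_ERR(wreq))
	return PTR_ERR(wreq);

wreq->io_streams[0].avail = true;		/* one upload stream */
__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);	/* write from wreq->io_iter */

ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb),
			     iov_iter_count(&wreq->io_iter));
if (ret >= 0 && is_sync_kiocb(iocb)) {
	/* Synchronous caller: wait for the collector, then pick up the result. */
	wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
	smp_rmb();	/* read error/transferred after the IN_PROGRESS flag */
	ret = wreq->error ? wreq->error : wreq->transferred;
}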

fs/netfs/internal.h

Lines changed: 6 additions & 15 deletions
@@ -92,15 +92,6 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
 	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
 }
 
-/*
- * output.c
- */
-int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
-		      enum netfs_write_trace what);
-struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb);
-
 /*
  * stats.c
 */
@@ -172,12 +163,12 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
 int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof);
-struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-				   struct folio *folio, size_t copied, bool to_page_end,
-				   struct folio **writethrough_cache);
-int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			       struct folio *writethrough_cache);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *folio, size_t copied, bool to_page_end,
+			       struct folio **writethrough_cache);
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			   struct folio *writethrough_cache);
 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
 
 /*

fs/netfs/write_collect.c

Lines changed: 4 additions & 4 deletions
@@ -709,7 +709,7 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
 }
 
 /**
- * new_netfs_write_subrequest_terminated - Note the termination of a write operation.
+ * netfs_write_subrequest_terminated - Note the termination of a write operation.
  * @_op: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
@@ -731,8 +731,8 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
 * Note that %_op is a void* so that the function can be passed to
 * kiocb::term_func without the need for a casting wrapper.
 */
-void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
-					   bool was_async)
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+				       bool was_async)
 {
 	struct netfs_io_subrequest *subreq = _op;
 	struct netfs_io_request *wreq = subreq->rreq;
@@ -800,4 +800,4 @@ void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_err
 
 	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
 }
-EXPORT_SYMBOL(new_netfs_write_subrequest_terminated);
+EXPORT_SYMBOL(netfs_write_subrequest_terminated);
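As the kernel-doc above says, the second argument is either the amount of data transferred or a negative error code, and was_async flags calls made outside the submitting task's context. A hedged sketch of an asynchronous completion handing its result to the now un-prefixed collector; struct myfs_call and the myfs naming are hypothetical, not part of this patch:

struct myfs_call {			/* hypothetical transport call record */
	struct netfs_io_subrequest *subreq;
	ssize_t nbytes;
	int error;
};

static void myfs_write_done(struct myfs_call *call)
{
	/* Bytes written on success, negative errno on failure; was_async is
	 * true because this runs from the transport's completion context.
	 */
	netfs_write_subrequest_terminated(call->subreq,
					  call->error ? call->error : call->nbytes,
					  true);
}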

fs/netfs/write_issue.c

Lines changed: 9 additions & 9 deletions
@@ -494,8 +494,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 /*
 * Write some of the pending data back to the server
 */
-int new_netfs_writepages(struct address_space *mapping,
-			 struct writeback_control *wbc)
+int netfs_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc)
 {
 	struct netfs_inode *ictx = netfs_inode(mapping->host);
 	struct netfs_io_request *wreq = NULL;
@@ -556,12 +556,12 @@ int new_netfs_writepages(struct address_space *mapping,
 	_leave(" = %d", error);
 	return error;
 }
-EXPORT_SYMBOL(new_netfs_writepages);
+EXPORT_SYMBOL(netfs_writepages);
 
 /*
 * Begin a write operation for writing through the pagecache.
 */
-struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len)
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
 {
 	struct netfs_io_request *wreq = NULL;
 	struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));
@@ -586,9 +586,9 @@ struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t
 * to the request. If we've added more than wsize then we need to create a new
 * subrequest.
 */
-int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-				   struct folio *folio, size_t copied, bool to_page_end,
-				   struct folio **writethrough_cache)
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *folio, size_t copied, bool to_page_end,
+			       struct folio **writethrough_cache)
 {
 	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
 	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
@@ -618,8 +618,8 @@ int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeba
 /*
 * End a write operation used when writing through the pagecache.
 */
-int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			       struct folio *writethrough_cache)
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			   struct folio *writethrough_cache)
 {
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
 	int ret;