
Commit 4f2d34b (2 parents: 681ce86 + 529395d)

Merge tag 'fuse-update-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

Pull fuse updates from Miklos Szeredi:

 - Add fs-verity support (Richard Fung)
 - Add multi-queue support to virtio-fs (Peter-Jan Gootzen)
 - Fix a bug in NOTIFY_RESEND handling (Hou Tao)
 - page -> folio cleanup (Matthew Wilcox)

* tag 'fuse-update-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
  virtio-fs: add multi-queue support
  virtio-fs: limit number of request queues
  fuse: clear FR_SENT when re-adding requests into pending list
  fuse: set FR_PENDING atomically in fuse_resend()
  fuse: Add initial support for fs-verity
  fuse: Convert fuse_readpages_end() to use folio_end_read()

4 files changed: 130 additions, 16 deletions

fs/fuse/dev.c

Lines changed: 2 additions & 1 deletion
@@ -1813,7 +1813,8 @@ static void fuse_resend(struct fuse_conn *fc)
 	spin_unlock(&fc->lock);
 
 	list_for_each_entry_safe(req, next, &to_queue, list) {
-		__set_bit(FR_PENDING, &req->flags);
+		set_bit(FR_PENDING, &req->flags);
+		clear_bit(FR_SENT, &req->flags);
 		/* mark the request as resend request */
 		req->in.h.unique |= FUSE_UNIQUE_RESEND;
 	}
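
Note on the fix: once fuse_resend() moves these requests back toward the pending list, other CPUs can perform atomic bitops on the same req->flags word concurrently, and the non-atomic read-modify-write of __set_bit() could lose a racing update; set_bit() is the atomic variant. Clearing FR_SENT restores the invariant that a request waiting on the pending list has not yet been read by the daemon. An annotated sketch of the fixed loop (comments are editorial, not from the patch):

	list_for_each_entry_safe(req, next, &to_queue, list) {
		/* atomic RMW: safe against concurrent bitops on req->flags */
		set_bit(FR_PENDING, &req->flags);
		/* the request was already sent once; clear FR_SENT so it
		 * looks freshly pending to readers of the pending list */
		clear_bit(FR_SENT, &req->flags);
		/* mark the request as resend request */
		req->in.h.unique |= FUSE_UNIQUE_RESEND;
	}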

fs/fuse/file.c

Lines changed: 3 additions & 7 deletions
@@ -935,14 +935,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
 	}
 
 	for (i = 0; i < ap->num_pages; i++) {
-		struct page *page = ap->pages[i];
+		struct folio *folio = page_folio(ap->pages[i]);
 
-		if (!err)
-			SetPageUptodate(page);
-		else
-			SetPageError(page);
-		unlock_page(page);
-		put_page(page);
+		folio_end_read(folio, !err);
+		folio_put(folio);
 	}
 	if (ia->ff)
 		fuse_file_put(ia->ff, false);
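
For readers following the folio conversion: folio_end_read(folio, success) marks the folio uptodate when success is true and unlocks it, doing both flag updates in one atomic operation, which is why four separate page calls collapse into one. A rough hand-written equivalence, not the real implementation:

	/* approximately what folio_end_read(folio, !err) does */
	if (!err)
		folio_mark_uptodate(folio);	/* was SetPageUptodate(page) */
	folio_unlock(folio);			/* was unlock_page(page) */
	/* SetPageError(page) has no replacement: a read error is now
	 * signalled by the folio simply not being uptodate */

put_page(page) becomes folio_put(folio), dropping the reference taken when the read was set up.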

fs/fuse/ioctl.c

Lines changed: 60 additions & 0 deletions
@@ -8,6 +8,7 @@
 #include <linux/uio.h>
 #include <linux/compat.h>
 #include <linux/fileattr.h>
+#include <linux/fsverity.h>
 
 static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args,
 			       struct fuse_ioctl_out *outarg)
@@ -117,6 +118,53 @@ static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
 	return 0;
 }
 
+/* For fs-verity, determine iov lengths from input */
+static int fuse_setup_measure_verity(unsigned long arg, struct iovec *iov)
+{
+	__u16 digest_size;
+	struct fsverity_digest __user *uarg = (void __user *)arg;
+
+	if (copy_from_user(&digest_size, &uarg->digest_size, sizeof(digest_size)))
+		return -EFAULT;
+
+	if (digest_size > SIZE_MAX - sizeof(struct fsverity_digest))
+		return -EINVAL;
+
+	iov->iov_len = sizeof(struct fsverity_digest) + digest_size;
+
+	return 0;
+}
+
+static int fuse_setup_enable_verity(unsigned long arg, struct iovec *iov,
+				    unsigned int *in_iovs)
+{
+	struct fsverity_enable_arg enable;
+	struct fsverity_enable_arg __user *uarg = (void __user *)arg;
+	const __u32 max_buffer_len = FUSE_MAX_MAX_PAGES * PAGE_SIZE;
+
+	if (copy_from_user(&enable, uarg, sizeof(enable)))
+		return -EFAULT;
+
+	if (enable.salt_size > max_buffer_len || enable.sig_size > max_buffer_len)
+		return -ENOMEM;
+
+	if (enable.salt_size > 0) {
+		iov++;
+		(*in_iovs)++;
+
+		iov->iov_base = u64_to_user_ptr(enable.salt_ptr);
+		iov->iov_len = enable.salt_size;
+	}
+
+	if (enable.sig_size > 0) {
+		iov++;
+		(*in_iovs)++;
+
+		iov->iov_base = u64_to_user_ptr(enable.sig_ptr);
+		iov->iov_len = enable.sig_size;
+	}
+	return 0;
+}
+
 /*
  * For ioctls, there is no generic way to determine how much memory
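
Context for the two helpers: fuse_do_ioctl() forwards ioctls to the server as opaque input/output buffers, so for these fs-verity ioctls the kernel must compute up front how many bytes each userspace pointer covers. A hedged illustration of the input iovec layout built for FS_IOC_ENABLE_VERITY when both a salt and a signature are supplied (iov[0] is filled earlier by fuse_do_ioctl() from the ioctl argument itself):

	in_iovs = 3
	iov[0]: base = arg,             len = _IOC_SIZE(cmd) == sizeof(struct fsverity_enable_arg)
	iov[1]: base = enable.salt_ptr, len = enable.salt_size
	iov[2]: base = enable.sig_ptr,  len = enable.sig_size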
@@ -227,6 +275,18 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 			out_iov = iov;
 			out_iovs = 1;
 		}
+
+		err = 0;
+		switch (cmd) {
+		case FS_IOC_MEASURE_VERITY:
+			err = fuse_setup_measure_verity(arg, iov);
+			break;
+		case FS_IOC_ENABLE_VERITY:
+			err = fuse_setup_enable_verity(arg, iov, &in_iovs);
+			break;
+		}
+		if (err)
+			goto out;
 	}
 
 retry:
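
The UAPI here is the same one ext4 and f2fs implement, so existing fs-verity tooling should work unchanged once a FUSE server supports these ioctls. A minimal userspace sketch (hypothetical path, error handling trimmed) querying a file's digest via FS_IOC_MEASURE_VERITY:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/fsverity.h>

	int main(void)
	{
		struct fsverity_digest *d;
		int fd = open("/mnt/fuse/file", O_RDONLY);	/* hypothetical path */

		/* 64 bytes is enough for SHA-512, the largest digest fs-verity uses */
		d = malloc(sizeof(*d) + 64);
		d->digest_size = 64;	/* in: buffer size; out: actual digest size */

		if (fd < 0 || ioctl(fd, FS_IOC_MEASURE_VERITY, d) != 0)
			perror("FS_IOC_MEASURE_VERITY");
		else
			printf("algorithm %u, digest %u bytes\n",
			       d->digest_algorithm, d->digest_size);
		return 0;
	}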

fs/fuse/virtio_fs.c

Lines changed: 65 additions & 8 deletions
@@ -7,6 +7,8 @@
 #include <linux/fs.h>
 #include <linux/dax.h>
 #include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/group_cpus.h>
 #include <linux/pfn_t.h>
 #include <linux/memremap.h>
 #include <linux/module.h>
@@ -67,6 +69,8 @@ struct virtio_fs {
 	unsigned int num_request_queues; /* number of request queues */
 	struct dax_device *dax_dev;
 
+	unsigned int *mq_map; /* index = cpu id, value = request vq id */
+
 	/* DAX memory window where file contents are mapped */
 	void *window_kaddr;
 	phys_addr_t window_phys_addr;
@@ -185,6 +189,7 @@ static void virtio_fs_ktype_release(struct kobject *kobj)
 {
 	struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);
 
+	kfree(vfs->mq_map);
 	kfree(vfs->vqs);
 	kfree(vfs);
 }
@@ -706,6 +711,44 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
 	}
 }
 
+static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
+{
+	const struct cpumask *mask, *masks;
+	unsigned int q, cpu;
+
+	/* First attempt to map using existing transport layer affinities,
+	 * e.g. PCIe MSI-X
+	 */
+	if (!vdev->config->get_vq_affinity)
+		goto fallback;
+
+	for (q = 0; q < fs->num_request_queues; q++) {
+		mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
+		if (!mask)
+			goto fallback;
+
+		for_each_cpu(cpu, mask)
+			fs->mq_map[cpu] = q;
+	}
+
+	return;
+fallback:
+	/* Attempt to map evenly in groups over the CPUs */
+	masks = group_cpus_evenly(fs->num_request_queues);
+	/* If even this fails, default to all CPUs using queue zero */
+	if (!masks) {
+		for_each_possible_cpu(cpu)
+			fs->mq_map[cpu] = 0;
+		return;
+	}
+
+	for (q = 0; q < fs->num_request_queues; q++) {
+		for_each_cpu(cpu, &masks[q])
+			fs->mq_map[cpu] = q;
+	}
+	kfree(masks);
+}
+
 /* Virtqueue interrupt handler */
 static void virtio_fs_vq_done(struct virtqueue *vq)
 {
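
A hedged illustration of the fallback path: on an 8-CPU machine with 3 request queues, group_cpus_evenly(3) splits the CPUs into three roughly equal, topology-aware groups, for example:

	masks[0] = CPUs 0-2  ->  mq_map[0..2] = 0
	masks[1] = CPUs 3-5  ->  mq_map[3..5] = 1
	masks[2] = CPUs 6-7  ->  mq_map[6..7] = 2

The exact split follows CPU and NUMA topology, so it may differ on real machines; the invariant is that every CPU ends up mapped to exactly one request queue.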
@@ -742,6 +785,11 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 {
 	struct virtqueue **vqs;
 	vq_callback_t **callbacks;
+	/* Specify pre_vectors to ensure that the queues before the
+	 * request queues (e.g. hiprio) don't claim any of the CPUs in
+	 * the multi-queue mapping and interrupt affinities
+	 */
+	struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
 	const char **names;
 	unsigned int i;
 	int ret = 0;
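
To make the pre_vectors comment concrete, the resulting interrupt-vector layout is roughly the following, assuming the transport grants one vector per queue (it may not):

	vector 0          -> hiprio queue (VQ_HIPRIO), excluded from affinity spreading
	vectors 1..nvqs-1 -> request queues, spread across CPUs by the IRQ core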
@@ -751,6 +799,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 	if (fs->num_request_queues == 0)
 		return -EINVAL;
 
+	/* Truncate nr of request queues to nr_cpu_ids */
+	fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
+				       nr_cpu_ids);
 	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
 	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
 	if (!fs->vqs)
@@ -760,7 +811,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
 				  GFP_KERNEL);
 	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
-	if (!vqs || !callbacks || !names) {
+	fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
+				  dev_to_node(&vdev->dev));
+	if (!vqs || !callbacks || !names || !fs->mq_map) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -780,7 +833,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 		names[i] = fs->vqs[i].name;
 	}
 
-	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
+	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc);
 	if (ret < 0)
 		goto out;
 
@@ -792,8 +845,10 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 	kfree(names);
 	kfree(callbacks);
 	kfree(vqs);
-	if (ret)
+	if (ret) {
 		kfree(fs->vqs);
+		kfree(fs->mq_map);
+	}
 	return ret;
 }
 
@@ -939,7 +994,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
 	if (ret < 0)
 		goto out;
 
-	/* TODO vq affinity */
+	virtio_fs_map_queues(vdev, fs);
 
 	ret = virtio_fs_setup_dax(vdev, fs);
 	if (ret < 0)
@@ -1288,7 +1343,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
 __releases(fiq->lock)
 {
-	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
+	unsigned int queue_id;
 	struct virtio_fs *fs;
 	struct fuse_req *req;
 	struct virtio_fs_vq *fsvq;
@@ -1302,11 +1357,13 @@ __releases(fiq->lock)
 	spin_unlock(&fiq->lock);
 
 	fs = fiq->priv;
+	queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
 
-	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
-		 __func__, req->in.h.opcode, req->in.h.unique,
+	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
+		 __func__, req->in.h.opcode, req->in.h.unique,
 		 req->in.h.nodeid, req->in.h.len,
-		 fuse_len_args(req->args->out_numargs, req->args->out_args));
+		 fuse_len_args(req->args->out_numargs, req->args->out_args),
+		 queue_id);
 
 	fsvq = &fs->vqs[queue_id];
 	ret = virtio_fs_enqueue_req(fsvq, req, false);
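
Taken together, request submission is now per-CPU with no extra locking or hashing on the hot path. A hedged sketch of the selection written as a helper (hypothetical name; the patch open-codes it as shown above):

	/* hypothetical helper, equivalent to the open-coded selection above */
	static inline struct virtio_fs_vq *virtio_fs_this_cpu_vq(struct virtio_fs *fs)
	{
		/* mq_map[] is filled once at probe time by virtio_fs_map_queues()
		 * and immutable afterwards, so indexing by the submitting CPU
		 * needs no locking */
		return &fs->vqs[VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]];
	}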
