Skip to content

Commit a49c26f

Browse files
uran0sH authored and mstsirkin committed
virtio: Make vring_new_virtqueue support packed vring
It is used for testing in tools/virtio/vringh_test.c. If vring_new_virtqueue supports packed vring, we can add support for packed vring to vringh and test it. Signed-off-by: Wenyu Huang <huangwenyu1998@gmail.com> Message-Id: <20241013033233.65026-1-huangwenyu1998@gmail.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com>
1 parent 76f0d87 commit a49c26f

File tree

1 file changed

+121
-106
lines changed

1 file changed

+121
-106
lines changed

drivers/virtio/virtio_ring.c

Lines changed: 121 additions & 106 deletions
Original file line number · Diff line number · Diff line change
@@ -223,15 +223,6 @@ struct vring_virtqueue {
223223
#endif
224224
};
225225

226-
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
227-
struct vring_virtqueue_split *vring_split,
228-
struct virtio_device *vdev,
229-
bool weak_barriers,
230-
bool context,
231-
bool (*notify)(struct virtqueue *),
232-
void (*callback)(struct virtqueue *),
233-
const char *name,
234-
struct device *dma_dev);
235226
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
236227
static void vring_free(struct virtqueue *_vq);
237228

@@ -1138,6 +1129,66 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
11381129
return 0;
11391130
}
11401131

1132+
static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
1133+
struct vring_virtqueue_split *vring_split,
1134+
struct virtio_device *vdev,
1135+
bool weak_barriers,
1136+
bool context,
1137+
bool (*notify)(struct virtqueue *),
1138+
void (*callback)(struct virtqueue *),
1139+
const char *name,
1140+
struct device *dma_dev)
1141+
{
1142+
struct vring_virtqueue *vq;
1143+
int err;
1144+
1145+
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1146+
if (!vq)
1147+
return NULL;
1148+
1149+
vq->packed_ring = false;
1150+
vq->vq.callback = callback;
1151+
vq->vq.vdev = vdev;
1152+
vq->vq.name = name;
1153+
vq->vq.index = index;
1154+
vq->vq.reset = false;
1155+
vq->we_own_ring = false;
1156+
vq->notify = notify;
1157+
vq->weak_barriers = weak_barriers;
1158+
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
1159+
vq->broken = true;
1160+
#else
1161+
vq->broken = false;
1162+
#endif
1163+
vq->dma_dev = dma_dev;
1164+
vq->use_dma_api = vring_use_dma_api(vdev);
1165+
vq->premapped = false;
1166+
vq->do_unmap = vq->use_dma_api;
1167+
1168+
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1169+
!context;
1170+
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1171+
1172+
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1173+
vq->weak_barriers = false;
1174+
1175+
err = vring_alloc_state_extra_split(vring_split);
1176+
if (err) {
1177+
kfree(vq);
1178+
return NULL;
1179+
}
1180+
1181+
virtqueue_vring_init_split(vring_split, vq);
1182+
1183+
virtqueue_init(vq, vring_split->vring.num);
1184+
virtqueue_vring_attach_split(vq, vring_split);
1185+
1186+
spin_lock(&vdev->vqs_list_lock);
1187+
list_add_tail(&vq->vq.list, &vdev->vqs);
1188+
spin_unlock(&vdev->vqs_list_lock);
1189+
return &vq->vq;
1190+
}
1191+
11411192
static struct virtqueue *vring_create_virtqueue_split(
11421193
unsigned int index,
11431194
unsigned int num,
@@ -1160,7 +1211,7 @@ static struct virtqueue *vring_create_virtqueue_split(
11601211
if (err)
11611212
return NULL;
11621213

1163-
vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
1214+
vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
11641215
context, notify, callback, name, dma_dev);
11651216
if (!vq) {
11661217
vring_free_split(&vring_split, vdev, dma_dev);
@@ -2050,36 +2101,29 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
20502101
virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
20512102
}
20522103

2053-
static struct virtqueue *vring_create_virtqueue_packed(
2054-
unsigned int index,
2055-
unsigned int num,
2056-
unsigned int vring_align,
2057-
struct virtio_device *vdev,
2058-
bool weak_barriers,
2059-
bool may_reduce_num,
2060-
bool context,
2061-
bool (*notify)(struct virtqueue *),
2062-
void (*callback)(struct virtqueue *),
2063-
const char *name,
2064-
struct device *dma_dev)
2104+
static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
2105+
struct vring_virtqueue_packed *vring_packed,
2106+
struct virtio_device *vdev,
2107+
bool weak_barriers,
2108+
bool context,
2109+
bool (*notify)(struct virtqueue *),
2110+
void (*callback)(struct virtqueue *),
2111+
const char *name,
2112+
struct device *dma_dev)
20652113
{
2066-
struct vring_virtqueue_packed vring_packed = {};
20672114
struct vring_virtqueue *vq;
20682115
int err;
20692116

2070-
if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
2071-
goto err_ring;
2072-
20732117
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
20742118
if (!vq)
2075-
goto err_vq;
2119+
return NULL;
20762120

20772121
vq->vq.callback = callback;
20782122
vq->vq.vdev = vdev;
20792123
vq->vq.name = name;
20802124
vq->vq.index = index;
20812125
vq->vq.reset = false;
2082-
vq->we_own_ring = true;
2126+
vq->we_own_ring = false;
20832127
vq->notify = notify;
20842128
vq->weak_barriers = weak_barriers;
20852129
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
@@ -2100,26 +2144,52 @@ static struct virtqueue *vring_create_virtqueue_packed(
21002144
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
21012145
vq->weak_barriers = false;
21022146

2103-
err = vring_alloc_state_extra_packed(&vring_packed);
2104-
if (err)
2105-
goto err_state_extra;
2147+
err = vring_alloc_state_extra_packed(vring_packed);
2148+
if (err) {
2149+
kfree(vq);
2150+
return NULL;
2151+
}
21062152

2107-
virtqueue_vring_init_packed(&vring_packed, !!callback);
2153+
virtqueue_vring_init_packed(vring_packed, !!callback);
21082154

2109-
virtqueue_init(vq, num);
2110-
virtqueue_vring_attach_packed(vq, &vring_packed);
2155+
virtqueue_init(vq, vring_packed->vring.num);
2156+
virtqueue_vring_attach_packed(vq, vring_packed);
21112157

21122158
spin_lock(&vdev->vqs_list_lock);
21132159
list_add_tail(&vq->vq.list, &vdev->vqs);
21142160
spin_unlock(&vdev->vqs_list_lock);
21152161
return &vq->vq;
2162+
}
21162163

2117-
err_state_extra:
2118-
kfree(vq);
2119-
err_vq:
2120-
vring_free_packed(&vring_packed, vdev, dma_dev);
2121-
err_ring:
2122-
return NULL;
2164+
static struct virtqueue *vring_create_virtqueue_packed(
2165+
unsigned int index,
2166+
unsigned int num,
2167+
unsigned int vring_align,
2168+
struct virtio_device *vdev,
2169+
bool weak_barriers,
2170+
bool may_reduce_num,
2171+
bool context,
2172+
bool (*notify)(struct virtqueue *),
2173+
void (*callback)(struct virtqueue *),
2174+
const char *name,
2175+
struct device *dma_dev)
2176+
{
2177+
struct vring_virtqueue_packed vring_packed = {};
2178+
struct virtqueue *vq;
2179+
2180+
if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
2181+
return NULL;
2182+
2183+
vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
2184+
context, notify, callback, name, dma_dev);
2185+
if (!vq) {
2186+
vring_free_packed(&vring_packed, vdev, dma_dev);
2187+
return NULL;
2188+
}
2189+
2190+
to_vvq(vq)->we_own_ring = true;
2191+
2192+
return vq;
21232193
}
21242194

21252195
static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
@@ -2598,69 +2668,7 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
25982668
}
25992669
EXPORT_SYMBOL_GPL(vring_interrupt);
26002670

2601-
/* Only available for split ring */
2602-
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
2603-
struct vring_virtqueue_split *vring_split,
2604-
struct virtio_device *vdev,
2605-
bool weak_barriers,
2606-
bool context,
2607-
bool (*notify)(struct virtqueue *),
2608-
void (*callback)(struct virtqueue *),
2609-
const char *name,
2610-
struct device *dma_dev)
2611-
{
2612-
struct vring_virtqueue *vq;
2613-
int err;
2614-
2615-
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2616-
return NULL;
2617-
2618-
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
2619-
if (!vq)
2620-
return NULL;
2621-
2622-
vq->packed_ring = false;
2623-
vq->vq.callback = callback;
2624-
vq->vq.vdev = vdev;
2625-
vq->vq.name = name;
2626-
vq->vq.index = index;
2627-
vq->vq.reset = false;
2628-
vq->we_own_ring = false;
2629-
vq->notify = notify;
2630-
vq->weak_barriers = weak_barriers;
2631-
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
2632-
vq->broken = true;
2633-
#else
2634-
vq->broken = false;
2635-
#endif
2636-
vq->dma_dev = dma_dev;
2637-
vq->use_dma_api = vring_use_dma_api(vdev);
2638-
vq->premapped = false;
2639-
vq->do_unmap = vq->use_dma_api;
26402671

2641-
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2642-
!context;
2643-
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2644-
2645-
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2646-
vq->weak_barriers = false;
2647-
2648-
err = vring_alloc_state_extra_split(vring_split);
2649-
if (err) {
2650-
kfree(vq);
2651-
return NULL;
2652-
}
2653-
2654-
virtqueue_vring_init_split(vring_split, vq);
2655-
2656-
virtqueue_init(vq, vring_split->vring.num);
2657-
virtqueue_vring_attach_split(vq, vring_split);
2658-
2659-
spin_lock(&vdev->vqs_list_lock);
2660-
list_add_tail(&vq->vq.list, &vdev->vqs);
2661-
spin_unlock(&vdev->vqs_list_lock);
2662-
return &vq->vq;
2663-
}
26642672

26652673
struct virtqueue *vring_create_virtqueue(
26662674
unsigned int index,
@@ -2840,7 +2848,6 @@ int virtqueue_reset(struct virtqueue *_vq,
28402848
}
28412849
EXPORT_SYMBOL_GPL(virtqueue_reset);
28422850

2843-
/* Only available for split ring */
28442851
struct virtqueue *vring_new_virtqueue(unsigned int index,
28452852
unsigned int num,
28462853
unsigned int vring_align,
@@ -2854,11 +2861,19 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
28542861
{
28552862
struct vring_virtqueue_split vring_split = {};
28562863

2857-
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2858-
return NULL;
2864+
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2865+
struct vring_virtqueue_packed vring_packed = {};
2866+
2867+
vring_packed.vring.num = num;
2868+
vring_packed.vring.desc = pages;
2869+
return __vring_new_virtqueue_packed(index, &vring_packed,
2870+
vdev, weak_barriers,
2871+
context, notify, callback,
2872+
name, vdev->dev.parent);
2873+
}
28592874

28602875
vring_init(&vring_split.vring, num, pages, vring_align);
2861-
return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
2876+
return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
28622877
context, notify, callback, name,
28632878
vdev->dev.parent);
28642879
}

0 commit comments

Comments
 (0)