@@ -46,12 +46,26 @@ bool vp_notify(struct virtqueue *vq)
 	return true;
 }
 
+/* Notify all slow path virtqueues on an interrupt. */
+static void vp_vring_slow_path_interrupt(int irq,
+					 struct virtio_pci_device *vp_dev)
+{
+	struct virtio_pci_vq_info *info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
+		vring_interrupt(irq, info->vq);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
+
 /* Handle a configuration change: Tell driver if it wants to know. */
 static irqreturn_t vp_config_changed(int irq, void *opaque)
 {
	struct virtio_pci_device *vp_dev = opaque;
 
 	virtio_config_changed(&vp_dev->vdev);
+	vp_vring_slow_path_interrupt(irq, vp_dev);
 	return IRQ_HANDLED;
 }
 
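This first hunk makes the config-change interrupt double as the interrupt for every virtqueue parked on the new slow_virtqueues list: the handler walks the list under vp_dev->lock and forwards the irq to vring_interrupt() for each entry. The fan-out pattern in isolation, as a minimal userspace sketch (not kernel code; every name here is a hypothetical stand-in):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for virtio_pci_vq_info and virtio_pci_device. */
struct fake_vq_info {
	const char *name;
	struct fake_vq_info *next;
};

struct fake_dev {
	pthread_mutex_t lock;		/* plays the role of vp_dev->lock */
	struct fake_vq_info *slow_list;	/* plays the role of slow_virtqueues */
};

/* Fan a single interrupt out to every queue on the slow list. */
static void fake_slow_path_interrupt(int irq, struct fake_dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	for (struct fake_vq_info *info = dev->slow_list; info; info = info->next)
		printf("irq %d -> vring_interrupt(%s)\n", irq, info->name);
	pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
	struct fake_vq_info adminq = { "adminq", NULL };
	struct fake_dev dev = { PTHREAD_MUTEX_INITIALIZER, &adminq };

	fake_slow_path_interrupt(0, &dev);
	return 0;
}

The real handler takes spin_lock_irqsave() rather than a mutex because it runs in hard-irq context, where sleeping locks are not allowed.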
@@ -174,6 +188,11 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	return err;
 }
 
+static bool vp_is_slow_path_vector(u16 msix_vec)
+{
+	return msix_vec == VP_MSIX_CONFIG_VECTOR;
+}
+
 static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
@@ -197,7 +216,10 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
 	info->vq = vq;
 	if (callback) {
 		spin_lock_irqsave(&vp_dev->lock, flags);
-		list_add(&info->node, &vp_dev->virtqueues);
+		if (!vp_is_slow_path_vector(msix_vec))
+			list_add(&info->node, &vp_dev->virtqueues);
+		else
+			list_add(&info->node, &vp_dev->slow_virtqueues);
 		spin_unlock_irqrestore(&vp_dev->lock, flags);
 	} else {
 		INIT_LIST_HEAD(&info->node);
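vp_setup_vq() now files each callback-bearing virtqueue onto one of two lists keyed on its vector: a vq that shares the config vector lands on slow_virtqueues, everything else stays on virtqueues. A self-contained restatement of the classification (illustrative only; the constant's value mirrors virtio_pci_common.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VP_MSIX_CONFIG_VECTOR	0	/* value taken from virtio_pci_common.h */

/* A vq is "slow path" exactly when it rides the config vector. */
static bool is_slow_path_vector(uint16_t msix_vec)
{
	return msix_vec == VP_MSIX_CONFIG_VECTOR;
}

/* Pick the destination list the way the hunk above does. */
static const char *pick_list(uint16_t msix_vec)
{
	return is_slow_path_vector(msix_vec) ? "slow_virtqueues" : "virtqueues";
}

int main(void)
{
	printf("vector 0 -> %s, vector 3 -> %s\n", pick_list(0), pick_list(3));
	return 0;
}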
@@ -245,7 +267,8 @@ void vp_del_vqs(struct virtio_device *vdev)
 		if (vp_dev->per_vq_vectors) {
 			int v = vp_dev->vqs[vq->index]->msix_vector;
 
-			if (v != VIRTIO_MSI_NO_VECTOR) {
+			if (v != VIRTIO_MSI_NO_VECTOR &&
+			    !vp_is_slow_path_vector(v)) {
 				int irq = pci_irq_vector(vp_dev->pci_dev, v);
 
 				irq_update_affinity_hint(irq, NULL);
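The guard above keeps vp_del_vqs() from releasing an interrupt a slow-path vq never owned: a vq riding the config vector has no dedicated irq to de-affinitize or free, since that interrupt is torn down once along with the other always-allocated vectors. The condition reduces to a small predicate (illustrative restatement; constants mirrored from the driver header and the virtio spec):

#include <stdbool.h>
#include <stdint.h>

#define VIRTIO_MSI_NO_VECTOR	0xffff	/* per the virtio spec */
#define VP_MSIX_CONFIG_VECTOR	0	/* the shared slow-path vector */

/* A vq owns a dedicated irq worth freeing here only if it has a
 * vector at all and that vector is not the shared config vector. */
static bool owns_dedicated_irq(uint16_t v)
{
	return v != VIRTIO_MSI_NO_VECTOR && v != VP_MSIX_CONFIG_VECTOR;
}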
@@ -289,13 +312,14 @@ void vp_del_vqs(struct virtio_device *vdev)
 
 enum vp_vq_vector_policy {
 	VP_VQ_VECTOR_POLICY_EACH,
+	VP_VQ_VECTOR_POLICY_SHARED_SLOW,
 	VP_VQ_VECTOR_POLICY_SHARED,
 };
 
 static struct virtqueue *
 vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
		    vq_callback_t *callback, const char *name, bool ctx,
-		    int *allocated_vectors,
+		    bool slow_path, int *allocated_vectors,
		    enum vp_vq_vector_policy vector_policy)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -305,16 +329,22 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
 
 	if (!callback)
 		msix_vec = VIRTIO_MSI_NO_VECTOR;
-	else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+	else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
+		 (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
+		  !slow_path))
 		msix_vec = (*allocated_vectors)++;
+	else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
+		 slow_path)
+		msix_vec = VP_MSIX_CONFIG_VECTOR;
 	else
 		msix_vec = VP_MSIX_VQ_VECTOR;
 	vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec);
 	if (IS_ERR(vq))
 		return vq;
 
 	if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
-	    msix_vec == VIRTIO_MSI_NO_VECTOR)
+	    msix_vec == VIRTIO_MSI_NO_VECTOR ||
+	    vp_is_slow_path_vector(msix_vec))
 		return vq;
 
 	/* allocate per-vq irq if available and necessary */
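The vector choice above compresses into a small decision function. A hedged, standalone restatement follows; the enum and constants are reproduced purely for illustration (VIRTIO_MSI_NO_VECTOR is 0xffff per the virtio spec, and VP_MSIX_CONFIG_VECTOR/VP_MSIX_VQ_VECTOR are 0 and 1 in the driver's header):

#include <stdint.h>
#include <stdio.h>

enum vp_vq_vector_policy {
	VP_VQ_VECTOR_POLICY_EACH,
	VP_VQ_VECTOR_POLICY_SHARED_SLOW,
	VP_VQ_VECTOR_POLICY_SHARED,
};

#define VIRTIO_MSI_NO_VECTOR	0xffff	/* per the virtio spec */
#define VP_MSIX_CONFIG_VECTOR	0
#define VP_MSIX_VQ_VECTOR	1

/* Standalone restatement of the msix_vec choice made in the hunk above. */
static uint16_t pick_msix_vec(int has_callback, enum vp_vq_vector_policy policy,
			      int slow_path, int *allocated_vectors)
{
	if (!has_callback)
		return VIRTIO_MSI_NO_VECTOR;
	if (policy == VP_VQ_VECTOR_POLICY_EACH ||
	    (policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW && !slow_path))
		return (*allocated_vectors)++;	/* dedicated vector */
	if (policy != VP_VQ_VECTOR_POLICY_EACH && slow_path)
		return VP_MSIX_CONFIG_VECTOR;	/* ride the config vector */
	return VP_MSIX_VQ_VECTOR;		/* one vector shared by all vqs */
}

int main(void)
{
	int allocated = 1;	/* pretend vector 0 already went to config */
	unsigned fast, slow;

	fast = pick_msix_vec(1, VP_VQ_VECTOR_POLICY_SHARED_SLOW, 0, &allocated);
	slow = pick_msix_vec(1, VP_VQ_VECTOR_POLICY_SHARED_SLOW, 1, &allocated);
	printf("fast vq -> vector %u, slow vq -> vector %u\n", fast, slow);
	return 0;
}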
@@ -374,7 +404,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
 			continue;
 		}
 		vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
-					     vqi->name, vqi->ctx,
+					     vqi->name, vqi->ctx, false,
					     &allocated_vectors, vector_policy);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
@@ -440,6 +470,13 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			       VP_VQ_VECTOR_POLICY_EACH, desc);
 	if (!err)
 		return 0;
+	/* Fallback: MSI-X with one shared vector for config and
+	 * slow path queues, one vector per queue for the rest.
+	 */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+			       VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
+	if (!err)
+		return 0;
 	/* Fallback: MSI-X with one vector for config, one shared for queues. */
 	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_SHARED, desc);
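With the new policy slotted in, vp_find_vqs() works down a ladder of three MSI-X layouts in order of decreasing vector appetite: EACH (config plus one vector per vq), then SHARED_SLOW (fast vqs keep dedicated vectors while config and the slow-path vqs share vector 0), then SHARED (one vector for config, one shared by all vqs), before whatever non-MSI-X fallbacks follow later in the function. A compact sketch of that ladder (try_policy() is a hypothetical stand-in for vp_find_vqs_msix()):

#include <stdio.h>

/* Illustration only: the order in which vp_find_vqs() now attempts
 * vector policies, stopping at the first that succeeds. */
enum policy { POLICY_EACH, POLICY_SHARED_SLOW, POLICY_SHARED };

/* Hypothetical stand-in for vp_find_vqs_msix(); pretend the device only
 * granted enough MSI-X vectors for the fully shared layout. */
static int try_policy(enum policy p)
{
	return p == POLICY_SHARED ? 0 : -1;
}

int main(void)
{
	static const char *names[] = { "EACH", "SHARED_SLOW", "SHARED" };

	for (enum policy p = POLICY_EACH; p <= POLICY_SHARED; p++) {
		if (!try_policy(p)) {
			printf("using policy %s\n", names[p]);
			return 0;
		}
	}
	return 1;	/* would fall through to the remaining fallbacks */
}

Trying SHARED_SLOW before SHARED preserves per-queue vectors, and hence per-queue affinity, for the data-path vqs for as long as the vector budget allows.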
@@ -493,7 +530,8 @@ const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 
 	if (!vp_dev->per_vq_vectors ||
-	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
+	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
+	    vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
 		return NULL;
 
 	return pci_irq_get_affinity(vp_dev->pci_dev,
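The same reasoning ends with vp_get_vq_affinity(): only a vq with its own dedicated vector has a per-queue affinity mask to report, so slow-path vqs now get NULL just like vqs with no vector at all. Sketched as a predicate (illustrative only; constants as before):

#include <stdbool.h>
#include <stdint.h>

#define VIRTIO_MSI_NO_VECTOR	0xffff
#define VP_MSIX_CONFIG_VECTOR	0

/* Affinity is only meaningful for a dedicated per-vq vector. */
static bool has_per_vq_affinity(bool per_vq_vectors, uint16_t msix_vec)
{
	return per_vq_vectors &&
	       msix_vec != VIRTIO_MSI_NO_VECTOR &&
	       msix_vec != VP_MSIX_CONFIG_VECTOR;
}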
@@ -601,6 +639,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->pci_dev = pci_dev;
 	INIT_LIST_HEAD(&vp_dev->virtqueues);
+	INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
 	spin_lock_init(&vp_dev->lock);
 
 	/* enable the device */