@@ -223,15 +223,6 @@ struct vring_virtqueue {
 #endif
 };
 
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
-					       struct vring_virtqueue_split *vring_split,
-					       struct virtio_device *vdev,
-					       bool weak_barriers,
-					       bool context,
-					       bool (*notify)(struct virtqueue *),
-					       void (*callback)(struct virtqueue *),
-					       const char *name,
-					       struct device *dma_dev);
 static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
 static void vring_free(struct virtqueue *_vq);
 
@@ -1138,6 +1129,66 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
 	return 0;
 }
 
+static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
+					       struct vring_virtqueue_split *vring_split,
+					       struct virtio_device *vdev,
+					       bool weak_barriers,
+					       bool context,
+					       bool (*notify)(struct virtqueue *),
+					       void (*callback)(struct virtqueue *),
+					       const char *name,
+					       struct device *dma_dev)
+{
+	struct vring_virtqueue *vq;
+	int err;
+
+	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
+	if (!vq)
+		return NULL;
+
+	vq->packed_ring = false;
+	vq->vq.callback = callback;
+	vq->vq.vdev = vdev;
+	vq->vq.name = name;
+	vq->vq.index = index;
+	vq->vq.reset = false;
+	vq->we_own_ring = false;
+	vq->notify = notify;
+	vq->weak_barriers = weak_barriers;
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+	vq->broken = true;
+#else
+	vq->broken = false;
+#endif
+	vq->dma_dev = dma_dev;
+	vq->use_dma_api = vring_use_dma_api(vdev);
+	vq->premapped = false;
+	vq->do_unmap = vq->use_dma_api;
+
+	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
+		!context;
+	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+
+	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+		vq->weak_barriers = false;
+
+	err = vring_alloc_state_extra_split(vring_split);
+	if (err) {
+		kfree(vq);
+		return NULL;
+	}
+
+	virtqueue_vring_init_split(vring_split, vq);
+
+	virtqueue_init(vq, vring_split->vring.num);
+	virtqueue_vring_attach_split(vq, vring_split);
+
+	spin_lock(&vdev->vqs_list_lock);
+	list_add_tail(&vq->vq.list, &vdev->vqs);
+	spin_unlock(&vdev->vqs_list_lock);
+	return &vq->vq;
+}
+
 static struct virtqueue *vring_create_virtqueue_split(
 	unsigned int index,
 	unsigned int num,
@@ -1160,7 +1211,7 @@ static struct virtqueue *vring_create_virtqueue_split(
 	if (err)
 		return NULL;
 
-	vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+	vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
 				   context, notify, callback, name, dma_dev);
 	if (!vq) {
 		vring_free_split(&vring_split, vdev, dma_dev);
@@ -2050,36 +2101,29 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
 	virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
 }
 
-static struct virtqueue *vring_create_virtqueue_packed(
-	unsigned int index,
-	unsigned int num,
-	unsigned int vring_align,
-	struct virtio_device *vdev,
-	bool weak_barriers,
-	bool may_reduce_num,
-	bool context,
-	bool (*notify)(struct virtqueue *),
-	void (*callback)(struct virtqueue *),
-	const char *name,
-	struct device *dma_dev)
+static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
+					       struct vring_virtqueue_packed *vring_packed,
+					       struct virtio_device *vdev,
+					       bool weak_barriers,
+					       bool context,
+					       bool (*notify)(struct virtqueue *),
+					       void (*callback)(struct virtqueue *),
+					       const char *name,
+					       struct device *dma_dev)
 {
-	struct vring_virtqueue_packed vring_packed = {};
 	struct vring_virtqueue *vq;
 	int err;
 
-	if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
-		goto err_ring;
-
 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
 	if (!vq)
-		goto err_vq;
+		return NULL;
 
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.name = name;
 	vq->vq.index = index;
 	vq->vq.reset = false;
-	vq->we_own_ring = true;
+	vq->we_own_ring = false;
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
@@ -2100,26 +2144,52 @@ static struct virtqueue *vring_create_virtqueue_packed(
 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
 		vq->weak_barriers = false;
 
-	err = vring_alloc_state_extra_packed(&vring_packed);
-	if (err)
-		goto err_state_extra;
+	err = vring_alloc_state_extra_packed(vring_packed);
+	if (err) {
+		kfree(vq);
+		return NULL;
+	}
 
-	virtqueue_vring_init_packed(&vring_packed, !!callback);
+	virtqueue_vring_init_packed(vring_packed, !!callback);
 
-	virtqueue_init(vq, num);
-	virtqueue_vring_attach_packed(vq, &vring_packed);
+	virtqueue_init(vq, vring_packed->vring.num);
+	virtqueue_vring_attach_packed(vq, vring_packed);
 
 	spin_lock(&vdev->vqs_list_lock);
 	list_add_tail(&vq->vq.list, &vdev->vqs);
 	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;
+}
 
-err_state_extra:
-	kfree(vq);
-err_vq:
-	vring_free_packed(&vring_packed, vdev, dma_dev);
-err_ring:
-	return NULL;
+static struct virtqueue *vring_create_virtqueue_packed(
+	unsigned int index,
+	unsigned int num,
+	unsigned int vring_align,
+	struct virtio_device *vdev,
+	bool weak_barriers,
+	bool may_reduce_num,
+	bool context,
+	bool (*notify)(struct virtqueue *),
+	void (*callback)(struct virtqueue *),
+	const char *name,
+	struct device *dma_dev)
+{
+	struct vring_virtqueue_packed vring_packed = {};
+	struct virtqueue *vq;
+
+	if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
+		return NULL;
+
+	vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
+					  context, notify, callback, name, dma_dev);
+	if (!vq) {
+		vring_free_packed(&vring_packed, vdev, dma_dev);
+		return NULL;
+	}
+
+	to_vvq(vq)->we_own_ring = true;
+
+	return vq;
 }
 
 static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
@@ -2598,69 +2668,7 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
 }
 EXPORT_SYMBOL_GPL(vring_interrupt);
 
-/* Only available for split ring */
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
-					       struct vring_virtqueue_split *vring_split,
-					       struct virtio_device *vdev,
-					       bool weak_barriers,
-					       bool context,
-					       bool (*notify)(struct virtqueue *),
-					       void (*callback)(struct virtqueue *),
-					       const char *name,
-					       struct device *dma_dev)
-{
-	struct vring_virtqueue *vq;
-	int err;
-
-	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
-		return NULL;
-
-	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
-	if (!vq)
-		return NULL;
-
-	vq->packed_ring = false;
-	vq->vq.callback = callback;
-	vq->vq.vdev = vdev;
-	vq->vq.name = name;
-	vq->vq.index = index;
-	vq->vq.reset = false;
-	vq->we_own_ring = false;
-	vq->notify = notify;
-	vq->weak_barriers = weak_barriers;
-#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
-	vq->broken = true;
-#else
-	vq->broken = false;
-#endif
-	vq->dma_dev = dma_dev;
-	vq->use_dma_api = vring_use_dma_api(vdev);
-	vq->premapped = false;
-	vq->do_unmap = vq->use_dma_api;
 
-	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
-		!context;
-	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
-
-	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
-		vq->weak_barriers = false;
-
-	err = vring_alloc_state_extra_split(vring_split);
-	if (err) {
-		kfree(vq);
-		return NULL;
-	}
-
-	virtqueue_vring_init_split(vring_split, vq);
-
-	virtqueue_init(vq, vring_split->vring.num);
-	virtqueue_vring_attach_split(vq, vring_split);
-
-	spin_lock(&vdev->vqs_list_lock);
-	list_add_tail(&vq->vq.list, &vdev->vqs);
-	spin_unlock(&vdev->vqs_list_lock);
-	return &vq->vq;
-}
 
 struct virtqueue *vring_create_virtqueue(
 	unsigned int index,
@@ -2840,7 +2848,6 @@ int virtqueue_reset(struct virtqueue *_vq,
 }
 EXPORT_SYMBOL_GPL(virtqueue_reset);
 
-/* Only available for split ring */
 struct virtqueue *vring_new_virtqueue(unsigned int index,
 				      unsigned int num,
 				      unsigned int vring_align,
@@ -2854,11 +2861,19 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 {
 	struct vring_virtqueue_split vring_split = {};
 
-	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
-		return NULL;
+	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+		struct vring_virtqueue_packed vring_packed = {};
+
+		vring_packed.vring.num = num;
+		vring_packed.vring.desc = pages;
+		return __vring_new_virtqueue_packed(index, &vring_packed,
+						    vdev, weak_barriers,
+						    context, notify, callback,
+						    name, vdev->dev.parent);
+	}
 
 	vring_init(&vring_split.vring, num, pages, vring_align);
-	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+	return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
 				     context, notify, callback, name,
 				     vdev->dev.parent);
 }
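
A minimal sketch of a transport-side caller, to make the behavioral change in the last hunk concrete: before this patch, vring_new_virtqueue() returned NULL whenever the device negotiated VIRTIO_F_RING_PACKED; with it, the packed ring is built over the caller-provided pages via __vring_new_virtqueue_packed(). The names demo_notify, demo_callback, demo_setup_vq and the ring_mem buffer are illustrative, not part of the patch; the vring_new_virtqueue() signature is assumed to match the context lines above.

#include <linux/virtio.h>
#include <linux/virtio_ring.h>

static bool demo_notify(struct virtqueue *vq)
{
	/* Kick the device, e.g. by writing the queue index to a doorbell. */
	return true;
}

static void demo_callback(struct virtqueue *vq)
{
	/* Drain completions with virtqueue_get_buf() here. */
}

/*
 * ring_mem must be a contiguous, zeroed, vring_align-aligned buffer large
 * enough for 'num' entries. With this patch the call also succeeds when
 * the device negotiated VIRTIO_F_RING_PACKED, instead of returning NULL.
 */
static struct virtqueue *demo_setup_vq(struct virtio_device *vdev,
				       void *ring_mem, unsigned int num)
{
	return vring_new_virtqueue(0 /* index */, num, PAGE_SIZE, vdev,
				   true /* weak_barriers */,
				   false /* context */, ring_mem,
				   demo_notify, demo_callback, "demo-vq");
}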