Commit 2400617

royger authored and jgross1 committed
xen/blkfront: force data bouncing when backend is untrusted
Split the current bounce buffering logic used with persistent grants into its own option, and allow enabling it independently of persistent grants. This allows reusing the same code paths to perform the bounce buffering required to avoid leaking contiguous data in shared pages that are not part of the request fragments.

Reporting whether the backend is to be trusted can be done using a module parameter, or from the xenstore frontend path as set by the toolstack when adding the device.

This is CVE-2022-33742, part of XSA-403.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
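Editorial illustration only (not part of the commit): a minimal user-space C sketch of how the new bounce decision combines the knobs introduced by the hunks below. The helper needs_bounce() and its plain bool inputs are hypothetical stand-ins for the xen_blkif_trusted module parameter, the per-device xenstore "trusted" node read in talk_to_blkback(), and the feature-persistent flag negotiated in blkfront_gather_backend_features().

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the xen_blkif_trusted module parameter (default: trusted). */
static bool xen_blkif_trusted = true;

/*
 * Bounce whenever either knob marks the backend as untrusted; otherwise
 * keep the pre-existing behaviour of bouncing only when persistent grants
 * are in use.
 */
static bool needs_bounce(bool xenstore_trusted, bool feature_persistent)
{
        if (!xen_blkif_trusted || !xenstore_trusted)
                return true;
        return feature_persistent;
}

int main(void)
{
        printf("%d\n", needs_bounce(true, false));  /* trusted, no persistent grants: 0 */
        printf("%d\n", needs_bounce(false, false)); /* toolstack marked device untrusted: 1 */
        printf("%d\n", needs_bounce(true, true));   /* persistent grants negotiated: 1 */
        return 0;
}

This models the design choice described in the commit message: distrust forces bouncing regardless of persistent grants, while persistent grants continue to imply bouncing, so both cases reuse the existing copy paths instead of adding a new one.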
1 parent 4491001 · commit 2400617

File tree

1 file changed: +34 -15 lines changed

drivers/block/xen-blkfront.c

@@ -152,6 +152,10 @@ static unsigned int xen_blkif_max_ring_order;
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE(info) \
         __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
@@ -210,6 +214,7 @@ struct blkfront_info
         unsigned int feature_discard:1;
         unsigned int feature_secdiscard:1;
         unsigned int feature_persistent:1;
+        unsigned int bounce:1;
         unsigned int discard_granularity;
         unsigned int discard_alignment;
         /* Number of 4KB segments handled */
@@ -310,7 +315,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
                 if (!gnt_list_entry)
                         goto out_of_memory;
 
-                if (info->feature_persistent) {
+                if (info->bounce) {
                         granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
                         if (!granted_page) {
                                 kfree(gnt_list_entry);
@@ -330,7 +335,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
         list_for_each_entry_safe(gnt_list_entry, n,
                                  &rinfo->grants, node) {
                 list_del(&gnt_list_entry->node);
-                if (info->feature_persistent)
+                if (info->bounce)
                         __free_page(gnt_list_entry->page);
                 kfree(gnt_list_entry);
                 i--;
@@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
         /* Assign a gref to this page */
         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
         BUG_ON(gnt_list_entry->gref == -ENOSPC);
-        if (info->feature_persistent)
+        if (info->bounce)
                 grant_foreign_access(gnt_list_entry, info);
         else {
                 /* Grant access to the GFN passed by the caller */
@@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
         /* Assign a gref to this page */
         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
         BUG_ON(gnt_list_entry->gref == -ENOSPC);
-        if (!info->feature_persistent) {
+        if (!info->bounce) {
                 struct page *indirect_page;
 
                 /* Fetch a pre-allocated page to use for indirect grefs */
@@ -703,7 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                 .grant_idx = 0,
                 .segments = NULL,
                 .rinfo = rinfo,
-                .need_copy = rq_data_dir(req) && info->feature_persistent,
+                .need_copy = rq_data_dir(req) && info->bounce,
         };
 
         /*
@@ -981,11 +986,12 @@ static void xlvbd_flush(struct blkfront_info *info)
 {
         blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
                               info->feature_fua ? true : false);
-        pr_info("blkfront: %s: %s %s %s %s %s\n",
+        pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
                 info->gd->disk_name, flush_info(info),
                 "persistent grants:", info->feature_persistent ?
                 "enabled;" : "disabled;", "indirect descriptors:",
-                info->max_indirect_segments ? "enabled;" : "disabled;");
+                info->max_indirect_segments ? "enabled;" : "disabled;",
+                "bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1207,7 +1213,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
         if (!list_empty(&rinfo->indirect_pages)) {
                 struct page *indirect_page, *n;
 
-                BUG_ON(info->feature_persistent);
+                BUG_ON(info->bounce);
                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
                         list_del(&indirect_page->lru);
                         __free_page(indirect_page);
@@ -1224,7 +1230,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
                                                   NULL);
                         rinfo->persistent_gnts_c--;
                 }
-                if (info->feature_persistent)
+                if (info->bounce)
                         __free_page(persistent_gnt->page);
                 kfree(persistent_gnt);
         }
@@ -1245,7 +1251,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
                 for (j = 0; j < segs; j++) {
                         persistent_gnt = rinfo->shadow[i].grants_used[j];
                         gnttab_end_foreign_access(persistent_gnt->gref, NULL);
-                        if (info->feature_persistent)
+                        if (info->bounce)
                                 __free_page(persistent_gnt->page);
                         kfree(persistent_gnt);
                 }
@@ -1428,7 +1434,7 @@ static int blkif_completion(unsigned long *id,
         data.s = s;
         num_sg = s->num_sg;
 
-        if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+        if (bret->operation == BLKIF_OP_READ && info->bounce) {
                 for_each_sg(s->sg, sg, num_sg, i) {
                         BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 
@@ -1487,7 +1493,7 @@ static int blkif_completion(unsigned long *id,
                          * Add the used indirect page back to the list of
                          * available pages for indirect grefs.
                          */
-                        if (!info->feature_persistent) {
+                        if (!info->bounce) {
                                 indirect_page = s->indirect_grants[i]->page;
                                 list_add(&indirect_page->lru, &rinfo->indirect_pages);
                         }
@@ -1764,6 +1770,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
         if (!info)
                 return -ENODEV;
 
+        /* Check if backend is trusted. */
+        info->bounce = !xen_blkif_trusted ||
+                       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
         max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
                                               "max-ring-page-order", 0);
         ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -2173,10 +2183,10 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
         if (err)
                 goto out_of_memory;
 
-        if (!info->feature_persistent && info->max_indirect_segments) {
+        if (!info->bounce && info->max_indirect_segments) {
                 /*
-                 * We are using indirect descriptors but not persistent
-                 * grants, we need to allocate a set of pages that can be
+                 * We are using indirect descriptors but don't have a bounce
+                 * buffer, we need to allocate a set of pages that can be
                  * used for mapping indirect grefs
                  */
                 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
@@ -2277,6 +2287,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
         info->feature_persistent =
                 !!xenbus_read_unsigned(info->xbdev->otherend,
                                        "feature-persistent", 0);
+        if (info->feature_persistent)
+                info->bounce = true;
 
         indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
                                         "feature-max-indirect-segments", 0);
@@ -2548,6 +2560,13 @@ static void blkfront_delay_work(struct work_struct *work)
         struct blkfront_info *info;
         bool need_schedule_work = false;
 
+        /*
+         * Note that when using bounce buffers but not persistent grants
+         * there's no need to run blkfront_delay_work because grants are
+         * revoked in blkif_completion or else an error is reported and the
+         * connection is closed.
+         */
+
         mutex_lock(&blkfront_mutex);
 
         list_for_each_entry(info, &info_list, info_list) {
