
Commit d9a688a

jbeulich authored and jgross1 committed
swiotlb-xen: maintain slab count properly
Generic swiotlb code makes sure to keep the slab count a multiple of the number of slabs per segment. Yet even without checking whether any such assumption is made elsewhere, it is easy to see that xen_swiotlb_fixup() might alter unrelated memory when calling xen_create_contiguous_region() for the last segment, when that's not a full one - the function acts on full order-N regions, not individual pages.

Align the slab count suitably when halving it for a retry. Add a build time check and a runtime one. Replace the no longer useful local variable "slabs" by an "order" one calculated just once, outside of the loop. Re-use "order" for calculating "dma_bits", and change the type of the latter as well as the one of "i" while touching this anyway.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/dc054cb0-bec4-4db0-fc06-c9fc957b6e66@suse.com
Signed-off-by: Juergen Gross <jgross@suse.com>
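To see the effect of aligning the halved slab count on a retry, here is a minimal user-space sketch (not the kernel code itself) that replicates the kernel's ALIGN() round-up macro and assumes the usual IO_TLB_SEGSIZE of 128 slabs per segment: plain halving of an aligned count can leave a partial segment, while the aligned value rounds back up to a whole number of segments.

/*
 * Minimal user-space sketch, assuming IO_TLB_SEGSIZE = 128 (a power of two)
 * and replicating the kernel's ALIGN() round-up macro; not the actual
 * swiotlb-xen retry path.
 */
#include <stdio.h>

#define IO_TLB_SEGSIZE	128UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long nslabs = 2176;			/* 17 full segments */
	unsigned long halved = nslabs >> 1;		/* 1088: 8.5 segments */
	unsigned long fixed  = ALIGN(halved, IO_TLB_SEGSIZE);	/* 1152: 9 segments */

	printf("halved:  %lu slabs, remainder %lu\n",
	       halved, halved % IO_TLB_SEGSIZE);
	printf("aligned: %lu slabs, remainder %lu\n",
	       fixed, fixed % IO_TLB_SEGSIZE);
	return 0;
}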
1 parent 4c092c5 commit d9a688a

File tree

1 file changed: +9 −10 lines


drivers/xen/swiotlb-xen.c

Lines changed: 9 additions & 10 deletions
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 
 static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
-	int i, rc;
-	int dma_bits;
+	int rc;
+	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+	unsigned int i, dma_bits = order + PAGE_SHIFT;
 	dma_addr_t dma_handle;
 	phys_addr_t p = virt_to_phys(buf);
 
-	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+	BUG_ON(nslabs % IO_TLB_SEGSIZE);
 
 	i = 0;
 	do {
-		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
 		do {
 			rc = xen_create_contiguous_region(
-				p + (i << IO_TLB_SHIFT),
-				get_order(slabs << IO_TLB_SHIFT),
+				p + (i << IO_TLB_SHIFT), order,
 				dma_bits, &dma_handle);
 		} while (rc && dma_bits++ < MAX_DMA_BITS);
 		if (rc)
 			return rc;
 
-		i += slabs;
+		i += IO_TLB_SEGSIZE;
 	} while (i < nslabs);
 	return 0;
 }
@@ -210,7 +209,7 @@ int __ref xen_swiotlb_init(void)
 error:
 	if (repeat--) {
 		/* Min is 2MB */
-		nslabs = max(1024UL, (nslabs >> 1));
+		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
 		bytes = nslabs << IO_TLB_SHIFT;
 		pr_info("Lowering to %luMB\n", bytes >> 20);
 		goto retry;
@@ -245,7 +244,7 @@ void __init xen_swiotlb_init_early(void)
 	memblock_free(__pa(start), PAGE_ALIGN(bytes));
 	if (repeat--) {
 		/* Min is 2MB */
-		nslabs = max(1024UL, (nslabs >> 1));
+		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
 		bytes = nslabs << IO_TLB_SHIFT;
 		pr_info("Lowering to %luMB\n", bytes >> 20);
 		goto retry;
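The new BUILD_BUG_ON relies on the standard power-of-two test: x & (x - 1) is zero only when x is a power of two, which is also what ALIGN() assumes. The sketch below, a user-space approximation assuming IO_TLB_SHIFT = 11 (2 KB slabs), PAGE_SHIFT = 12, and a simplified stand-in for the kernel's get_order(), shows how the per-segment order and the initial dma_bits used in the hunk above are derived.

/*
 * Sketch of the order computation introduced by this commit, using assumed
 * values IO_TLB_SHIFT = 11 and IO_TLB_SEGSIZE = 128, and a simplified
 * stand-in for the kernel's get_order() on a 4 KB-page system.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define IO_TLB_SHIFT	11
#define IO_TLB_SEGSIZE	128

/* Simplified get_order(): smallest order such that 2^order pages >= size. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);

	/* 128 slabs * 2 KB = 256 KB = 64 pages -> order 6; dma_bits = 6 + 12 = 18 */
	printf("segment order = %d, initial dma_bits = %d\n",
	       order, order + PAGE_SHIFT);
	return 0;
}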
