@@ -244,8 +244,10 @@ static int bcm2712_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	u32 entry = MMMU_PTE_VALID | (pa >> MMU_PAGE_SHIFT);
 	u32 align = (u32)(iova | pa | bytes);
 	unsigned int p;
+	unsigned long flags;
 
-	/* Reject if at least the first page is not within our aperture */
+	/* Reject if not entirely within our aperture (should never happen) */
+	bytes *= count;
 	if (iova < mmu->dma_iova_offset + APERTURE_BASE ||
 	    iova + bytes > mmu->dma_iova_offset + APERTURE_TOP) {
 		dev_warn(mmu->dev, "%s: iova=0x%lx pa=0x%llx bytes=0x%lx OUT OF RANGE\n",
@@ -267,14 +269,14 @@ static int bcm2712_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		entry |= MMMU_PTE_WRITEABLE;
 
 	/* Ensure tables are cache-coherent with CPU */
+	spin_lock_irqsave(&mmu->hw_lock, flags);
 	if (!mmu->dirty) {
 		dma_sync_sgtable_for_cpu(mmu->dev, mmu->sgt, DMA_TO_DEVICE);
 		mmu->dirty = true;
 	}
 
-	/* Make iova relative to table base; amalgamate count pages */
+	/* Make iova relative to table base */
 	iova -= (mmu->dma_iova_offset + APERTURE_BASE);
-	bytes = min(APERTURE_SIZE - iova, count * bytes);
 
 	/* Iterate over table by smallest native IOMMU page size */
 	for (p = iova >> MMU_PAGE_SHIFT;
@@ -283,6 +285,7 @@ static int bcm2712_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		mmu->tables[p] = entry++;
 	}
 
+	spin_unlock_irqrestore(&mmu->hw_lock, flags);
 	*mapped = bytes;
 
 	return 0;
@@ -293,31 +296,27 @@ static size_t bcm2712_iommu_unmap(struct iommu_domain *domain, unsigned long iov
 				  struct iommu_iotlb_gather *gather)
 {
 	struct bcm2712_iommu *mmu = domain_to_mmu(domain);
+	unsigned long flags;
 	unsigned int p;
 
+	/* Reject if not entirely within our aperture (should never happen) */
+	bytes *= count;
 	if (iova < mmu->dma_iova_offset + APERTURE_BASE ||
 	    iova + bytes > mmu->dma_iova_offset + APERTURE_TOP)
 		return 0;
 
 	/* Record just the lower and upper bounds in "gather" */
-	if (gather) {
-		bool empty = (gather->end <= gather->start);
-
-		if (empty || gather->start < iova)
-			gather->start = iova;
-		if (empty || gather->end < iova + bytes)
-			gather->end = iova + bytes;
-	}
+	spin_lock_irqsave(&mmu->hw_lock, flags);
+	iommu_iotlb_gather_add_range(gather, iova, bytes);
 
 	/* Ensure tables are cache-coherent with CPU */
 	if (!mmu->dirty) {
 		dma_sync_sgtable_for_cpu(mmu->dev, mmu->sgt, DMA_TO_DEVICE);
 		mmu->dirty = true;
 	}
 
-	/* Make iova relative to table base; amalgamate count pages */
+	/* Make iova relative to table base */
 	iova -= (mmu->dma_iova_offset + APERTURE_BASE);
-	bytes = min(APERTURE_SIZE - iova, count * bytes);
 
 	/* Clear table entries, this marks the addresses as illegal */
 	for (p = iova >> MMU_PAGE_SHIFT;
@@ -327,20 +326,22 @@ static size_t bcm2712_iommu_unmap(struct iommu_domain *domain, unsigned long iov
 		mmu->tables[p] = 0;
 	}
 
+	spin_unlock_irqrestore(&mmu->hw_lock, flags);
 	return bytes;
 }
 
 static int bcm2712_iommu_sync_range(struct iommu_domain *domain,
 				    unsigned long iova, size_t size)
 {
 	struct bcm2712_iommu *mmu = domain_to_mmu(domain);
-	unsigned long iova_end;
+	unsigned long flags, iova_end;
 	unsigned int i, p4;
 
 	if (!mmu || !mmu->dirty)
 		return 0;
 
 	/* Ensure tables are cleaned from CPU cache or write-buffer */
+	spin_lock_irqsave(&mmu->hw_lock, flags);
 	dma_sync_sgtable_for_device(mmu->dev, mmu->sgt, DMA_TO_DEVICE);
 	mmu->dirty = false;
 
@@ -384,19 +385,26 @@ static int bcm2712_iommu_sync_range(struct iommu_domain *domain,
 		}
 	}
 
+	spin_unlock_irqrestore(&mmu->hw_lock, flags);
 	return 0;
 }
 
 static void bcm2712_iommu_sync(struct iommu_domain *domain,
 			       struct iommu_iotlb_gather *gather)
 {
-	bcm2712_iommu_sync_range(domain, gather->start,
-				 gather->end - gather->start);
+	if (gather->end)
+		bcm2712_iommu_sync_range(domain, gather->start,
+					 gather->end - gather->start + 1);
 }
 
 static void bcm2712_iommu_sync_all(struct iommu_domain *domain)
 {
-	bcm2712_iommu_sync_range(domain, APERTURE_BASE, APERTURE_SIZE);
+	struct bcm2712_iommu *mmu = domain_to_mmu(domain);
+
+	if (mmu)
+		bcm2712_iommu_sync_range(domain,
+					 mmu->dma_iova_offset + APERTURE_BASE,
+					 APERTURE_SIZE);
 }
 
 static phys_addr_t bcm2712_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
@@ -406,9 +414,17 @@ static phys_addr_t bcm2712_iommu_iova_to_phys(struct iommu_domain *domain, dma_a
 
 	iova -= mmu->dma_iova_offset;
 	if (iova >= APERTURE_BASE && iova < APERTURE_TOP) {
+		unsigned long flags;
+		phys_addr_t addr;
+
+		spin_lock_irqsave(&mmu->hw_lock, flags);
 		p = (iova - APERTURE_BASE) >> MMU_PAGE_SHIFT;
 		p = mmu->tables[p] & 0x0FFFFFFFu;
-		return (((phys_addr_t)p) << MMU_PAGE_SHIFT) + (iova & (MMU_PAGE_SIZE - 1u));
+		addr = (((phys_addr_t)p) << MMU_PAGE_SHIFT) +
+		       (iova & (MMU_PAGE_SIZE - 1u));
+
+		spin_unlock_irqrestore(&mmu->hw_lock, flags);
+		return addr;
 	} else if (iova < APERTURE_BASE) {
 		return (phys_addr_t)iova;
 	} else {
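For context on the gather changes above: the open-coded bounds tracking removed from bcm2712_iommu_unmap() is replaced by the generic iommu_iotlb_gather_add_range() helper, which (in include/linux/iommu.h) records gather->end as an inclusive last address rather than an exclusive limit. That is why bcm2712_iommu_sync() now adds 1 to the flush length and treats gather->end == 0 as "nothing gathered". The snippet below is a simplified, self-contained sketch of that inclusive-range bookkeeping, paraphrased for illustration; it is not the kernel header, and the struct here only stands in for the relevant fields of struct iommu_iotlb_gather.

```c
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Illustration-only stand-in for the gather bookkeeping fields. */
struct gather_range {
	unsigned long start;	/* lowest iova seen; ULONG_MAX when empty */
	unsigned long end;	/* highest iova seen (inclusive); 0 when empty */
};

/* Paraphrase of the add-range behaviour: widen [start, end] to cover the
 * unmapped block, where "end" is the last byte, i.e. iova + size - 1. */
static void gather_add_range(struct gather_range *g, unsigned long iova, size_t size)
{
	unsigned long last = iova + size - 1;

	if (g->start > iova)
		g->start = iova;
	if (g->end < last)
		g->end = last;
}

int main(void)
{
	struct gather_range g = { .start = ULONG_MAX, .end = 0 };

	gather_add_range(&g, 0x1000, 0x2000);	/* unmap 8 KiB at 0x1000 */
	gather_add_range(&g, 0x8000, 0x1000);	/* unmap 4 KiB at 0x8000 */

	/* Mirrors bcm2712_iommu_sync(): flush end - start + 1 bytes. */
	if (g.end)
		printf("sync range: start=0x%lx len=0x%lx\n",
		       g.start, g.end - g.start + 1);
	return 0;
}
```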