Skip to content

Commit fd50651

Browse files
jgunthorpe authored and willdeacon committed
iommu/io-pgtable-arm-v7s: Remove split on unmap behavior
A minority of page table implementations (arm_lpae, armv7) are unique in how they handle partial unmap of large IOPTEs. Other implementations will unmap the large IOPTE and return its length. For example, if a 2M IOPTE is present and the first 4K is requested to be unmapped, then unmap will remove the whole 2M and report 2M as the result. armv7 instead will break up contiguous entries and replace an entry with a whole table so it can unmap the requested 4K. This seems copied from the arm_lpae implementation, which was analyzed here: https://lore.kernel.org/all/20241024134411.GA6956@nvidia.com/ Bring consistency to the implementations and remove this unused functionality. There are no uses outside iommu; this affects the ARM_V7S drivers msm_iommu, mtk_iommu, and arm-smmu. Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/2-v3-b3a5b5937f56+7bb-arm_no_split_jgg@nvidia.com [will: Remove unused 'loopnr' variable] Signed-off-by: Will Deacon <will@kernel.org>
1 parent 33729a5 commit fd50651

File tree

1 file changed

+6
-143
lines changed

1 file changed

+6
-143
lines changed

drivers/iommu/io-pgtable-arm-v7s.c

Lines changed: 6 additions & 143 deletions
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,6 @@ struct arm_v7s_io_pgtable {
166166

167167
arm_v7s_iopte *pgd;
168168
struct kmem_cache *l2_tables;
169-
spinlock_t split_lock;
170169
};
171170

172171
static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);
@@ -363,25 +362,6 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
363362
return pte;
364363
}
365364

366-
static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
367-
{
368-
int prot = IOMMU_READ;
369-
arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
370-
371-
if (!(attr & ARM_V7S_PTE_AP_RDONLY))
372-
prot |= IOMMU_WRITE;
373-
if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
374-
prot |= IOMMU_PRIV;
375-
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
376-
prot |= IOMMU_MMIO;
377-
else if (pte & ARM_V7S_ATTR_C)
378-
prot |= IOMMU_CACHE;
379-
if (pte & ARM_V7S_ATTR_XN(lvl))
380-
prot |= IOMMU_NOEXEC;
381-
382-
return prot;
383-
}
384-
385365
static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
386366
{
387367
if (lvl == 1) {
@@ -398,23 +378,6 @@ static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
398378
return pte;
399379
}
400380

401-
static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
402-
{
403-
if (lvl == 1) {
404-
pte &= ~ARM_V7S_CONT_SECTION;
405-
} else if (lvl == 2) {
406-
arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
407-
arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
408-
ARM_V7S_CONT_PAGE_TEX_SHIFT);
409-
410-
pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
411-
pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
412-
(tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
413-
ARM_V7S_PTE_TYPE_PAGE;
414-
}
415-
return pte;
416-
}
417-
418381
static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
419382
{
420383
if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
@@ -591,77 +554,6 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop)
591554
kfree(data);
592555
}
593556

594-
static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
595-
unsigned long iova, int idx, int lvl,
596-
arm_v7s_iopte *ptep)
597-
{
598-
struct io_pgtable *iop = &data->iop;
599-
arm_v7s_iopte pte;
600-
size_t size = ARM_V7S_BLOCK_SIZE(lvl);
601-
int i;
602-
603-
/* Check that we didn't lose a race to get the lock */
604-
pte = *ptep;
605-
if (!arm_v7s_pte_is_cont(pte, lvl))
606-
return pte;
607-
608-
ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
609-
pte = arm_v7s_cont_to_pte(pte, lvl);
610-
for (i = 0; i < ARM_V7S_CONT_PAGES; i++)
611-
ptep[i] = pte + i * size;
612-
613-
__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
614-
615-
size *= ARM_V7S_CONT_PAGES;
616-
io_pgtable_tlb_flush_walk(iop, iova, size, size);
617-
return pte;
618-
}
619-
620-
static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
621-
struct iommu_iotlb_gather *gather,
622-
unsigned long iova, size_t size,
623-
arm_v7s_iopte blk_pte,
624-
arm_v7s_iopte *ptep)
625-
{
626-
struct io_pgtable_cfg *cfg = &data->iop.cfg;
627-
arm_v7s_iopte pte, *tablep;
628-
int i, unmap_idx, num_entries, num_ptes;
629-
630-
tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data);
631-
if (!tablep)
632-
return 0; /* Bytes unmapped */
633-
634-
num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg);
635-
num_entries = size >> ARM_V7S_LVL_SHIFT(2);
636-
unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg);
637-
638-
pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
639-
if (num_entries > 1)
640-
pte = arm_v7s_pte_to_cont(pte, 2);
641-
642-
for (i = 0; i < num_ptes; i += num_entries, pte += size) {
643-
/* Unmap! */
644-
if (i == unmap_idx)
645-
continue;
646-
647-
__arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg);
648-
}
649-
650-
pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg);
651-
if (pte != blk_pte) {
652-
__arm_v7s_free_table(tablep, 2, data);
653-
654-
if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
655-
return 0;
656-
657-
tablep = iopte_deref(pte, 1, data);
658-
return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
659-
}
660-
661-
io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
662-
return size;
663-
}
664-
665557
static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
666558
struct iommu_iotlb_gather *gather,
667559
unsigned long iova, size_t size, int lvl,
@@ -694,11 +586,8 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
694586
* case in a lock for the sake of correctness and be done with it.
695587
*/
696588
if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
697-
unsigned long flags;
698-
699-
spin_lock_irqsave(&data->split_lock, flags);
700-
pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
701-
spin_unlock_irqrestore(&data->split_lock, flags);
589+
WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
590+
return 0;
702591
}
703592

704593
/* If the size matches this level, we're in the right place */
@@ -721,12 +610,8 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
721610
}
722611
return size;
723612
} else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
724-
/*
725-
* Insert a table at the next level to map the old region,
726-
* minus the part we want to unmap
727-
*/
728-
return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
729-
ptep);
613+
WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
614+
return 0;
730615
}
731616

732617
/* Keep on walkin' */
@@ -811,8 +696,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
811696
if (!data)
812697
return NULL;
813698

814-
spin_lock_init(&data->split_lock);
815-
816699
/*
817700
* ARM_MTK_TTBR_EXT extend the translation table base support larger
818701
* memory address.
@@ -936,8 +819,8 @@ static int __init arm_v7s_do_selftests(void)
936819
.quirks = IO_PGTABLE_QUIRK_ARM_NS,
937820
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
938821
};
939-
unsigned int iova, size, iova_start;
940-
unsigned int i, loopnr = 0;
822+
unsigned int iova, size;
823+
unsigned int i;
941824
size_t mapped;
942825

943826
selftest_running = true;
@@ -985,26 +868,6 @@ static int __init arm_v7s_do_selftests(void)
985868
return __FAIL(ops);
986869

987870
iova += SZ_16M;
988-
loopnr++;
989-
}
990-
991-
/* Partial unmap */
992-
i = 1;
993-
size = 1UL << __ffs(cfg.pgsize_bitmap);
994-
while (i < loopnr) {
995-
iova_start = i * SZ_16M;
996-
if (ops->unmap_pages(ops, iova_start + size, size, 1, NULL) != size)
997-
return __FAIL(ops);
998-
999-
/* Remap of partial unmap */
1000-
if (ops->map_pages(ops, iova_start + size, size, size, 1,
1001-
IOMMU_READ, GFP_KERNEL, &mapped))
1002-
return __FAIL(ops);
1003-
1004-
if (ops->iova_to_phys(ops, iova_start + size + 42)
1005-
!= (size + 42))
1006-
return __FAIL(ops);
1007-
i++;
1008871
}
1009872

1010873
/* Full unmap */

0 commit comments

Comments
 (0)